net: at91_ether: add pinctrl support
[deliverable/linux.git] / drivers / net / ethernet / broadcom / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
9e056c03 7 * Copyright (C) 2005-2012 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
6867c843 21#include <linux/stringify.h>
1da177e4
LT
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
14c85021 27#include <linux/in.h>
1da177e4 28#include <linux/init.h>
a6b7a407 29#include <linux/interrupt.h>
1da177e4
LT
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/ethtool.h>
3110f5f5 36#include <linux/mdio.h>
1da177e4 37#include <linux/mii.h>
158d7abd 38#include <linux/phy.h>
a9daf367 39#include <linux/brcmphy.h>
1da177e4
LT
40#include <linux/if_vlan.h>
41#include <linux/ip.h>
42#include <linux/tcp.h>
43#include <linux/workqueue.h>
61487480 44#include <linux/prefetch.h>
f9a5f7d3 45#include <linux/dma-mapping.h>
077f849d 46#include <linux/firmware.h>
aed93e0b
MC
47#include <linux/hwmon.h>
48#include <linux/hwmon-sysfs.h>
1da177e4
LT
49
50#include <net/checksum.h>
c9bdd4b5 51#include <net/ip.h>
1da177e4 52
27fd9de8 53#include <linux/io.h>
1da177e4 54#include <asm/byteorder.h>
27fd9de8 55#include <linux/uaccess.h>
1da177e4 56
49b6e95f 57#ifdef CONFIG_SPARC
1da177e4 58#include <asm/idprom.h>
49b6e95f 59#include <asm/prom.h>
1da177e4
LT
60#endif
61
63532394
MC
62#define BAR_0 0
63#define BAR_2 2
64
1da177e4
LT
65#include "tg3.h"
66
63c3a66f
JP
67/* Functions & macros to verify TG3_FLAGS types */
68
69static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70{
71 return test_bit(flag, bits);
72}
73
74static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
75{
76 set_bit(flag, bits);
77}
78
79static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80{
81 clear_bit(flag, bits);
82}
83
84#define tg3_flag(tp, flag) \
85 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
86#define tg3_flag_set(tp, flag) \
87 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
88#define tg3_flag_clear(tp, flag) \
89 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
90
1da177e4 91#define DRV_MODULE_NAME "tg3"
6867c843 92#define TG3_MAJ_NUM 3
cf6d6ea6 93#define TG3_MIN_NUM 125
6867c843
MC
94#define DRV_MODULE_VERSION \
95 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
cf6d6ea6 96#define DRV_MODULE_RELDATE "September 26, 2012"
1da177e4 97
fd6d3f0e
MC
98#define RESET_KIND_SHUTDOWN 0
99#define RESET_KIND_INIT 1
100#define RESET_KIND_SUSPEND 2
101
1da177e4
LT
102#define TG3_DEF_RX_MODE 0
103#define TG3_DEF_TX_MODE 0
104#define TG3_DEF_MSG_ENABLE \
105 (NETIF_MSG_DRV | \
106 NETIF_MSG_PROBE | \
107 NETIF_MSG_LINK | \
108 NETIF_MSG_TIMER | \
109 NETIF_MSG_IFDOWN | \
110 NETIF_MSG_IFUP | \
111 NETIF_MSG_RX_ERR | \
112 NETIF_MSG_TX_ERR)
113
520b2756
MC
114#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
115
1da177e4
LT
116/* length of time before we decide the hardware is borked,
117 * and dev->tx_timeout() should be called to fix the problem
118 */
63c3a66f 119
1da177e4
LT
120#define TG3_TX_TIMEOUT (5 * HZ)
121
122/* hardware minimum and maximum for a single frame's data payload */
123#define TG3_MIN_MTU 60
124#define TG3_MAX_MTU(tp) \
63c3a66f 125 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
126
127/* These numbers seem to be hard coded in the NIC firmware somehow.
128 * You can't change the ring sizes, but you can change where you place
129 * them in the NIC onboard memory.
130 */
7cb32cf2 131#define TG3_RX_STD_RING_SIZE(tp) \
63c3a66f 132 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
de9f5230 133 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
1da177e4 134#define TG3_DEF_RX_RING_PENDING 200
7cb32cf2 135#define TG3_RX_JMB_RING_SIZE(tp) \
63c3a66f 136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
de9f5230 137 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
1da177e4
LT
138#define TG3_DEF_RX_JUMBO_RING_PENDING 100
139
140/* Do not place this n-ring entries value into the tp struct itself,
141 * we really want to expose these constants to GCC so that modulo et
142 * al. operations are done with shifts and masks instead of with
143 * hw multiply/modulo instructions. Another solution would be to
144 * replace things like '% foo' with '& (foo - 1)'.
145 */
1da177e4
LT
146
147#define TG3_TX_RING_SIZE 512
148#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
149
2c49a44d
MC
150#define TG3_RX_STD_RING_BYTES(tp) \
151 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152#define TG3_RX_JMB_RING_BYTES(tp) \
153 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154#define TG3_RX_RCB_RING_BYTES(tp) \
7cb32cf2 155 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
1da177e4
LT
156#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
157 TG3_TX_RING_SIZE)
1da177e4
LT
158#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
159
287be12e
MC
160#define TG3_DMA_BYTE_ENAB 64
161
162#define TG3_RX_STD_DMA_SZ 1536
163#define TG3_RX_JMB_DMA_SZ 9046
164
165#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
166
167#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
1da177e4 169
2c49a44d
MC
170#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
2b2cdb65 172
2c49a44d
MC
173#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
2b2cdb65 175
d2757fc4
MC
176/* Due to a hardware bug, the 5701 can only DMA to memory addresses
177 * that are at least dword aligned when used in PCIX mode. The driver
178 * works around this bug by double copying the packet. This workaround
179 * is built into the normal double copy length check for efficiency.
180 *
181 * However, the double copy is only necessary on those architectures
182 * where unaligned memory accesses are inefficient. For those architectures
183 * where unaligned memory accesses incur little penalty, we can reintegrate
184 * the 5701 in the normal rx path. Doing so saves a device structure
185 * dereference by hardcoding the double copy threshold in place.
186 */
187#define TG3_RX_COPY_THRESHOLD 256
188#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
190#else
191 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
192#endif
193
81389f57
MC
194#if (NET_IP_ALIGN != 0)
195#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
196#else
9205fd9c 197#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
81389f57
MC
198#endif
199
1da177e4 200/* minimum number of free TX descriptors required to wake up TX process */
f3f3f27e 201#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
55086ad9 202#define TG3_TX_BD_DMA_MAX_2K 2048
a4cb428d 203#define TG3_TX_BD_DMA_MAX_4K 4096
1da177e4 204
ad829268
MC
205#define TG3_RAW_IP_ALIGN 2
206
c6cdf436 207#define TG3_FW_UPDATE_TIMEOUT_SEC 5
21f7638e 208#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
c6cdf436 209
077f849d
JSR
210#define FIRMWARE_TG3 "tigon/tg3.bin"
211#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
212#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
213
1da177e4 214static char version[] __devinitdata =
05dbe005 215 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
1da177e4
LT
216
217MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
218MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
219MODULE_LICENSE("GPL");
220MODULE_VERSION(DRV_MODULE_VERSION);
077f849d
JSR
221MODULE_FIRMWARE(FIRMWARE_TG3);
222MODULE_FIRMWARE(FIRMWARE_TG3TSO);
223MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
224
1da177e4
LT
225static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
226module_param(tg3_debug, int, 0);
227MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
228
a3aa1884 229static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
13185217
HK
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
13185217 252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217 254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
13185217
HK
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
676917d4 270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
13185217
HK
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
6c7af27c 282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
9936bcf6
MC
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
c88e668b
MC
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
2befdcea
MC
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
321d32a0
MC
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
5e7ccf20 292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
5001e2f6
MC
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
b0f75221
MC
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
302b500b 301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
ba1f3c76 302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
02eca3f5 303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
13185217
HK
304 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
305 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
306 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
307 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
308 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
309 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
310 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
1dcb14d9 311 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
13185217 312 {}
1da177e4
LT
313};
314
315MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
316
50da859d 317static const struct {
1da177e4 318 const char string[ETH_GSTRING_LEN];
48fa55a0 319} ethtool_stats_keys[] = {
1da177e4
LT
320 { "rx_octets" },
321 { "rx_fragments" },
322 { "rx_ucast_packets" },
323 { "rx_mcast_packets" },
324 { "rx_bcast_packets" },
325 { "rx_fcs_errors" },
326 { "rx_align_errors" },
327 { "rx_xon_pause_rcvd" },
328 { "rx_xoff_pause_rcvd" },
329 { "rx_mac_ctrl_rcvd" },
330 { "rx_xoff_entered" },
331 { "rx_frame_too_long_errors" },
332 { "rx_jabbers" },
333 { "rx_undersize_packets" },
334 { "rx_in_length_errors" },
335 { "rx_out_length_errors" },
336 { "rx_64_or_less_octet_packets" },
337 { "rx_65_to_127_octet_packets" },
338 { "rx_128_to_255_octet_packets" },
339 { "rx_256_to_511_octet_packets" },
340 { "rx_512_to_1023_octet_packets" },
341 { "rx_1024_to_1522_octet_packets" },
342 { "rx_1523_to_2047_octet_packets" },
343 { "rx_2048_to_4095_octet_packets" },
344 { "rx_4096_to_8191_octet_packets" },
345 { "rx_8192_to_9022_octet_packets" },
346
347 { "tx_octets" },
348 { "tx_collisions" },
349
350 { "tx_xon_sent" },
351 { "tx_xoff_sent" },
352 { "tx_flow_control" },
353 { "tx_mac_errors" },
354 { "tx_single_collisions" },
355 { "tx_mult_collisions" },
356 { "tx_deferred" },
357 { "tx_excessive_collisions" },
358 { "tx_late_collisions" },
359 { "tx_collide_2times" },
360 { "tx_collide_3times" },
361 { "tx_collide_4times" },
362 { "tx_collide_5times" },
363 { "tx_collide_6times" },
364 { "tx_collide_7times" },
365 { "tx_collide_8times" },
366 { "tx_collide_9times" },
367 { "tx_collide_10times" },
368 { "tx_collide_11times" },
369 { "tx_collide_12times" },
370 { "tx_collide_13times" },
371 { "tx_collide_14times" },
372 { "tx_collide_15times" },
373 { "tx_ucast_packets" },
374 { "tx_mcast_packets" },
375 { "tx_bcast_packets" },
376 { "tx_carrier_sense_errors" },
377 { "tx_discards" },
378 { "tx_errors" },
379
380 { "dma_writeq_full" },
381 { "dma_write_prioq_full" },
382 { "rxbds_empty" },
383 { "rx_discards" },
384 { "rx_errors" },
385 { "rx_threshold_hit" },
386
387 { "dma_readq_full" },
388 { "dma_read_prioq_full" },
389 { "tx_comp_queue_full" },
390
391 { "ring_set_send_prod_index" },
392 { "ring_status_update" },
393 { "nic_irqs" },
394 { "nic_avoided_irqs" },
4452d099
MC
395 { "nic_tx_threshold_hit" },
396
397 { "mbuf_lwm_thresh_hit" },
1da177e4
LT
398};
399
48fa55a0
MC
400#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
401
402
50da859d 403static const struct {
4cafd3f5 404 const char string[ETH_GSTRING_LEN];
48fa55a0 405} ethtool_test_keys[] = {
28a45957
MC
406 { "nvram test (online) " },
407 { "link test (online) " },
408 { "register test (offline)" },
409 { "memory test (offline)" },
410 { "mac loopback test (offline)" },
411 { "phy loopback test (offline)" },
941ec90f 412 { "ext loopback test (offline)" },
28a45957 413 { "interrupt test (offline)" },
4cafd3f5
MC
414};
415
48fa55a0
MC
416#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
417
418
b401e9e2
MC
419static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
420{
421 writel(val, tp->regs + off);
422}
423
424static u32 tg3_read32(struct tg3 *tp, u32 off)
425{
de6f31eb 426 return readl(tp->regs + off);
b401e9e2
MC
427}
428
0d3031d9
MC
429static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
430{
431 writel(val, tp->aperegs + off);
432}
433
434static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
435{
de6f31eb 436 return readl(tp->aperegs + off);
0d3031d9
MC
437}
438
1da177e4
LT
439static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
440{
6892914f
MC
441 unsigned long flags;
442
443 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
444 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
445 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 446 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
447}
448
449static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
450{
451 writel(val, tp->regs + off);
452 readl(tp->regs + off);
1da177e4
LT
453}
454
6892914f 455static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 456{
6892914f
MC
457 unsigned long flags;
458 u32 val;
459
460 spin_lock_irqsave(&tp->indirect_lock, flags);
461 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
462 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
463 spin_unlock_irqrestore(&tp->indirect_lock, flags);
464 return val;
465}
466
467static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
468{
469 unsigned long flags;
470
471 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
472 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
473 TG3_64BIT_REG_LOW, val);
474 return;
475 }
66711e66 476 if (off == TG3_RX_STD_PROD_IDX_REG) {
6892914f
MC
477 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
478 TG3_64BIT_REG_LOW, val);
479 return;
1da177e4 480 }
6892914f
MC
481
482 spin_lock_irqsave(&tp->indirect_lock, flags);
483 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
484 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
485 spin_unlock_irqrestore(&tp->indirect_lock, flags);
486
487 /* In indirect mode when disabling interrupts, we also need
488 * to clear the interrupt bit in the GRC local ctrl register.
489 */
490 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
491 (val == 0x1)) {
492 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
493 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
494 }
495}
496
497static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
498{
499 unsigned long flags;
500 u32 val;
501
502 spin_lock_irqsave(&tp->indirect_lock, flags);
503 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
504 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
505 spin_unlock_irqrestore(&tp->indirect_lock, flags);
506 return val;
507}
508
b401e9e2
MC
509/* usec_wait specifies the wait time in usec when writing to certain registers
510 * where it is unsafe to read back the register without some delay.
511 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
512 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
513 */
514static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
6892914f 515{
63c3a66f 516 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
b401e9e2
MC
517 /* Non-posted methods */
518 tp->write32(tp, off, val);
519 else {
520 /* Posted method */
521 tg3_write32(tp, off, val);
522 if (usec_wait)
523 udelay(usec_wait);
524 tp->read32(tp, off);
525 }
526 /* Wait again after the read for the posted method to guarantee that
527 * the wait time is met.
528 */
529 if (usec_wait)
530 udelay(usec_wait);
1da177e4
LT
531}
532
09ee929c
MC
533static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
534{
535 tp->write32_mbox(tp, off, val);
63c3a66f 536 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
6892914f 537 tp->read32_mbox(tp, off);
09ee929c
MC
538}
539
20094930 540static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
541{
542 void __iomem *mbox = tp->regs + off;
543 writel(val, mbox);
63c3a66f 544 if (tg3_flag(tp, TXD_MBOX_HWBUG))
1da177e4 545 writel(val, mbox);
63c3a66f 546 if (tg3_flag(tp, MBOX_WRITE_REORDER))
1da177e4
LT
547 readl(mbox);
548}
549
b5d3772c
MC
550static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
551{
de6f31eb 552 return readl(tp->regs + off + GRCMBOX_BASE);
b5d3772c
MC
553}
554
555static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
556{
557 writel(val, tp->regs + off + GRCMBOX_BASE);
558}
559
/* Convenience wrappers that dispatch through the per-chip accessor
 * function pointers installed in struct tg3.
 */
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
1da177e4
LT
570
571static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
572{
6892914f
MC
573 unsigned long flags;
574
6ff6f81d 575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
b5d3772c
MC
576 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
577 return;
578
6892914f 579 spin_lock_irqsave(&tp->indirect_lock, flags);
63c3a66f 580 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
bbadf503
MC
581 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
582 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 583
bbadf503
MC
584 /* Always leave this as zero. */
585 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
586 } else {
587 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
588 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 589
bbadf503
MC
590 /* Always leave this as zero. */
591 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
592 }
593 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
594}
595
1da177e4
LT
596static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
597{
6892914f
MC
598 unsigned long flags;
599
6ff6f81d 600 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
b5d3772c
MC
601 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
602 *val = 0;
603 return;
604 }
605
6892914f 606 spin_lock_irqsave(&tp->indirect_lock, flags);
63c3a66f 607 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
bbadf503
MC
608 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
609 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 610
bbadf503
MC
611 /* Always leave this as zero. */
612 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
613 } else {
614 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
615 *val = tr32(TG3PCI_MEM_WIN_DATA);
616
617 /* Always leave this as zero. */
618 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
619 }
6892914f 620 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
621}
622
0d3031d9
MC
623static void tg3_ape_lock_init(struct tg3 *tp)
624{
625 int i;
6f5c8f83 626 u32 regbase, bit;
f92d9dc1
MC
627
628 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
629 regbase = TG3_APE_LOCK_GRANT;
630 else
631 regbase = TG3_APE_PER_LOCK_GRANT;
0d3031d9
MC
632
633 /* Make sure the driver hasn't any stale locks. */
78f94dc7
MC
634 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
635 switch (i) {
636 case TG3_APE_LOCK_PHY0:
637 case TG3_APE_LOCK_PHY1:
638 case TG3_APE_LOCK_PHY2:
639 case TG3_APE_LOCK_PHY3:
640 bit = APE_LOCK_GRANT_DRIVER;
641 break;
642 default:
643 if (!tp->pci_fn)
644 bit = APE_LOCK_GRANT_DRIVER;
645 else
646 bit = 1 << tp->pci_fn;
647 }
648 tg3_ape_write32(tp, regbase + 4 * i, bit);
6f5c8f83
MC
649 }
650
0d3031d9
MC
651}
652
653static int tg3_ape_lock(struct tg3 *tp, int locknum)
654{
655 int i, off;
656 int ret = 0;
6f5c8f83 657 u32 status, req, gnt, bit;
0d3031d9 658
63c3a66f 659 if (!tg3_flag(tp, ENABLE_APE))
0d3031d9
MC
660 return 0;
661
662 switch (locknum) {
6f5c8f83
MC
663 case TG3_APE_LOCK_GPIO:
664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
665 return 0;
33f401ae
MC
666 case TG3_APE_LOCK_GRC:
667 case TG3_APE_LOCK_MEM:
78f94dc7
MC
668 if (!tp->pci_fn)
669 bit = APE_LOCK_REQ_DRIVER;
670 else
671 bit = 1 << tp->pci_fn;
33f401ae 672 break;
8151ad57
MC
673 case TG3_APE_LOCK_PHY0:
674 case TG3_APE_LOCK_PHY1:
675 case TG3_APE_LOCK_PHY2:
676 case TG3_APE_LOCK_PHY3:
677 bit = APE_LOCK_REQ_DRIVER;
678 break;
33f401ae
MC
679 default:
680 return -EINVAL;
0d3031d9
MC
681 }
682
f92d9dc1
MC
683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
684 req = TG3_APE_LOCK_REQ;
685 gnt = TG3_APE_LOCK_GRANT;
686 } else {
687 req = TG3_APE_PER_LOCK_REQ;
688 gnt = TG3_APE_PER_LOCK_GRANT;
689 }
690
0d3031d9
MC
691 off = 4 * locknum;
692
6f5c8f83 693 tg3_ape_write32(tp, req + off, bit);
0d3031d9
MC
694
695 /* Wait for up to 1 millisecond to acquire lock. */
696 for (i = 0; i < 100; i++) {
f92d9dc1 697 status = tg3_ape_read32(tp, gnt + off);
6f5c8f83 698 if (status == bit)
0d3031d9
MC
699 break;
700 udelay(10);
701 }
702
6f5c8f83 703 if (status != bit) {
0d3031d9 704 /* Revoke the lock request. */
6f5c8f83 705 tg3_ape_write32(tp, gnt + off, bit);
0d3031d9
MC
706 ret = -EBUSY;
707 }
708
709 return ret;
710}
711
712static void tg3_ape_unlock(struct tg3 *tp, int locknum)
713{
6f5c8f83 714 u32 gnt, bit;
0d3031d9 715
63c3a66f 716 if (!tg3_flag(tp, ENABLE_APE))
0d3031d9
MC
717 return;
718
719 switch (locknum) {
6f5c8f83
MC
720 case TG3_APE_LOCK_GPIO:
721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
722 return;
33f401ae
MC
723 case TG3_APE_LOCK_GRC:
724 case TG3_APE_LOCK_MEM:
78f94dc7
MC
725 if (!tp->pci_fn)
726 bit = APE_LOCK_GRANT_DRIVER;
727 else
728 bit = 1 << tp->pci_fn;
33f401ae 729 break;
8151ad57
MC
730 case TG3_APE_LOCK_PHY0:
731 case TG3_APE_LOCK_PHY1:
732 case TG3_APE_LOCK_PHY2:
733 case TG3_APE_LOCK_PHY3:
734 bit = APE_LOCK_GRANT_DRIVER;
735 break;
33f401ae
MC
736 default:
737 return;
0d3031d9
MC
738 }
739
f92d9dc1
MC
740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
741 gnt = TG3_APE_LOCK_GRANT;
742 else
743 gnt = TG3_APE_PER_LOCK_GRANT;
744
6f5c8f83 745 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
0d3031d9
MC
746}
747
b65a372b 748static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
fd6d3f0e 749{
fd6d3f0e
MC
750 u32 apedata;
751
b65a372b
MC
752 while (timeout_us) {
753 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
754 return -EBUSY;
755
756 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
757 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758 break;
759
760 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
761
762 udelay(10);
763 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
764 }
765
766 return timeout_us ? 0 : -EBUSY;
767}
768
cf8d55ae
MC
769static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
770{
771 u32 i, apedata;
772
773 for (i = 0; i < timeout_us / 10; i++) {
774 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
775
776 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
777 break;
778
779 udelay(10);
780 }
781
782 return i == timeout_us / 10;
783}
784
86449944
MC
785static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
786 u32 len)
cf8d55ae
MC
787{
788 int err;
789 u32 i, bufoff, msgoff, maxlen, apedata;
790
791 if (!tg3_flag(tp, APE_HAS_NCSI))
792 return 0;
793
794 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
795 if (apedata != APE_SEG_SIG_MAGIC)
796 return -ENODEV;
797
798 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
799 if (!(apedata & APE_FW_STATUS_READY))
800 return -EAGAIN;
801
802 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
803 TG3_APE_SHMEM_BASE;
804 msgoff = bufoff + 2 * sizeof(u32);
805 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
806
807 while (len) {
808 u32 length;
809
810 /* Cap xfer sizes to scratchpad limits. */
811 length = (len > maxlen) ? maxlen : len;
812 len -= length;
813
814 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
815 if (!(apedata & APE_FW_STATUS_READY))
816 return -EAGAIN;
817
818 /* Wait for up to 1 msec for APE to service previous event. */
819 err = tg3_ape_event_lock(tp, 1000);
820 if (err)
821 return err;
822
823 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
824 APE_EVENT_STATUS_SCRTCHPD_READ |
825 APE_EVENT_STATUS_EVENT_PENDING;
826 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
827
828 tg3_ape_write32(tp, bufoff, base_off);
829 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
830
831 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
832 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
833
834 base_off += length;
835
836 if (tg3_ape_wait_for_event(tp, 30000))
837 return -EAGAIN;
838
839 for (i = 0; length; i += 4, length -= 4) {
840 u32 val = tg3_ape_read32(tp, msgoff + i);
841 memcpy(data, &val, sizeof(u32));
842 data++;
843 }
844 }
845
846 return 0;
847}
848
b65a372b
MC
849static int tg3_ape_send_event(struct tg3 *tp, u32 event)
850{
851 int err;
852 u32 apedata;
fd6d3f0e
MC
853
854 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
855 if (apedata != APE_SEG_SIG_MAGIC)
b65a372b 856 return -EAGAIN;
fd6d3f0e
MC
857
858 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
859 if (!(apedata & APE_FW_STATUS_READY))
b65a372b 860 return -EAGAIN;
fd6d3f0e
MC
861
862 /* Wait for up to 1 millisecond for APE to service previous event. */
b65a372b
MC
863 err = tg3_ape_event_lock(tp, 1000);
864 if (err)
865 return err;
fd6d3f0e 866
b65a372b
MC
867 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
868 event | APE_EVENT_STATUS_EVENT_PENDING);
fd6d3f0e 869
b65a372b
MC
870 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
871 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
fd6d3f0e 872
b65a372b 873 return 0;
fd6d3f0e
MC
874}
875
/* Mirror a driver lifecycle transition (init/shutdown/suspend) into the
 * APE management firmware's host segment and notify it via an event.
 * No-op when the APE is not enabled or @kind is unrecognized.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment: signature, length, bumped init
		 * count, driver version and behavior flags.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise WoL only when wake-on-LAN is actually armed. */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
932
/* Mask PCI interrupts at the chip level and ack/disable every per-vector
 * interrupt mailbox (writing 1 to an int_mbox masks that vector).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
942
/* Re-enable chip interrupts on all active vectors and rebuild the
 * coalescing "now" mask; may force an initial interrupt when a status
 * update is already pending in non-tagged mode.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();		/* make irq_sync visible before unmasking */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1-shot MSI mode needs the mailbox written twice. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
973
17375d25 974static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
04237ddd 975{
17375d25 976 struct tg3 *tp = tnapi->tp;
898a56f8 977 struct tg3_hw_status *sblk = tnapi->hw_status;
04237ddd
MC
978 unsigned int work_exists = 0;
979
980 /* check for phy events */
63c3a66f 981 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
04237ddd
MC
982 if (sblk->status & SD_STATUS_LINK_CHG)
983 work_exists = 1;
984 }
f891ea16
MC
985
986 /* check for TX work to do */
987 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
988 work_exists = 1;
989
990 /* check for RX work to do */
991 if (tnapi->rx_rcb_prod_idx &&
8d9d7cfc 992 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
04237ddd
MC
993 work_exists = 1;
994
995 return work_exists;
996}
997
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack the vector by writing back the last processed status tag. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1018
/* Switch the core clock back to its normal source, stepping through the
 * ALTCLK intermediate setting where the hardware requires it.  No-op on
 * CPMU-equipped and 5780-class chips, which manage clocks themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition through ALTCLK as required by the
		 * older cores before dropping to the final setting.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1051
1052#define PHY_BUSY_LOOPS 5000
1053
/* Read PHY register @reg through the MAC's MI (MDIO) engine.
 *
 * Auto-polling is temporarily disabled around the access and the shared
 * PHY APE lock is held for the duration.  On success returns 0 with the
 * register contents in *val; returns -EBUSY (and *val == 0) if the MI
 * engine never goes idle within PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Compose the MI command frame: PHY address, register, READ+START. */
	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a settle delay to latch the data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1106
/* Write @val to PHY register @reg through the MAC's MI (MDIO) engine.
 *
 * FET-class PHYs silently ignore MII_CTRL1000/MII_TG3_AUX_CTRL, so those
 * writes are short-circuited as success.  Auto-polling is suspended and
 * the shared PHY APE lock held around the access.  Returns 0 on success
 * or -EBUSY if the MI engine stays busy.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Compose the MI command frame: address, register, data, WRITE+START. */
	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1159
b0988c15
MC
1160static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1161{
1162 int err;
1163
1164 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1165 if (err)
1166 goto done;
1167
1168 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1169 if (err)
1170 goto done;
1171
1172 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1173 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1174 if (err)
1175 goto done;
1176
1177 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1178
1179done:
1180 return err;
1181}
1182
1183static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1184{
1185 int err;
1186
1187 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1188 if (err)
1189 goto done;
1190
1191 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1192 if (err)
1193 goto done;
1194
1195 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1196 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1197 if (err)
1198 goto done;
1199
1200 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1201
1202done:
1203 return err;
1204}
1205
1206static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1207{
1208 int err;
1209
1210 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1211 if (!err)
1212 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1213
1214 return err;
1215}
1216
1217static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1218{
1219 int err;
1220
1221 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1222 if (!err)
1223 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1224
1225 return err;
1226}
1227
15ee95c3
MC
1228static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1229{
1230 int err;
1231
1232 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1233 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1234 MII_TG3_AUXCTL_SHDWSEL_MISC);
1235 if (!err)
1236 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1237
1238 return err;
1239}
1240
b4bd2929
MC
1241static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1242{
1243 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1244 set |= MII_TG3_AUXCTL_MISC_WREN;
1245
1246 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1247}
1248
/* Enable/disable SM_DSP clock access through the AUXCTL shadow register.
 * Note: the DISABLE variant previously carried a stray trailing ';' in
 * its expansion, which expands to two statements and breaks braceless
 * if/else callers; the semicolon belongs at the call site.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1257
/* Issue a PHY soft reset through BMCR and poll until the self-clearing
 * BMCR_RESET bit drops.  Returns 0 on success, -EBUSY on any PHY access
 * failure or if the reset bit never clears within the poll budget.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit only goes negative when the loop exhausted without break. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1288
158d7abd
MC
1289static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1290{
3d16543d 1291 struct tg3 *tp = bp->priv;
158d7abd
MC
1292 u32 val;
1293
24bb4fb6 1294 spin_lock_bh(&tp->lock);
158d7abd
MC
1295
1296 if (tg3_readphy(tp, reg, &val))
24bb4fb6
MC
1297 val = -EIO;
1298
1299 spin_unlock_bh(&tp->lock);
158d7abd
MC
1300
1301 return val;
1302}
1303
1304static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1305{
3d16543d 1306 struct tg3 *tp = bp->priv;
24bb4fb6 1307 u32 ret = 0;
158d7abd 1308
24bb4fb6 1309 spin_lock_bh(&tp->lock);
158d7abd
MC
1310
1311 if (tg3_writephy(tp, reg, val))
24bb4fb6 1312 ret = -EIO;
158d7abd 1313
24bb4fb6
MC
1314 spin_unlock_bh(&tp->lock);
1315
1316 return ret;
158d7abd
MC
1317}
1318
/* phylib mii_bus ->reset callback: nothing to do for this controller. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1323
/* Program the 5785's MAC-side PHY configuration (LED modes, RGMII
 * in-band signalling, clock timeouts) to match the attached PHY type.
 * Returns silently for PHY models this table does not cover.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick the LED mode matching the attached PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII attachments need only LED modes and clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled: unmask all in-band fields. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Finally the external RGMII mode bits for RX status and TX control. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1404
/* Stop MI auto-polling (so manual MDIO access works) and, on 5785 with a
 * registered mdio bus, reapply the PHY-specific MAC configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1415
/* Determine the PHY address, start the MI interface, and (when phylib is
 * in use) allocate and register an mdio bus, then apply per-PHY quirks.
 * Returns 0 on success or a negative errno; frees the bus on failure.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts map PHYs per PCI function; serdes PHYs sit
		 * at an offset of 7 from the copper address.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply PHY-model-specific interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1520
1521static void tg3_mdio_fini(struct tg3 *tp)
1522{
63c3a66f
JP
1523 if (tg3_flag(tp, MDIOBUS_INITED)) {
1524 tg3_flag_clear(tp, MDIOBUS_INITED);
298cf9be
LB
1525 mdiobus_unregister(tp->mdio_bus);
1526 mdiobus_free(tp->mdio_bus);
158d7abd
MC
1527 }
1528}
1529
/* tp->lock is held. */
/* Ring the RX CPU doorbell to signal a driver event to the firmware and
 * remember when it was raised (used to bound later ack waits).
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1541
1542#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1543
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC since the last event) for
 * the firmware to clear the previous driver-event doorbell.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8us steps; +1 guarantees at least one iteration. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1570
/* tp->lock is held. */
/* Snapshot four words of PHY state for the UMP link report:
 * data[0] = BMCR<<16 | BMSR, data[1] = ADVERTISE<<16 | LPA,
 * data[2] = CTRL1000<<16 | STAT1000 (copper only), data[3] = PHYADDR<<16.
 * Registers that fail to read contribute 0 bits.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	/* Gigabit control/status only exists on non-serdes (copper) PHYs. */
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1605
/* tp->lock is held. */
/* Report a link state change to the management firmware (5780-class
 * parts with ASF only): gather PHY state, wait for the previous event
 * to be acked, write the command mailbox, and ring the doorbell.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
1627
/* tp->lock is held. */
/* Ask the ASF firmware to pause (not needed/used when the APE manages
 * the device).  Waits for the previous event ack, posts PAUSE_FW, then
 * waits for that event to be acked too.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1643
/* tp->lock is held. */
/* Before a chip reset: post the firmware-mailbox magic, record the
 * pending driver state for new-handshake firmware, and (for init and
 * suspend) inform the APE of the state change.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
1676
/* tp->lock is held. */
/* After a chip reset: record the completed driver state for
 * new-handshake firmware, and on shutdown inform the APE.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
1700
/* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF firmware: record the driver state in
 * the firmware mailbox for init/shutdown/suspend transitions.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1726
/* Poll for firmware boot completion after a reset.  On 5906 this is the
 * VCPU init-done bit (hard failure after 20ms); on other chips it is
 * the firmware mailbox handshake, whose timeout is only reported once
 * because some boards legitimately run without firmware.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1770
/* Log the current link state (speed/duplex/flow-control/EEE) subject to
 * the netif message level, and forward it to management firmware.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
1798
95e2869a
MC
1799static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1800{
1801 u16 miireg;
1802
e18ce346 1803 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
95e2869a 1804 miireg = ADVERTISE_1000XPAUSE;
e18ce346 1805 else if (flow_ctrl & FLOW_CTRL_TX)
95e2869a 1806 miireg = ADVERTISE_1000XPSE_ASYM;
e18ce346 1807 else if (flow_ctrl & FLOW_CTRL_RX)
95e2869a
MC
1808 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1809 else
1810 miireg = 0;
1811
1812 return miireg;
1813}
1814
95e2869a
MC
1815static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1816{
1817 u8 cap = 0;
1818
f3791cdf
MC
1819 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1820 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1821 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1822 if (lcladv & ADVERTISE_1000XPAUSE)
1823 cap = FLOW_CTRL_RX;
1824 if (rmtadv & ADVERTISE_1000XPAUSE)
e18ce346 1825 cap = FLOW_CTRL_TX;
95e2869a
MC
1826 }
1827
1828 return cap;
1829}
1830
/* Apply the resolved flow-control settings to the MAC RX/TX mode
 * registers.  With autoneg+pause-autoneg, the result is resolved from
 * the given local/remote advertisements; otherwise the configured
 * flow-control setting is used verbatim.  Registers are only rewritten
 * when their value actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause resolution, copper
		 * links the standard MII resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1869
/* phylib link-change callback: reprogram MAC mode, MI status polling,
 * TX slot-time, and flow control to match the PHY's negotiated state,
 * then emit a link report if anything user-visible changed.
 * Runs under tp->lock; the report is logged after the lock is dropped.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: gather pause advertisements for
			 * flow-control resolution.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs the long slot time; everything else the default. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1953
/* Connect the MAC to its PHY via phylib: reset the PHY to a known
 * state, attach with tg3_adjust_link as the link handler, and trim the
 * advertised feature set to what the MAC/interface mode supports.
 * Idempotent; returns 0 on success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2001
/* Resume the PHY state machine.  If the device was put into low power,
 * first restore the link parameters that were saved in tp->link_config
 * so autonegotiation restarts with the user's previous settings.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2023
2024static void tg3_phy_stop(struct tg3 *tp)
2025{
f07e9af3 2026 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3
MC
2027 return;
2028
3f0e3ad7 2029 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
b02fd9e3
MC
2030}
2031
2032static void tg3_phy_fini(struct tg3 *tp)
2033{
f07e9af3 2034 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
3f0e3ad7 2035 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
f07e9af3 2036 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
b02fd9e3
MC
2037 }
2038}
2039
941ec90f
MC
/* Enable external loopback through the PHY auxiliary control register.
 * FET-style PHYs have no such control and are silently skipped.  The
 * BCM5401 cannot do read-modify-write, so a full value (0x4c20 plus the
 * loopback bit) is written instead.  Returns 0 or a negative errno.
 */
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
2069
7f97a4bd
MC
2070static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2071{
2072 u32 phytest;
2073
2074 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2075 u32 phy;
2076
2077 tg3_writephy(tp, MII_TG3_FET_TEST,
2078 phytest | MII_TG3_FET_SHADOW_EN);
2079 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2080 if (enable)
2081 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2082 else
2083 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2084 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2085 }
2086 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2087 }
2088}
2089
6833c043
MC
/* Enable or disable Auto Power-Down in the PHY.  Only applies to
 * 5705-plus parts (and not 5717-plus MII serdes).  FET PHYs are handled
 * by tg3_phy_fet_toggle_apd(); others are programmed through two
 * MISC_SHDW shadow writes (SCR5 setup, then the APD wake timer/enable).
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* First shadow write: SCR5 power-state selections. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLL APD stays on everywhere except 5784 when enabling. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Second shadow write: 84ms wake timer and the APD enable bit. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2124
9ef8ca99
MC
/* Enable or disable automatic MDI/MDI-X crossover on copper PHYs.
 * Skipped on pre-5705 parts and on any serdes.  FET PHYs use the
 * MISCCTRL shadow register; others use the auxctl MISC shadow.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Expose shadow registers, flip MDIX, restore. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2165
1da177e4
LT
2166static void tg3_phy_set_wirespeed(struct tg3 *tp)
2167{
15ee95c3 2168 int ret;
1da177e4
LT
2169 u32 val;
2170
f07e9af3 2171 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1da177e4
LT
2172 return;
2173
15ee95c3
MC
2174 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2175 if (!ret)
b4bd2929
MC
2176 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2177 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1da177e4
LT
2178}
2179
b2a5c19c
MC
/* Unpack the one-time-programmable calibration word cached in
 * tp->phy_otp and program the individual fields into the PHY DSP
 * registers.  No-op when no OTP value was read at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP access requires the SMDSP clock to be enabled. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2216
52b02d04
MC
/* Re-evaluate Energy Efficient Ethernet after a link change.  When the
 * autonegotiated link is full duplex at 100/1000 and the link partner
 * resolved EEE, arm tp->setlpicnt (LPI entry countdown); otherwise
 * disable LPI in the CPMU EEE mode register.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timer depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the link partner's EEE resolution (clause 45). */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2259
b0c5943f
MC
/* Enable EEE low-power-idle.  On 5717/5719/57765-class parts at
 * gigabit speed, first program the TAP26 DSP workaround bits, then set
 * the LPI enable bit in the CPMU EEE mode register.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2278
1da177e4
LT
2279static int tg3_wait_macro_done(struct tg3 *tp)
2280{
2281 int limit = 100;
2282
2283 while (limit--) {
2284 u32 tmp32;
2285
f08aa1a8 2286 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1da177e4
LT
2287 if ((tmp32 & 0x1000) == 0)
2288 break;
2289 }
2290 }
d4675b52 2291 if (limit < 0)
1da177e4
LT
2292 return -EBUSY;
2293
2294 return 0;
2295}
2296
/* Write a fixed test pattern into each of the four PHY DSP channels and
 * read it back to verify.  On any readback mismatch, write the 0x4001 /
 * 0x4005 recovery values and return -EBUSY.  *resetp is set to 1 when
 * the caller should reset the PHY before retrying.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel 6-word test patterns (low/high register pairs). */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's block and enter write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to read mode and read the pattern back. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the recovery writes. */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2362
/* Zero out all six pattern words in each of the four PHY DSP channels.
 * Returns 0 on success or -EBUSY if the DSP macro never completes.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2382
/* PHY reset workaround for 5703/5704/5705: repeatedly reset the PHY,
 * force 1000/full master mode, and verify the DSP channels with a test
 * pattern (up to 10 attempts).  Afterwards the channel patterns are
 * cleared and the original MII_CTRL1000 and EXT_CTRL state restored.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	/* Restore the saved master/slave setting. */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2449
/* Reset the tigon3 PHY and reapply all chip-specific workarounds
 * (CPMU quirks, OTP calibration, APD, DSP bug fixups, jumbo-frame
 * settings, auto-MDIX and wirespeed).  Reports carrier loss if the
 * interface was running.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the 5906 EPHY out of IDDQ (power-down) first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: the first read returns latched status. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX), temporarily clear the 10MB-RX-only mode
	 * around the reset.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: back off the 12.5MHz MAC clock selection. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* DSP fixups for known PHY erratum flags. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2590
3a1e19d3
MC
/* Per-function GPIO handshake messages: each PCI function owns a 4-bit
 * nibble (shifted by 4 * pci_fn) in a shared status word.
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))

/* Replace this function's nibble in the shared status word with
 * newstat and return the updated combined status (all functions).
 * 5717/5719 keep the word in the APE scratchpad; others in CPMU.
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
2629
520b2756
MC
/* Switch the NIC's power source to Vmain.  On 5717/5719/5720 the GPIO
 * lines are shared between functions, so the change is serialized with
 * the APE GPIO lock and announced via the function-status handshake.
 * Returns 0, or -EIO if the GPIO lock cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2654
/* Leave the NIC on Vmain for power-off by pulsing GPIO1.  The three
 * timed writes (high, low, high) are the required toggle sequence.
 * No-op for non-NIC devices and 5700/5701.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2678
/* Switch the NIC to auxiliary power (Vaux) by driving the board GPIOs.
 * The exact GPIO sequence is chip-specific: 5700/5701 use one shot,
 * 5761 swaps GPIO 0 and 2, and everything else uses a staged sequence
 * with workarounds for 5714 (over-current) and 5753 (no GPIO2).
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2755
cd0d7228 2756static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
3a1e19d3
MC
2757{
2758 u32 msg = 0;
2759
2760 /* Serialize power state transitions */
2761 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2762 return;
2763
cd0d7228 2764 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
3a1e19d3
MC
2765 msg = TG3_GPIO_MSG_NEED_VAUX;
2766
2767 msg = tg3_set_function_status(tp, msg);
2768
2769 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2770 goto done;
6f5c8f83 2771
3a1e19d3
MC
2772 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2773 tg3_pwrsrc_switch_to_vaux(tp);
2774 else
2775 tg3_pwrsrc_die_with_vmain(tp);
2776
2777done:
6f5c8f83 2778 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
520b2756
MC
2779}
2780
cd0d7228 2781static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
1da177e4 2782{
683644b7 2783 bool need_vaux = false;
1da177e4 2784
334355aa 2785 /* The GPIOs do something completely different on 57765. */
55086ad9 2786 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
1da177e4
LT
2787 return;
2788
3a1e19d3
MC
2789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2790 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
cd0d7228
MC
2792 tg3_frob_aux_power_5717(tp, include_wol ?
2793 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
3a1e19d3
MC
2794 return;
2795 }
2796
2797 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
8c2dc7e1
MC
2798 struct net_device *dev_peer;
2799
2800 dev_peer = pci_get_drvdata(tp->pdev_peer);
683644b7 2801
bc1c7567 2802 /* remove_one() may have been run on the peer. */
683644b7
MC
2803 if (dev_peer) {
2804 struct tg3 *tp_peer = netdev_priv(dev_peer);
2805
63c3a66f 2806 if (tg3_flag(tp_peer, INIT_COMPLETE))
683644b7
MC
2807 return;
2808
cd0d7228 2809 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
63c3a66f 2810 tg3_flag(tp_peer, ENABLE_ASF))
683644b7
MC
2811 need_vaux = true;
2812 }
1da177e4
LT
2813 }
2814
cd0d7228
MC
2815 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2816 tg3_flag(tp, ENABLE_ASF))
683644b7
MC
2817 need_vaux = true;
2818
520b2756
MC
2819 if (need_vaux)
2820 tg3_pwrsrc_switch_to_vaux(tp);
2821 else
2822 tg3_pwrsrc_die_with_vmain(tp);
1da177e4
LT
2823}
2824
e8f3f6ca
MC
2825static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2826{
2827 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2828 return 1;
79eb6904 2829 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
e8f3f6ca
MC
2830 if (speed != SPEED_10)
2831 return 1;
2832 } else if (speed == SPEED_10)
2833 return 1;
2834
2835 return 0;
2836}
2837
0a459aac 2838static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
15c3b696 2839{
ce057f01
MC
2840 u32 val;
2841
f07e9af3 2842 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
5129724a
MC
2843 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2844 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2845 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2846
2847 sg_dig_ctrl |=
2848 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2849 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2850 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2851 }
3f7045c1 2852 return;
5129724a 2853 }
3f7045c1 2854
60189ddf 2855 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
2856 tg3_bmcr_reset(tp);
2857 val = tr32(GRC_MISC_CFG);
2858 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2859 udelay(40);
2860 return;
f07e9af3 2861 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
0e5f784c
MC
2862 u32 phytest;
2863 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2864 u32 phy;
2865
2866 tg3_writephy(tp, MII_ADVERTISE, 0);
2867 tg3_writephy(tp, MII_BMCR,
2868 BMCR_ANENABLE | BMCR_ANRESTART);
2869
2870 tg3_writephy(tp, MII_TG3_FET_TEST,
2871 phytest | MII_TG3_FET_SHADOW_EN);
2872 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2873 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2874 tg3_writephy(tp,
2875 MII_TG3_FET_SHDW_AUXMODE4,
2876 phy);
2877 }
2878 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2879 }
2880 return;
0a459aac 2881 } else if (do_low_power) {
715116a1
MC
2882 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2883 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
0a459aac 2884
b4bd2929
MC
2885 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2886 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2887 MII_TG3_AUXCTL_PCTL_VREG_11V;
2888 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
715116a1 2889 }
3f7045c1 2890
15c3b696
MC
2891 /* The PHY should not be powered down on some chips because
2892 * of bugs.
2893 */
2894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2896 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
085f1afc
MC
2897 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2898 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2899 !tp->pci_fn))
15c3b696 2900 return;
ce057f01 2901
bcb37f6c
MC
2902 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2903 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
ce057f01
MC
2904 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2905 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2906 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2907 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2908 }
2909
15c3b696
MC
2910 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2911}
2912
ffbcfed4
MC
/* tp->lock is held.
 * Acquire the NVRAM software arbitration grant (recursive: only the
 * first holder requests it from hardware, polling up to 8000 * 20us).
 * Returns 0 on success or -ENODEV if arbitration is never granted.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Timed out: withdraw the request. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
2935
2936/* tp->lock is held. */
2937static void tg3_nvram_unlock(struct tg3 *tp)
2938{
63c3a66f 2939 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
2940 if (tp->nvram_lock_cnt > 0)
2941 tp->nvram_lock_cnt--;
2942 if (tp->nvram_lock_cnt == 0)
2943 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2944 }
2945}
2946
2947/* tp->lock is held. */
2948static void tg3_enable_nvram_access(struct tg3 *tp)
2949{
63c3a66f 2950 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
2951 u32 nvaccess = tr32(NVRAM_ACCESS);
2952
2953 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2954 }
2955}
2956
2957/* tp->lock is held. */
2958static void tg3_disable_nvram_access(struct tg3 *tp)
2959{
63c3a66f 2960 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
2961 u32 nvaccess = tr32(NVRAM_ACCESS);
2962
2963 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2964 }
2965}
2966
/* Read one 32-bit word from the legacy EEPROM via the GRC EEPROM
 * interface.  @offset must be 4-byte aligned and within the address
 * mask.  Polls completion for up to ~1s.  The hardware returns data
 * opposite the native endianness, hence the swab32().
 * Returns 0 on success, -EINVAL or -EBUSY on failure.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3006
3007#define NVRAM_CMD_TIMEOUT 10000
3008
3009static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3010{
3011 int i;
3012
3013 tw32(NVRAM_CMD, nvram_cmd);
3014 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3015 udelay(10);
3016 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3017 udelay(10);
3018 break;
3019 }
3020 }
3021
3022 if (i == NVRAM_CMD_TIMEOUT)
3023 return -EBUSY;
3024
3025 return 0;
3026}
3027
3028static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3029{
63c3a66f
JP
3030 if (tg3_flag(tp, NVRAM) &&
3031 tg3_flag(tp, NVRAM_BUFFERED) &&
3032 tg3_flag(tp, FLASH) &&
3033 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
3034 (tp->nvram_jedecnum == JEDEC_ATMEL))
3035
3036 addr = ((addr / tp->nvram_pagesize) <<
3037 ATMEL_AT45DB0X1B_PAGE_POS) +
3038 (addr % tp->nvram_pagesize);
3039
3040 return addr;
3041}
3042
3043static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3044{
63c3a66f
JP
3045 if (tg3_flag(tp, NVRAM) &&
3046 tg3_flag(tp, NVRAM_BUFFERED) &&
3047 tg3_flag(tp, FLASH) &&
3048 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
3049 (tp->nvram_jedecnum == JEDEC_ATMEL))
3050
3051 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3052 tp->nvram_pagesize) +
3053 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3054
3055 return addr;
3056}
3057
e4f34110
MC
3058/* NOTE: Data read in from NVRAM is byteswapped according to
3059 * the byteswapping settings for all other register accesses.
3060 * tg3 devices are BE devices, so on a BE machine, the data
3061 * returned will be exactly as it is seen in NVRAM. On a LE
3062 * machine, the 32-bit value will be byteswapped.
3063 */
ffbcfed4
MC
3064static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3065{
3066 int ret;
3067
63c3a66f 3068 if (!tg3_flag(tp, NVRAM))
ffbcfed4
MC
3069 return tg3_nvram_read_using_eeprom(tp, offset, val);
3070
3071 offset = tg3_nvram_phys_addr(tp, offset);
3072
3073 if (offset > NVRAM_ADDR_MSK)
3074 return -EINVAL;
3075
3076 ret = tg3_nvram_lock(tp);
3077 if (ret)
3078 return ret;
3079
3080 tg3_enable_nvram_access(tp);
3081
3082 tw32(NVRAM_ADDR, offset);
3083 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3084 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3085
3086 if (ret == 0)
e4f34110 3087 *val = tr32(NVRAM_RDDATA);
ffbcfed4
MC
3088
3089 tg3_disable_nvram_access(tp);
3090
3091 tg3_nvram_unlock(tp);
3092
3093 return ret;
3094}
3095
a9dc529d
MC
3096/* Ensures NVRAM data is in bytestream format. */
3097static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
ffbcfed4
MC
3098{
3099 u32 v;
a9dc529d 3100 int res = tg3_nvram_read(tp, offset, &v);
ffbcfed4 3101 if (!res)
a9dc529d 3102 *val = cpu_to_be32(v);
ffbcfed4
MC
3103 return res;
3104}
3105
/* Write @len bytes from @buf to a legacy serial EEPROM starting at
 * @offset, one 32-bit word at a time, via the GRC EEPROM engine.
 * @offset and @len are assumed dword aligned by the caller.
 * Returns 0 on success or -EBUSY if a word write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Acknowledge any previous completion before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Program the address and kick off the write. */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, 1ms per iteration (~1s budget). */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3154
3155/* offset and length are dword aligned */
3156static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3157 u8 *buf)
3158{
3159 int ret = 0;
3160 u32 pagesize = tp->nvram_pagesize;
3161 u32 pagemask = pagesize - 1;
3162 u32 nvram_cmd;
3163 u8 *tmp;
3164
3165 tmp = kmalloc(pagesize, GFP_KERNEL);
3166 if (tmp == NULL)
3167 return -ENOMEM;
3168
3169 while (len) {
3170 int j;
3171 u32 phy_addr, page_off, size;
3172
3173 phy_addr = offset & ~pagemask;
3174
3175 for (j = 0; j < pagesize; j += 4) {
3176 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3177 (__be32 *) (tmp + j));
3178 if (ret)
3179 break;
3180 }
3181 if (ret)
3182 break;
3183
3184 page_off = offset & pagemask;
3185 size = pagesize;
3186 if (len < size)
3187 size = len;
3188
3189 len -= size;
3190
3191 memcpy(tmp + page_off, buf, size);
3192
3193 offset = offset + (pagesize - page_off);
3194
3195 tg3_enable_nvram_access(tp);
3196
3197 /*
3198 * Before we can erase the flash page, we need
3199 * to issue a special "write enable" command.
3200 */
3201 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3202
3203 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3204 break;
3205
3206 /* Erase the target page */
3207 tw32(NVRAM_ADDR, phy_addr);
3208
3209 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3210 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3211
3212 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3213 break;
3214
3215 /* Issue another write enable to start the write. */
3216 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3217
3218 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3219 break;
3220
3221 for (j = 0; j < pagesize; j += 4) {
3222 __be32 data;
3223
3224 data = *((__be32 *) (tmp + j));
3225
3226 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3227
3228 tw32(NVRAM_ADDR, phy_addr + j);
3229
3230 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3231 NVRAM_CMD_WR;
3232
3233 if (j == 0)
3234 nvram_cmd |= NVRAM_CMD_FIRST;
3235 else if (j == (pagesize - 4))
3236 nvram_cmd |= NVRAM_CMD_LAST;
3237
3238 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3239 if (ret)
3240 break;
3241 }
3242 if (ret)
3243 break;
3244 }
3245
3246 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3247 tg3_nvram_exec_cmd(tp, nvram_cmd);
3248
3249 kfree(tmp);
3250
3251 return ret;
3252}
3253
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered flash or EEPROM starting at
 * @offset, one 32-bit word per NVRAM command.  FIRST/LAST framing bits
 * are applied at page and transfer boundaries; ST-family parts that
 * need it get an explicit write-enable before each FIRST word.
 * Returns 0 on success or the first tg3_nvram_exec_cmd() error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at the start of a page or of the transfer. */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		/* LAST at the end of a page... */
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ...and at the final word of the transfer. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ flash parts auto-increment the address; only
		 * reprogram it at the start of each burst on those.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST parts require an explicit write-enable before
		 * each FIRST word.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3308
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily de-asserts the
 * write-protect GPIO when EEPROM_WRITE_PROT is set, dispatches to the
 * EEPROM, buffered, or unbuffered write path, and restores protection
 * afterwards.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): if tg3_nvram_lock() fails we return early with the
 * write-protect GPIO still de-asserted — looks like a latent quirk;
 * confirm against callers before relying on it.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes in GRC mode for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert write protection. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3358
997b4f13
MC
3359#define RX_CPU_SCRATCH_BASE 0x30000
3360#define RX_CPU_SCRATCH_SIZE 0x04000
3361#define TX_CPU_SCRATCH_BASE 0x34000
3362#define TX_CPU_SCRATCH_SIZE 0x04000
3363
/* tp->lock is held. */
/* Halt the on-chip RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  5906 uses the VCPU halt bit instead of CPU_MODE.
 * Returns 0 on success, -ENODEV if the CPU never reports halted.
 * 5705+ chips have no TX CPU — asserting one is a driver bug.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Repeatedly request a halt until the CPU acknowledges. */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* One final flushed halt request for the RX CPU. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3408
/* Descriptor for a firmware image loaded into an on-chip CPU. */
struct fw_info {
	unsigned int fw_base;	/* load/start address in chip address space */
	unsigned int fw_len;	/* image length in bytes */
	const __be32 *fw_data;	/* image payload as big-endian words */
};
3414
/* tp->lock is held. */
/* Copy the firmware image described by @info into the scratch memory
 * of the CPU at @cpu_base, after halting that CPU.  The caller is
 * responsible for starting the CPU afterwards.  Returns 0 on success
 * or a negative errno from the halt step.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	/* 5705+ has no TX CPU; loading TX firmware there is a bug. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive for this chip family. */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch area, keep the CPU halted, then copy the
	 * image in word by word at its load offset.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3460
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPUs,
 * then start only the RX CPU and verify its program counter reached
 * the firmware entry point.  Returns 0 on success, -ENODEV if the PC
 * never sticks, or a tg3_load_firmware_cpu() error.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Verify the PC latched; retry the handoff a few times. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the RX CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3515
/* tp->lock is held. */
/* Load the software TSO firmware and start its CPU.  Chips with
 * hardware TSO (HW_TSO_1/2/3) need no firmware and return 0
 * immediately.  On 5705 the image runs on the RX CPU out of the mbuf
 * pool; otherwise it runs on the TX CPU scratch memory.
 * Returns 0 on success, -ENODEV if the CPU PC never latches.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Verify the PC latched; retry the handoff a few times. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3579
3580
3f007891
MC
3581/* tp->lock is held. */
3582static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3583{
3584 u32 addr_high, addr_low;
3585 int i;
3586
3587 addr_high = ((tp->dev->dev_addr[0] << 8) |
3588 tp->dev->dev_addr[1]);
3589 addr_low = ((tp->dev->dev_addr[2] << 24) |
3590 (tp->dev->dev_addr[3] << 16) |
3591 (tp->dev->dev_addr[4] << 8) |
3592 (tp->dev->dev_addr[5] << 0));
3593 for (i = 0; i < 4; i++) {
3594 if (i == 1 && skip_mac_1)
3595 continue;
3596 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3597 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3598 }
3599
3600 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3601 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3602 for (i = 0; i < 12; i++) {
3603 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3604 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3605 }
3606 }
3607
3608 addr_high = (tp->dev->dev_addr[0] +
3609 tp->dev->dev_addr[1] +
3610 tp->dev->dev_addr[2] +
3611 tp->dev->dev_addr[3] +
3612 tp->dev->dev_addr[4] +
3613 tp->dev->dev_addr[5]) &
3614 TX_BACKOFF_SEED_MASK;
3615 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3616}
3617
c866b7ea 3618static void tg3_enable_register_access(struct tg3 *tp)
1da177e4 3619{
c866b7ea
RW
3620 /*
3621 * Make sure register accesses (indirect or otherwise) will function
3622 * correctly.
1da177e4
LT
3623 */
3624 pci_write_config_dword(tp->pdev,
c866b7ea
RW
3625 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3626}
1da177e4 3627
c866b7ea
RW
3628static int tg3_power_up(struct tg3 *tp)
3629{
bed9829f 3630 int err;
8c6bda1a 3631
bed9829f 3632 tg3_enable_register_access(tp);
1da177e4 3633
bed9829f
MC
3634 err = pci_set_power_state(tp->pdev, PCI_D0);
3635 if (!err) {
3636 /* Switch out of Vaux if it is a NIC */
3637 tg3_pwrsrc_switch_to_vmain(tp);
3638 } else {
3639 netdev_err(tp->dev, "Transition to D0 failed\n");
3640 }
1da177e4 3641
bed9829f 3642 return err;
c866b7ea 3643}
1da177e4 3644
4b409522
MC
3645static int tg3_setup_phy(struct tg3 *, int);
3646
/* Prepare the chip for entering a low-power state: mask PCI
 * interrupts, drop the PHY into a power-saving link mode, arm
 * Wake-on-LAN if enabled, and gate the core clocks as each chip
 * family requires.  Returns 0.  The exact register ordering below is
 * hardware-mandated; do not reorder.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Mask PCI interrupts while powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save current link settings so resume can
			 * restore them.
			 */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			/* Renegotiate down to the slowest mode that can
			 * still wake us (10/100 only, per WOL config).
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHY families support the
			 * low-power WOL mode programmed further below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for bootcode to signal readiness
		 * via the ASF status mailbox.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			/* Put a copper PHY into its WOL low-power mode. */
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Keep the MAC receiving so magic packets can wake us. */
		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Per-family core clock gating for the low-power state. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch, 40us settle after each write. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Fully power the PHY down only if nothing needs it for wake. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
12dac075 3876
/* Shut the device down: run the low-power preparation sequence, arm
 * PCI wake from D3 if WOL is enabled, then enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3884
1da177e4
LT
3885static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3886{
3887 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3888 case MII_TG3_AUX_STAT_10HALF:
3889 *speed = SPEED_10;
3890 *duplex = DUPLEX_HALF;
3891 break;
3892
3893 case MII_TG3_AUX_STAT_10FULL:
3894 *speed = SPEED_10;
3895 *duplex = DUPLEX_FULL;
3896 break;
3897
3898 case MII_TG3_AUX_STAT_100HALF:
3899 *speed = SPEED_100;
3900 *duplex = DUPLEX_HALF;
3901 break;
3902
3903 case MII_TG3_AUX_STAT_100FULL:
3904 *speed = SPEED_100;
3905 *duplex = DUPLEX_FULL;
3906 break;
3907
3908 case MII_TG3_AUX_STAT_1000HALF:
3909 *speed = SPEED_1000;
3910 *duplex = DUPLEX_HALF;
3911 break;
3912
3913 case MII_TG3_AUX_STAT_1000FULL:
3914 *speed = SPEED_1000;
3915 *duplex = DUPLEX_FULL;
3916 break;
3917
3918 default:
f07e9af3 3919 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
715116a1
MC
3920 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3921 SPEED_10;
3922 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3923 DUPLEX_HALF;
3924 break;
3925 }
e740522e
MC
3926 *speed = SPEED_UNKNOWN;
3927 *duplex = DUPLEX_UNKNOWN;
1da177e4 3928 break;
855e1111 3929 }
1da177e4
LT
3930}
3931
/* Program the PHY autonegotiation advertisement registers from the
 * ethtool-style @advertise mask and @flowctrl pause bits, including
 * the gigabit control register (unless the PHY is 10/100-only) and
 * the EEE advertisement for EEE-capable PHYs.
 * Returns 0 on success or the first phy write error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 must force master mode. */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while reprogramming the EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		/* Chip-specific DSP fixups for EEE. */
		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always release SMDSP; report its error only if the
		 * main sequence succeeded.
		 */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4004
/* Start copper link bring-up.  In autoneg mode (or while the PHY is in
 * its low-power state) this programs the advertisement and restarts
 * autonegotiation; in forced mode it writes BMCR directly, first
 * bouncing the link via loopback so the partner notices the change.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* In low-power mode advertise only the slow modes
			 * needed for wake-up (plus 100Mb if WOL allows).
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and wait (up
			 * to ~15ms) for BMSR to report link loss before
			 * applying the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4077
4078static int tg3_init_5401phy_dsp(struct tg3 *tp)
4079{
4080 int err;
4081
4082 /* Turn off tap power management. */
4083 /* Set Extended packet length bit */
b4bd2929 4084 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
1da177e4 4085
6ee7c0a0
MC
4086 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4087 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4088 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4089 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4090 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
1da177e4
LT
4091
4092 udelay(40);
4093
4094 return err;
4095}
4096
/* Check whether the PHY's current advertisement registers match what
 * the driver wants to advertise.  *@lcladv is filled with the raw
 * MII_ADVERTISE value read from the PHY.  Returns true when both the
 * 10/100 advertisement and (for gigabit-capable PHYs) the 1000BASE-T
 * control register agree with the requested configuration.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause bits only matter when running full duplex. */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		/* 5701 A0/B0 force master mode, so compare those bits
		 * too; otherwise compare only the 1000BASE-T ability
		 * bits.
		 */
		if (tgtadv &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4140
859edb26
MC
4141static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4142{
4143 u32 lpeth = 0;
4144
4145 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4146 u32 val;
4147
4148 if (tg3_readphy(tp, MII_STAT1000, &val))
4149 return false;
4150
4151 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4152 }
4153
4154 if (tg3_readphy(tp, MII_LPA, rmtadv))
4155 return false;
4156
4157 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4158 tp->link_config.rmt_adv = lpeth;
4159
4160 return true;
4161}
4162
1da177e4
LT
/* Bring up (or re-evaluate) the link on copper PHYs: apply per-chip PHY
 * workarounds, poll link/speed/duplex status, resolve flow control, and
 * program the MAC mode registers to match.  Updates netif carrier state
 * and reports link changes.  Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC events and clear latched link-state status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Turn off MI auto-polling while we manipulate the PHY directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is latched; read twice to get current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down on a 5401: reload the DSP sequence and
			 * wait up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a full reset plus DSP
			 * reload if link still has not come back.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... (ISTAT is read-to-clear; two reads) */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	/* Assume link down until proven otherwise. */
	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* NOTE(review): bit 10 of the MISCTEST shadow appears to
		 * enable the coupling workaround; if it was clear we set it
		 * and restart link bringup via the relink path.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link (latched BMSR, so double-read each iteration). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode it into
		 * speed/duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable BMCR value (0x7fff == bus read glitch). */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg mode: link counts as up only if the PHY is
			 * advertising what we configured and we can fetch the
			 * partner's abilities.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: everything must match the request. */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Latch crossover (MDI-X) status from the PHY. */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		/* Kick off (re)negotiation, then re-check link once. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program MAC port mode to match the negotiated speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: notify firmware of the link
	 * via the NIC SRAM mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		/* CLKREQ must be off at 10/100, on otherwise. */
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	/* Propagate any carrier change to the net stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
4447
/* Software state for the fiber autonegotiation state machine driven by
 * tg3_fiber_aneg_smachine().  One instance lives on the stack for the
 * duration of fiber_autoneg().
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;	/* MR_* management-register style status bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters (incremented per state-machine invocation). */
	unsigned long link_time, cur_time;

	/* Last received config word and how many times it repeated. */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Transmitted and received config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must hold before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
4511
/* One tick of the software fiber autonegotiation state machine.
 * Samples the received config word from the MAC, updates the match
 * counters in *ap, then advances ap->state.  Returns ANEG_OK to keep
 * ticking, ANEG_TIMER_ENAB when a settle timer is running, ANEG_DONE on
 * completion, or ANEG_FAILED.  Called in a tight loop by
 * fiber_autoneg().
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: zero all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word; "ability match" means the same
	 * non-idle word has been seen more than once in a row.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart negotiation. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus the configured pause bits. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's ability by setting ACK in our word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner dropped to idle: start over. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's config word into MR_LP_ADV_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented; only
				 * proceed when neither side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; wait for idle on the wire. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4763
5be73b47 4764static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
1da177e4
LT
4765{
4766 int res = 0;
4767 struct tg3_fiber_aneginfo aninfo;
4768 int status = ANEG_FAILED;
4769 unsigned int tick;
4770 u32 tmp;
4771
4772 tw32_f(MAC_TX_AUTO_NEG, 0);
4773
4774 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4775 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4776 udelay(40);
4777
4778 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4779 udelay(40);
4780
4781 memset(&aninfo, 0, sizeof(aninfo));
4782 aninfo.flags |= MR_AN_ENABLE;
4783 aninfo.state = ANEG_STATE_UNKNOWN;
4784 aninfo.cur_time = 0;
4785 tick = 0;
4786 while (++tick < 195000) {
4787 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4788 if (status == ANEG_DONE || status == ANEG_FAILED)
4789 break;
4790
4791 udelay(1);
4792 }
4793
4794 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4795 tw32_f(MAC_MODE, tp->mac_mode);
4796 udelay(40);
4797
5be73b47
MC
4798 *txflags = aninfo.txconfig;
4799 *rxflags = aninfo.flags;
1da177e4
LT
4800
4801 if (status == ANEG_DONE &&
4802 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4803 MR_LP_ADV_FULL_DUPLEX)))
4804 res = 1;
4805
4806 return res;
4807}
4808
4809static void tg3_init_bcm8002(struct tg3 *tp)
4810{
4811 u32 mac_status = tr32(MAC_STATUS);
4812 int i;
4813
4814 /* Reset when initting first time or we have a link. */
63c3a66f 4815 if (tg3_flag(tp, INIT_COMPLETE) &&
1da177e4
LT
4816 !(mac_status & MAC_STATUS_PCS_SYNCED))
4817 return;
4818
4819 /* Set PLL lock range. */
4820 tg3_writephy(tp, 0x16, 0x8007);
4821
4822 /* SW reset */
4823 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4824
4825 /* Wait for reset to complete. */
4826 /* XXX schedule_timeout() ... */
4827 for (i = 0; i < 500; i++)
4828 udelay(10);
4829
4830 /* Config mode; select PMA/Ch 1 regs. */
4831 tg3_writephy(tp, 0x10, 0x8411);
4832
4833 /* Enable auto-lock and comdet, select txclk for tx. */
4834 tg3_writephy(tp, 0x11, 0x0a10);
4835
4836 tg3_writephy(tp, 0x18, 0x00a0);
4837 tg3_writephy(tp, 0x16, 0x41ff);
4838
4839 /* Assert and deassert POR. */
4840 tg3_writephy(tp, 0x13, 0x0400);
4841 udelay(40);
4842 tg3_writephy(tp, 0x13, 0x0000);
4843
4844 tg3_writephy(tp, 0x11, 0x0a50);
4845 udelay(40);
4846 tg3_writephy(tp, 0x11, 0x0a10);
4847
4848 /* Wait for signal to stabilize */
4849 /* XXX schedule_timeout() ... */
4850 for (i = 0; i < 15000; i++)
4851 udelay(10);
4852
4853 /* Deselect the channel register so we can read the PHYID
4854 * later.
4855 */
4856 tg3_writephy(tp, 0x10, 0x8011);
4857}
4858
/* Manage fiber link bring-up using the hardware SG_DIG autoneg engine
 * (5704S-class parts).  Handles forced mode, hardware autoneg, the
 * serdes_cfg pre-emphasis workaround for non-5704-A0/A1 chips, and
 * parallel-detection fallback when the partner does not autoneg.
 * Returns 1 when link is considered up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down HW autoneg if it was active, then
		 * declare link up on PCS sync alone.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Per-port serdes_cfg magic values. */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While in parallel-detect grace period, keep reporting link
		 * up as long as PCS is synced and no config words arrive.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		/* (Re)program the autoneg engine: soft reset, settle, then
		 * the desired control value.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: resolve pause from the local
			 * control bits and the partner status bits.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: try parallel detection
				 * by dropping back to common setup.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: restart the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5003
/* Fiber link bring-up for parts without the hardware autoneg engine:
 * run the software state machine (fiber_autoneg) when autoneg is
 * enabled, otherwise force a 1000FD link.  Returns 1 when link is
 * considered up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Nothing to do without PCS sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the raw config-word pause bits into
			 * 1000BASE-X advertisement form for flow control
			 * resolution.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Flush latched sync/config-change status (bounded loop). */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but PCS is synced and the partner is not
		 * sending config words: treat link as up (parallel detect).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5068
/* Top-level fiber (TBI) link setup: fast-path out if the link is
 * already healthy, otherwise put the MAC into TBI mode, run hardware or
 * software autoneg, and program speed/duplex/LED state accordingly.
 * Updates netif carrier state and reports link or flow-control changes.
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current settings so we can report changes at the end. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, and the MAC
	 * reports a clean synced link — just ack status and return.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC port into TBI mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-change bit in the status block while keeping the
	 * updated flag set.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched status until it stays clear (bounded loop). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to provoke the partner. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber links are always 1000FD when up. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged, but report if pause/speed/duplex moved. */
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5177
747e8f8b
MC
/* Bring up / re-negotiate the link on MII-attached SERDES (fiber) PHYs.
 *
 * @tp:          driver private state
 * @force_reset: non-zero to hard-reset the PHY before configuring it
 *
 * Returns 0 on success, or the OR-accumulated error from the tg3_readphy()
 * calls.  Updates tp->link_config.active_speed/active_duplex/rmt_adv and
 * the netif carrier state, and reports link changes via tg3_link_report().
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	/* Force the MAC into GMII port mode before touching the PHY. */
	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any latched link/config change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* Read BMSR twice: link-status is latched-low, so the first read
	 * returns stale state and the second the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC TX status is authoritative for link up;
		 * override the PHY-reported bit.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000Base-X advertisement word from the
		 * requested flow-control and advertising settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * write it and restart autonegotiation, then let
			 * the timer-driven state machine finish the job.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced-mode path: program speed/duplex directly. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
						    BMCR_ANRESTART |
						    BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Double read again for the latched-low bit. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of local
			 * and link-partner advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	/* Flow control only applies on a full-duplex link. */
	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5349
/* Periodic (timer-driven) SERDES parallel-detection state machine.
 *
 * While tp->serdes_counter is non-zero, autonegotiation is still being
 * given time to complete and this function only decrements the counter.
 * Afterwards:
 *  - if there is no carrier and autoneg is enabled, probe the PHY's
 *    shadow/expansion registers; signal-detect without config code words
 *    means the partner is not autonegotiating, so force 1000/full
 *    ("parallel detect") and remember that in phy_flags;
 *  - if the link came up via parallel detect and config code words later
 *    appear, re-enable autonegotiation and clear the flag.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; presumably the first read clears
			 * latched state -- TODO confirm against PHY docs.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5409
1da177e4
LT
/* Top-level PHY/link setup dispatcher.
 *
 * Selects the fiber, fiber-MII, or copper setup routine based on
 * tp->phy_flags, then applies post-link MAC fixups: 5784_AX clock
 * prescaler, MAC_TX_LENGTHS slot-time/IPG programming (longer slot time
 * for 1000/half), statistics-coalescing ticks on pre-5705 parts, and the
 * PCIe L1 power-management threshold when the ASPM workaround is active.
 *
 * Returns the error code from the selected tg3_setup_*_phy() routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Derive the GRC prescaler from the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		/* 5720 keeps extra fields in MAC_TX_LENGTHS; preserve them. */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5474
66cfd1bd
MC
/* Return non-zero while interrupts are being synchronized/disabled,
 * so poll paths can tell whether it is safe to touch the hardware.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5479
97bd8e49
MC
/* Copy @len bytes of registers starting at register offset @off into the
 * dump buffer @dst, placing them at byte offset @off within the buffer so
 * the buffer layout mirrors the register map.  @len must be a multiple of
 * sizeof(u32).
 */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	/* Index the destination by the same byte offset as the registers. */
	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
5488
/* Snapshot the register blocks of a legacy (non-PCI-Express) chip into
 * @regs (a TG3_REG_BLK_SIZE buffer laid out to mirror the register map).
 * Each tg3_rd32_loop() call copies one functional block: mailboxes, MAC,
 * send/receive data engines, buffer manager, DMA engines, RX/TX CPUs,
 * GRC, and (where present) MSI-X vector state and NVRAM registers.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5538
/* Debug helper: dump the chip's register space and each NAPI vector's
 * hardware status block and software ring indices to the kernel log.
 * Uses GFP_ATOMIC since it may be called from non-sleepable error paths;
 * silently returns (after one log line) if allocation fails.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5596
df3e6548
MC
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* This path is only reachable when the reorder workaround is NOT
	 * already active and mailbox writes are direct -- anything else
	 * indicates a driver logic error.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under tp->lock; the actual chip reset
	 * happens later (per the comment above, in the workqueue).
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5618
/* Number of free TX descriptors on this NAPI vector's ring: the
 * configured pending budget minus the descriptors currently in flight
 * (prod - cons, wrapped to the ring size).
 */
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
5626
1da177e4
LT
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
/* Reclaim completed TX descriptors for one NAPI vector: unmap each
 * transmitted skb (head and fragments), free it, update tx_cons and the
 * BQL queue, and wake the netdev queue if enough space was freed.
 * Calls tg3_tx_recover() and bails out if the ring state looks corrupt
 * (NULL skb at the consumer, or a fragment overrunning the HW index).
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, vector 0 carries no TX ring, so TX queue numbering
	 * is offset by one relative to the NAPI vector index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over descriptors that were split to work around
		 * DMA limitations (marked "fragmented" at xmit time).
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX lock to avoid racing with tg3_start_xmit()
	 * stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5720
8d4057a9
ED
5721static void tg3_frag_free(bool is_frag, void *data)
5722{
5723 if (is_frag)
5724 put_page(virt_to_head_page(data));
5725 else
5726 kfree(data);
5727}
5728
/* Unmap and free one RX ring buffer (no-op if the slot is empty).
 * Recomputes the allocation size the same way tg3_alloc_rx_data() did so
 * tg3_frag_free() can tell page-fragment from kmalloc buffers, then
 * clears ri->data to mark the slot free.
 */
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
5742
8d4057a9 5743
1da177e4
LT
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
/* @opaque_key selects the standard or jumbo producer ring;
 * @dest_idx_unmasked is masked with the corresponding ring mask.
 * On success, *frag_size is the allocation size for page-fragment
 * buffers or 0 for kmalloc buffers (so the free path picks the right
 * deallocator), and the buffer's DMA address is written into the BD.
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
5819
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
/* Re-post an already-mapped buffer from NAPI vector 0's producer ring
 * (the shared source, spr) to @dpr at @dest_idx_unmasked, transferring
 * data pointer, DMA cookie, and BD address without re-allocating.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
5869
1da177e4
LT
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* NAPI RX poll handler for one vector: consume up to @budget completed
 * packets from the return ring, hand them to the stack via GRO, repost
 * buffers, and update the consumer/producer mailboxes.  Returns the
 * number of packets processed.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie tells us which producer ring (and
		 * slot) the chip took this buffer from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			/* Large packet: hand the existing buffer to the
			 * stack and replace it with a fresh one.
			 */
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small packet: copy into a fresh skb and recycle
			 * the DMA buffer back to the producer ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* Trust the hardware checksum only when RXCSUM is enabled
		 * and the chip reports a complete TCP/UDP checksum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames (beyond MTU) unless VLAN tagged. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush the standard producer index so the
		 * chip does not starve for buffers mid-poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, vector 1 owns the refill work; kick it. */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6086
/* Check the status block for a link-state change event and service it.
 * Only runs when neither the link-change register nor SERDES polling is
 * in use.  With phylib attached, just acknowledge the MAC status bits;
 * otherwise run the full tg3_setup_phy() renegotiation under tp->lock.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Acknowledge the event in the status block. */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6110
f89f38b8
MC
6111static int tg3_rx_prodring_xfer(struct tg3 *tp,
6112 struct tg3_rx_prodring_set *dpr,
6113 struct tg3_rx_prodring_set *spr)
b196c7e4
MC
6114{
6115 u32 si, di, cpycnt, src_prod_idx;
f89f38b8 6116 int i, err = 0;
b196c7e4
MC
6117
6118 while (1) {
6119 src_prod_idx = spr->rx_std_prod_idx;
6120
6121 /* Make sure updates to the rx_std_buffers[] entries and the
6122 * standard producer index are seen in the correct order.
6123 */
6124 smp_rmb();
6125
6126 if (spr->rx_std_cons_idx == src_prod_idx)
6127 break;
6128
6129 if (spr->rx_std_cons_idx < src_prod_idx)
6130 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6131 else
2c49a44d
MC
6132 cpycnt = tp->rx_std_ring_mask + 1 -
6133 spr->rx_std_cons_idx;
b196c7e4 6134
2c49a44d
MC
6135 cpycnt = min(cpycnt,
6136 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
b196c7e4
MC
6137
6138 si = spr->rx_std_cons_idx;
6139 di = dpr->rx_std_prod_idx;
6140
e92967bf 6141 for (i = di; i < di + cpycnt; i++) {
9205fd9c 6142 if (dpr->rx_std_buffers[i].data) {
e92967bf 6143 cpycnt = i - di;
f89f38b8 6144 err = -ENOSPC;
e92967bf
MC
6145 break;
6146 }
6147 }
6148
6149 if (!cpycnt)
6150 break;
6151
6152 /* Ensure that updates to the rx_std_buffers ring and the
6153 * shadowed hardware producer ring from tg3_recycle_skb() are
6154 * ordered correctly WRT the skb check above.
6155 */
6156 smp_rmb();
6157
b196c7e4
MC
6158 memcpy(&dpr->rx_std_buffers[di],
6159 &spr->rx_std_buffers[si],
6160 cpycnt * sizeof(struct ring_info));
6161
6162 for (i = 0; i < cpycnt; i++, di++, si++) {
6163 struct tg3_rx_buffer_desc *sbd, *dbd;
6164 sbd = &spr->rx_std[si];
6165 dbd = &dpr->rx_std[di];
6166 dbd->addr_hi = sbd->addr_hi;
6167 dbd->addr_lo = sbd->addr_lo;
6168 }
6169
2c49a44d
MC
6170 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6171 tp->rx_std_ring_mask;
6172 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6173 tp->rx_std_ring_mask;
b196c7e4
MC
6174 }
6175
6176 while (1) {
6177 src_prod_idx = spr->rx_jmb_prod_idx;
6178
6179 /* Make sure updates to the rx_jmb_buffers[] entries and
6180 * the jumbo producer index are seen in the correct order.
6181 */
6182 smp_rmb();
6183
6184 if (spr->rx_jmb_cons_idx == src_prod_idx)
6185 break;
6186
6187 if (spr->rx_jmb_cons_idx < src_prod_idx)
6188 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6189 else
2c49a44d
MC
6190 cpycnt = tp->rx_jmb_ring_mask + 1 -
6191 spr->rx_jmb_cons_idx;
b196c7e4
MC
6192
6193 cpycnt = min(cpycnt,
2c49a44d 6194 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
b196c7e4
MC
6195
6196 si = spr->rx_jmb_cons_idx;
6197 di = dpr->rx_jmb_prod_idx;
6198
e92967bf 6199 for (i = di; i < di + cpycnt; i++) {
9205fd9c 6200 if (dpr->rx_jmb_buffers[i].data) {
e92967bf 6201 cpycnt = i - di;
f89f38b8 6202 err = -ENOSPC;
e92967bf
MC
6203 break;
6204 }
6205 }
6206
6207 if (!cpycnt)
6208 break;
6209
6210 /* Ensure that updates to the rx_jmb_buffers ring and the
6211 * shadowed hardware producer ring from tg3_recycle_skb() are
6212 * ordered correctly WRT the skb check above.
6213 */
6214 smp_rmb();
6215
b196c7e4
MC
6216 memcpy(&dpr->rx_jmb_buffers[di],
6217 &spr->rx_jmb_buffers[si],
6218 cpycnt * sizeof(struct ring_info));
6219
6220 for (i = 0; i < cpycnt; i++, di++, si++) {
6221 struct tg3_rx_buffer_desc *sbd, *dbd;
6222 sbd = &spr->rx_jmb[si].std;
6223 dbd = &dpr->rx_jmb[di].std;
6224 dbd->addr_hi = sbd->addr_hi;
6225 dbd->addr_lo = sbd->addr_lo;
6226 }
6227
2c49a44d
MC
6228 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6229 tp->rx_jmb_ring_mask;
6230 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6231 tp->rx_jmb_ring_mask;
b196c7e4 6232 }
f89f38b8
MC
6233
6234 return err;
b196c7e4
MC
6235}
6236
35f2d7d0
MC
6237static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6238{
6239 struct tg3 *tp = tnapi->tp;
1da177e4
LT
6240
6241 /* run TX completion thread */
f3f3f27e 6242 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
17375d25 6243 tg3_tx(tnapi);
63c3a66f 6244 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
4fd7ab59 6245 return work_done;
1da177e4
LT
6246 }
6247
f891ea16
MC
6248 if (!tnapi->rx_rcb_prod_idx)
6249 return work_done;
6250
1da177e4
LT
6251 /* run RX thread, within the bounds set by NAPI.
6252 * All RX "locking" is done by ensuring outside
bea3348e 6253 * code synchronizes with tg3->napi.poll()
1da177e4 6254 */
8d9d7cfc 6255 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
17375d25 6256 work_done += tg3_rx(tnapi, budget - work_done);
1da177e4 6257
63c3a66f 6258 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
8fea32b9 6259 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
f89f38b8 6260 int i, err = 0;
e4af1af9
MC
6261 u32 std_prod_idx = dpr->rx_std_prod_idx;
6262 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
b196c7e4 6263
7ae52890 6264 tp->rx_refill = false;
9102426a 6265 for (i = 1; i <= tp->rxq_cnt; i++)
f89f38b8 6266 err |= tg3_rx_prodring_xfer(tp, dpr,
8fea32b9 6267 &tp->napi[i].prodring);
b196c7e4
MC
6268
6269 wmb();
6270
e4af1af9
MC
6271 if (std_prod_idx != dpr->rx_std_prod_idx)
6272 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6273 dpr->rx_std_prod_idx);
b196c7e4 6274
e4af1af9
MC
6275 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6276 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6277 dpr->rx_jmb_prod_idx);
b196c7e4
MC
6278
6279 mmiowb();
f89f38b8
MC
6280
6281 if (err)
6282 tw32_f(HOSTCC_MODE, tp->coal_now);
b196c7e4
MC
6283 }
6284
6f535763
DM
6285 return work_done;
6286}
6287
db219973
MC
6288static inline void tg3_reset_task_schedule(struct tg3 *tp)
6289{
6290 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6291 schedule_work(&tp->reset_task);
6292}
6293
6294static inline void tg3_reset_task_cancel(struct tg3 *tp)
6295{
6296 cancel_work_sync(&tp->reset_task);
6297 tg3_flag_clear(tp, RESET_TASK_PENDING);
c7101359 6298 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
db219973
MC
6299}
6300
35f2d7d0
MC
6301static int tg3_poll_msix(struct napi_struct *napi, int budget)
6302{
6303 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6304 struct tg3 *tp = tnapi->tp;
6305 int work_done = 0;
6306 struct tg3_hw_status *sblk = tnapi->hw_status;
6307
6308 while (1) {
6309 work_done = tg3_poll_work(tnapi, work_done, budget);
6310
63c3a66f 6311 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
35f2d7d0
MC
6312 goto tx_recovery;
6313
6314 if (unlikely(work_done >= budget))
6315 break;
6316
c6cdf436 6317 /* tp->last_tag is used in tg3_int_reenable() below
35f2d7d0
MC
6318 * to tell the hw how much work has been processed,
6319 * so we must read it before checking for more work.
6320 */
6321 tnapi->last_tag = sblk->status_tag;
6322 tnapi->last_irq_tag = tnapi->last_tag;
6323 rmb();
6324
6325 /* check for RX/TX work to do */
6d40db7b
MC
6326 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6327 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7ae52890
MC
6328
6329 /* This test here is not race free, but will reduce
6330 * the number of interrupts by looping again.
6331 */
6332 if (tnapi == &tp->napi[1] && tp->rx_refill)
6333 continue;
6334
35f2d7d0
MC
6335 napi_complete(napi);
6336 /* Reenable interrupts. */
6337 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7ae52890
MC
6338
6339 /* This test here is synchronized by napi_schedule()
6340 * and napi_complete() to close the race condition.
6341 */
6342 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6343 tw32(HOSTCC_MODE, tp->coalesce_mode |
6344 HOSTCC_MODE_ENABLE |
6345 tnapi->coal_now);
6346 }
35f2d7d0
MC
6347 mmiowb();
6348 break;
6349 }
6350 }
6351
6352 return work_done;
6353
6354tx_recovery:
6355 /* work_done is guaranteed to be less than budget. */
6356 napi_complete(napi);
db219973 6357 tg3_reset_task_schedule(tp);
35f2d7d0
MC
6358 return work_done;
6359}
6360
e64de4e6
MC
6361static void tg3_process_error(struct tg3 *tp)
6362{
6363 u32 val;
6364 bool real_error = false;
6365
63c3a66f 6366 if (tg3_flag(tp, ERROR_PROCESSED))
e64de4e6
MC
6367 return;
6368
6369 /* Check Flow Attention register */
6370 val = tr32(HOSTCC_FLOW_ATTN);
6371 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6372 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6373 real_error = true;
6374 }
6375
6376 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6377 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6378 real_error = true;
6379 }
6380
6381 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6382 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6383 real_error = true;
6384 }
6385
6386 if (!real_error)
6387 return;
6388
6389 tg3_dump_state(tp);
6390
63c3a66f 6391 tg3_flag_set(tp, ERROR_PROCESSED);
db219973 6392 tg3_reset_task_schedule(tp);
e64de4e6
MC
6393}
6394
6f535763
DM
6395static int tg3_poll(struct napi_struct *napi, int budget)
6396{
8ef0442f
MC
6397 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6398 struct tg3 *tp = tnapi->tp;
6f535763 6399 int work_done = 0;
898a56f8 6400 struct tg3_hw_status *sblk = tnapi->hw_status;
6f535763
DM
6401
6402 while (1) {
e64de4e6
MC
6403 if (sblk->status & SD_STATUS_ERROR)
6404 tg3_process_error(tp);
6405
35f2d7d0
MC
6406 tg3_poll_link(tp);
6407
17375d25 6408 work_done = tg3_poll_work(tnapi, work_done, budget);
6f535763 6409
63c3a66f 6410 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6f535763
DM
6411 goto tx_recovery;
6412
6413 if (unlikely(work_done >= budget))
6414 break;
6415
63c3a66f 6416 if (tg3_flag(tp, TAGGED_STATUS)) {
17375d25 6417 /* tp->last_tag is used in tg3_int_reenable() below
4fd7ab59
MC
6418 * to tell the hw how much work has been processed,
6419 * so we must read it before checking for more work.
6420 */
898a56f8
MC
6421 tnapi->last_tag = sblk->status_tag;
6422 tnapi->last_irq_tag = tnapi->last_tag;
4fd7ab59
MC
6423 rmb();
6424 } else
6425 sblk->status &= ~SD_STATUS_UPDATED;
6f535763 6426
17375d25 6427 if (likely(!tg3_has_work(tnapi))) {
288379f0 6428 napi_complete(napi);
17375d25 6429 tg3_int_reenable(tnapi);
6f535763
DM
6430 break;
6431 }
1da177e4
LT
6432 }
6433
bea3348e 6434 return work_done;
6f535763
DM
6435
6436tx_recovery:
4fd7ab59 6437 /* work_done is guaranteed to be less than budget. */
288379f0 6438 napi_complete(napi);
db219973 6439 tg3_reset_task_schedule(tp);
4fd7ab59 6440 return work_done;
1da177e4
LT
6441}
6442
66cfd1bd
MC
6443static void tg3_napi_disable(struct tg3 *tp)
6444{
6445 int i;
6446
6447 for (i = tp->irq_cnt - 1; i >= 0; i--)
6448 napi_disable(&tp->napi[i].napi);
6449}
6450
6451static void tg3_napi_enable(struct tg3 *tp)
6452{
6453 int i;
6454
6455 for (i = 0; i < tp->irq_cnt; i++)
6456 napi_enable(&tp->napi[i].napi);
6457}
6458
6459static void tg3_napi_init(struct tg3 *tp)
6460{
6461 int i;
6462
6463 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6464 for (i = 1; i < tp->irq_cnt; i++)
6465 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6466}
6467
6468static void tg3_napi_fini(struct tg3 *tp)
6469{
6470 int i;
6471
6472 for (i = 0; i < tp->irq_cnt; i++)
6473 netif_napi_del(&tp->napi[i].napi);
6474}
6475
6476static inline void tg3_netif_stop(struct tg3 *tp)
6477{
6478 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6479 tg3_napi_disable(tp);
6480 netif_tx_disable(tp->dev);
6481}
6482
6483static inline void tg3_netif_start(struct tg3 *tp)
6484{
6485 /* NOTE: unconditional netif_tx_wake_all_queues is only
6486 * appropriate so long as all callers are assured to
6487 * have free tx slots (such as after tg3_init_hw)
6488 */
6489 netif_tx_wake_all_queues(tp->dev);
6490
6491 tg3_napi_enable(tp);
6492 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6493 tg3_enable_ints(tp);
6494}
6495
f47c11ee
DM
6496static void tg3_irq_quiesce(struct tg3 *tp)
6497{
4f125f42
MC
6498 int i;
6499
f47c11ee
DM
6500 BUG_ON(tp->irq_sync);
6501
6502 tp->irq_sync = 1;
6503 smp_mb();
6504
4f125f42
MC
6505 for (i = 0; i < tp->irq_cnt; i++)
6506 synchronize_irq(tp->napi[i].irq_vec);
f47c11ee
DM
6507}
6508
f47c11ee
DM
6509/* Fully shutdown all tg3 driver activity elsewhere in the system.
6510 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6511 * with as well. Most of the time, this is not necessary except when
6512 * shutting down the device.
6513 */
6514static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6515{
46966545 6516 spin_lock_bh(&tp->lock);
f47c11ee
DM
6517 if (irq_sync)
6518 tg3_irq_quiesce(tp);
f47c11ee
DM
6519}
6520
6521static inline void tg3_full_unlock(struct tg3 *tp)
6522{
f47c11ee
DM
6523 spin_unlock_bh(&tp->lock);
6524}
6525
fcfa0a32
MC
6526/* One-shot MSI handler - Chip automatically disables interrupt
6527 * after sending MSI so driver doesn't have to do it.
6528 */
7d12e780 6529static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
fcfa0a32 6530{
09943a18
MC
6531 struct tg3_napi *tnapi = dev_id;
6532 struct tg3 *tp = tnapi->tp;
fcfa0a32 6533
898a56f8 6534 prefetch(tnapi->hw_status);
0c1d0e2b
MC
6535 if (tnapi->rx_rcb)
6536 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
fcfa0a32
MC
6537
6538 if (likely(!tg3_irq_sync(tp)))
09943a18 6539 napi_schedule(&tnapi->napi);
fcfa0a32
MC
6540
6541 return IRQ_HANDLED;
6542}
6543
88b06bc2
MC
6544/* MSI ISR - No need to check for interrupt sharing and no need to
6545 * flush status block and interrupt mailbox. PCI ordering rules
6546 * guarantee that MSI will arrive after the status block.
6547 */
7d12e780 6548static irqreturn_t tg3_msi(int irq, void *dev_id)
88b06bc2 6549{
09943a18
MC
6550 struct tg3_napi *tnapi = dev_id;
6551 struct tg3 *tp = tnapi->tp;
88b06bc2 6552
898a56f8 6553 prefetch(tnapi->hw_status);
0c1d0e2b
MC
6554 if (tnapi->rx_rcb)
6555 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
88b06bc2 6556 /*
fac9b83e 6557 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 6558 * chip-internal interrupt pending events.
fac9b83e 6559 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
6560 * NIC to stop sending us irqs, engaging "in-intr-handler"
6561 * event coalescing.
6562 */
5b39de91 6563 tw32_mailbox(tnapi->int_mbox, 0x00000001);
61487480 6564 if (likely(!tg3_irq_sync(tp)))
09943a18 6565 napi_schedule(&tnapi->napi);
61487480 6566
88b06bc2
MC
6567 return IRQ_RETVAL(1);
6568}
6569
7d12e780 6570static irqreturn_t tg3_interrupt(int irq, void *dev_id)
1da177e4 6571{
09943a18
MC
6572 struct tg3_napi *tnapi = dev_id;
6573 struct tg3 *tp = tnapi->tp;
898a56f8 6574 struct tg3_hw_status *sblk = tnapi->hw_status;
1da177e4
LT
6575 unsigned int handled = 1;
6576
1da177e4
LT
6577 /* In INTx mode, it is possible for the interrupt to arrive at
6578 * the CPU before the status block posted prior to the interrupt.
6579 * Reading the PCI State register will confirm whether the
6580 * interrupt is ours and will flush the status block.
6581 */
d18edcb2 6582 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
63c3a66f 6583 if (tg3_flag(tp, CHIP_RESETTING) ||
d18edcb2
MC
6584 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6585 handled = 0;
f47c11ee 6586 goto out;
fac9b83e 6587 }
d18edcb2
MC
6588 }
6589
6590 /*
6591 * Writing any value to intr-mbox-0 clears PCI INTA# and
6592 * chip-internal interrupt pending events.
6593 * Writing non-zero to intr-mbox-0 additional tells the
6594 * NIC to stop sending us irqs, engaging "in-intr-handler"
6595 * event coalescing.
c04cb347
MC
6596 *
6597 * Flush the mailbox to de-assert the IRQ immediately to prevent
6598 * spurious interrupts. The flush impacts performance but
6599 * excessive spurious interrupts can be worse in some cases.
d18edcb2 6600 */
c04cb347 6601 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
d18edcb2
MC
6602 if (tg3_irq_sync(tp))
6603 goto out;
6604 sblk->status &= ~SD_STATUS_UPDATED;
17375d25 6605 if (likely(tg3_has_work(tnapi))) {
72334482 6606 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
09943a18 6607 napi_schedule(&tnapi->napi);
d18edcb2
MC
6608 } else {
6609 /* No work, shared interrupt perhaps? re-enable
6610 * interrupts, and flush that PCI write
6611 */
6612 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6613 0x00000000);
fac9b83e 6614 }
f47c11ee 6615out:
fac9b83e
DM
6616 return IRQ_RETVAL(handled);
6617}
6618
7d12e780 6619static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
fac9b83e 6620{
09943a18
MC
6621 struct tg3_napi *tnapi = dev_id;
6622 struct tg3 *tp = tnapi->tp;
898a56f8 6623 struct tg3_hw_status *sblk = tnapi->hw_status;
fac9b83e
DM
6624 unsigned int handled = 1;
6625
fac9b83e
DM
6626 /* In INTx mode, it is possible for the interrupt to arrive at
6627 * the CPU before the status block posted prior to the interrupt.
6628 * Reading the PCI State register will confirm whether the
6629 * interrupt is ours and will flush the status block.
6630 */
898a56f8 6631 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
63c3a66f 6632 if (tg3_flag(tp, CHIP_RESETTING) ||
d18edcb2
MC
6633 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6634 handled = 0;
f47c11ee 6635 goto out;
1da177e4 6636 }
d18edcb2
MC
6637 }
6638
6639 /*
6640 * writing any value to intr-mbox-0 clears PCI INTA# and
6641 * chip-internal interrupt pending events.
6642 * writing non-zero to intr-mbox-0 additional tells the
6643 * NIC to stop sending us irqs, engaging "in-intr-handler"
6644 * event coalescing.
c04cb347
MC
6645 *
6646 * Flush the mailbox to de-assert the IRQ immediately to prevent
6647 * spurious interrupts. The flush impacts performance but
6648 * excessive spurious interrupts can be worse in some cases.
d18edcb2 6649 */
c04cb347 6650 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
624f8e50
MC
6651
6652 /*
6653 * In a shared interrupt configuration, sometimes other devices'
6654 * interrupts will scream. We record the current status tag here
6655 * so that the above check can report that the screaming interrupts
6656 * are unhandled. Eventually they will be silenced.
6657 */
898a56f8 6658 tnapi->last_irq_tag = sblk->status_tag;
624f8e50 6659
d18edcb2
MC
6660 if (tg3_irq_sync(tp))
6661 goto out;
624f8e50 6662
72334482 6663 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
624f8e50 6664
09943a18 6665 napi_schedule(&tnapi->napi);
624f8e50 6666
f47c11ee 6667out:
1da177e4
LT
6668 return IRQ_RETVAL(handled);
6669}
6670
7938109f 6671/* ISR for interrupt test */
7d12e780 6672static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7938109f 6673{
09943a18
MC
6674 struct tg3_napi *tnapi = dev_id;
6675 struct tg3 *tp = tnapi->tp;
898a56f8 6676 struct tg3_hw_status *sblk = tnapi->hw_status;
7938109f 6677
f9804ddb
MC
6678 if ((sblk->status & SD_STATUS_UPDATED) ||
6679 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
b16250e3 6680 tg3_disable_ints(tp);
7938109f
MC
6681 return IRQ_RETVAL(1);
6682 }
6683 return IRQ_RETVAL(0);
6684}
6685
1da177e4
LT
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler for every vector so the
 * device can make progress with interrupts disabled (e.g. netconsole).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
6696
/* net_device watchdog callback: log/dump state (if tx_err messages are
 * enabled) and schedule a full chip reset.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
6708
c58ec932
MC
6709/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6710static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6711{
6712 u32 base = (u32) mapping & 0xffffffff;
6713
807540ba 6714 return (base > 0xffffdcc0) && (base + len + 8 < base);
c58ec932
MC
6715}
6716
72f2afb8
MC
6717/* Test for DMA addresses > 40-bit */
6718static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6719 int len)
6720{
6721#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
63c3a66f 6722 if (tg3_flag(tp, 40BIT_DMA_BUG))
807540ba 6723 return ((u64) mapping + len) > DMA_BIT_MASK(40);
72f2afb8
MC
6724 return 0;
6725#else
6726 return 0;
6727#endif
6728}
6729
d1a3b737 6730static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
92cd3a17
MC
6731 dma_addr_t mapping, u32 len, u32 flags,
6732 u32 mss, u32 vlan)
2ffcc981 6733{
92cd3a17
MC
6734 txbd->addr_hi = ((u64) mapping >> 32);
6735 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6736 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6737 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
2ffcc981 6738}
1da177e4 6739
84b67b27 6740static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
d1a3b737
MC
6741 dma_addr_t map, u32 len, u32 flags,
6742 u32 mss, u32 vlan)
6743{
6744 struct tg3 *tp = tnapi->tp;
6745 bool hwbug = false;
6746
6747 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
3db1cd5c 6748 hwbug = true;
d1a3b737
MC
6749
6750 if (tg3_4g_overflow_test(map, len))
3db1cd5c 6751 hwbug = true;
d1a3b737
MC
6752
6753 if (tg3_40bit_overflow_test(tp, map, len))
3db1cd5c 6754 hwbug = true;
d1a3b737 6755
a4cb428d 6756 if (tp->dma_limit) {
b9e45482 6757 u32 prvidx = *entry;
e31aa987 6758 u32 tmp_flag = flags & ~TXD_FLAG_END;
a4cb428d
MC
6759 while (len > tp->dma_limit && *budget) {
6760 u32 frag_len = tp->dma_limit;
6761 len -= tp->dma_limit;
e31aa987 6762
b9e45482
MC
6763 /* Avoid the 8byte DMA problem */
6764 if (len <= 8) {
a4cb428d
MC
6765 len += tp->dma_limit / 2;
6766 frag_len = tp->dma_limit / 2;
e31aa987
MC
6767 }
6768
b9e45482
MC
6769 tnapi->tx_buffers[*entry].fragmented = true;
6770
6771 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6772 frag_len, tmp_flag, mss, vlan);
6773 *budget -= 1;
6774 prvidx = *entry;
6775 *entry = NEXT_TX(*entry);
6776
e31aa987
MC
6777 map += frag_len;
6778 }
6779
6780 if (len) {
6781 if (*budget) {
6782 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6783 len, flags, mss, vlan);
b9e45482 6784 *budget -= 1;
e31aa987
MC
6785 *entry = NEXT_TX(*entry);
6786 } else {
3db1cd5c 6787 hwbug = true;
b9e45482 6788 tnapi->tx_buffers[prvidx].fragmented = false;
e31aa987
MC
6789 }
6790 }
6791 } else {
84b67b27
MC
6792 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6793 len, flags, mss, vlan);
e31aa987
MC
6794 *entry = NEXT_TX(*entry);
6795 }
d1a3b737
MC
6796
6797 return hwbug;
6798}
6799
0d681b27 6800static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
432aa7ed
MC
6801{
6802 int i;
0d681b27 6803 struct sk_buff *skb;
df8944cf 6804 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
432aa7ed 6805
0d681b27
MC
6806 skb = txb->skb;
6807 txb->skb = NULL;
6808
432aa7ed
MC
6809 pci_unmap_single(tnapi->tp->pdev,
6810 dma_unmap_addr(txb, mapping),
6811 skb_headlen(skb),
6812 PCI_DMA_TODEVICE);
e01ee14d
MC
6813
6814 while (txb->fragmented) {
6815 txb->fragmented = false;
6816 entry = NEXT_TX(entry);
6817 txb = &tnapi->tx_buffers[entry];
6818 }
6819
ba1142e4 6820 for (i = 0; i <= last; i++) {
9e903e08 6821 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
432aa7ed
MC
6822
6823 entry = NEXT_TX(entry);
6824 txb = &tnapi->tx_buffers[entry];
6825
6826 pci_unmap_page(tnapi->tp->pdev,
6827 dma_unmap_addr(txb, mapping),
9e903e08 6828 skb_frag_size(frag), PCI_DMA_TODEVICE);
e01ee14d
MC
6829
6830 while (txb->fragmented) {
6831 txb->fragmented = false;
6832 entry = NEXT_TX(entry);
6833 txb = &tnapi->tx_buffers[entry];
6834 }
432aa7ed
MC
6835 }
6836}
6837
72f2afb8 6838/* Workaround 4GB and 40-bit hardware DMA bugs. */
24f4efd4 6839static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
f7ff1987 6840 struct sk_buff **pskb,
84b67b27 6841 u32 *entry, u32 *budget,
92cd3a17 6842 u32 base_flags, u32 mss, u32 vlan)
1da177e4 6843{
24f4efd4 6844 struct tg3 *tp = tnapi->tp;
f7ff1987 6845 struct sk_buff *new_skb, *skb = *pskb;
c58ec932 6846 dma_addr_t new_addr = 0;
432aa7ed 6847 int ret = 0;
1da177e4 6848
41588ba1
MC
6849 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6850 new_skb = skb_copy(skb, GFP_ATOMIC);
6851 else {
6852 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6853
6854 new_skb = skb_copy_expand(skb,
6855 skb_headroom(skb) + more_headroom,
6856 skb_tailroom(skb), GFP_ATOMIC);
6857 }
6858
1da177e4 6859 if (!new_skb) {
c58ec932
MC
6860 ret = -1;
6861 } else {
6862 /* New SKB is guaranteed to be linear. */
f4188d8a
AD
6863 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6864 PCI_DMA_TODEVICE);
6865 /* Make sure the mapping succeeded */
6866 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
f4188d8a 6867 dev_kfree_skb(new_skb);
c58ec932 6868 ret = -1;
c58ec932 6869 } else {
b9e45482
MC
6870 u32 save_entry = *entry;
6871
92cd3a17
MC
6872 base_flags |= TXD_FLAG_END;
6873
84b67b27
MC
6874 tnapi->tx_buffers[*entry].skb = new_skb;
6875 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
432aa7ed
MC
6876 mapping, new_addr);
6877
84b67b27 6878 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
d1a3b737
MC
6879 new_skb->len, base_flags,
6880 mss, vlan)) {
ba1142e4 6881 tg3_tx_skb_unmap(tnapi, save_entry, -1);
d1a3b737
MC
6882 dev_kfree_skb(new_skb);
6883 ret = -1;
6884 }
f4188d8a 6885 }
1da177e4
LT
6886 }
6887
6888 dev_kfree_skb(skb);
f7ff1987 6889 *pskb = new_skb;
c58ec932 6890 return ret;
1da177e4
LT
6891}
6892
2ffcc981 6893static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
52c0fd83
MC
6894
6895/* Use GSO to workaround a rare TSO bug that may be triggered when the
6896 * TSO header is greater than 80 bytes.
6897 */
6898static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6899{
6900 struct sk_buff *segs, *nskb;
f3f3f27e 6901 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
52c0fd83
MC
6902
6903 /* Estimate the number of fragments in the worst case */
f3f3f27e 6904 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
52c0fd83 6905 netif_stop_queue(tp->dev);
f65aac16
MC
6906
6907 /* netif_tx_stop_queue() must be done before checking
6908 * checking tx index in tg3_tx_avail() below, because in
6909 * tg3_tx(), we update tx index before checking for
6910 * netif_tx_queue_stopped().
6911 */
6912 smp_mb();
f3f3f27e 6913 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7f62ad5d
MC
6914 return NETDEV_TX_BUSY;
6915
6916 netif_wake_queue(tp->dev);
52c0fd83
MC
6917 }
6918
6919 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
801678c5 6920 if (IS_ERR(segs))
52c0fd83
MC
6921 goto tg3_tso_bug_end;
6922
6923 do {
6924 nskb = segs;
6925 segs = segs->next;
6926 nskb->next = NULL;
2ffcc981 6927 tg3_start_xmit(nskb, tp->dev);
52c0fd83
MC
6928 } while (segs);
6929
6930tg3_tso_bug_end:
6931 dev_kfree_skb(skb);
6932
6933 return NETDEV_TX_OK;
6934}
52c0fd83 6935
5a6f3074 6936/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
63c3a66f 6937 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5a6f3074 6938 */
2ffcc981 6939static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
6940{
6941 struct tg3 *tp = netdev_priv(dev);
92cd3a17 6942 u32 len, entry, base_flags, mss, vlan = 0;
84b67b27 6943 u32 budget;
432aa7ed 6944 int i = -1, would_hit_hwbug;
90079ce8 6945 dma_addr_t mapping;
24f4efd4
MC
6946 struct tg3_napi *tnapi;
6947 struct netdev_queue *txq;
432aa7ed 6948 unsigned int last;
f4188d8a 6949
24f4efd4
MC
6950 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6951 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
63c3a66f 6952 if (tg3_flag(tp, ENABLE_TSS))
24f4efd4 6953 tnapi++;
1da177e4 6954
84b67b27
MC
6955 budget = tg3_tx_avail(tnapi);
6956
00b70504 6957 /* We are running in BH disabled context with netif_tx_lock
bea3348e 6958 * and TX reclaim runs via tp->napi.poll inside of a software
f47c11ee
DM
6959 * interrupt. Furthermore, IRQ processing runs lockless so we have
6960 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 6961 */
84b67b27 6962 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
24f4efd4
MC
6963 if (!netif_tx_queue_stopped(txq)) {
6964 netif_tx_stop_queue(txq);
1f064a87
SH
6965
6966 /* This is a hard error, log it. */
5129c3a3
MC
6967 netdev_err(dev,
6968 "BUG! Tx Ring full when queue awake!\n");
1f064a87 6969 }
1da177e4
LT
6970 return NETDEV_TX_BUSY;
6971 }
6972
f3f3f27e 6973 entry = tnapi->tx_prod;
1da177e4 6974 base_flags = 0;
84fa7933 6975 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4 6976 base_flags |= TXD_FLAG_TCPUDP_CSUM;
24f4efd4 6977
be98da6a
MC
6978 mss = skb_shinfo(skb)->gso_size;
6979 if (mss) {
eddc9ec5 6980 struct iphdr *iph;
34195c3d 6981 u32 tcp_opt_len, hdr_len;
1da177e4
LT
6982
6983 if (skb_header_cloned(skb) &&
48855432
ED
6984 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6985 goto drop;
1da177e4 6986
34195c3d 6987 iph = ip_hdr(skb);
ab6a5bb6 6988 tcp_opt_len = tcp_optlen(skb);
1da177e4 6989
a5a11955 6990 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
34195c3d 6991
a5a11955 6992 if (!skb_is_gso_v6(skb)) {
34195c3d
MC
6993 iph->check = 0;
6994 iph->tot_len = htons(mss + hdr_len);
6995 }
6996
52c0fd83 6997 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
63c3a66f 6998 tg3_flag(tp, TSO_BUG))
de6f31eb 6999 return tg3_tso_bug(tp, skb);
52c0fd83 7000
1da177e4
LT
7001 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7002 TXD_FLAG_CPU_POST_DMA);
7003
63c3a66f
JP
7004 if (tg3_flag(tp, HW_TSO_1) ||
7005 tg3_flag(tp, HW_TSO_2) ||
7006 tg3_flag(tp, HW_TSO_3)) {
aa8223c7 7007 tcp_hdr(skb)->check = 0;
1da177e4 7008 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
aa8223c7
ACM
7009 } else
7010 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7011 iph->daddr, 0,
7012 IPPROTO_TCP,
7013 0);
1da177e4 7014
63c3a66f 7015 if (tg3_flag(tp, HW_TSO_3)) {
615774fe
MC
7016 mss |= (hdr_len & 0xc) << 12;
7017 if (hdr_len & 0x10)
7018 base_flags |= 0x00000010;
7019 base_flags |= (hdr_len & 0x3e0) << 5;
63c3a66f 7020 } else if (tg3_flag(tp, HW_TSO_2))
92c6b8d1 7021 mss |= hdr_len << 9;
63c3a66f 7022 else if (tg3_flag(tp, HW_TSO_1) ||
92c6b8d1 7023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
eddc9ec5 7024 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
7025 int tsflags;
7026
eddc9ec5 7027 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
7028 mss |= (tsflags << 11);
7029 }
7030 } else {
eddc9ec5 7031 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
7032 int tsflags;
7033
eddc9ec5 7034 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
7035 base_flags |= tsflags << 12;
7036 }
7037 }
7038 }
bf933c80 7039
93a700a9
MC
7040 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7041 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7042 base_flags |= TXD_FLAG_JMB_PKT;
7043
92cd3a17
MC
7044 if (vlan_tx_tag_present(skb)) {
7045 base_flags |= TXD_FLAG_VLAN;
7046 vlan = vlan_tx_tag_get(skb);
7047 }
1da177e4 7048
f4188d8a
AD
7049 len = skb_headlen(skb);
7050
7051 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
48855432
ED
7052 if (pci_dma_mapping_error(tp->pdev, mapping))
7053 goto drop;
7054
90079ce8 7055
f3f3f27e 7056 tnapi->tx_buffers[entry].skb = skb;
4e5e4f0d 7057 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
1da177e4
LT
7058
7059 would_hit_hwbug = 0;
7060
63c3a66f 7061 if (tg3_flag(tp, 5701_DMA_BUG))
c58ec932 7062 would_hit_hwbug = 1;
1da177e4 7063
84b67b27 7064 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
d1a3b737 7065 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
ba1142e4 7066 mss, vlan)) {
d1a3b737 7067 would_hit_hwbug = 1;
ba1142e4 7068 } else if (skb_shinfo(skb)->nr_frags > 0) {
92cd3a17
MC
7069 u32 tmp_mss = mss;
7070
7071 if (!tg3_flag(tp, HW_TSO_1) &&
7072 !tg3_flag(tp, HW_TSO_2) &&
7073 !tg3_flag(tp, HW_TSO_3))
7074 tmp_mss = 0;
7075
c5665a53
MC
7076 /* Now loop through additional data
7077 * fragments, and queue them.
7078 */
1da177e4
LT
7079 last = skb_shinfo(skb)->nr_frags - 1;
7080 for (i = 0; i <= last; i++) {
7081 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7082
9e903e08 7083 len = skb_frag_size(frag);
dc234d0b 7084 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
5d6bcdfe 7085 len, DMA_TO_DEVICE);
1da177e4 7086
f3f3f27e 7087 tnapi->tx_buffers[entry].skb = NULL;
4e5e4f0d 7088 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
f4188d8a 7089 mapping);
5d6bcdfe 7090 if (dma_mapping_error(&tp->pdev->dev, mapping))
f4188d8a 7091 goto dma_error;
1da177e4 7092
b9e45482
MC
7093 if (!budget ||
7094 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
84b67b27
MC
7095 len, base_flags |
7096 ((i == last) ? TXD_FLAG_END : 0),
b9e45482 7097 tmp_mss, vlan)) {
72f2afb8 7098 would_hit_hwbug = 1;
b9e45482
MC
7099 break;
7100 }
1da177e4
LT
7101 }
7102 }
7103
7104 if (would_hit_hwbug) {
0d681b27 7105 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
1da177e4
LT
7106
7107 /* If the workaround fails due to memory/mapping
7108 * failure, silently drop this packet.
7109 */
84b67b27
MC
7110 entry = tnapi->tx_prod;
7111 budget = tg3_tx_avail(tnapi);
f7ff1987 7112 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
84b67b27 7113 base_flags, mss, vlan))
48855432 7114 goto drop_nofree;
1da177e4
LT
7115 }
7116
d515b450 7117 skb_tx_timestamp(skb);
5cb917bc 7118 netdev_tx_sent_queue(txq, skb->len);
d515b450 7119
6541b806
MC
7120 /* Sync BD data before updating mailbox */
7121 wmb();
7122
1da177e4 7123 /* Packets are ready, update Tx producer idx local and on card. */
24f4efd4 7124 tw32_tx_mbox(tnapi->prodmbox, entry);
1da177e4 7125
f3f3f27e
MC
7126 tnapi->tx_prod = entry;
7127 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
24f4efd4 7128 netif_tx_stop_queue(txq);
f65aac16
MC
7129
7130 /* netif_tx_stop_queue() must be done before checking
7131 * checking tx index in tg3_tx_avail() below, because in
7132 * tg3_tx(), we update tx index before checking for
7133 * netif_tx_queue_stopped().
7134 */
7135 smp_mb();
f3f3f27e 7136 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
24f4efd4 7137 netif_tx_wake_queue(txq);
51b91468 7138 }
1da177e4 7139
cdd0db05 7140 mmiowb();
1da177e4 7141 return NETDEV_TX_OK;
f4188d8a
AD
7142
7143dma_error:
ba1142e4 7144 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
432aa7ed 7145 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
48855432
ED
7146drop:
7147 dev_kfree_skb(skb);
7148drop_nofree:
7149 tp->tx_dropped++;
f4188d8a 7150 return NETDEV_TX_OK;
1da177e4
LT
7151}
7152
6e01b20b
MC
7153static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7154{
7155 if (enable) {
7156 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7157 MAC_MODE_PORT_MODE_MASK);
7158
7159 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7160
7161 if (!tg3_flag(tp, 5705_PLUS))
7162 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7163
7164 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7165 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7166 else
7167 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7168 } else {
7169 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7170
7171 if (tg3_flag(tp, 5705_PLUS) ||
7172 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7174 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7175 }
7176
7177 tw32(MAC_MODE, tp->mac_mode);
7178 udelay(40);
7179}
7180
941ec90f 7181static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
5e5a7f37 7182{
941ec90f 7183 u32 val, bmcr, mac_mode, ptest = 0;
5e5a7f37
MC
7184
7185 tg3_phy_toggle_apd(tp, false);
7186 tg3_phy_toggle_automdix(tp, 0);
7187
941ec90f
MC
7188 if (extlpbk && tg3_phy_set_extloopbk(tp))
7189 return -EIO;
7190
7191 bmcr = BMCR_FULLDPLX;
5e5a7f37
MC
7192 switch (speed) {
7193 case SPEED_10:
7194 break;
7195 case SPEED_100:
7196 bmcr |= BMCR_SPEED100;
7197 break;
7198 case SPEED_1000:
7199 default:
7200 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7201 speed = SPEED_100;
7202 bmcr |= BMCR_SPEED100;
7203 } else {
7204 speed = SPEED_1000;
7205 bmcr |= BMCR_SPEED1000;
7206 }
7207 }
7208
941ec90f
MC
7209 if (extlpbk) {
7210 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7211 tg3_readphy(tp, MII_CTRL1000, &val);
7212 val |= CTL1000_AS_MASTER |
7213 CTL1000_ENABLE_MASTER;
7214 tg3_writephy(tp, MII_CTRL1000, val);
7215 } else {
7216 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7217 MII_TG3_FET_PTEST_TRIM_2;
7218 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7219 }
7220 } else
7221 bmcr |= BMCR_LOOPBACK;
7222
5e5a7f37
MC
7223 tg3_writephy(tp, MII_BMCR, bmcr);
7224
7225 /* The write needs to be flushed for the FETs */
7226 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7227 tg3_readphy(tp, MII_BMCR, &bmcr);
7228
7229 udelay(40);
7230
7231 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7232 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
941ec90f 7233 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
5e5a7f37
MC
7234 MII_TG3_FET_PTEST_FRC_TX_LINK |
7235 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7236
7237 /* The write needs to be flushed for the AC131 */
7238 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7239 }
7240
7241 /* Reset to prevent losing 1st rx packet intermittently */
7242 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7243 tg3_flag(tp, 5780_CLASS)) {
7244 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7245 udelay(10);
7246 tw32_f(MAC_RX_MODE, tp->rx_mode);
7247 }
7248
7249 mac_mode = tp->mac_mode &
7250 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7251 if (speed == SPEED_1000)
7252 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7253 else
7254 mac_mode |= MAC_MODE_PORT_MODE_MII;
7255
7256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7257 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7258
7259 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7260 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7261 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7262 mac_mode |= MAC_MODE_LINK_POLARITY;
7263
7264 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7265 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7266 }
7267
7268 tw32(MAC_MODE, mac_mode);
7269 udelay(40);
941ec90f
MC
7270
7271 return 0;
5e5a7f37
MC
7272}
7273
c8f44aff 7274static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
06c03c02
MB
7275{
7276 struct tg3 *tp = netdev_priv(dev);
7277
7278 if (features & NETIF_F_LOOPBACK) {
7279 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7280 return;
7281
06c03c02 7282 spin_lock_bh(&tp->lock);
6e01b20b 7283 tg3_mac_loopback(tp, true);
06c03c02
MB
7284 netif_carrier_on(tp->dev);
7285 spin_unlock_bh(&tp->lock);
7286 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7287 } else {
7288 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7289 return;
7290
06c03c02 7291 spin_lock_bh(&tp->lock);
6e01b20b 7292 tg3_mac_loopback(tp, false);
06c03c02
MB
7293 /* Force link status check */
7294 tg3_setup_phy(tp, 1);
7295 spin_unlock_bh(&tp->lock);
7296 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7297 }
7298}
7299
c8f44aff
MM
7300static netdev_features_t tg3_fix_features(struct net_device *dev,
7301 netdev_features_t features)
dc668910
MM
7302{
7303 struct tg3 *tp = netdev_priv(dev);
7304
63c3a66f 7305 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
dc668910
MM
7306 features &= ~NETIF_F_ALL_TSO;
7307
7308 return features;
7309}
7310
c8f44aff 7311static int tg3_set_features(struct net_device *dev, netdev_features_t features)
06c03c02 7312{
c8f44aff 7313 netdev_features_t changed = dev->features ^ features;
06c03c02
MB
7314
7315 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7316 tg3_set_loopback(dev, features);
7317
7318 return 0;
7319}
7320
21f581a5
MC
7321static void tg3_rx_prodring_free(struct tg3 *tp,
7322 struct tg3_rx_prodring_set *tpr)
1da177e4 7323{
1da177e4
LT
7324 int i;
7325
8fea32b9 7326 if (tpr != &tp->napi[0].prodring) {
b196c7e4 7327 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
2c49a44d 7328 i = (i + 1) & tp->rx_std_ring_mask)
9205fd9c 7329 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
b196c7e4
MC
7330 tp->rx_pkt_map_sz);
7331
63c3a66f 7332 if (tg3_flag(tp, JUMBO_CAPABLE)) {
b196c7e4
MC
7333 for (i = tpr->rx_jmb_cons_idx;
7334 i != tpr->rx_jmb_prod_idx;
2c49a44d 7335 i = (i + 1) & tp->rx_jmb_ring_mask) {
9205fd9c 7336 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
b196c7e4
MC
7337 TG3_RX_JMB_MAP_SZ);
7338 }
7339 }
7340
2b2cdb65 7341 return;
b196c7e4 7342 }
1da177e4 7343
2c49a44d 7344 for (i = 0; i <= tp->rx_std_ring_mask; i++)
9205fd9c 7345 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
2b2cdb65 7346 tp->rx_pkt_map_sz);
1da177e4 7347
63c3a66f 7348 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
2c49a44d 7349 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
9205fd9c 7350 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
2b2cdb65 7351 TG3_RX_JMB_MAP_SZ);
1da177e4
LT
7352 }
7353}
7354
c6cdf436 7355/* Initialize rx rings for packet processing.
1da177e4
LT
7356 *
7357 * The chip has been shut down and the driver detached from
7358 * the networking, so no interrupts or new tx packets will
7359 * end up in the driver. tp->{tx,}lock are held and thus
7360 * we may not sleep.
7361 */
21f581a5
MC
7362static int tg3_rx_prodring_alloc(struct tg3 *tp,
7363 struct tg3_rx_prodring_set *tpr)
1da177e4 7364{
287be12e 7365 u32 i, rx_pkt_dma_sz;
1da177e4 7366
b196c7e4
MC
7367 tpr->rx_std_cons_idx = 0;
7368 tpr->rx_std_prod_idx = 0;
7369 tpr->rx_jmb_cons_idx = 0;
7370 tpr->rx_jmb_prod_idx = 0;
7371
8fea32b9 7372 if (tpr != &tp->napi[0].prodring) {
2c49a44d
MC
7373 memset(&tpr->rx_std_buffers[0], 0,
7374 TG3_RX_STD_BUFF_RING_SIZE(tp));
48035728 7375 if (tpr->rx_jmb_buffers)
2b2cdb65 7376 memset(&tpr->rx_jmb_buffers[0], 0,
2c49a44d 7377 TG3_RX_JMB_BUFF_RING_SIZE(tp));
2b2cdb65
MC
7378 goto done;
7379 }
7380
1da177e4 7381 /* Zero out all descriptors. */
2c49a44d 7382 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
1da177e4 7383
287be12e 7384 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
63c3a66f 7385 if (tg3_flag(tp, 5780_CLASS) &&
287be12e
MC
7386 tp->dev->mtu > ETH_DATA_LEN)
7387 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7388 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7e72aad4 7389
1da177e4
LT
7390 /* Initialize invariants of the rings, we only set this
7391 * stuff once. This works because the card does not
7392 * write into the rx buffer posting rings.
7393 */
2c49a44d 7394 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
1da177e4
LT
7395 struct tg3_rx_buffer_desc *rxd;
7396
21f581a5 7397 rxd = &tpr->rx_std[i];
287be12e 7398 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
1da177e4
LT
7399 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7400 rxd->opaque = (RXD_OPAQUE_RING_STD |
7401 (i << RXD_OPAQUE_INDEX_SHIFT));
7402 }
7403
1da177e4
LT
7404 /* Now allocate fresh SKBs for each rx ring. */
7405 for (i = 0; i < tp->rx_pending; i++) {
8d4057a9
ED
7406 unsigned int frag_size;
7407
7408 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7409 &frag_size) < 0) {
5129c3a3
MC
7410 netdev_warn(tp->dev,
7411 "Using a smaller RX standard ring. Only "
7412 "%d out of %d buffers were allocated "
7413 "successfully\n", i, tp->rx_pending);
32d8c572 7414 if (i == 0)
cf7a7298 7415 goto initfail;
32d8c572 7416 tp->rx_pending = i;
1da177e4 7417 break;
32d8c572 7418 }
1da177e4
LT
7419 }
7420
63c3a66f 7421 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
cf7a7298
MC
7422 goto done;
7423
2c49a44d 7424 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
cf7a7298 7425
63c3a66f 7426 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
0d86df80 7427 goto done;
cf7a7298 7428
2c49a44d 7429 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
0d86df80
MC
7430 struct tg3_rx_buffer_desc *rxd;
7431
7432 rxd = &tpr->rx_jmb[i].std;
7433 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7434 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7435 RXD_FLAG_JUMBO;
7436 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7437 (i << RXD_OPAQUE_INDEX_SHIFT));
7438 }
7439
7440 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8d4057a9
ED
7441 unsigned int frag_size;
7442
7443 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7444 &frag_size) < 0) {
5129c3a3
MC
7445 netdev_warn(tp->dev,
7446 "Using a smaller RX jumbo ring. Only %d "
7447 "out of %d buffers were allocated "
7448 "successfully\n", i, tp->rx_jumbo_pending);
0d86df80
MC
7449 if (i == 0)
7450 goto initfail;
7451 tp->rx_jumbo_pending = i;
7452 break;
1da177e4
LT
7453 }
7454 }
cf7a7298
MC
7455
7456done:
32d8c572 7457 return 0;
cf7a7298
MC
7458
7459initfail:
21f581a5 7460 tg3_rx_prodring_free(tp, tpr);
cf7a7298 7461 return -ENOMEM;
1da177e4
LT
7462}
7463
21f581a5
MC
7464static void tg3_rx_prodring_fini(struct tg3 *tp,
7465 struct tg3_rx_prodring_set *tpr)
1da177e4 7466{
21f581a5
MC
7467 kfree(tpr->rx_std_buffers);
7468 tpr->rx_std_buffers = NULL;
7469 kfree(tpr->rx_jmb_buffers);
7470 tpr->rx_jmb_buffers = NULL;
7471 if (tpr->rx_std) {
4bae65c8
MC
7472 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7473 tpr->rx_std, tpr->rx_std_mapping);
21f581a5 7474 tpr->rx_std = NULL;
1da177e4 7475 }
21f581a5 7476 if (tpr->rx_jmb) {
4bae65c8
MC
7477 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7478 tpr->rx_jmb, tpr->rx_jmb_mapping);
21f581a5 7479 tpr->rx_jmb = NULL;
1da177e4 7480 }
cf7a7298
MC
7481}
7482
21f581a5
MC
7483static int tg3_rx_prodring_init(struct tg3 *tp,
7484 struct tg3_rx_prodring_set *tpr)
cf7a7298 7485{
2c49a44d
MC
7486 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7487 GFP_KERNEL);
21f581a5 7488 if (!tpr->rx_std_buffers)
cf7a7298
MC
7489 return -ENOMEM;
7490
4bae65c8
MC
7491 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7492 TG3_RX_STD_RING_BYTES(tp),
7493 &tpr->rx_std_mapping,
7494 GFP_KERNEL);
21f581a5 7495 if (!tpr->rx_std)
cf7a7298
MC
7496 goto err_out;
7497
63c3a66f 7498 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
2c49a44d 7499 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
21f581a5
MC
7500 GFP_KERNEL);
7501 if (!tpr->rx_jmb_buffers)
cf7a7298
MC
7502 goto err_out;
7503
4bae65c8
MC
7504 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7505 TG3_RX_JMB_RING_BYTES(tp),
7506 &tpr->rx_jmb_mapping,
7507 GFP_KERNEL);
21f581a5 7508 if (!tpr->rx_jmb)
cf7a7298
MC
7509 goto err_out;
7510 }
7511
7512 return 0;
7513
7514err_out:
21f581a5 7515 tg3_rx_prodring_fini(tp, tpr);
cf7a7298
MC
7516 return -ENOMEM;
7517}
7518
7519/* Free up pending packets in all rx/tx rings.
7520 *
7521 * The chip has been shut down and the driver detached from
7522 * the networking, so no interrupts or new tx packets will
7523 * end up in the driver. tp->{tx,}lock is not held and we are not
7524 * in an interrupt context and thus may sleep.
7525 */
7526static void tg3_free_rings(struct tg3 *tp)
7527{
f77a6a8e 7528 int i, j;
cf7a7298 7529
f77a6a8e
MC
7530 for (j = 0; j < tp->irq_cnt; j++) {
7531 struct tg3_napi *tnapi = &tp->napi[j];
cf7a7298 7532
8fea32b9 7533 tg3_rx_prodring_free(tp, &tnapi->prodring);
b28f6428 7534
0c1d0e2b
MC
7535 if (!tnapi->tx_buffers)
7536 continue;
7537
0d681b27
MC
7538 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7539 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
cf7a7298 7540
0d681b27 7541 if (!skb)
f77a6a8e 7542 continue;
cf7a7298 7543
ba1142e4
MC
7544 tg3_tx_skb_unmap(tnapi, i,
7545 skb_shinfo(skb)->nr_frags - 1);
f77a6a8e
MC
7546
7547 dev_kfree_skb_any(skb);
7548 }
5cb917bc 7549 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
2b2cdb65 7550 }
cf7a7298
MC
7551}
7552
7553/* Initialize tx/rx rings for packet processing.
7554 *
7555 * The chip has been shut down and the driver detached from
7556 * the networking, so no interrupts or new tx packets will
7557 * end up in the driver. tp->{tx,}lock are held and thus
7558 * we may not sleep.
7559 */
7560static int tg3_init_rings(struct tg3 *tp)
7561{
f77a6a8e 7562 int i;
72334482 7563
cf7a7298
MC
7564 /* Free up all the SKBs. */
7565 tg3_free_rings(tp);
7566
f77a6a8e
MC
7567 for (i = 0; i < tp->irq_cnt; i++) {
7568 struct tg3_napi *tnapi = &tp->napi[i];
7569
7570 tnapi->last_tag = 0;
7571 tnapi->last_irq_tag = 0;
7572 tnapi->hw_status->status = 0;
7573 tnapi->hw_status->status_tag = 0;
7574 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
cf7a7298 7575
f77a6a8e
MC
7576 tnapi->tx_prod = 0;
7577 tnapi->tx_cons = 0;
0c1d0e2b
MC
7578 if (tnapi->tx_ring)
7579 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
f77a6a8e
MC
7580
7581 tnapi->rx_rcb_ptr = 0;
0c1d0e2b
MC
7582 if (tnapi->rx_rcb)
7583 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
2b2cdb65 7584
8fea32b9 7585 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
e4af1af9 7586 tg3_free_rings(tp);
2b2cdb65 7587 return -ENOMEM;
e4af1af9 7588 }
f77a6a8e 7589 }
72334482 7590
2b2cdb65 7591 return 0;
cf7a7298
MC
7592}
7593
49a359e3 7594static void tg3_mem_tx_release(struct tg3 *tp)
cf7a7298 7595{
f77a6a8e 7596 int i;
898a56f8 7597
49a359e3 7598 for (i = 0; i < tp->irq_max; i++) {
f77a6a8e
MC
7599 struct tg3_napi *tnapi = &tp->napi[i];
7600
7601 if (tnapi->tx_ring) {
4bae65c8 7602 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
f77a6a8e
MC
7603 tnapi->tx_ring, tnapi->tx_desc_mapping);
7604 tnapi->tx_ring = NULL;
7605 }
7606
7607 kfree(tnapi->tx_buffers);
7608 tnapi->tx_buffers = NULL;
49a359e3
MC
7609 }
7610}
f77a6a8e 7611
49a359e3
MC
7612static int tg3_mem_tx_acquire(struct tg3 *tp)
7613{
7614 int i;
7615 struct tg3_napi *tnapi = &tp->napi[0];
7616
7617 /* If multivector TSS is enabled, vector 0 does not handle
7618 * tx interrupts. Don't allocate any resources for it.
7619 */
7620 if (tg3_flag(tp, ENABLE_TSS))
7621 tnapi++;
7622
7623 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7624 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7625 TG3_TX_RING_SIZE, GFP_KERNEL);
7626 if (!tnapi->tx_buffers)
7627 goto err_out;
7628
7629 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7630 TG3_TX_RING_BYTES,
7631 &tnapi->tx_desc_mapping,
7632 GFP_KERNEL);
7633 if (!tnapi->tx_ring)
7634 goto err_out;
7635 }
7636
7637 return 0;
7638
7639err_out:
7640 tg3_mem_tx_release(tp);
7641 return -ENOMEM;
7642}
7643
7644static void tg3_mem_rx_release(struct tg3 *tp)
7645{
7646 int i;
7647
7648 for (i = 0; i < tp->irq_max; i++) {
7649 struct tg3_napi *tnapi = &tp->napi[i];
f77a6a8e 7650
8fea32b9
MC
7651 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7652
49a359e3
MC
7653 if (!tnapi->rx_rcb)
7654 continue;
7655
7656 dma_free_coherent(&tp->pdev->dev,
7657 TG3_RX_RCB_RING_BYTES(tp),
7658 tnapi->rx_rcb,
7659 tnapi->rx_rcb_mapping);
7660 tnapi->rx_rcb = NULL;
7661 }
7662}
7663
7664static int tg3_mem_rx_acquire(struct tg3 *tp)
7665{
7666 unsigned int i, limit;
7667
7668 limit = tp->rxq_cnt;
7669
7670 /* If RSS is enabled, we need a (dummy) producer ring
7671 * set on vector zero. This is the true hw prodring.
7672 */
7673 if (tg3_flag(tp, ENABLE_RSS))
7674 limit++;
7675
7676 for (i = 0; i < limit; i++) {
7677 struct tg3_napi *tnapi = &tp->napi[i];
7678
7679 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7680 goto err_out;
7681
7682 /* If multivector RSS is enabled, vector 0
7683 * does not handle rx or tx interrupts.
7684 * Don't allocate any resources for it.
7685 */
7686 if (!i && tg3_flag(tp, ENABLE_RSS))
7687 continue;
7688
7689 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7690 TG3_RX_RCB_RING_BYTES(tp),
7691 &tnapi->rx_rcb_mapping,
7692 GFP_KERNEL);
7693 if (!tnapi->rx_rcb)
7694 goto err_out;
7695
7696 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7697 }
7698
7699 return 0;
7700
7701err_out:
7702 tg3_mem_rx_release(tp);
7703 return -ENOMEM;
7704}
7705
7706/*
7707 * Must not be invoked with interrupt sources disabled and
7708 * the hardware shutdown down.
7709 */
7710static void tg3_free_consistent(struct tg3 *tp)
7711{
7712 int i;
7713
7714 for (i = 0; i < tp->irq_cnt; i++) {
7715 struct tg3_napi *tnapi = &tp->napi[i];
7716
f77a6a8e 7717 if (tnapi->hw_status) {
4bae65c8
MC
7718 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7719 tnapi->hw_status,
7720 tnapi->status_mapping);
f77a6a8e
MC
7721 tnapi->hw_status = NULL;
7722 }
1da177e4 7723 }
f77a6a8e 7724
49a359e3
MC
7725 tg3_mem_rx_release(tp);
7726 tg3_mem_tx_release(tp);
7727
1da177e4 7728 if (tp->hw_stats) {
4bae65c8
MC
7729 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7730 tp->hw_stats, tp->stats_mapping);
1da177e4
LT
7731 tp->hw_stats = NULL;
7732 }
7733}
7734
7735/*
7736 * Must not be invoked with interrupt sources disabled and
7737 * the hardware shutdown down. Can sleep.
7738 */
7739static int tg3_alloc_consistent(struct tg3 *tp)
7740{
f77a6a8e 7741 int i;
898a56f8 7742
4bae65c8
MC
7743 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7744 sizeof(struct tg3_hw_stats),
7745 &tp->stats_mapping,
7746 GFP_KERNEL);
f77a6a8e 7747 if (!tp->hw_stats)
1da177e4
LT
7748 goto err_out;
7749
f77a6a8e 7750 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
1da177e4 7751
f77a6a8e
MC
7752 for (i = 0; i < tp->irq_cnt; i++) {
7753 struct tg3_napi *tnapi = &tp->napi[i];
8d9d7cfc 7754 struct tg3_hw_status *sblk;
1da177e4 7755
4bae65c8
MC
7756 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7757 TG3_HW_STATUS_SIZE,
7758 &tnapi->status_mapping,
7759 GFP_KERNEL);
f77a6a8e
MC
7760 if (!tnapi->hw_status)
7761 goto err_out;
898a56f8 7762
f77a6a8e 7763 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8d9d7cfc
MC
7764 sblk = tnapi->hw_status;
7765
49a359e3 7766 if (tg3_flag(tp, ENABLE_RSS)) {
86449944 7767 u16 *prodptr = NULL;
8fea32b9 7768
49a359e3
MC
7769 /*
7770 * When RSS is enabled, the status block format changes
7771 * slightly. The "rx_jumbo_consumer", "reserved",
7772 * and "rx_mini_consumer" members get mapped to the
7773 * other three rx return ring producer indexes.
7774 */
7775 switch (i) {
7776 case 1:
7777 prodptr = &sblk->idx[0].rx_producer;
7778 break;
7779 case 2:
7780 prodptr = &sblk->rx_jumbo_consumer;
7781 break;
7782 case 3:
7783 prodptr = &sblk->reserved;
7784 break;
7785 case 4:
7786 prodptr = &sblk->rx_mini_consumer;
f891ea16
MC
7787 break;
7788 }
49a359e3
MC
7789 tnapi->rx_rcb_prod_idx = prodptr;
7790 } else {
8d9d7cfc 7791 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8d9d7cfc 7792 }
f77a6a8e 7793 }
1da177e4 7794
49a359e3
MC
7795 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7796 goto err_out;
7797
1da177e4
LT
7798 return 0;
7799
7800err_out:
7801 tg3_free_consistent(tp);
7802 return -ENOMEM;
7803}
7804
7805#define MAX_WAIT_CNT 1000
7806
7807/* To stop a block, clear the enable bit and poll till it
7808 * clears. tp->lock is held.
7809 */
b3b7d6be 7810static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
7811{
7812 unsigned int i;
7813 u32 val;
7814
63c3a66f 7815 if (tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
7816 switch (ofs) {
7817 case RCVLSC_MODE:
7818 case DMAC_MODE:
7819 case MBFREE_MODE:
7820 case BUFMGR_MODE:
7821 case MEMARB_MODE:
7822 /* We can't enable/disable these bits of the
7823 * 5705/5750, just say success.
7824 */
7825 return 0;
7826
7827 default:
7828 break;
855e1111 7829 }
1da177e4
LT
7830 }
7831
7832 val = tr32(ofs);
7833 val &= ~enable_bit;
7834 tw32_f(ofs, val);
7835
7836 for (i = 0; i < MAX_WAIT_CNT; i++) {
7837 udelay(100);
7838 val = tr32(ofs);
7839 if ((val & enable_bit) == 0)
7840 break;
7841 }
7842
b3b7d6be 7843 if (i == MAX_WAIT_CNT && !silent) {
2445e461
MC
7844 dev_err(&tp->pdev->dev,
7845 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7846 ofs, enable_bit);
1da177e4
LT
7847 return -ENODEV;
7848 }
7849
7850 return 0;
7851}
7852
7853/* tp->lock is held. */
b3b7d6be 7854static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
7855{
7856 int i, err;
7857
7858 tg3_disable_ints(tp);
7859
7860 tp->rx_mode &= ~RX_MODE_ENABLE;
7861 tw32_f(MAC_RX_MODE, tp->rx_mode);
7862 udelay(10);
7863
b3b7d6be
DM
7864 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7865 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7866 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7867 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7868 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7869 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7870
7871 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7872 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7873 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7874 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7875 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7876 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7877 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
7878
7879 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7880 tw32_f(MAC_MODE, tp->mac_mode);
7881 udelay(40);
7882
7883 tp->tx_mode &= ~TX_MODE_ENABLE;
7884 tw32_f(MAC_TX_MODE, tp->tx_mode);
7885
7886 for (i = 0; i < MAX_WAIT_CNT; i++) {
7887 udelay(100);
7888 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7889 break;
7890 }
7891 if (i >= MAX_WAIT_CNT) {
ab96b241
MC
7892 dev_err(&tp->pdev->dev,
7893 "%s timed out, TX_MODE_ENABLE will not clear "
7894 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
e6de8ad1 7895 err |= -ENODEV;
1da177e4
LT
7896 }
7897
e6de8ad1 7898 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
7899 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7900 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
7901
7902 tw32(FTQ_RESET, 0xffffffff);
7903 tw32(FTQ_RESET, 0x00000000);
7904
b3b7d6be
DM
7905 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7906 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4 7907
f77a6a8e
MC
7908 for (i = 0; i < tp->irq_cnt; i++) {
7909 struct tg3_napi *tnapi = &tp->napi[i];
7910 if (tnapi->hw_status)
7911 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7912 }
1da177e4 7913
1da177e4
LT
7914 return err;
7915}
7916
ee6a99b5
MC
7917/* Save PCI command register before chip reset */
7918static void tg3_save_pci_state(struct tg3 *tp)
7919{
8a6eac90 7920 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
ee6a99b5
MC
7921}
7922
7923/* Restore PCI state after chip reset */
7924static void tg3_restore_pci_state(struct tg3 *tp)
7925{
7926 u32 val;
7927
7928 /* Re-enable indirect register accesses. */
7929 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7930 tp->misc_host_ctrl);
7931
7932 /* Set MAX PCI retry to zero. */
7933 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7934 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
63c3a66f 7935 tg3_flag(tp, PCIX_MODE))
ee6a99b5 7936 val |= PCISTATE_RETRY_SAME_DMA;
0d3031d9 7937 /* Allow reads and writes to the APE register and memory space. */
63c3a66f 7938 if (tg3_flag(tp, ENABLE_APE))
0d3031d9 7939 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
7940 PCISTATE_ALLOW_APE_SHMEM_WR |
7941 PCISTATE_ALLOW_APE_PSPACE_WR;
ee6a99b5
MC
7942 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7943
8a6eac90 7944 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
ee6a99b5 7945
2c55a3d0
MC
7946 if (!tg3_flag(tp, PCI_EXPRESS)) {
7947 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7948 tp->pci_cacheline_sz);
7949 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7950 tp->pci_lat_timer);
114342f2 7951 }
5f5c51e3 7952
ee6a99b5 7953 /* Make sure PCI-X relaxed ordering bit is clear. */
63c3a66f 7954 if (tg3_flag(tp, PCIX_MODE)) {
9974a356
MC
7955 u16 pcix_cmd;
7956
7957 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7958 &pcix_cmd);
7959 pcix_cmd &= ~PCI_X_CMD_ERO;
7960 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7961 pcix_cmd);
7962 }
ee6a99b5 7963
63c3a66f 7964 if (tg3_flag(tp, 5780_CLASS)) {
ee6a99b5
MC
7965
7966 /* Chip reset on 5780 will reset MSI enable bit,
7967 * so need to restore it.
7968 */
63c3a66f 7969 if (tg3_flag(tp, USING_MSI)) {
ee6a99b5
MC
7970 u16 ctrl;
7971
7972 pci_read_config_word(tp->pdev,
7973 tp->msi_cap + PCI_MSI_FLAGS,
7974 &ctrl);
7975 pci_write_config_word(tp->pdev,
7976 tp->msi_cap + PCI_MSI_FLAGS,
7977 ctrl | PCI_MSI_FLAGS_ENABLE);
7978 val = tr32(MSGINT_MODE);
7979 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7980 }
7981 }
7982}
7983
1da177e4
LT
7984/* tp->lock is held. */
7985static int tg3_chip_reset(struct tg3 *tp)
7986{
7987 u32 val;
1ee582d8 7988 void (*write_op)(struct tg3 *, u32, u32);
4f125f42 7989 int i, err;
1da177e4 7990
f49639e6
DM
7991 tg3_nvram_lock(tp);
7992
77b483f1
MC
7993 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7994
f49639e6
DM
7995 /* No matching tg3_nvram_unlock() after this because
7996 * chip reset below will undo the nvram lock.
7997 */
7998 tp->nvram_lock_cnt = 0;
1da177e4 7999
ee6a99b5
MC
8000 /* GRC_MISC_CFG core clock reset will clear the memory
8001 * enable bit in PCI register 4 and the MSI enable bit
8002 * on some chips, so we save relevant registers here.
8003 */
8004 tg3_save_pci_state(tp);
8005
d9ab5ad1 8006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
63c3a66f 8007 tg3_flag(tp, 5755_PLUS))
d9ab5ad1
MC
8008 tw32(GRC_FASTBOOT_PC, 0);
8009
1da177e4
LT
8010 /*
8011 * We must avoid the readl() that normally takes place.
8012 * It locks machines, causes machine checks, and other
8013 * fun things. So, temporarily disable the 5701
8014 * hardware workaround, while we do the reset.
8015 */
1ee582d8
MC
8016 write_op = tp->write32;
8017 if (write_op == tg3_write_flush_reg32)
8018 tp->write32 = tg3_write32;
1da177e4 8019
d18edcb2
MC
8020 /* Prevent the irq handler from reading or writing PCI registers
8021 * during chip reset when the memory enable bit in the PCI command
8022 * register may be cleared. The chip does not generate interrupt
8023 * at this time, but the irq handler may still be called due to irq
8024 * sharing or irqpoll.
8025 */
63c3a66f 8026 tg3_flag_set(tp, CHIP_RESETTING);
f77a6a8e
MC
8027 for (i = 0; i < tp->irq_cnt; i++) {
8028 struct tg3_napi *tnapi = &tp->napi[i];
8029 if (tnapi->hw_status) {
8030 tnapi->hw_status->status = 0;
8031 tnapi->hw_status->status_tag = 0;
8032 }
8033 tnapi->last_tag = 0;
8034 tnapi->last_irq_tag = 0;
b8fa2f3a 8035 }
d18edcb2 8036 smp_mb();
4f125f42
MC
8037
8038 for (i = 0; i < tp->irq_cnt; i++)
8039 synchronize_irq(tp->napi[i].irq_vec);
d18edcb2 8040
255ca311
MC
8041 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8042 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8043 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8044 }
8045
1da177e4
LT
8046 /* do the reset */
8047 val = GRC_MISC_CFG_CORECLK_RESET;
8048
63c3a66f 8049 if (tg3_flag(tp, PCI_EXPRESS)) {
88075d91
MC
8050 /* Force PCIe 1.0a mode */
8051 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 8052 !tg3_flag(tp, 57765_PLUS) &&
88075d91
MC
8053 tr32(TG3_PCIE_PHY_TSTCTL) ==
8054 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8055 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8056
1da177e4
LT
8057 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8058 tw32(GRC_MISC_CFG, (1 << 29));
8059 val |= (1 << 29);
8060 }
8061 }
8062
b5d3772c
MC
8063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8064 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8065 tw32(GRC_VCPU_EXT_CTRL,
8066 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8067 }
8068
f37500d3 8069 /* Manage gphy power for all CPMU absent PCIe devices. */
63c3a66f 8070 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
1da177e4 8071 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
f37500d3 8072
1da177e4
LT
8073 tw32(GRC_MISC_CFG, val);
8074
1ee582d8
MC
8075 /* restore 5701 hardware bug workaround write method */
8076 tp->write32 = write_op;
1da177e4
LT
8077
8078 /* Unfortunately, we have to delay before the PCI read back.
8079 * Some 575X chips even will not respond to a PCI cfg access
8080 * when the reset command is given to the chip.
8081 *
8082 * How do these hardware designers expect things to work
8083 * properly if the PCI write is posted for a long period
8084 * of time? It is always necessary to have some method by
8085 * which a register read back can occur to push the write
8086 * out which does the reset.
8087 *
8088 * For most tg3 variants the trick below was working.
8089 * Ho hum...
8090 */
8091 udelay(120);
8092
8093 /* Flush PCI posted writes. The normal MMIO registers
8094 * are inaccessible at this time so this is the only
8095 * way to make this reliably (actually, this is no longer
8096 * the case, see above). I tried to use indirect
8097 * register read/write but this upset some 5701 variants.
8098 */
8099 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8100
8101 udelay(120);
8102
0f49bfbd 8103 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
e7126997
MC
8104 u16 val16;
8105
1da177e4 8106 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
86449944 8107 int j;
1da177e4
LT
8108 u32 cfg_val;
8109
8110 /* Wait for link training to complete. */
86449944 8111 for (j = 0; j < 5000; j++)
1da177e4
LT
8112 udelay(100);
8113
8114 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8115 pci_write_config_dword(tp->pdev, 0xc4,
8116 cfg_val | (1 << 15));
8117 }
5e7dfd0f 8118
e7126997 8119 /* Clear the "no snoop" and "relaxed ordering" bits. */
0f49bfbd 8120 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
e7126997
MC
8121 /*
8122 * Older PCIe devices only support the 128 byte
8123 * MPS setting. Enforce the restriction.
5e7dfd0f 8124 */
63c3a66f 8125 if (!tg3_flag(tp, CPMU_PRESENT))
0f49bfbd
JL
8126 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8127 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
5e7dfd0f 8128
5e7dfd0f 8129 /* Clear error status */
0f49bfbd 8130 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
5e7dfd0f
MC
8131 PCI_EXP_DEVSTA_CED |
8132 PCI_EXP_DEVSTA_NFED |
8133 PCI_EXP_DEVSTA_FED |
8134 PCI_EXP_DEVSTA_URD);
1da177e4
LT
8135 }
8136
ee6a99b5 8137 tg3_restore_pci_state(tp);
1da177e4 8138
63c3a66f
JP
8139 tg3_flag_clear(tp, CHIP_RESETTING);
8140 tg3_flag_clear(tp, ERROR_PROCESSED);
d18edcb2 8141
ee6a99b5 8142 val = 0;
63c3a66f 8143 if (tg3_flag(tp, 5780_CLASS))
4cf78e4f 8144 val = tr32(MEMARB_MODE);
ee6a99b5 8145 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1da177e4
LT
8146
8147 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8148 tg3_stop_fw(tp);
8149 tw32(0x5000, 0x400);
8150 }
8151
8152 tw32(GRC_MODE, tp->grc_mode);
8153
8154 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
ab0049b4 8155 val = tr32(0xc4);
1da177e4
LT
8156
8157 tw32(0xc4, val | (1 << 15));
8158 }
8159
8160 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8162 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8163 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8164 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8165 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8166 }
8167
f07e9af3 8168 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9e975cc2 8169 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
d2394e6b 8170 val = tp->mac_mode;
f07e9af3 8171 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9e975cc2 8172 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
d2394e6b 8173 val = tp->mac_mode;
1da177e4 8174 } else
d2394e6b
MC
8175 val = 0;
8176
8177 tw32_f(MAC_MODE, val);
1da177e4
LT
8178 udelay(40);
8179
77b483f1
MC
8180 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8181
7a6f4369
MC
8182 err = tg3_poll_fw(tp);
8183 if (err)
8184 return err;
1da177e4 8185
0a9140cf
MC
8186 tg3_mdio_start(tp);
8187
63c3a66f 8188 if (tg3_flag(tp, PCI_EXPRESS) &&
f6eb9b1f
MC
8189 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8190 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 8191 !tg3_flag(tp, 57765_PLUS)) {
ab0049b4 8192 val = tr32(0x7c00);
1da177e4
LT
8193
8194 tw32(0x7c00, val | (1 << 25));
8195 }
8196
d78b59f5
MC
8197 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8198 val = tr32(TG3_CPMU_CLCK_ORIDE);
8199 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8200 }
8201
1da177e4 8202 /* Reprobe ASF enable state. */
63c3a66f
JP
8203 tg3_flag_clear(tp, ENABLE_ASF);
8204 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
1da177e4
LT
8205 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8206 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8207 u32 nic_cfg;
8208
8209 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8210 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
63c3a66f 8211 tg3_flag_set(tp, ENABLE_ASF);
4ba526ce 8212 tp->last_event_jiffies = jiffies;
63c3a66f
JP
8213 if (tg3_flag(tp, 5750_PLUS))
8214 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
1da177e4
LT
8215 }
8216 }
8217
8218 return 0;
8219}
8220
65ec698d
MC
8221static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8222static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
92feeabf 8223
1da177e4 8224/* tp->lock is held. */
944d980e 8225static int tg3_halt(struct tg3 *tp, int kind, int silent)
1da177e4
LT
8226{
8227 int err;
8228
8229 tg3_stop_fw(tp);
8230
944d980e 8231 tg3_write_sig_pre_reset(tp, kind);
1da177e4 8232
b3b7d6be 8233 tg3_abort_hw(tp, silent);
1da177e4
LT
8234 err = tg3_chip_reset(tp);
8235
daba2a63
MC
8236 __tg3_set_mac_addr(tp, 0);
8237
944d980e
MC
8238 tg3_write_sig_legacy(tp, kind);
8239 tg3_write_sig_post_reset(tp, kind);
1da177e4 8240
92feeabf
MC
8241 if (tp->hw_stats) {
8242 /* Save the stats across chip resets... */
b4017c53 8243 tg3_get_nstats(tp, &tp->net_stats_prev);
92feeabf
MC
8244 tg3_get_estats(tp, &tp->estats_prev);
8245
8246 /* And make sure the next sample is new data */
8247 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8248 }
8249
1da177e4
LT
8250 if (err)
8251 return err;
8252
8253 return 0;
8254}
8255
/* ndo_set_mac_address handler: validate and install a new MAC address.
 *
 * @p: struct sockaddr carrying the new address in sa_data.
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid ethernet address.
 * If the interface is down, only dev->dev_addr is updated; the hardware
 * registers are programmed later when the device is brought up.
 */
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		/* Read back the currently-programmed MAC address slots to
		 * detect whether the ASF firmware has claimed slot 1 for
		 * its own (different, non-zero) address.
		 */
		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
8289
/* tp->lock is held. */
/* Program one TG3_BDINFO ring-control block in NIC shared memory:
 * host DMA address (split into high/low 32-bit halves), the combined
 * max-length/flags word, and — on pre-5705 chips only — the NIC-local
 * SRAM address of the descriptors.
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	/* 5705 and later have no NIC-resident descriptor memory to point at. */
	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
8310

/* Program the transmit interrupt-coalescing registers from @ec.
 *
 * Without TSS, only the vector-0 registers carry the configured values
 * (and i stays 0 so the trailing loop zeroes every per-vector register).
 * With TSS, vector 0 is zeroed and each active tx queue gets its own
 * per-vector register set; any remaining vectors are zeroed afterwards.
 * Note i is deliberately shared between the two loops.
 */
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			/* Per-vector register blocks are 0x18 bytes apart. */
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* Zero out the registers of any unused interrupt vectors. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
8343
/* Program the receive interrupt-coalescing registers from @ec.
 *
 * Mirrors tg3_coal_tx_init() for the rx side: without RSS the vector-0
 * registers get the configured values and one rx queue's worth of
 * per-vector programming is skipped (limit--); with RSS, vector 0 is
 * zeroed and each rx queue is programmed individually. Remaining
 * vectors are always zeroed. i is shared across the loops on purpose.
 */
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		/* Queue 0 was just handled via the vector-0 registers. */
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		/* Per-vector register blocks are 0x18 bytes apart. */
		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	/* Zero out the registers of any unused interrupt vectors. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
19cfaecc 8377
/* Apply a full ethtool coalescing configuration to the chip.
 *
 * Delegates tx/rx programming to the helpers above, then on pre-5705
 * hardware also programs the per-interrupt tick registers and the
 * statistics-block coalescing interval (forced to 0 while the link is
 * down so no stats DMA runs without a carrier).
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
1da177e4 8395
/* tp->lock is held. */
/* Reset all rings to a pristine post-chip-reset state: disable the unused
 * NIC ring-control blocks, zero the mailbox registers and driver-side ring
 * indices, clear the host status blocks, and re-program the DMA addresses
 * of the active tx/rx-return rings for every interrupt vector.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	/* The number of send ring-control blocks varies by chip family. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	/* Writing 1 to the interrupt mailbox presumably masks the vector
	 * until re-enabled — NOTE(review): confirm against chip docs.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, all tx traffic uses vector 0's producer mbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Program the status block address and ring-control blocks for
	 * each additional interrupt vector (napi[1]..napi[irq_cnt-1]).
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		/* Per-vector status block address registers are 8 bytes apart. */
		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8523
/* Program the receive buffer-descriptor replenish thresholds.
 *
 * Picks the chip-appropriate BD-cache size, then sets the standard ring
 * threshold to the smaller of the NIC-side limit (half the cache, capped
 * by rx_std_max_post) and the host-side heuristic (rx_pending/8, min 1).
 * On 57765+ chips the replenish low-water mark is also programmed.
 * The jumbo ring gets the same treatment when the device supports it.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* 5780-class parts don't use the jumbo ring even if jumbo-capable. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
8562
ccd5ba9d
MC
8563static inline u32 calc_crc(unsigned char *buf, int len)
8564{
8565 u32 reg;
8566 u32 tmp;
8567 int j, k;
8568
8569 reg = 0xffffffff;
8570
8571 for (j = 0; j < len; j++) {
8572 reg ^= buf[j];
8573
8574 for (k = 0; k < 8; k++) {
8575 tmp = reg & 0x01;
8576
8577 reg >>= 1;
8578
8579 if (tmp)
8580 reg ^= 0xedb88320;
8581 }
8582 }
8583
8584 return ~reg;
8585}
8586
8587static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8588{
8589 /* accept or reject all multicast frames */
8590 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8591 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8592 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8593 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8594}
8595
/* Apply the device's rx filtering policy (promiscuous / all-multi /
 * multicast hash filter) to the MAC. Caller is expected to hold the
 * appropriate lock — NOTE(review): presumably tp->lock, as with the
 * neighboring helpers; confirm against callers.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address into a 128-bit filter: the low 7 bits
		 * of the inverted CRC select one bit across the four
		 * 32-bit hash registers.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX_MODE register when the mode actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8649
9102426a 8650static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
90415477
MC
8651{
8652 int i;
8653
8654 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9102426a 8655 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
90415477
MC
8656}
8657
/* Sanity-check the cached RSS indirection table against the current
 * interrupt-vector configuration, rebuilding it with defaults if any
 * entry points at a queue that no longer exists.
 */
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	/* With at most one rx queue (vector 0 + one data vector) every
	 * entry must be 0, so just clear the table.
	 */
	if (tp->irq_cnt <= 2) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
8679
/* Write the RSS indirection table to the MAC, packing eight 4-bit
 * table entries into each successive 32-bit register starting at
 * MAC_RSS_INDIR_TBL_0 (entry i lands in the higher-order nibbles).
 */
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		/* Fold the next seven entries into the same word. */
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
8696
1da177e4 8697/* tp->lock is held. */
8e7a22e3 8698static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
8699{
8700 u32 val, rdmac_mode;
8701 int i, err, limit;
8fea32b9 8702 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
1da177e4
LT
8703
8704 tg3_disable_ints(tp);
8705
8706 tg3_stop_fw(tp);
8707
8708 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8709
63c3a66f 8710 if (tg3_flag(tp, INIT_COMPLETE))
e6de8ad1 8711 tg3_abort_hw(tp, 1);
1da177e4 8712
699c0193
MC
8713 /* Enable MAC control of LPI */
8714 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8715 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8716 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8717 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8718
8719 tw32_f(TG3_CPMU_EEE_CTRL,
8720 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8721
a386b901
MC
8722 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8723 TG3_CPMU_EEEMD_LPI_IN_TX |
8724 TG3_CPMU_EEEMD_LPI_IN_RX |
8725 TG3_CPMU_EEEMD_EEE_ENABLE;
8726
8727 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8728 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8729
63c3a66f 8730 if (tg3_flag(tp, ENABLE_APE))
a386b901
MC
8731 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8732
8733 tw32_f(TG3_CPMU_EEE_MODE, val);
8734
8735 tw32_f(TG3_CPMU_EEE_DBTMR1,
8736 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8737 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8738
8739 tw32_f(TG3_CPMU_EEE_DBTMR2,
d7f2ab20 8740 TG3_CPMU_DBTMR2_APE_TX_2047US |
a386b901 8741 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
699c0193
MC
8742 }
8743
603f1173 8744 if (reset_phy)
d4d2c558
MC
8745 tg3_phy_reset(tp);
8746
1da177e4
LT
8747 err = tg3_chip_reset(tp);
8748 if (err)
8749 return err;
8750
8751 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8752
bcb37f6c 8753 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
d30cdd28
MC
8754 val = tr32(TG3_CPMU_CTRL);
8755 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8756 tw32(TG3_CPMU_CTRL, val);
9acb961e
MC
8757
8758 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8759 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8760 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8761 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8762
8763 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8764 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8765 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8766 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8767
8768 val = tr32(TG3_CPMU_HST_ACC);
8769 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8770 val |= CPMU_HST_ACC_MACCLK_6_25;
8771 tw32(TG3_CPMU_HST_ACC, val);
d30cdd28
MC
8772 }
8773
33466d93
MC
8774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8775 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8776 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8777 PCIE_PWR_MGMT_L1_THRESH_4MS;
8778 tw32(PCIE_PWR_MGMT_THRESH, val);
521e6b90
MC
8779
8780 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8781 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8782
8783 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
33466d93 8784
f40386c8
MC
8785 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8786 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
255ca311
MC
8787 }
8788
63c3a66f 8789 if (tg3_flag(tp, L1PLLPD_EN)) {
614b0590
MC
8790 u32 grc_mode = tr32(GRC_MODE);
8791
8792 /* Access the lower 1K of PL PCIE block registers. */
8793 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8794 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8795
8796 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8797 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8798 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8799
8800 tw32(GRC_MODE, grc_mode);
8801 }
8802
55086ad9 8803 if (tg3_flag(tp, 57765_CLASS)) {
5093eedc
MC
8804 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8805 u32 grc_mode = tr32(GRC_MODE);
cea46462 8806
5093eedc
MC
8807 /* Access the lower 1K of PL PCIE block registers. */
8808 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8809 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
cea46462 8810
5093eedc
MC
8811 val = tr32(TG3_PCIE_TLDLPL_PORT +
8812 TG3_PCIE_PL_LO_PHYCTL5);
8813 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8814 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
cea46462 8815
5093eedc
MC
8816 tw32(GRC_MODE, grc_mode);
8817 }
a977dbe8 8818
1ff30a59
MC
8819 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8820 u32 grc_mode = tr32(GRC_MODE);
8821
8822 /* Access the lower 1K of DL PCIE block registers. */
8823 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8824 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8825
8826 val = tr32(TG3_PCIE_TLDLPL_PORT +
8827 TG3_PCIE_DL_LO_FTSMAX);
8828 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8829 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8830 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8831
8832 tw32(GRC_MODE, grc_mode);
8833 }
8834
a977dbe8
MC
8835 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8836 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8837 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8838 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
cea46462
MC
8839 }
8840
1da177e4
LT
8841 /* This works around an issue with Athlon chipsets on
8842 * B3 tigon3 silicon. This bit has no effect on any
8843 * other revision. But do not set this on PCI Express
795d01c5 8844 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 8845 */
63c3a66f
JP
8846 if (!tg3_flag(tp, CPMU_PRESENT)) {
8847 if (!tg3_flag(tp, PCI_EXPRESS))
795d01c5
MC
8848 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8849 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8850 }
1da177e4
LT
8851
8852 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
63c3a66f 8853 tg3_flag(tp, PCIX_MODE)) {
1da177e4
LT
8854 val = tr32(TG3PCI_PCISTATE);
8855 val |= PCISTATE_RETRY_SAME_DMA;
8856 tw32(TG3PCI_PCISTATE, val);
8857 }
8858
63c3a66f 8859 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
8860 /* Allow reads and writes to the
8861 * APE register and memory space.
8862 */
8863 val = tr32(TG3PCI_PCISTATE);
8864 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
8865 PCISTATE_ALLOW_APE_SHMEM_WR |
8866 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
8867 tw32(TG3PCI_PCISTATE, val);
8868 }
8869
1da177e4
LT
8870 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8871 /* Enable some hw fixes. */
8872 val = tr32(TG3PCI_MSI_DATA);
8873 val |= (1 << 26) | (1 << 28) | (1 << 29);
8874 tw32(TG3PCI_MSI_DATA, val);
8875 }
8876
8877 /* Descriptor ring init may make accesses to the
8878 * NIC SRAM area to setup the TX descriptors, so we
8879 * can only do this after the hardware has been
8880 * successfully reset.
8881 */
32d8c572
MC
8882 err = tg3_init_rings(tp);
8883 if (err)
8884 return err;
1da177e4 8885
63c3a66f 8886 if (tg3_flag(tp, 57765_PLUS)) {
cbf9ca6c
MC
8887 val = tr32(TG3PCI_DMA_RW_CTRL) &
8888 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
1a319025
MC
8889 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8890 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
55086ad9 8891 if (!tg3_flag(tp, 57765_CLASS) &&
0aebff48
MC
8892 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8893 val |= DMA_RWCTRL_TAGGED_STAT_WA;
cbf9ca6c
MC
8894 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8895 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8896 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
8897 /* This value is determined during the probe time DMA
8898 * engine test, tg3_test_dma.
8899 */
8900 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8901 }
1da177e4
LT
8902
8903 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8904 GRC_MODE_4X_NIC_SEND_RINGS |
8905 GRC_MODE_NO_TX_PHDR_CSUM |
8906 GRC_MODE_NO_RX_PHDR_CSUM);
8907 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
8908
8909 /* Pseudo-header checksum is done by hardware logic and not
8910 * the offload processers, so make the chip do the pseudo-
8911 * header checksums on receive. For transmit it is more
8912 * convenient to do the pseudo-header checksum in software
8913 * as Linux does that on transmit for us in all cases.
8914 */
8915 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
8916
8917 tw32(GRC_MODE,
8918 tp->grc_mode |
8919 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8920
8921 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8922 val = tr32(GRC_MISC_CFG);
8923 val &= ~0xff;
8924 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8925 tw32(GRC_MISC_CFG, val);
8926
8927 /* Initialize MBUF/DESC pool. */
63c3a66f 8928 if (tg3_flag(tp, 5750_PLUS)) {
1da177e4
LT
8929 /* Do nothing. */
8930 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8931 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8932 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8933 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8934 else
8935 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8936 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8937 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
63c3a66f 8938 } else if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
8939 int fw_len;
8940
077f849d 8941 fw_len = tp->fw_len;
1da177e4
LT
8942 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8943 tw32(BUFMGR_MB_POOL_ADDR,
8944 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8945 tw32(BUFMGR_MB_POOL_SIZE,
8946 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8947 }
1da177e4 8948
0f893dc6 8949 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
8950 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8951 tp->bufmgr_config.mbuf_read_dma_low_water);
8952 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8953 tp->bufmgr_config.mbuf_mac_rx_low_water);
8954 tw32(BUFMGR_MB_HIGH_WATER,
8955 tp->bufmgr_config.mbuf_high_water);
8956 } else {
8957 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8958 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8959 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8960 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8961 tw32(BUFMGR_MB_HIGH_WATER,
8962 tp->bufmgr_config.mbuf_high_water_jumbo);
8963 }
8964 tw32(BUFMGR_DMA_LOW_WATER,
8965 tp->bufmgr_config.dma_low_water);
8966 tw32(BUFMGR_DMA_HIGH_WATER,
8967 tp->bufmgr_config.dma_high_water);
8968
d309a46e
MC
8969 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8970 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8971 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
4d958473
MC
8972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8973 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8974 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8975 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
d309a46e 8976 tw32(BUFMGR_MODE, val);
1da177e4
LT
8977 for (i = 0; i < 2000; i++) {
8978 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8979 break;
8980 udelay(10);
8981 }
8982 if (i >= 2000) {
05dbe005 8983 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
1da177e4
LT
8984 return -ENODEV;
8985 }
8986
eb07a940
MC
8987 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8988 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
b5d3772c 8989
eb07a940 8990 tg3_setup_rxbd_thresholds(tp);
1da177e4
LT
8991
8992 /* Initialize TG3_BDINFO's at:
8993 * RCVDBDI_STD_BD: standard eth size rx ring
8994 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8995 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8996 *
8997 * like so:
8998 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8999 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9000 * ring attribute flags
9001 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9002 *
9003 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9004 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9005 *
9006 * The size of each ring is fixed in the firmware, but the location is
9007 * configurable.
9008 */
9009 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 9010 ((u64) tpr->rx_std_mapping >> 32));
1da177e4 9011 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 9012 ((u64) tpr->rx_std_mapping & 0xffffffff));
63c3a66f 9013 if (!tg3_flag(tp, 5717_PLUS))
87668d35
MC
9014 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9015 NIC_SRAM_RX_BUFFER_DESC);
1da177e4 9016
fdb72b38 9017 /* Disable the mini ring */
63c3a66f 9018 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
9019 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9020 BDINFO_FLAGS_DISABLED);
9021
fdb72b38
MC
9022 /* Program the jumbo buffer descriptor ring control
9023 * blocks on those devices that have them.
9024 */
a0512944 9025 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
63c3a66f 9026 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
1da177e4 9027
63c3a66f 9028 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
1da177e4 9029 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 9030 ((u64) tpr->rx_jmb_mapping >> 32));
1da177e4 9031 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 9032 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
de9f5230
MC
9033 val = TG3_RX_JMB_RING_SIZE(tp) <<
9034 BDINFO_FLAGS_MAXLEN_SHIFT;
1da177e4 9035 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
de9f5230 9036 val | BDINFO_FLAGS_USE_EXT_RECV);
63c3a66f 9037 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
55086ad9 9038 tg3_flag(tp, 57765_CLASS))
87668d35
MC
9039 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9040 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
1da177e4
LT
9041 } else {
9042 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9043 BDINFO_FLAGS_DISABLED);
9044 }
9045
63c3a66f 9046 if (tg3_flag(tp, 57765_PLUS)) {
fa6b2aae 9047 val = TG3_RX_STD_RING_SIZE(tp);
7cb32cf2
MC
9048 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9049 val |= (TG3_RX_STD_DMA_SZ << 2);
9050 } else
04380d40 9051 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38 9052 } else
de9f5230 9053 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38
MC
9054
9055 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
1da177e4 9056
411da640 9057 tpr->rx_std_prod_idx = tp->rx_pending;
66711e66 9058 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
1da177e4 9059
63c3a66f
JP
9060 tpr->rx_jmb_prod_idx =
9061 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
66711e66 9062 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
1da177e4 9063
2d31ecaf
MC
9064 tg3_rings_reset(tp);
9065
1da177e4 9066 /* Initialize MAC address and backoff seed. */
986e0aeb 9067 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
9068
9069 /* MTU + ethernet header + FCS + optional VLAN tag */
f7b493e0
MC
9070 tw32(MAC_RX_MTU_SIZE,
9071 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
1da177e4
LT
9072
9073 /* The slot time is changed by tg3_setup_phy if we
9074 * run at gigabit with half duplex.
9075 */
f2096f94
MC
9076 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9077 (6 << TX_LENGTHS_IPG_SHIFT) |
9078 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9079
9080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9081 val |= tr32(MAC_TX_LENGTHS) &
9082 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9083 TX_LENGTHS_CNT_DWN_VAL_MSK);
9084
9085 tw32(MAC_TX_LENGTHS, val);
1da177e4
LT
9086
9087 /* Receive rules. */
9088 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9089 tw32(RCVLPC_CONFIG, 0x0181);
9090
9091 /* Calculate RDMAC_MODE setting early, we need it to determine
9092 * the RCVLPC_STATE_ENABLE mask.
9093 */
9094 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9095 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9096 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9097 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9098 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 9099
deabaac8 9100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
0339e4e3
MC
9101 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9102
57e6983c 9103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0
MC
9104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
d30cdd28
MC
9106 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9107 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9108 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9109
c5908939
MC
9110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9111 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 9112 if (tg3_flag(tp, TSO_CAPABLE) &&
c13e3713 9113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
9114 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9115 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 9116 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
9117 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9118 }
9119 }
9120
63c3a66f 9121 if (tg3_flag(tp, PCI_EXPRESS))
85e94ced
MC
9122 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9123
63c3a66f
JP
9124 if (tg3_flag(tp, HW_TSO_1) ||
9125 tg3_flag(tp, HW_TSO_2) ||
9126 tg3_flag(tp, HW_TSO_3))
027455ad
MC
9127 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9128
108a6c16 9129 if (tg3_flag(tp, 57765_PLUS) ||
e849cdc3 9130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
027455ad
MC
9131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9132 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
1da177e4 9133
f2096f94
MC
9134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9135 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9136
41a8a7ee
MC
9137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9140 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f 9141 tg3_flag(tp, 57765_PLUS)) {
41a8a7ee 9142 val = tr32(TG3_RDMA_RSRVCTRL_REG);
10ce95d6 9143 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
b4495ed8
MC
9144 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9145 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9146 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9147 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9148 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9149 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
b75cc0e4 9150 }
41a8a7ee
MC
9151 tw32(TG3_RDMA_RSRVCTRL_REG,
9152 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9153 }
9154
d78b59f5
MC
9155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
d309a46e
MC
9157 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9158 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9159 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9160 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9161 }
9162
1da177e4 9163 /* Receive/send statistics. */
63c3a66f 9164 if (tg3_flag(tp, 5750_PLUS)) {
1661394e
MC
9165 val = tr32(RCVLPC_STATS_ENABLE);
9166 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9167 tw32(RCVLPC_STATS_ENABLE, val);
9168 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
63c3a66f 9169 tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
9170 val = tr32(RCVLPC_STATS_ENABLE);
9171 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9172 tw32(RCVLPC_STATS_ENABLE, val);
9173 } else {
9174 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9175 }
9176 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9177 tw32(SNDDATAI_STATSENAB, 0xffffff);
9178 tw32(SNDDATAI_STATSCTRL,
9179 (SNDDATAI_SCTRL_ENABLE |
9180 SNDDATAI_SCTRL_FASTUPD));
9181
9182 /* Setup host coalescing engine. */
9183 tw32(HOSTCC_MODE, 0);
9184 for (i = 0; i < 2000; i++) {
9185 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9186 break;
9187 udelay(10);
9188 }
9189
d244c892 9190 __tg3_set_coalesce(tp, &tp->coal);
1da177e4 9191
63c3a66f 9192 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
9193 /* Status/statistics block address. See tg3_timer,
9194 * the tg3_periodic_fetch_stats call there, and
9195 * tg3_get_stats to see how this works for 5705/5750 chips.
9196 */
1da177e4
LT
9197 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9198 ((u64) tp->stats_mapping >> 32));
9199 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9200 ((u64) tp->stats_mapping & 0xffffffff));
9201 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
2d31ecaf 9202
1da177e4 9203 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
2d31ecaf
MC
9204
9205 /* Clear statistics and status block memory areas */
9206 for (i = NIC_SRAM_STATS_BLK;
9207 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9208 i += sizeof(u32)) {
9209 tg3_write_mem(tp, i, 0);
9210 udelay(40);
9211 }
1da177e4
LT
9212 }
9213
9214 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9215
9216 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9217 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
63c3a66f 9218 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
9219 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9220
f07e9af3
MC
9221 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9222 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c94e3941
MC
9223 /* reset to prevent losing 1st rx packet intermittently */
9224 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9225 udelay(10);
9226 }
9227
3bda1258 9228 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9e975cc2
MC
9229 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9230 MAC_MODE_FHDE_ENABLE;
9231 if (tg3_flag(tp, ENABLE_APE))
9232 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
63c3a66f 9233 if (!tg3_flag(tp, 5705_PLUS) &&
f07e9af3 9234 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
e8f3f6ca
MC
9235 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9236 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
9237 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9238 udelay(40);
9239
314fba34 9240 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
63c3a66f 9241 * If TG3_FLAG_IS_NIC is zero, we should read the
314fba34
MC
9242 * register to preserve the GPIO settings for LOMs. The GPIOs,
9243 * whether used as inputs or outputs, are set by boot code after
9244 * reset.
9245 */
63c3a66f 9246 if (!tg3_flag(tp, IS_NIC)) {
314fba34
MC
9247 u32 gpio_mask;
9248
9d26e213
MC
9249 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9250 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9251 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
9252
9253 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9254 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9255 GRC_LCLCTRL_GPIO_OUTPUT3;
9256
af36e6b6
MC
9257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9258 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9259
aaf84465 9260 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
9261 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9262
9263 /* GPIO1 must be driven high for eeprom write protect */
63c3a66f 9264 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9d26e213
MC
9265 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9266 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 9267 }
1da177e4
LT
9268 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9269 udelay(100);
9270
c3b5003b 9271 if (tg3_flag(tp, USING_MSIX)) {
baf8a94a 9272 val = tr32(MSGINT_MODE);
c3b5003b
MC
9273 val |= MSGINT_MODE_ENABLE;
9274 if (tp->irq_cnt > 1)
9275 val |= MSGINT_MODE_MULTIVEC_EN;
5b39de91
MC
9276 if (!tg3_flag(tp, 1SHOT_MSI))
9277 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
baf8a94a
MC
9278 tw32(MSGINT_MODE, val);
9279 }
9280
63c3a66f 9281 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
9282 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9283 udelay(40);
9284 }
9285
9286 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9287 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9288 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9289 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9290 WDMAC_MODE_LNGREAD_ENAB);
9291
c5908939
MC
9292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9293 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 9294 if (tg3_flag(tp, TSO_CAPABLE) &&
1da177e4
LT
9295 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9296 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9297 /* nothing */
9298 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 9299 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
9300 val |= WDMAC_MODE_RX_ACCEL;
9301 }
9302 }
9303
d9ab5ad1 9304 /* Enable host coalescing bug fix */
63c3a66f 9305 if (tg3_flag(tp, 5755_PLUS))
f51f3562 9306 val |= WDMAC_MODE_STATUS_TAG_FIX;
d9ab5ad1 9307
788a035e
MC
9308 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9309 val |= WDMAC_MODE_BURST_ALL_DATA;
9310
1da177e4
LT
9311 tw32_f(WDMAC_MODE, val);
9312 udelay(40);
9313
63c3a66f 9314 if (tg3_flag(tp, PCIX_MODE)) {
9974a356
MC
9315 u16 pcix_cmd;
9316
9317 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9318 &pcix_cmd);
1da177e4 9319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
9320 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9321 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 9322 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
9323 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9324 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 9325 }
9974a356
MC
9326 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9327 pcix_cmd);
1da177e4
LT
9328 }
9329
9330 tw32_f(RDMAC_MODE, rdmac_mode);
9331 udelay(40);
9332
091f0ea3
MC
9333 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9334 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9335 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9336 break;
9337 }
9338 if (i < TG3_NUM_RDMA_CHANNELS) {
9339 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9340 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9341 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9342 tg3_flag_set(tp, 5719_RDMA_BUG);
9343 }
9344 }
9345
1da177e4 9346 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
63c3a66f 9347 if (!tg3_flag(tp, 5705_PLUS))
1da177e4 9348 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
9349
9350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9351 tw32(SNDDATAC_MODE,
9352 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9353 else
9354 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9355
1da177e4
LT
9356 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9357 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7cb32cf2 9358 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
63c3a66f 9359 if (tg3_flag(tp, LRG_PROD_RING_CAP))
7cb32cf2
MC
9360 val |= RCVDBDI_MODE_LRG_RING_SZ;
9361 tw32(RCVDBDI_MODE, val);
1da177e4 9362 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
63c3a66f
JP
9363 if (tg3_flag(tp, HW_TSO_1) ||
9364 tg3_flag(tp, HW_TSO_2) ||
9365 tg3_flag(tp, HW_TSO_3))
1da177e4 9366 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
baf8a94a 9367 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
63c3a66f 9368 if (tg3_flag(tp, ENABLE_TSS))
baf8a94a
MC
9369 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9370 tw32(SNDBDI_MODE, val);
1da177e4
LT
9371 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9372
9373 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9374 err = tg3_load_5701_a0_firmware_fix(tp);
9375 if (err)
9376 return err;
9377 }
9378
63c3a66f 9379 if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
9380 err = tg3_load_tso_firmware(tp);
9381 if (err)
9382 return err;
9383 }
1da177e4
LT
9384
9385 tp->tx_mode = TX_MODE_ENABLE;
f2096f94 9386
63c3a66f 9387 if (tg3_flag(tp, 5755_PLUS) ||
b1d05210
MC
9388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9389 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
f2096f94
MC
9390
9391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9392 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9393 tp->tx_mode &= ~val;
9394 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9395 }
9396
1da177e4
LT
9397 tw32_f(MAC_TX_MODE, tp->tx_mode);
9398 udelay(100);
9399
63c3a66f 9400 if (tg3_flag(tp, ENABLE_RSS)) {
bcebcc46 9401 tg3_rss_write_indir_tbl(tp);
baf8a94a
MC
9402
9403 /* Setup the "secret" hash key. */
9404 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9405 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9406 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9407 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9408 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9409 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9410 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9411 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9412 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9413 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9414 }
9415
1da177e4 9416 tp->rx_mode = RX_MODE_ENABLE;
63c3a66f 9417 if (tg3_flag(tp, 5755_PLUS))
af36e6b6
MC
9418 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9419
63c3a66f 9420 if (tg3_flag(tp, ENABLE_RSS))
baf8a94a
MC
9421 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9422 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9423 RX_MODE_RSS_IPV6_HASH_EN |
9424 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9425 RX_MODE_RSS_IPV4_HASH_EN |
9426 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9427
1da177e4
LT
9428 tw32_f(MAC_RX_MODE, tp->rx_mode);
9429 udelay(10);
9430
1da177e4
LT
9431 tw32(MAC_LED_CTRL, tp->led_ctrl);
9432
9433 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
f07e9af3 9434 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4
LT
9435 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9436 udelay(10);
9437 }
9438 tw32_f(MAC_RX_MODE, tp->rx_mode);
9439 udelay(10);
9440
f07e9af3 9441 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4 9442 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
f07e9af3 9443 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
1da177e4
LT
9444 /* Set drive transmission level to 1.2V */
9445 /* only if the signal pre-emphasis bit is not set */
9446 val = tr32(MAC_SERDES_CFG);
9447 val &= 0xfffff000;
9448 val |= 0x880;
9449 tw32(MAC_SERDES_CFG, val);
9450 }
9451 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9452 tw32(MAC_SERDES_CFG, 0x616000);
9453 }
9454
9455 /* Prevent chip from dropping frames when flow control
9456 * is enabled.
9457 */
55086ad9 9458 if (tg3_flag(tp, 57765_CLASS))
666bc831
MC
9459 val = 1;
9460 else
9461 val = 2;
9462 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
1da177e4
LT
9463
9464 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
f07e9af3 9465 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
1da177e4 9466 /* Use hardware link auto-negotiation */
63c3a66f 9467 tg3_flag_set(tp, HW_AUTONEG);
1da177e4
LT
9468 }
9469
f07e9af3 9470 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6ff6f81d 9471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
d4d2c558
MC
9472 u32 tmp;
9473
9474 tmp = tr32(SERDES_RX_CTRL);
9475 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9476 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9477 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9478 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9479 }
9480
63c3a66f 9481 if (!tg3_flag(tp, USE_PHYLIB)) {
c6700ce2 9482 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
80096068 9483 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1da177e4 9484
dd477003
MC
9485 err = tg3_setup_phy(tp, 0);
9486 if (err)
9487 return err;
1da177e4 9488
f07e9af3
MC
9489 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9490 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
dd477003
MC
9491 u32 tmp;
9492
9493 /* Clear CRC stats. */
9494 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9495 tg3_writephy(tp, MII_TG3_TEST1,
9496 tmp | MII_TG3_TEST1_CRC_EN);
f08aa1a8 9497 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
dd477003 9498 }
1da177e4
LT
9499 }
9500 }
9501
9502 __tg3_set_rx_mode(tp->dev);
9503
9504 /* Initialize receive rules. */
9505 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9506 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9507 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9508 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9509
63c3a66f 9510 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
1da177e4
LT
9511 limit = 8;
9512 else
9513 limit = 16;
63c3a66f 9514 if (tg3_flag(tp, ENABLE_ASF))
1da177e4
LT
9515 limit -= 4;
9516 switch (limit) {
9517 case 16:
9518 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9519 case 15:
9520 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9521 case 14:
9522 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9523 case 13:
9524 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9525 case 12:
9526 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9527 case 11:
9528 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9529 case 10:
9530 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9531 case 9:
9532 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9533 case 8:
9534 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9535 case 7:
9536 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9537 case 6:
9538 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9539 case 5:
9540 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9541 case 4:
9542 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9543 case 3:
9544 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9545 case 2:
9546 case 1:
9547
9548 default:
9549 break;
855e1111 9550 }
1da177e4 9551
63c3a66f 9552 if (tg3_flag(tp, ENABLE_APE))
9ce768ea
MC
9553 /* Write our heartbeat update interval to APE. */
9554 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9555 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 9556
1da177e4
LT
9557 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9558
1da177e4
LT
9559 return 0;
9560}
9561
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * @tp:        device private data
 * @reset_phy: non-zero to force a PHY reset during hardware init
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	/* Clear the PCI memory window base before reprogramming the chip. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
9573
aed93e0b
MC
9574static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9575{
9576 int i;
9577
9578 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9579 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9580
9581 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9582 off += len;
9583
9584 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9585 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9586 memset(ocir, 0, TG3_OCIR_LEN);
9587 }
9588}
9589
9590/* sysfs attributes for hwmon */
9591static ssize_t tg3_show_temp(struct device *dev,
9592 struct device_attribute *devattr, char *buf)
9593{
9594 struct pci_dev *pdev = to_pci_dev(dev);
9595 struct net_device *netdev = pci_get_drvdata(pdev);
9596 struct tg3 *tp = netdev_priv(netdev);
9597 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9598 u32 temperature;
9599
9600 spin_lock_bh(&tp->lock);
9601 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9602 sizeof(temperature));
9603 spin_unlock_bh(&tp->lock);
9604 return sprintf(buf, "%u\n", temperature);
9605}
9606
9607
/* One hwmon temperature channel, backed by APE scratchpad records:
 * the current reading plus the caution and maximum thresholds.  The
 * final SENSOR_DEVICE_ATTR argument is the scratchpad offset passed
 * to tg3_show_temp() via attr->index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL	/* sysfs requires a NULL-terminated attribute list */
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
9625
aed93e0b
MC
9626static void tg3_hwmon_close(struct tg3 *tp)
9627{
aed93e0b
MC
9628 if (tp->hwmon_dev) {
9629 hwmon_device_unregister(tp->hwmon_dev);
9630 tp->hwmon_dev = NULL;
9631 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9632 }
aed93e0b
MC
9633}
9634
9635static void tg3_hwmon_open(struct tg3 *tp)
9636{
aed93e0b
MC
9637 int i, err;
9638 u32 size = 0;
9639 struct pci_dev *pdev = tp->pdev;
9640 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9641
9642 tg3_sd_scan_scratchpad(tp, ocirs);
9643
9644 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9645 if (!ocirs[i].src_data_length)
9646 continue;
9647
9648 size += ocirs[i].src_hdr_length;
9649 size += ocirs[i].src_data_length;
9650 }
9651
9652 if (!size)
9653 return;
9654
9655 /* Register hwmon sysfs hooks */
9656 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9657 if (err) {
9658 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9659 return;
9660 }
9661
9662 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9663 if (IS_ERR(tp->hwmon_dev)) {
9664 tp->hwmon_dev = NULL;
9665 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9666 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9667 }
aed93e0b
MC
9668}
9669
9670
1da177e4
LT
/* Accumulate the 32-bit hardware counter REG into the 64-bit software
 * counter PSTAT, carrying into ->high when ->low wraps.  REG is read
 * exactly once per invocation.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9677
/* Fold the chip's 32-bit MAC statistics counters into the 64-bit
 * software counters in tp->hw_stats.  Called periodically from the
 * driver timer (see tg3_timer) on 5705+ chips.  Caller holds tp->lock.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Counters are only meaningful (and safe to read) with link up. */
	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 RDMA workaround: once enough packets have been transmitted,
	 * disable the TX-length workaround bit again and clear the flag
	 * (it is re-armed in tg3_reset_hw if needed).
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* On 5717 / 5719-A0 / 5720-A0 the discard counter register is not
	 * usable; derive rx_discards from the mbuf low-watermark attention
	 * bit instead (at most one increment per poll).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Ack the attention bit before counting it. */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9742
/* Workaround for chips that can occasionally miss an MSI: for each
 * vector that still has work pending while its rx/tx consumer indices
 * have not moved since the previous timer tick, invoke the MSI handler
 * directly.  A single grace tick (chk_msi_cnt) is allowed before
 * declaring the interrupt lost.  Called from tg3_timer with tp->lock
 * held on the affected chip revisions only.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					/* First stalled tick: wait one more
					 * before forcing the handler.
					 */
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		/* Progress was made (or handler forced): re-arm tracking. */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
9765
/* Periodic driver timer (see tg3_timer_init for the polling rate).
 * Handles the non-tagged-status interrupt race, once-per-second link
 * polling and statistics fetching, and the ASF firmware heartbeat.
 * Always re-arms itself at the end.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip all work while interrupts are being synchronized or a
	 * reset task is queued, but keep the timer running.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	/* Chips that can drop an MSI need a per-tick check. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status updated but possibly unserviced: force an
			 * interrupt via GRC local control.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Nudge the coalescing engine to post status now. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine died; schedule a full reset. */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link went down ... */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ... or came up. */
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Briefly clear the port mode to
					 * reset the SERDES state machine.
					 */
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9892
/* One-time setup of the periodic driver timer.
 *
 * Chooses the tick period (1s for tagged-status chips, except the 5717
 * and 57765-class parts, otherwise 100ms) and derives from it the
 * reload values for the per-second timer counter and the ASF heartbeat
 * counter (TG3_FW_UPDATE_FREQ_SEC seconds).  The timer is initialized
 * but not armed; tg3_timer_start() does that.
 */
static void __devinit tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	/* Number of ticks per second, and per ASF heartbeat interval. */
	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
9912
9913static void tg3_timer_start(struct tg3 *tp)
9914{
9915 tp->asf_counter = tp->asf_multiplier;
9916 tp->timer_counter = tp->timer_multiplier;
9917
9918 tp->timer.expires = jiffies + tp->timer_offset;
9919 add_timer(&tp->timer);
9920}
9921
/* Stop the periodic driver timer, waiting for a concurrently running
 * handler to finish before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
9926
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and the interface is closed; note the
 * error path must drop tp->lock around tg3_timer_stop()/dev_close() and
 * re-acquire it before returning (hence the sparse annotations).
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() and del_timer_sync() may sleep/self-deadlock
		 * with the lock held, so release it around the teardown.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
9950
/* Workqueue handler: reset and re-initialize the chip after a failure
 * (e.g. tx timeout) detected in a context that could not do it directly.
 * Clears RESET_TASK_PENDING when done.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface went down meanwhile. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* Switch to flushed (non-posted) mailbox writes in case
		 * write reordering contributed to the tx failure.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if re-init succeeded. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
9994
4f125f42 9995static int tg3_request_irq(struct tg3 *tp, int irq_num)
fcfa0a32 9996{
7d12e780 9997 irq_handler_t fn;
fcfa0a32 9998 unsigned long flags;
4f125f42
MC
9999 char *name;
10000 struct tg3_napi *tnapi = &tp->napi[irq_num];
10001
10002 if (tp->irq_cnt == 1)
10003 name = tp->dev->name;
10004 else {
10005 name = &tnapi->irq_lbl[0];
10006 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10007 name[IFNAMSIZ-1] = 0;
10008 }
fcfa0a32 10009
63c3a66f 10010 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
fcfa0a32 10011 fn = tg3_msi;
63c3a66f 10012 if (tg3_flag(tp, 1SHOT_MSI))
fcfa0a32 10013 fn = tg3_msi_1shot;
ab392d2d 10014 flags = 0;
fcfa0a32
MC
10015 } else {
10016 fn = tg3_interrupt;
63c3a66f 10017 if (tg3_flag(tp, TAGGED_STATUS))
fcfa0a32 10018 fn = tg3_interrupt_tagged;
ab392d2d 10019 flags = IRQF_SHARED;
fcfa0a32 10020 }
4f125f42
MC
10021
10022 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
fcfa0a32
MC
10023}
10024
/* Verify that the chip can deliver an interrupt to the host.
 *
 * Temporarily replaces vector 0's handler with tg3_test_isr, forces an
 * interrupt through the coalescing engine, and polls for evidence of
 * delivery (non-zero interrupt mailbox, or the PCI INT mask bit that
 * the ISR sets).  The normal handler is reinstalled before returning.
 *
 * Returns 0 on success, -EIO if no interrupt was observed, -ENODEV if
 * the device is down, or another negative errno on setup failure.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate interrupt via the coalescing engine. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to ~50ms for the interrupt to show up. */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Ack a pending status update so the chip can retrigger. */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the regular handler for vector 0. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10098
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * Any error other than -EIO from tg3_test_interrupt() is propagated
 * as-is; -EIO specifically means "no interrupt seen", which triggers
 * the fallback to INTx plus a full chip reset.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10159
/* Fetch the firmware blob named in tp->fw_needed and sanity-check its
 * advertised length.  On success tp->fw holds the blob, tp->fw_len the
 * full image length, and tp->fw_needed is cleared so the request is not
 * repeated.  Returns 0, -ENOENT if the blob is unavailable, or -EINVAL
 * if its header is inconsistent.
 */
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		/* Advertised length shorter than the payload after the
		 * 12-byte header: the blob is corrupt or mismatched.
		 */
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
10190
9102426a 10191static u32 tg3_irq_count(struct tg3 *tp)
679563f4 10192{
9102426a 10193 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
679563f4 10194
9102426a 10195 if (irq_cnt > 1) {
c3b5003b
MC
10196 /* We want as many rx rings enabled as there are cpus.
10197 * In multiqueue MSI-X mode, the first MSI-X vector
10198 * only deals with link interrupts, etc, so we add
10199 * one to the number of vectors we are requesting.
10200 */
9102426a 10201 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
c3b5003b 10202 }
679563f4 10203
9102426a
MC
10204 return irq_cnt;
10205}
10206
/* Try to switch the device to MSI-X mode.
 *
 * Computes the desired rx/tx queue counts, requests that many vectors
 * (retrying once with however many the PCI core offers), records the
 * resulting vector numbers in tp->napi[], and enables RSS/TSS when more
 * than one vector was granted.  Returns true if MSI-X is now in use,
 * false if the caller should fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	/* User-requested counts take priority; otherwise default the rx
	 * queue count from the number of CPUs, bounded by the hardware.
	 */
	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors available than requested: retry with the
		 * count the PCI core said it could give us.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		/* One vector is reserved for link events; the rest feed rx. */
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* Single vector: MSI-X without RSS/TSS. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10267
/* Choose and configure the interrupt delivery mode (MSI-X, MSI, or
 * legacy INTx) and program MSGINT_MODE accordingly.  Falls back to the
 * single-vector default configuration when neither MSI variant can be
 * used.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	/* Prefer MSI-X, then MSI; otherwise stay on INTx. */
	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	/* With a single vector there is exactly one rx and one tx queue. */
	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10306
10307static void tg3_ints_fini(struct tg3 *tp)
10308{
63c3a66f 10309 if (tg3_flag(tp, USING_MSIX))
679563f4 10310 pci_disable_msix(tp->pdev);
63c3a66f 10311 else if (tg3_flag(tp, USING_MSI))
679563f4 10312 pci_disable_msi(tp->pdev);
63c3a66f
JP
10313 tg3_flag_clear(tp, USING_MSI);
10314 tg3_flag_clear(tp, USING_MSIX);
10315 tg3_flag_clear(tp, ENABLE_RSS);
10316 tg3_flag_clear(tp, ENABLE_TSS);
07b0173c
MC
10317}
10318
/* Bring the device fully up: configure interrupts, allocate DMA rings,
 * request IRQ vectors, program the hardware, and start the timer and tx
 * queues.  @reset_phy is forwarded to tg3_init_hw(); @test_irq selects
 * whether to run the MSI delivery test.  Returns 0 or a negative errno,
 * with everything unwound on failure.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors acquired so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			/* tg3_test_msi() has already released the IRQ on
			 * this path, so skip err_out3's free_irq loop.
			 */
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
10427
/* Tear down everything tg3_start() set up, in reverse order: stop NAPI,
 * the timer and the PHY, halt the chip, release IRQ vectors, and free
 * ring memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_napi_disable(tp);
	/* Make sure no reset task is still queued or running. */
	tg3_reset_task_cancel(tp);

	netif_tx_disable(tp->dev);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
10464
/* ndo_open: load firmware if needed, power the chip up, and bring the
 * interface online via tg3_start().
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* NOTE(review): 5701 A0 aborts the open on firmware
			 * failure while other chips merely lose TSO —
			 * presumably this revision cannot run without it.
			 */
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true);
	if (err) {
		/* Startup failed: park the device back in low power. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}
	return err;
}
10504
1da177e4
LT
10505static int tg3_close(struct net_device *dev)
10506{
10507 struct tg3 *tp = netdev_priv(dev);
10508
65138594 10509 tg3_stop(tp);
1da177e4 10510
92feeabf
MC
10511 /* Clear stats across close / open calls */
10512 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10513 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
1da177e4 10514
c866b7ea 10515 tg3_power_down(tp);
bc1c7567
MC
10516
10517 netif_carrier_off(tp->dev);
10518
1da177e4
LT
10519 return 0;
10520}
10521
511d2224 10522static inline u64 get_stat64(tg3_stat64_t *val)
816f8b86
SB
10523{
10524 return ((u64)val->high << 32) | ((u64)val->low);
10525}
10526
/* Return the accumulated rx CRC error count.
 *
 * On 5700/5701 copper parts the count is read from the PHY's rx error
 * counter register (re-arming the CRC counter via MII_TG3_TEST1) and
 * accumulated in tp->phy_crc_errors; all other configurations use the
 * MAC's rx_fcs_errors hardware statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10550
/* Add the current hardware counter for @member to the snapshot saved at
 * the last close, storing the sum in the caller-visible estats.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)

/* Fill @estats with cumulative ethtool statistics: the snapshot saved
 * across close/open (tp->estats_prev) plus the live counters in the
 * hardware statistics block.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
10638
/* Fill @stats with cumulative netdev statistics: the snapshot saved
 * across close/open (tp->net_stats_prev) plus the live hardware
 * counters, mapped onto the rtnl_link_stats64 fields.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-maintained drop counters. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
10694
/* ethtool get_regs_len: size of the buffer tg3_get_regs() fills. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
10699
/* ethtool get_regs: dump the legacy register block into @_p.  The
 * buffer is zeroed first; nothing is read while the PHY is in low
 * power, leaving the dump all zeroes in that case.
 */
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
10718
/* ethtool get_eeprom_len: size of the device's NVRAM. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
10725
/* ethtool get_eeprom: read @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.
 *
 * NVRAM is read in 4-byte big-endian words, so an unaligned head and
 * tail are handled by reading the containing word and copying the
 * needed bytes.  eeprom->len is updated to reflect the bytes actually
 * returned, including on partial failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was successfully read. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
10788
/* ethtool set_eeprom: write @eeprom->len bytes from @data to NVRAM at
 * @eeprom->offset.
 *
 * NVRAM writes must be whole, 4-byte-aligned words.  If the request is
 * unaligned at either end, the bordering words are read first and a
 * temporary buffer is assembled (read-modify-write) so no neighboring
 * bytes are clobbered.  The caller must supply the expected magic.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* Preserve the bytes outside the requested range. */
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
10847
/* ethtool get_settings: report supported/advertised modes, link speed,
 * duplex, and related link parameters.  When phylib manages the PHY the
 * query is delegated to phy_ethtool_gset().
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper parts also support 10/100 and report a TP port;
	 * serdes parts report fibre.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Translate the rx/tx flow control configuration into the
	 * Pause/Asym_Pause advertisement bits.
	 */
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No link: speed/duplex/MDI state are unknown. */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
6aa20a22 10913
1da177e4
LT
10914static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10915{
10916 struct tg3 *tp = netdev_priv(dev);
25db0338 10917 u32 speed = ethtool_cmd_speed(cmd);
6aa20a22 10918
63c3a66f 10919 if (tg3_flag(tp, USE_PHYLIB)) {
3f0e3ad7 10920 struct phy_device *phydev;
f07e9af3 10921 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 10922 return -EAGAIN;
3f0e3ad7
MC
10923 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10924 return phy_ethtool_sset(phydev, cmd);
b02fd9e3
MC
10925 }
10926
7e5856bd
MC
10927 if (cmd->autoneg != AUTONEG_ENABLE &&
10928 cmd->autoneg != AUTONEG_DISABLE)
37ff238d 10929 return -EINVAL;
7e5856bd
MC
10930
10931 if (cmd->autoneg == AUTONEG_DISABLE &&
10932 cmd->duplex != DUPLEX_FULL &&
10933 cmd->duplex != DUPLEX_HALF)
37ff238d 10934 return -EINVAL;
1da177e4 10935
7e5856bd
MC
10936 if (cmd->autoneg == AUTONEG_ENABLE) {
10937 u32 mask = ADVERTISED_Autoneg |
10938 ADVERTISED_Pause |
10939 ADVERTISED_Asym_Pause;
10940
f07e9af3 10941 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
7e5856bd
MC
10942 mask |= ADVERTISED_1000baseT_Half |
10943 ADVERTISED_1000baseT_Full;
10944
f07e9af3 10945 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
7e5856bd
MC
10946 mask |= ADVERTISED_100baseT_Half |
10947 ADVERTISED_100baseT_Full |
10948 ADVERTISED_10baseT_Half |
10949 ADVERTISED_10baseT_Full |
10950 ADVERTISED_TP;
10951 else
10952 mask |= ADVERTISED_FIBRE;
10953
10954 if (cmd->advertising & ~mask)
10955 return -EINVAL;
10956
10957 mask &= (ADVERTISED_1000baseT_Half |
10958 ADVERTISED_1000baseT_Full |
10959 ADVERTISED_100baseT_Half |
10960 ADVERTISED_100baseT_Full |
10961 ADVERTISED_10baseT_Half |
10962 ADVERTISED_10baseT_Full);
10963
10964 cmd->advertising &= mask;
10965 } else {
f07e9af3 10966 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
25db0338 10967 if (speed != SPEED_1000)
7e5856bd
MC
10968 return -EINVAL;
10969
10970 if (cmd->duplex != DUPLEX_FULL)
10971 return -EINVAL;
10972 } else {
25db0338
DD
10973 if (speed != SPEED_100 &&
10974 speed != SPEED_10)
7e5856bd
MC
10975 return -EINVAL;
10976 }
10977 }
10978
f47c11ee 10979 tg3_full_lock(tp, 0);
1da177e4
LT
10980
10981 tp->link_config.autoneg = cmd->autoneg;
10982 if (cmd->autoneg == AUTONEG_ENABLE) {
405d8e5c
AG
10983 tp->link_config.advertising = (cmd->advertising |
10984 ADVERTISED_Autoneg);
e740522e
MC
10985 tp->link_config.speed = SPEED_UNKNOWN;
10986 tp->link_config.duplex = DUPLEX_UNKNOWN;
1da177e4
LT
10987 } else {
10988 tp->link_config.advertising = 0;
25db0338 10989 tp->link_config.speed = speed;
1da177e4 10990 tp->link_config.duplex = cmd->duplex;
b02fd9e3 10991 }
6aa20a22 10992
1da177e4
LT
10993 if (netif_running(dev))
10994 tg3_setup_phy(tp, 1);
10995
f47c11ee 10996 tg3_full_unlock(tp);
6aa20a22 10997
1da177e4
LT
10998 return 0;
10999}
6aa20a22 11000
1da177e4
LT
11001static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11002{
11003 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11004
68aad78c
RJ
11005 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11006 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11007 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11008 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
1da177e4 11009}
6aa20a22 11010
1da177e4
LT
11011static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11012{
11013 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11014
63c3a66f 11015 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
a85feb8c
GZ
11016 wol->supported = WAKE_MAGIC;
11017 else
11018 wol->supported = 0;
1da177e4 11019 wol->wolopts = 0;
63c3a66f 11020 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
1da177e4
LT
11021 wol->wolopts = WAKE_MAGIC;
11022 memset(&wol->sopass, 0, sizeof(wol->sopass));
11023}
6aa20a22 11024
1da177e4
LT
11025static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11026{
11027 struct tg3 *tp = netdev_priv(dev);
12dac075 11028 struct device *dp = &tp->pdev->dev;
6aa20a22 11029
1da177e4
LT
11030 if (wol->wolopts & ~WAKE_MAGIC)
11031 return -EINVAL;
11032 if ((wol->wolopts & WAKE_MAGIC) &&
63c3a66f 11033 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
1da177e4 11034 return -EINVAL;
6aa20a22 11035
f2dc0d18
RW
11036 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11037
f47c11ee 11038 spin_lock_bh(&tp->lock);
f2dc0d18 11039 if (device_may_wakeup(dp))
63c3a66f 11040 tg3_flag_set(tp, WOL_ENABLE);
f2dc0d18 11041 else
63c3a66f 11042 tg3_flag_clear(tp, WOL_ENABLE);
f47c11ee 11043 spin_unlock_bh(&tp->lock);
6aa20a22 11044
1da177e4
LT
11045 return 0;
11046}
6aa20a22 11047
1da177e4
LT
11048static u32 tg3_get_msglevel(struct net_device *dev)
11049{
11050 struct tg3 *tp = netdev_priv(dev);
11051 return tp->msg_enable;
11052}
6aa20a22 11053
1da177e4
LT
11054static void tg3_set_msglevel(struct net_device *dev, u32 value)
11055{
11056 struct tg3 *tp = netdev_priv(dev);
11057 tp->msg_enable = value;
11058}
6aa20a22 11059
1da177e4
LT
11060static int tg3_nway_reset(struct net_device *dev)
11061{
11062 struct tg3 *tp = netdev_priv(dev);
1da177e4 11063 int r;
6aa20a22 11064
1da177e4
LT
11065 if (!netif_running(dev))
11066 return -EAGAIN;
11067
f07e9af3 11068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
c94e3941
MC
11069 return -EINVAL;
11070
63c3a66f 11071 if (tg3_flag(tp, USE_PHYLIB)) {
f07e9af3 11072 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 11073 return -EAGAIN;
3f0e3ad7 11074 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
b02fd9e3
MC
11075 } else {
11076 u32 bmcr;
11077
11078 spin_lock_bh(&tp->lock);
11079 r = -EINVAL;
11080 tg3_readphy(tp, MII_BMCR, &bmcr);
11081 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11082 ((bmcr & BMCR_ANENABLE) ||
f07e9af3 11083 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
b02fd9e3
MC
11084 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11085 BMCR_ANENABLE);
11086 r = 0;
11087 }
11088 spin_unlock_bh(&tp->lock);
1da177e4 11089 }
6aa20a22 11090
1da177e4
LT
11091 return r;
11092}
6aa20a22 11093
1da177e4
LT
11094static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11095{
11096 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11097
2c49a44d 11098 ering->rx_max_pending = tp->rx_std_ring_mask;
63c3a66f 11099 if (tg3_flag(tp, JUMBO_RING_ENABLE))
2c49a44d 11100 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
4f81c32b
MC
11101 else
11102 ering->rx_jumbo_max_pending = 0;
11103
11104 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
11105
11106 ering->rx_pending = tp->rx_pending;
63c3a66f 11107 if (tg3_flag(tp, JUMBO_RING_ENABLE))
4f81c32b
MC
11108 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11109 else
11110 ering->rx_jumbo_pending = 0;
11111
f3f3f27e 11112 ering->tx_pending = tp->napi[0].tx_pending;
1da177e4 11113}
6aa20a22 11114
1da177e4
LT
11115static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11116{
11117 struct tg3 *tp = netdev_priv(dev);
646c9edd 11118 int i, irq_sync = 0, err = 0;
6aa20a22 11119
2c49a44d
MC
11120 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11121 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
bc3a9254
MC
11122 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11123 (ering->tx_pending <= MAX_SKB_FRAGS) ||
63c3a66f 11124 (tg3_flag(tp, TSO_BUG) &&
bc3a9254 11125 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
1da177e4 11126 return -EINVAL;
6aa20a22 11127
bbe832c0 11128 if (netif_running(dev)) {
b02fd9e3 11129 tg3_phy_stop(tp);
1da177e4 11130 tg3_netif_stop(tp);
bbe832c0
MC
11131 irq_sync = 1;
11132 }
1da177e4 11133
bbe832c0 11134 tg3_full_lock(tp, irq_sync);
6aa20a22 11135
1da177e4
LT
11136 tp->rx_pending = ering->rx_pending;
11137
63c3a66f 11138 if (tg3_flag(tp, MAX_RXPEND_64) &&
1da177e4
LT
11139 tp->rx_pending > 63)
11140 tp->rx_pending = 63;
11141 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
646c9edd 11142
6fd45cb8 11143 for (i = 0; i < tp->irq_max; i++)
646c9edd 11144 tp->napi[i].tx_pending = ering->tx_pending;
1da177e4
LT
11145
11146 if (netif_running(dev)) {
944d980e 11147 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
b9ec6c1b
MC
11148 err = tg3_restart_hw(tp, 1);
11149 if (!err)
11150 tg3_netif_start(tp);
1da177e4
LT
11151 }
11152
f47c11ee 11153 tg3_full_unlock(tp);
6aa20a22 11154
b02fd9e3
MC
11155 if (irq_sync && !err)
11156 tg3_phy_start(tp);
11157
b9ec6c1b 11158 return err;
1da177e4 11159}
6aa20a22 11160
1da177e4
LT
11161static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11162{
11163 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11164
63c3a66f 11165 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
8d018621 11166
4a2db503 11167 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
8d018621
MC
11168 epause->rx_pause = 1;
11169 else
11170 epause->rx_pause = 0;
11171
4a2db503 11172 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
8d018621
MC
11173 epause->tx_pause = 1;
11174 else
11175 epause->tx_pause = 0;
1da177e4 11176}
6aa20a22 11177
1da177e4
LT
11178static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11179{
11180 struct tg3 *tp = netdev_priv(dev);
b02fd9e3 11181 int err = 0;
6aa20a22 11182
63c3a66f 11183 if (tg3_flag(tp, USE_PHYLIB)) {
2712168f
MC
11184 u32 newadv;
11185 struct phy_device *phydev;
1da177e4 11186
2712168f 11187 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
f47c11ee 11188
2712168f
MC
11189 if (!(phydev->supported & SUPPORTED_Pause) ||
11190 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
2259dca3 11191 (epause->rx_pause != epause->tx_pause)))
2712168f 11192 return -EINVAL;
1da177e4 11193
2712168f
MC
11194 tp->link_config.flowctrl = 0;
11195 if (epause->rx_pause) {
11196 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11197
11198 if (epause->tx_pause) {
11199 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11200 newadv = ADVERTISED_Pause;
b02fd9e3 11201 } else
2712168f
MC
11202 newadv = ADVERTISED_Pause |
11203 ADVERTISED_Asym_Pause;
11204 } else if (epause->tx_pause) {
11205 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11206 newadv = ADVERTISED_Asym_Pause;
11207 } else
11208 newadv = 0;
11209
11210 if (epause->autoneg)
63c3a66f 11211 tg3_flag_set(tp, PAUSE_AUTONEG);
2712168f 11212 else
63c3a66f 11213 tg3_flag_clear(tp, PAUSE_AUTONEG);
2712168f 11214
f07e9af3 11215 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2712168f
MC
11216 u32 oldadv = phydev->advertising &
11217 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11218 if (oldadv != newadv) {
11219 phydev->advertising &=
11220 ~(ADVERTISED_Pause |
11221 ADVERTISED_Asym_Pause);
11222 phydev->advertising |= newadv;
11223 if (phydev->autoneg) {
11224 /*
11225 * Always renegotiate the link to
11226 * inform our link partner of our
11227 * flow control settings, even if the
11228 * flow control is forced. Let
11229 * tg3_adjust_link() do the final
11230 * flow control setup.
11231 */
11232 return phy_start_aneg(phydev);
b02fd9e3 11233 }
b02fd9e3 11234 }
b02fd9e3 11235
2712168f 11236 if (!epause->autoneg)
b02fd9e3 11237 tg3_setup_flow_control(tp, 0, 0);
2712168f 11238 } else {
c6700ce2 11239 tp->link_config.advertising &=
2712168f
MC
11240 ~(ADVERTISED_Pause |
11241 ADVERTISED_Asym_Pause);
c6700ce2 11242 tp->link_config.advertising |= newadv;
b02fd9e3
MC
11243 }
11244 } else {
11245 int irq_sync = 0;
11246
11247 if (netif_running(dev)) {
11248 tg3_netif_stop(tp);
11249 irq_sync = 1;
11250 }
11251
11252 tg3_full_lock(tp, irq_sync);
11253
11254 if (epause->autoneg)
63c3a66f 11255 tg3_flag_set(tp, PAUSE_AUTONEG);
b02fd9e3 11256 else
63c3a66f 11257 tg3_flag_clear(tp, PAUSE_AUTONEG);
b02fd9e3 11258 if (epause->rx_pause)
e18ce346 11259 tp->link_config.flowctrl |= FLOW_CTRL_RX;
b02fd9e3 11260 else
e18ce346 11261 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
b02fd9e3 11262 if (epause->tx_pause)
e18ce346 11263 tp->link_config.flowctrl |= FLOW_CTRL_TX;
b02fd9e3 11264 else
e18ce346 11265 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
b02fd9e3
MC
11266
11267 if (netif_running(dev)) {
11268 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11269 err = tg3_restart_hw(tp, 1);
11270 if (!err)
11271 tg3_netif_start(tp);
11272 }
11273
11274 tg3_full_unlock(tp);
11275 }
6aa20a22 11276
b9ec6c1b 11277 return err;
1da177e4 11278}
6aa20a22 11279
de6f31eb 11280static int tg3_get_sset_count(struct net_device *dev, int sset)
1da177e4 11281{
b9f2c044
JG
11282 switch (sset) {
11283 case ETH_SS_TEST:
11284 return TG3_NUM_TEST;
11285 case ETH_SS_STATS:
11286 return TG3_NUM_STATS;
11287 default:
11288 return -EOPNOTSUPP;
11289 }
4cafd3f5
MC
11290}
11291
90415477
MC
11292static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11293 u32 *rules __always_unused)
11294{
11295 struct tg3 *tp = netdev_priv(dev);
11296
11297 if (!tg3_flag(tp, SUPPORT_MSIX))
11298 return -EOPNOTSUPP;
11299
11300 switch (info->cmd) {
11301 case ETHTOOL_GRXRINGS:
11302 if (netif_running(tp->dev))
9102426a 11303 info->data = tp->rxq_cnt;
90415477
MC
11304 else {
11305 info->data = num_online_cpus();
9102426a
MC
11306 if (info->data > TG3_RSS_MAX_NUM_QS)
11307 info->data = TG3_RSS_MAX_NUM_QS;
90415477
MC
11308 }
11309
11310 /* The first interrupt vector only
11311 * handles link interrupts.
11312 */
11313 info->data -= 1;
11314 return 0;
11315
11316 default:
11317 return -EOPNOTSUPP;
11318 }
11319}
11320
11321static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11322{
11323 u32 size = 0;
11324 struct tg3 *tp = netdev_priv(dev);
11325
11326 if (tg3_flag(tp, SUPPORT_MSIX))
11327 size = TG3_RSS_INDIR_TBL_SIZE;
11328
11329 return size;
11330}
11331
11332static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11333{
11334 struct tg3 *tp = netdev_priv(dev);
11335 int i;
11336
11337 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11338 indir[i] = tp->rss_ind_tbl[i];
11339
11340 return 0;
11341}
11342
11343static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11344{
11345 struct tg3 *tp = netdev_priv(dev);
11346 size_t i;
11347
11348 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11349 tp->rss_ind_tbl[i] = indir[i];
11350
11351 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11352 return 0;
11353
11354 /* It is legal to write the indirection
11355 * table while the device is running.
11356 */
11357 tg3_full_lock(tp, 0);
11358 tg3_rss_write_indir_tbl(tp);
11359 tg3_full_unlock(tp);
11360
11361 return 0;
11362}
11363
0968169c
MC
11364static void tg3_get_channels(struct net_device *dev,
11365 struct ethtool_channels *channel)
11366{
11367 struct tg3 *tp = netdev_priv(dev);
11368 u32 deflt_qs = netif_get_num_default_rss_queues();
11369
11370 channel->max_rx = tp->rxq_max;
11371 channel->max_tx = tp->txq_max;
11372
11373 if (netif_running(dev)) {
11374 channel->rx_count = tp->rxq_cnt;
11375 channel->tx_count = tp->txq_cnt;
11376 } else {
11377 if (tp->rxq_req)
11378 channel->rx_count = tp->rxq_req;
11379 else
11380 channel->rx_count = min(deflt_qs, tp->rxq_max);
11381
11382 if (tp->txq_req)
11383 channel->tx_count = tp->txq_req;
11384 else
11385 channel->tx_count = min(deflt_qs, tp->txq_max);
11386 }
11387}
11388
11389static int tg3_set_channels(struct net_device *dev,
11390 struct ethtool_channels *channel)
11391{
11392 struct tg3 *tp = netdev_priv(dev);
11393
11394 if (!tg3_flag(tp, SUPPORT_MSIX))
11395 return -EOPNOTSUPP;
11396
11397 if (channel->rx_count > tp->rxq_max ||
11398 channel->tx_count > tp->txq_max)
11399 return -EINVAL;
11400
11401 tp->rxq_req = channel->rx_count;
11402 tp->txq_req = channel->tx_count;
11403
11404 if (!netif_running(dev))
11405 return 0;
11406
11407 tg3_stop(tp);
11408
11409 netif_carrier_off(dev);
11410
11411 tg3_start(tp, true, false);
11412
11413 return 0;
11414}
11415
de6f31eb 11416static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1da177e4
LT
11417{
11418 switch (stringset) {
11419 case ETH_SS_STATS:
11420 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11421 break;
4cafd3f5
MC
11422 case ETH_SS_TEST:
11423 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11424 break;
1da177e4
LT
11425 default:
11426 WARN_ON(1); /* we need a WARN() */
11427 break;
11428 }
11429}
11430
81b8709c 11431static int tg3_set_phys_id(struct net_device *dev,
11432 enum ethtool_phys_id_state state)
4009a93d
MC
11433{
11434 struct tg3 *tp = netdev_priv(dev);
4009a93d
MC
11435
11436 if (!netif_running(tp->dev))
11437 return -EAGAIN;
11438
81b8709c 11439 switch (state) {
11440 case ETHTOOL_ID_ACTIVE:
fce55922 11441 return 1; /* cycle on/off once per second */
4009a93d 11442
81b8709c 11443 case ETHTOOL_ID_ON:
11444 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11445 LED_CTRL_1000MBPS_ON |
11446 LED_CTRL_100MBPS_ON |
11447 LED_CTRL_10MBPS_ON |
11448 LED_CTRL_TRAFFIC_OVERRIDE |
11449 LED_CTRL_TRAFFIC_BLINK |
11450 LED_CTRL_TRAFFIC_LED);
11451 break;
6aa20a22 11452
81b8709c 11453 case ETHTOOL_ID_OFF:
11454 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11455 LED_CTRL_TRAFFIC_OVERRIDE);
11456 break;
4009a93d 11457
81b8709c 11458 case ETHTOOL_ID_INACTIVE:
11459 tw32(MAC_LED_CTRL, tp->led_ctrl);
11460 break;
4009a93d 11461 }
81b8709c 11462
4009a93d
MC
11463 return 0;
11464}
11465
de6f31eb 11466static void tg3_get_ethtool_stats(struct net_device *dev,
1da177e4
LT
11467 struct ethtool_stats *estats, u64 *tmp_stats)
11468{
11469 struct tg3 *tp = netdev_priv(dev);
0e6c9da3 11470
b546e46f
MC
11471 if (tp->hw_stats)
11472 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11473 else
11474 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
1da177e4
LT
11475}
11476
535a490e 11477static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
c3e94500
MC
11478{
11479 int i;
11480 __be32 *buf;
11481 u32 offset = 0, len = 0;
11482 u32 magic, val;
11483
63c3a66f 11484 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
c3e94500
MC
11485 return NULL;
11486
11487 if (magic == TG3_EEPROM_MAGIC) {
11488 for (offset = TG3_NVM_DIR_START;
11489 offset < TG3_NVM_DIR_END;
11490 offset += TG3_NVM_DIRENT_SIZE) {
11491 if (tg3_nvram_read(tp, offset, &val))
11492 return NULL;
11493
11494 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11495 TG3_NVM_DIRTYPE_EXTVPD)
11496 break;
11497 }
11498
11499 if (offset != TG3_NVM_DIR_END) {
11500 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11501 if (tg3_nvram_read(tp, offset + 4, &offset))
11502 return NULL;
11503
11504 offset = tg3_nvram_logical_addr(tp, offset);
11505 }
11506 }
11507
11508 if (!offset || !len) {
11509 offset = TG3_NVM_VPD_OFF;
11510 len = TG3_NVM_VPD_LEN;
11511 }
11512
11513 buf = kmalloc(len, GFP_KERNEL);
11514 if (buf == NULL)
11515 return NULL;
11516
11517 if (magic == TG3_EEPROM_MAGIC) {
11518 for (i = 0; i < len; i += 4) {
11519 /* The data is in little-endian format in NVRAM.
11520 * Use the big-endian read routines to preserve
11521 * the byte order as it exists in NVRAM.
11522 */
11523 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11524 goto error;
11525 }
11526 } else {
11527 u8 *ptr;
11528 ssize_t cnt;
11529 unsigned int pos = 0;
11530
11531 ptr = (u8 *)&buf[0];
11532 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11533 cnt = pci_read_vpd(tp->pdev, pos,
11534 len - pos, ptr);
11535 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11536 cnt = 0;
11537 else if (cnt < 0)
11538 goto error;
11539 }
11540 if (pos != len)
11541 goto error;
11542 }
11543
535a490e
MC
11544 *vpdlen = len;
11545
c3e94500
MC
11546 return buf;
11547
11548error:
11549 kfree(buf);
11550 return NULL;
11551}
11552
566f86ad 11553#define NVRAM_TEST_SIZE 0x100
a5767dec
MC
11554#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11555#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11556#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
727a6d9f
MC
11557#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11558#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
bda18faf 11559#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
b16250e3
MC
11560#define NVRAM_SELFBOOT_HW_SIZE 0x20
11561#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
566f86ad
MC
11562
11563static int tg3_test_nvram(struct tg3 *tp)
11564{
535a490e 11565 u32 csum, magic, len;
a9dc529d 11566 __be32 *buf;
ab0049b4 11567 int i, j, k, err = 0, size;
566f86ad 11568
63c3a66f 11569 if (tg3_flag(tp, NO_NVRAM))
df259d8c
MC
11570 return 0;
11571
e4f34110 11572 if (tg3_nvram_read(tp, 0, &magic) != 0)
1b27777a
MC
11573 return -EIO;
11574
1b27777a
MC
11575 if (magic == TG3_EEPROM_MAGIC)
11576 size = NVRAM_TEST_SIZE;
b16250e3 11577 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
a5767dec
MC
11578 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11579 TG3_EEPROM_SB_FORMAT_1) {
11580 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11581 case TG3_EEPROM_SB_REVISION_0:
11582 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11583 break;
11584 case TG3_EEPROM_SB_REVISION_2:
11585 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11586 break;
11587 case TG3_EEPROM_SB_REVISION_3:
11588 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11589 break;
727a6d9f
MC
11590 case TG3_EEPROM_SB_REVISION_4:
11591 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11592 break;
11593 case TG3_EEPROM_SB_REVISION_5:
11594 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11595 break;
11596 case TG3_EEPROM_SB_REVISION_6:
11597 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11598 break;
a5767dec 11599 default:
727a6d9f 11600 return -EIO;
a5767dec
MC
11601 }
11602 } else
1b27777a 11603 return 0;
b16250e3
MC
11604 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11605 size = NVRAM_SELFBOOT_HW_SIZE;
11606 else
1b27777a
MC
11607 return -EIO;
11608
11609 buf = kmalloc(size, GFP_KERNEL);
566f86ad
MC
11610 if (buf == NULL)
11611 return -ENOMEM;
11612
1b27777a
MC
11613 err = -EIO;
11614 for (i = 0, j = 0; i < size; i += 4, j++) {
a9dc529d
MC
11615 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11616 if (err)
566f86ad 11617 break;
566f86ad 11618 }
1b27777a 11619 if (i < size)
566f86ad
MC
11620 goto out;
11621
1b27777a 11622 /* Selfboot format */
a9dc529d 11623 magic = be32_to_cpu(buf[0]);
b9fc7dc5 11624 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
b16250e3 11625 TG3_EEPROM_MAGIC_FW) {
1b27777a
MC
11626 u8 *buf8 = (u8 *) buf, csum8 = 0;
11627
b9fc7dc5 11628 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
a5767dec
MC
11629 TG3_EEPROM_SB_REVISION_2) {
11630 /* For rev 2, the csum doesn't include the MBA. */
11631 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11632 csum8 += buf8[i];
11633 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11634 csum8 += buf8[i];
11635 } else {
11636 for (i = 0; i < size; i++)
11637 csum8 += buf8[i];
11638 }
1b27777a 11639
ad96b485
AB
11640 if (csum8 == 0) {
11641 err = 0;
11642 goto out;
11643 }
11644
11645 err = -EIO;
11646 goto out;
1b27777a 11647 }
566f86ad 11648
b9fc7dc5 11649 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
b16250e3
MC
11650 TG3_EEPROM_MAGIC_HW) {
11651 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
a9dc529d 11652 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
b16250e3 11653 u8 *buf8 = (u8 *) buf;
b16250e3
MC
11654
11655 /* Separate the parity bits and the data bytes. */
11656 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11657 if ((i == 0) || (i == 8)) {
11658 int l;
11659 u8 msk;
11660
11661 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11662 parity[k++] = buf8[i] & msk;
11663 i++;
859a5887 11664 } else if (i == 16) {
b16250e3
MC
11665 int l;
11666 u8 msk;
11667
11668 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11669 parity[k++] = buf8[i] & msk;
11670 i++;
11671
11672 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11673 parity[k++] = buf8[i] & msk;
11674 i++;
11675 }
11676 data[j++] = buf8[i];
11677 }
11678
11679 err = -EIO;
11680 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11681 u8 hw8 = hweight8(data[i]);
11682
11683 if ((hw8 & 0x1) && parity[i])
11684 goto out;
11685 else if (!(hw8 & 0x1) && !parity[i])
11686 goto out;
11687 }
11688 err = 0;
11689 goto out;
11690 }
11691
01c3a392
MC
11692 err = -EIO;
11693
566f86ad
MC
11694 /* Bootstrap checksum at offset 0x10 */
11695 csum = calc_crc((unsigned char *) buf, 0x10);
01c3a392 11696 if (csum != le32_to_cpu(buf[0x10/4]))
566f86ad
MC
11697 goto out;
11698
11699 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11700 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
01c3a392 11701 if (csum != le32_to_cpu(buf[0xfc/4]))
a9dc529d 11702 goto out;
566f86ad 11703
c3e94500
MC
11704 kfree(buf);
11705
535a490e 11706 buf = tg3_vpd_readblock(tp, &len);
c3e94500
MC
11707 if (!buf)
11708 return -ENOMEM;
d4894f3e 11709
535a490e 11710 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
d4894f3e
MC
11711 if (i > 0) {
11712 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11713 if (j < 0)
11714 goto out;
11715
535a490e 11716 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
d4894f3e
MC
11717 goto out;
11718
11719 i += PCI_VPD_LRDT_TAG_SIZE;
11720 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11721 PCI_VPD_RO_KEYWORD_CHKSUM);
11722 if (j > 0) {
11723 u8 csum8 = 0;
11724
11725 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11726
11727 for (i = 0; i <= j; i++)
11728 csum8 += ((u8 *)buf)[i];
11729
11730 if (csum8)
11731 goto out;
11732 }
11733 }
11734
566f86ad
MC
11735 err = 0;
11736
11737out:
11738 kfree(buf);
11739 return err;
11740}
11741
ca43007a
MC
11742#define TG3_SERDES_TIMEOUT_SEC 2
11743#define TG3_COPPER_TIMEOUT_SEC 6
11744
11745static int tg3_test_link(struct tg3 *tp)
11746{
11747 int i, max;
11748
11749 if (!netif_running(tp->dev))
11750 return -ENODEV;
11751
f07e9af3 11752 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
ca43007a
MC
11753 max = TG3_SERDES_TIMEOUT_SEC;
11754 else
11755 max = TG3_COPPER_TIMEOUT_SEC;
11756
11757 for (i = 0; i < max; i++) {
11758 if (netif_carrier_ok(tp->dev))
11759 return 0;
11760
11761 if (msleep_interruptible(1000))
11762 break;
11763 }
11764
11765 return -EIO;
11766}
11767
a71116d1 11768/* Only test the commonly used registers */
30ca3e37 11769static int tg3_test_registers(struct tg3 *tp)
a71116d1 11770{
b16250e3 11771 int i, is_5705, is_5750;
a71116d1
MC
11772 u32 offset, read_mask, write_mask, val, save_val, read_val;
11773 static struct {
11774 u16 offset;
11775 u16 flags;
11776#define TG3_FL_5705 0x1
11777#define TG3_FL_NOT_5705 0x2
11778#define TG3_FL_NOT_5788 0x4
b16250e3 11779#define TG3_FL_NOT_5750 0x8
a71116d1
MC
11780 u32 read_mask;
11781 u32 write_mask;
11782 } reg_tbl[] = {
11783 /* MAC Control Registers */
11784 { MAC_MODE, TG3_FL_NOT_5705,
11785 0x00000000, 0x00ef6f8c },
11786 { MAC_MODE, TG3_FL_5705,
11787 0x00000000, 0x01ef6b8c },
11788 { MAC_STATUS, TG3_FL_NOT_5705,
11789 0x03800107, 0x00000000 },
11790 { MAC_STATUS, TG3_FL_5705,
11791 0x03800100, 0x00000000 },
11792 { MAC_ADDR_0_HIGH, 0x0000,
11793 0x00000000, 0x0000ffff },
11794 { MAC_ADDR_0_LOW, 0x0000,
c6cdf436 11795 0x00000000, 0xffffffff },
a71116d1
MC
11796 { MAC_RX_MTU_SIZE, 0x0000,
11797 0x00000000, 0x0000ffff },
11798 { MAC_TX_MODE, 0x0000,
11799 0x00000000, 0x00000070 },
11800 { MAC_TX_LENGTHS, 0x0000,
11801 0x00000000, 0x00003fff },
11802 { MAC_RX_MODE, TG3_FL_NOT_5705,
11803 0x00000000, 0x000007fc },
11804 { MAC_RX_MODE, TG3_FL_5705,
11805 0x00000000, 0x000007dc },
11806 { MAC_HASH_REG_0, 0x0000,
11807 0x00000000, 0xffffffff },
11808 { MAC_HASH_REG_1, 0x0000,
11809 0x00000000, 0xffffffff },
11810 { MAC_HASH_REG_2, 0x0000,
11811 0x00000000, 0xffffffff },
11812 { MAC_HASH_REG_3, 0x0000,
11813 0x00000000, 0xffffffff },
11814
11815 /* Receive Data and Receive BD Initiator Control Registers. */
11816 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11817 0x00000000, 0xffffffff },
11818 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11819 0x00000000, 0xffffffff },
11820 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11821 0x00000000, 0x00000003 },
11822 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11823 0x00000000, 0xffffffff },
11824 { RCVDBDI_STD_BD+0, 0x0000,
11825 0x00000000, 0xffffffff },
11826 { RCVDBDI_STD_BD+4, 0x0000,
11827 0x00000000, 0xffffffff },
11828 { RCVDBDI_STD_BD+8, 0x0000,
11829 0x00000000, 0xffff0002 },
11830 { RCVDBDI_STD_BD+0xc, 0x0000,
11831 0x00000000, 0xffffffff },
6aa20a22 11832
a71116d1
MC
11833 /* Receive BD Initiator Control Registers. */
11834 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11835 0x00000000, 0xffffffff },
11836 { RCVBDI_STD_THRESH, TG3_FL_5705,
11837 0x00000000, 0x000003ff },
11838 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11839 0x00000000, 0xffffffff },
6aa20a22 11840
a71116d1
MC
11841 /* Host Coalescing Control Registers. */
11842 { HOSTCC_MODE, TG3_FL_NOT_5705,
11843 0x00000000, 0x00000004 },
11844 { HOSTCC_MODE, TG3_FL_5705,
11845 0x00000000, 0x000000f6 },
11846 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11847 0x00000000, 0xffffffff },
11848 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11849 0x00000000, 0x000003ff },
11850 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11851 0x00000000, 0xffffffff },
11852 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11853 0x00000000, 0x000003ff },
11854 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11855 0x00000000, 0xffffffff },
11856 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11857 0x00000000, 0x000000ff },
11858 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11859 0x00000000, 0xffffffff },
11860 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11861 0x00000000, 0x000000ff },
11862 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11863 0x00000000, 0xffffffff },
11864 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11865 0x00000000, 0xffffffff },
11866 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11867 0x00000000, 0xffffffff },
11868 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11869 0x00000000, 0x000000ff },
11870 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11871 0x00000000, 0xffffffff },
11872 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11873 0x00000000, 0x000000ff },
11874 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11875 0x00000000, 0xffffffff },
11876 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11877 0x00000000, 0xffffffff },
11878 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11879 0x00000000, 0xffffffff },
11880 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11881 0x00000000, 0xffffffff },
11882 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11883 0x00000000, 0xffffffff },
11884 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11885 0xffffffff, 0x00000000 },
11886 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11887 0xffffffff, 0x00000000 },
11888
11889 /* Buffer Manager Control Registers. */
b16250e3 11890 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
a71116d1 11891 0x00000000, 0x007fff80 },
b16250e3 11892 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
a71116d1
MC
11893 0x00000000, 0x007fffff },
11894 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11895 0x00000000, 0x0000003f },
11896 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11897 0x00000000, 0x000001ff },
11898 { BUFMGR_MB_HIGH_WATER, 0x0000,
11899 0x00000000, 0x000001ff },
11900 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11901 0xffffffff, 0x00000000 },
11902 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11903 0xffffffff, 0x00000000 },
6aa20a22 11904
a71116d1
MC
11905 /* Mailbox Registers */
11906 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11907 0x00000000, 0x000001ff },
11908 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11909 0x00000000, 0x000001ff },
11910 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11911 0x00000000, 0x000007ff },
11912 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11913 0x00000000, 0x000001ff },
11914
11915 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11916 };
11917
b16250e3 11918 is_5705 = is_5750 = 0;
63c3a66f 11919 if (tg3_flag(tp, 5705_PLUS)) {
a71116d1 11920 is_5705 = 1;
63c3a66f 11921 if (tg3_flag(tp, 5750_PLUS))
b16250e3
MC
11922 is_5750 = 1;
11923 }
a71116d1
MC
11924
11925 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11926 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11927 continue;
11928
11929 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11930 continue;
11931
63c3a66f 11932 if (tg3_flag(tp, IS_5788) &&
a71116d1
MC
11933 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11934 continue;
11935
b16250e3
MC
11936 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11937 continue;
11938
a71116d1
MC
11939 offset = (u32) reg_tbl[i].offset;
11940 read_mask = reg_tbl[i].read_mask;
11941 write_mask = reg_tbl[i].write_mask;
11942
11943 /* Save the original register content */
11944 save_val = tr32(offset);
11945
11946 /* Determine the read-only value. */
11947 read_val = save_val & read_mask;
11948
11949 /* Write zero to the register, then make sure the read-only bits
11950 * are not changed and the read/write bits are all zeros.
11951 */
11952 tw32(offset, 0);
11953
11954 val = tr32(offset);
11955
11956 /* Test the read-only and read/write bits. */
11957 if (((val & read_mask) != read_val) || (val & write_mask))
11958 goto out;
11959
11960 /* Write ones to all the bits defined by RdMask and WrMask, then
11961 * make sure the read-only bits are not changed and the
11962 * read/write bits are all ones.
11963 */
11964 tw32(offset, read_mask | write_mask);
11965
11966 val = tr32(offset);
11967
11968 /* Test the read-only bits. */
11969 if ((val & read_mask) != read_val)
11970 goto out;
11971
11972 /* Test the read/write bits. */
11973 if ((val & write_mask) != write_mask)
11974 goto out;
11975
11976 tw32(offset, save_val);
11977 }
11978
11979 return 0;
11980
11981out:
9f88f29f 11982 if (netif_msg_hw(tp))
2445e461
MC
11983 netdev_err(tp->dev,
11984 "Register test failed at offset %x\n", offset);
a71116d1
MC
11985 tw32(offset, save_val);
11986 return -EIO;
11987}
11988
7942e1db
MC
11989static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11990{
f71e1309 11991 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
11992 int i;
11993 u32 j;
11994
e9edda69 11995 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7942e1db
MC
11996 for (j = 0; j < len; j += 4) {
11997 u32 val;
11998
11999 tg3_write_mem(tp, offset + j, test_pattern[i]);
12000 tg3_read_mem(tp, offset + j, &val);
12001 if (val != test_pattern[i])
12002 return -EIO;
12003 }
12004 }
12005 return 0;
12006}
12007
12008static int tg3_test_memory(struct tg3 *tp)
12009{
12010 static struct mem_entry {
12011 u32 offset;
12012 u32 len;
12013 } mem_tbl_570x[] = {
38690194 12014 { 0x00000000, 0x00b50},
7942e1db
MC
12015 { 0x00002000, 0x1c000},
12016 { 0xffffffff, 0x00000}
12017 }, mem_tbl_5705[] = {
12018 { 0x00000100, 0x0000c},
12019 { 0x00000200, 0x00008},
7942e1db
MC
12020 { 0x00004000, 0x00800},
12021 { 0x00006000, 0x01000},
12022 { 0x00008000, 0x02000},
12023 { 0x00010000, 0x0e000},
12024 { 0xffffffff, 0x00000}
79f4d13a
MC
12025 }, mem_tbl_5755[] = {
12026 { 0x00000200, 0x00008},
12027 { 0x00004000, 0x00800},
12028 { 0x00006000, 0x00800},
12029 { 0x00008000, 0x02000},
12030 { 0x00010000, 0x0c000},
12031 { 0xffffffff, 0x00000}
b16250e3
MC
12032 }, mem_tbl_5906[] = {
12033 { 0x00000200, 0x00008},
12034 { 0x00004000, 0x00400},
12035 { 0x00006000, 0x00400},
12036 { 0x00008000, 0x01000},
12037 { 0x00010000, 0x01000},
12038 { 0xffffffff, 0x00000}
8b5a6c42
MC
12039 }, mem_tbl_5717[] = {
12040 { 0x00000200, 0x00008},
12041 { 0x00010000, 0x0a000},
12042 { 0x00020000, 0x13c00},
12043 { 0xffffffff, 0x00000}
12044 }, mem_tbl_57765[] = {
12045 { 0x00000200, 0x00008},
12046 { 0x00004000, 0x00800},
12047 { 0x00006000, 0x09800},
12048 { 0x00010000, 0x0a000},
12049 { 0xffffffff, 0x00000}
7942e1db
MC
12050 };
12051 struct mem_entry *mem_tbl;
12052 int err = 0;
12053 int i;
12054
63c3a66f 12055 if (tg3_flag(tp, 5717_PLUS))
8b5a6c42 12056 mem_tbl = mem_tbl_5717;
55086ad9 12057 else if (tg3_flag(tp, 57765_CLASS))
8b5a6c42 12058 mem_tbl = mem_tbl_57765;
63c3a66f 12059 else if (tg3_flag(tp, 5755_PLUS))
321d32a0
MC
12060 mem_tbl = mem_tbl_5755;
12061 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12062 mem_tbl = mem_tbl_5906;
63c3a66f 12063 else if (tg3_flag(tp, 5705_PLUS))
321d32a0
MC
12064 mem_tbl = mem_tbl_5705;
12065 else
7942e1db
MC
12066 mem_tbl = mem_tbl_570x;
12067
12068 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
be98da6a
MC
12069 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12070 if (err)
7942e1db
MC
12071 break;
12072 }
6aa20a22 12073
7942e1db
MC
12074 return err;
12075}
12076
bb158d69
MC
12077#define TG3_TSO_MSS 500
12078
12079#define TG3_TSO_IP_HDR_LEN 20
12080#define TG3_TSO_TCP_HDR_LEN 20
12081#define TG3_TSO_TCP_OPT_LEN 12
12082
12083static const u8 tg3_tso_header[] = {
120840x08, 0x00,
120850x45, 0x00, 0x00, 0x00,
120860x00, 0x00, 0x40, 0x00,
120870x40, 0x06, 0x00, 0x00,
120880x0a, 0x00, 0x00, 0x01,
120890x0a, 0x00, 0x00, 0x02,
120900x0d, 0x00, 0xe0, 0x00,
120910x00, 0x00, 0x01, 0x00,
120920x00, 0x00, 0x02, 0x00,
120930x80, 0x10, 0x10, 0x00,
120940x14, 0x09, 0x00, 0x00,
120950x01, 0x01, 0x08, 0x0a,
120960x11, 0x11, 0x11, 0x11,
120970x11, 0x11, 0x11, 0x11,
12098};
9f40dead 12099
28a45957 12100static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
c76949a6 12101{
5e5a7f37 12102 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
bb158d69 12103 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
84b67b27 12104 u32 budget;
9205fd9c
ED
12105 struct sk_buff *skb;
12106 u8 *tx_data, *rx_data;
c76949a6
MC
12107 dma_addr_t map;
12108 int num_pkts, tx_len, rx_len, i, err;
12109 struct tg3_rx_buffer_desc *desc;
898a56f8 12110 struct tg3_napi *tnapi, *rnapi;
8fea32b9 12111 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
c76949a6 12112
c8873405
MC
12113 tnapi = &tp->napi[0];
12114 rnapi = &tp->napi[0];
0c1d0e2b 12115 if (tp->irq_cnt > 1) {
63c3a66f 12116 if (tg3_flag(tp, ENABLE_RSS))
1da85aa3 12117 rnapi = &tp->napi[1];
63c3a66f 12118 if (tg3_flag(tp, ENABLE_TSS))
c8873405 12119 tnapi = &tp->napi[1];
0c1d0e2b 12120 }
fd2ce37f 12121 coal_now = tnapi->coal_now | rnapi->coal_now;
898a56f8 12122
c76949a6
MC
12123 err = -EIO;
12124
4852a861 12125 tx_len = pktsz;
a20e9c62 12126 skb = netdev_alloc_skb(tp->dev, tx_len);
a50bb7b9
JJ
12127 if (!skb)
12128 return -ENOMEM;
12129
c76949a6
MC
12130 tx_data = skb_put(skb, tx_len);
12131 memcpy(tx_data, tp->dev->dev_addr, 6);
12132 memset(tx_data + 6, 0x0, 8);
12133
4852a861 12134 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
c76949a6 12135
28a45957 12136 if (tso_loopback) {
bb158d69
MC
12137 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12138
12139 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12140 TG3_TSO_TCP_OPT_LEN;
12141
12142 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12143 sizeof(tg3_tso_header));
12144 mss = TG3_TSO_MSS;
12145
12146 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12147 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12148
12149 /* Set the total length field in the IP header */
12150 iph->tot_len = htons((u16)(mss + hdr_len));
12151
12152 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12153 TXD_FLAG_CPU_POST_DMA);
12154
63c3a66f
JP
12155 if (tg3_flag(tp, HW_TSO_1) ||
12156 tg3_flag(tp, HW_TSO_2) ||
12157 tg3_flag(tp, HW_TSO_3)) {
bb158d69
MC
12158 struct tcphdr *th;
12159 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12160 th = (struct tcphdr *)&tx_data[val];
12161 th->check = 0;
12162 } else
12163 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12164
63c3a66f 12165 if (tg3_flag(tp, HW_TSO_3)) {
bb158d69
MC
12166 mss |= (hdr_len & 0xc) << 12;
12167 if (hdr_len & 0x10)
12168 base_flags |= 0x00000010;
12169 base_flags |= (hdr_len & 0x3e0) << 5;
63c3a66f 12170 } else if (tg3_flag(tp, HW_TSO_2))
bb158d69 12171 mss |= hdr_len << 9;
63c3a66f 12172 else if (tg3_flag(tp, HW_TSO_1) ||
bb158d69
MC
12173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12174 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12175 } else {
12176 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12177 }
12178
12179 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12180 } else {
12181 num_pkts = 1;
12182 data_off = ETH_HLEN;
c441b456
MC
12183
12184 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12185 tx_len > VLAN_ETH_FRAME_LEN)
12186 base_flags |= TXD_FLAG_JMB_PKT;
bb158d69
MC
12187 }
12188
12189 for (i = data_off; i < tx_len; i++)
c76949a6
MC
12190 tx_data[i] = (u8) (i & 0xff);
12191
f4188d8a
AD
12192 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12193 if (pci_dma_mapping_error(tp->pdev, map)) {
a21771dd
MC
12194 dev_kfree_skb(skb);
12195 return -EIO;
12196 }
c76949a6 12197
0d681b27
MC
12198 val = tnapi->tx_prod;
12199 tnapi->tx_buffers[val].skb = skb;
12200 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12201
c76949a6 12202 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 12203 rnapi->coal_now);
c76949a6
MC
12204
12205 udelay(10);
12206
898a56f8 12207 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
c76949a6 12208
84b67b27
MC
12209 budget = tg3_tx_avail(tnapi);
12210 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
d1a3b737
MC
12211 base_flags | TXD_FLAG_END, mss, 0)) {
12212 tnapi->tx_buffers[val].skb = NULL;
12213 dev_kfree_skb(skb);
12214 return -EIO;
12215 }
c76949a6 12216
f3f3f27e 12217 tnapi->tx_prod++;
c76949a6 12218
6541b806
MC
12219 /* Sync BD data before updating mailbox */
12220 wmb();
12221
f3f3f27e
MC
12222 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12223 tr32_mailbox(tnapi->prodmbox);
c76949a6
MC
12224
12225 udelay(10);
12226
303fc921
MC
12227 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12228 for (i = 0; i < 35; i++) {
c76949a6 12229 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 12230 coal_now);
c76949a6
MC
12231
12232 udelay(10);
12233
898a56f8
MC
12234 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12235 rx_idx = rnapi->hw_status->idx[0].rx_producer;
f3f3f27e 12236 if ((tx_idx == tnapi->tx_prod) &&
c76949a6
MC
12237 (rx_idx == (rx_start_idx + num_pkts)))
12238 break;
12239 }
12240
ba1142e4 12241 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
c76949a6
MC
12242 dev_kfree_skb(skb);
12243
f3f3f27e 12244 if (tx_idx != tnapi->tx_prod)
c76949a6
MC
12245 goto out;
12246
12247 if (rx_idx != rx_start_idx + num_pkts)
12248 goto out;
12249
bb158d69
MC
12250 val = data_off;
12251 while (rx_idx != rx_start_idx) {
12252 desc = &rnapi->rx_rcb[rx_start_idx++];
12253 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12254 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
c76949a6 12255
bb158d69
MC
12256 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12257 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12258 goto out;
c76949a6 12259
bb158d69
MC
12260 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12261 - ETH_FCS_LEN;
c76949a6 12262
28a45957 12263 if (!tso_loopback) {
bb158d69
MC
12264 if (rx_len != tx_len)
12265 goto out;
4852a861 12266
bb158d69
MC
12267 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12268 if (opaque_key != RXD_OPAQUE_RING_STD)
12269 goto out;
12270 } else {
12271 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12272 goto out;
12273 }
12274 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12275 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
54e0a67f 12276 >> RXD_TCPCSUM_SHIFT != 0xffff) {
4852a861 12277 goto out;
bb158d69 12278 }
4852a861 12279
bb158d69 12280 if (opaque_key == RXD_OPAQUE_RING_STD) {
9205fd9c 12281 rx_data = tpr->rx_std_buffers[desc_idx].data;
bb158d69
MC
12282 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12283 mapping);
12284 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
9205fd9c 12285 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
bb158d69
MC
12286 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12287 mapping);
12288 } else
12289 goto out;
c76949a6 12290
bb158d69
MC
12291 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12292 PCI_DMA_FROMDEVICE);
c76949a6 12293
9205fd9c 12294 rx_data += TG3_RX_OFFSET(tp);
bb158d69 12295 for (i = data_off; i < rx_len; i++, val++) {
9205fd9c 12296 if (*(rx_data + i) != (u8) (val & 0xff))
bb158d69
MC
12297 goto out;
12298 }
c76949a6 12299 }
bb158d69 12300
c76949a6 12301 err = 0;
6aa20a22 12302
9205fd9c 12303 /* tg3_free_rings will unmap and free the rx_data */
c76949a6
MC
12304out:
12305 return err;
12306}
12307
00c266b7
MC
12308#define TG3_STD_LOOPBACK_FAILED 1
12309#define TG3_JMB_LOOPBACK_FAILED 2
bb158d69 12310#define TG3_TSO_LOOPBACK_FAILED 4
28a45957
MC
12311#define TG3_LOOPBACK_FAILED \
12312 (TG3_STD_LOOPBACK_FAILED | \
12313 TG3_JMB_LOOPBACK_FAILED | \
12314 TG3_TSO_LOOPBACK_FAILED)
00c266b7 12315
941ec90f 12316static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
9f40dead 12317{
28a45957 12318 int err = -EIO;
2215e24c 12319 u32 eee_cap;
c441b456
MC
12320 u32 jmb_pkt_sz = 9000;
12321
12322 if (tp->dma_limit)
12323 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
9f40dead 12324
ab789046
MC
12325 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12326 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12327
28a45957
MC
12328 if (!netif_running(tp->dev)) {
12329 data[0] = TG3_LOOPBACK_FAILED;
12330 data[1] = TG3_LOOPBACK_FAILED;
941ec90f
MC
12331 if (do_extlpbk)
12332 data[2] = TG3_LOOPBACK_FAILED;
28a45957
MC
12333 goto done;
12334 }
12335
b9ec6c1b 12336 err = tg3_reset_hw(tp, 1);
ab789046 12337 if (err) {
28a45957
MC
12338 data[0] = TG3_LOOPBACK_FAILED;
12339 data[1] = TG3_LOOPBACK_FAILED;
941ec90f
MC
12340 if (do_extlpbk)
12341 data[2] = TG3_LOOPBACK_FAILED;
ab789046
MC
12342 goto done;
12343 }
9f40dead 12344
63c3a66f 12345 if (tg3_flag(tp, ENABLE_RSS)) {
4a85f098
MC
12346 int i;
12347
12348 /* Reroute all rx packets to the 1st queue */
12349 for (i = MAC_RSS_INDIR_TBL_0;
12350 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12351 tw32(i, 0x0);
12352 }
12353
6e01b20b
MC
12354 /* HW errata - mac loopback fails in some cases on 5780.
12355 * Normal traffic and PHY loopback are not affected by
12356 * errata. Also, the MAC loopback test is deprecated for
12357 * all newer ASIC revisions.
12358 */
12359 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12360 !tg3_flag(tp, CPMU_PRESENT)) {
12361 tg3_mac_loopback(tp, true);
9936bcf6 12362
28a45957
MC
12363 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12364 data[0] |= TG3_STD_LOOPBACK_FAILED;
6e01b20b
MC
12365
12366 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
c441b456 12367 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
28a45957 12368 data[0] |= TG3_JMB_LOOPBACK_FAILED;
6e01b20b
MC
12369
12370 tg3_mac_loopback(tp, false);
12371 }
4852a861 12372
f07e9af3 12373 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
63c3a66f 12374 !tg3_flag(tp, USE_PHYLIB)) {
5e5a7f37
MC
12375 int i;
12376
941ec90f 12377 tg3_phy_lpbk_set(tp, 0, false);
5e5a7f37
MC
12378
12379 /* Wait for link */
12380 for (i = 0; i < 100; i++) {
12381 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12382 break;
12383 mdelay(1);
12384 }
12385
28a45957
MC
12386 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12387 data[1] |= TG3_STD_LOOPBACK_FAILED;
63c3a66f 12388 if (tg3_flag(tp, TSO_CAPABLE) &&
28a45957
MC
12389 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12390 data[1] |= TG3_TSO_LOOPBACK_FAILED;
63c3a66f 12391 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
c441b456 12392 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
28a45957 12393 data[1] |= TG3_JMB_LOOPBACK_FAILED;
9f40dead 12394
941ec90f
MC
12395 if (do_extlpbk) {
12396 tg3_phy_lpbk_set(tp, 0, true);
12397
12398 /* All link indications report up, but the hardware
12399 * isn't really ready for about 20 msec. Double it
12400 * to be sure.
12401 */
12402 mdelay(40);
12403
12404 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12405 data[2] |= TG3_STD_LOOPBACK_FAILED;
12406 if (tg3_flag(tp, TSO_CAPABLE) &&
12407 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12408 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12409 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
c441b456 12410 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
941ec90f
MC
12411 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12412 }
12413
5e5a7f37
MC
12414 /* Re-enable gphy autopowerdown. */
12415 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12416 tg3_phy_toggle_apd(tp, true);
12417 }
6833c043 12418
941ec90f 12419 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
28a45957 12420
ab789046
MC
12421done:
12422 tp->phy_flags |= eee_cap;
12423
9f40dead
MC
12424 return err;
12425}
12426
4cafd3f5
MC
12427static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12428 u64 *data)
12429{
566f86ad 12430 struct tg3 *tp = netdev_priv(dev);
941ec90f 12431 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
566f86ad 12432
bed9829f
MC
12433 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12434 tg3_power_up(tp)) {
12435 etest->flags |= ETH_TEST_FL_FAILED;
12436 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12437 return;
12438 }
bc1c7567 12439
566f86ad
MC
12440 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12441
12442 if (tg3_test_nvram(tp) != 0) {
12443 etest->flags |= ETH_TEST_FL_FAILED;
12444 data[0] = 1;
12445 }
941ec90f 12446 if (!doextlpbk && tg3_test_link(tp)) {
ca43007a
MC
12447 etest->flags |= ETH_TEST_FL_FAILED;
12448 data[1] = 1;
12449 }
a71116d1 12450 if (etest->flags & ETH_TEST_FL_OFFLINE) {
b02fd9e3 12451 int err, err2 = 0, irq_sync = 0;
bbe832c0
MC
12452
12453 if (netif_running(dev)) {
b02fd9e3 12454 tg3_phy_stop(tp);
a71116d1 12455 tg3_netif_stop(tp);
bbe832c0
MC
12456 irq_sync = 1;
12457 }
a71116d1 12458
bbe832c0 12459 tg3_full_lock(tp, irq_sync);
a71116d1
MC
12460
12461 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
ec41c7df 12462 err = tg3_nvram_lock(tp);
a71116d1 12463 tg3_halt_cpu(tp, RX_CPU_BASE);
63c3a66f 12464 if (!tg3_flag(tp, 5705_PLUS))
a71116d1 12465 tg3_halt_cpu(tp, TX_CPU_BASE);
ec41c7df
MC
12466 if (!err)
12467 tg3_nvram_unlock(tp);
a71116d1 12468
f07e9af3 12469 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
d9ab5ad1
MC
12470 tg3_phy_reset(tp);
12471
a71116d1
MC
12472 if (tg3_test_registers(tp) != 0) {
12473 etest->flags |= ETH_TEST_FL_FAILED;
12474 data[2] = 1;
12475 }
28a45957 12476
7942e1db
MC
12477 if (tg3_test_memory(tp) != 0) {
12478 etest->flags |= ETH_TEST_FL_FAILED;
12479 data[3] = 1;
12480 }
28a45957 12481
941ec90f
MC
12482 if (doextlpbk)
12483 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12484
12485 if (tg3_test_loopback(tp, &data[4], doextlpbk))
c76949a6 12486 etest->flags |= ETH_TEST_FL_FAILED;
a71116d1 12487
f47c11ee
DM
12488 tg3_full_unlock(tp);
12489
d4bc3927
MC
12490 if (tg3_test_interrupt(tp) != 0) {
12491 etest->flags |= ETH_TEST_FL_FAILED;
941ec90f 12492 data[7] = 1;
d4bc3927 12493 }
f47c11ee
DM
12494
12495 tg3_full_lock(tp, 0);
d4bc3927 12496
a71116d1
MC
12497 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12498 if (netif_running(dev)) {
63c3a66f 12499 tg3_flag_set(tp, INIT_COMPLETE);
b02fd9e3
MC
12500 err2 = tg3_restart_hw(tp, 1);
12501 if (!err2)
b9ec6c1b 12502 tg3_netif_start(tp);
a71116d1 12503 }
f47c11ee
DM
12504
12505 tg3_full_unlock(tp);
b02fd9e3
MC
12506
12507 if (irq_sync && !err2)
12508 tg3_phy_start(tp);
a71116d1 12509 }
80096068 12510 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
c866b7ea 12511 tg3_power_down(tp);
bc1c7567 12512
4cafd3f5
MC
12513}
12514
1da177e4
LT
12515static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12516{
12517 struct mii_ioctl_data *data = if_mii(ifr);
12518 struct tg3 *tp = netdev_priv(dev);
12519 int err;
12520
63c3a66f 12521 if (tg3_flag(tp, USE_PHYLIB)) {
3f0e3ad7 12522 struct phy_device *phydev;
f07e9af3 12523 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 12524 return -EAGAIN;
3f0e3ad7 12525 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
28b04113 12526 return phy_mii_ioctl(phydev, ifr, cmd);
b02fd9e3
MC
12527 }
12528
33f401ae 12529 switch (cmd) {
1da177e4 12530 case SIOCGMIIPHY:
882e9793 12531 data->phy_id = tp->phy_addr;
1da177e4
LT
12532
12533 /* fallthru */
12534 case SIOCGMIIREG: {
12535 u32 mii_regval;
12536
f07e9af3 12537 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
1da177e4
LT
12538 break; /* We have no PHY */
12539
34eea5ac 12540 if (!netif_running(dev))
bc1c7567
MC
12541 return -EAGAIN;
12542
f47c11ee 12543 spin_lock_bh(&tp->lock);
1da177e4 12544 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 12545 spin_unlock_bh(&tp->lock);
1da177e4
LT
12546
12547 data->val_out = mii_regval;
12548
12549 return err;
12550 }
12551
12552 case SIOCSMIIREG:
f07e9af3 12553 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
1da177e4
LT
12554 break; /* We have no PHY */
12555
34eea5ac 12556 if (!netif_running(dev))
bc1c7567
MC
12557 return -EAGAIN;
12558
f47c11ee 12559 spin_lock_bh(&tp->lock);
1da177e4 12560 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 12561 spin_unlock_bh(&tp->lock);
1da177e4
LT
12562
12563 return err;
12564
12565 default:
12566 /* do nothing */
12567 break;
12568 }
12569 return -EOPNOTSUPP;
12570}
12571
15f9850d
DM
12572static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12573{
12574 struct tg3 *tp = netdev_priv(dev);
12575
12576 memcpy(ec, &tp->coal, sizeof(*ec));
12577 return 0;
12578}
12579
d244c892
MC
12580static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12581{
12582 struct tg3 *tp = netdev_priv(dev);
12583 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12584 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12585
63c3a66f 12586 if (!tg3_flag(tp, 5705_PLUS)) {
d244c892
MC
12587 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12588 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12589 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12590 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12591 }
12592
12593 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12594 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12595 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12596 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12597 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12598 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12599 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12600 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12601 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12602 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12603 return -EINVAL;
12604
12605 /* No rx interrupts will be generated if both are zero */
12606 if ((ec->rx_coalesce_usecs == 0) &&
12607 (ec->rx_max_coalesced_frames == 0))
12608 return -EINVAL;
12609
12610 /* No tx interrupts will be generated if both are zero */
12611 if ((ec->tx_coalesce_usecs == 0) &&
12612 (ec->tx_max_coalesced_frames == 0))
12613 return -EINVAL;
12614
12615 /* Only copy relevant parameters, ignore all others. */
12616 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12617 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12618 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12619 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12620 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12621 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12622 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12623 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12624 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12625
12626 if (netif_running(dev)) {
12627 tg3_full_lock(tp, 0);
12628 __tg3_set_coalesce(tp, &tp->coal);
12629 tg3_full_unlock(tp);
12630 }
12631 return 0;
12632}
12633
7282d491 12634static const struct ethtool_ops tg3_ethtool_ops = {
1da177e4
LT
12635 .get_settings = tg3_get_settings,
12636 .set_settings = tg3_set_settings,
12637 .get_drvinfo = tg3_get_drvinfo,
12638 .get_regs_len = tg3_get_regs_len,
12639 .get_regs = tg3_get_regs,
12640 .get_wol = tg3_get_wol,
12641 .set_wol = tg3_set_wol,
12642 .get_msglevel = tg3_get_msglevel,
12643 .set_msglevel = tg3_set_msglevel,
12644 .nway_reset = tg3_nway_reset,
12645 .get_link = ethtool_op_get_link,
12646 .get_eeprom_len = tg3_get_eeprom_len,
12647 .get_eeprom = tg3_get_eeprom,
12648 .set_eeprom = tg3_set_eeprom,
12649 .get_ringparam = tg3_get_ringparam,
12650 .set_ringparam = tg3_set_ringparam,
12651 .get_pauseparam = tg3_get_pauseparam,
12652 .set_pauseparam = tg3_set_pauseparam,
4cafd3f5 12653 .self_test = tg3_self_test,
1da177e4 12654 .get_strings = tg3_get_strings,
81b8709c 12655 .set_phys_id = tg3_set_phys_id,
1da177e4 12656 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 12657 .get_coalesce = tg3_get_coalesce,
d244c892 12658 .set_coalesce = tg3_set_coalesce,
b9f2c044 12659 .get_sset_count = tg3_get_sset_count,
90415477
MC
12660 .get_rxnfc = tg3_get_rxnfc,
12661 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12662 .get_rxfh_indir = tg3_get_rxfh_indir,
12663 .set_rxfh_indir = tg3_set_rxfh_indir,
0968169c
MC
12664 .get_channels = tg3_get_channels,
12665 .set_channels = tg3_set_channels,
3f847490 12666 .get_ts_info = ethtool_op_get_ts_info,
1da177e4
LT
12667};
12668
b4017c53
DM
12669static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12670 struct rtnl_link_stats64 *stats)
12671{
12672 struct tg3 *tp = netdev_priv(dev);
12673
0f566b20
MC
12674 spin_lock_bh(&tp->lock);
12675 if (!tp->hw_stats) {
12676 spin_unlock_bh(&tp->lock);
b4017c53 12677 return &tp->net_stats_prev;
0f566b20 12678 }
b4017c53 12679
b4017c53
DM
12680 tg3_get_nstats(tp, stats);
12681 spin_unlock_bh(&tp->lock);
12682
12683 return stats;
12684}
12685
ccd5ba9d
MC
/* ndo_set_rx_mode: apply promiscuous/multicast filtering changes.
 * No-op while the interface is down; the full lock serializes against
 * other configuration paths.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
12697
faf1627a
MC
12698static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12699 int new_mtu)
12700{
12701 dev->mtu = new_mtu;
12702
12703 if (new_mtu > ETH_DATA_LEN) {
12704 if (tg3_flag(tp, 5780_CLASS)) {
12705 netdev_update_features(dev);
12706 tg3_flag_clear(tp, TSO_CAPABLE);
12707 } else {
12708 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12709 }
12710 } else {
12711 if (tg3_flag(tp, 5780_CLASS)) {
12712 tg3_flag_set(tp, TSO_CAPABLE);
12713 netdev_update_features(dev);
12714 }
12715 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12716 }
12717}
12718
12719static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12720{
12721 struct tg3 *tp = netdev_priv(dev);
2fae5e36 12722 int err, reset_phy = 0;
faf1627a
MC
12723
12724 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12725 return -EINVAL;
12726
12727 if (!netif_running(dev)) {
12728 /* We'll just catch it later when the
12729 * device is up'd.
12730 */
12731 tg3_set_mtu(dev, tp, new_mtu);
12732 return 0;
12733 }
12734
12735 tg3_phy_stop(tp);
12736
12737 tg3_netif_stop(tp);
12738
12739 tg3_full_lock(tp, 1);
12740
12741 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12742
12743 tg3_set_mtu(dev, tp, new_mtu);
12744
2fae5e36
MC
12745 /* Reset PHY, otherwise the read DMA engine will be in a mode that
12746 * breaks all requests to 256 bytes.
12747 */
12748 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12749 reset_phy = 1;
12750
12751 err = tg3_restart_hw(tp, reset_phy);
faf1627a
MC
12752
12753 if (!err)
12754 tg3_netif_start(tp);
12755
12756 tg3_full_unlock(tp);
12757
12758 if (!err)
12759 tg3_phy_start(tp);
12760
12761 return err;
12762}
12763
12764static const struct net_device_ops tg3_netdev_ops = {
12765 .ndo_open = tg3_open,
12766 .ndo_stop = tg3_close,
12767 .ndo_start_xmit = tg3_start_xmit,
12768 .ndo_get_stats64 = tg3_get_stats64,
12769 .ndo_validate_addr = eth_validate_addr,
12770 .ndo_set_rx_mode = tg3_set_rx_mode,
12771 .ndo_set_mac_address = tg3_set_mac_addr,
12772 .ndo_do_ioctl = tg3_ioctl,
12773 .ndo_tx_timeout = tg3_tx_timeout,
12774 .ndo_change_mtu = tg3_change_mtu,
12775 .ndo_fix_features = tg3_fix_features,
12776 .ndo_set_features = tg3_set_features,
12777#ifdef CONFIG_NET_POLL_CONTROLLER
12778 .ndo_poll_controller = tg3_poll_controller,
12779#endif
12780};
12781
1da177e4
LT
12782static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12783{
1b27777a 12784 u32 cursize, val, magic;
1da177e4
LT
12785
12786 tp->nvram_size = EEPROM_CHIP_SIZE;
12787
e4f34110 12788 if (tg3_nvram_read(tp, 0, &magic) != 0)
1da177e4
LT
12789 return;
12790
b16250e3
MC
12791 if ((magic != TG3_EEPROM_MAGIC) &&
12792 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12793 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
1da177e4
LT
12794 return;
12795
12796 /*
12797 * Size the chip by reading offsets at increasing powers of two.
12798 * When we encounter our validation signature, we know the addressing
12799 * has wrapped around, and thus have our chip size.
12800 */
1b27777a 12801 cursize = 0x10;
1da177e4
LT
12802
12803 while (cursize < tp->nvram_size) {
e4f34110 12804 if (tg3_nvram_read(tp, cursize, &val) != 0)
1da177e4
LT
12805 return;
12806
1820180b 12807 if (val == magic)
1da177e4
LT
12808 break;
12809
12810 cursize <<= 1;
12811 }
12812
12813 tp->nvram_size = cursize;
12814}
6aa20a22 12815
1da177e4
LT
12816static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12817{
12818 u32 val;
12819
63c3a66f 12820 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
1b27777a
MC
12821 return;
12822
12823 /* Selfboot format */
1820180b 12824 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
12825 tg3_get_eeprom_size(tp);
12826 return;
12827 }
12828
6d348f2c 12829 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
1da177e4 12830 if (val != 0) {
6d348f2c
MC
12831 /* This is confusing. We want to operate on the
12832 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12833 * call will read from NVRAM and byteswap the data
12834 * according to the byteswapping settings for all
12835 * other register accesses. This ensures the data we
12836 * want will always reside in the lower 16-bits.
12837 * However, the data in NVRAM is in LE format, which
12838 * means the data from the NVRAM read will always be
12839 * opposite the endianness of the CPU. The 16-bit
12840 * byteswap then brings the data to CPU endianness.
12841 */
12842 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
1da177e4
LT
12843 return;
12844 }
12845 }
fd1122a2 12846 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
1da177e4
LT
12847}
12848
12849static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12850{
12851 u32 nvcfg1;
12852
12853 nvcfg1 = tr32(NVRAM_CFG1);
12854 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
63c3a66f 12855 tg3_flag_set(tp, FLASH);
8590a603 12856 } else {
1da177e4
LT
12857 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12858 tw32(NVRAM_CFG1, nvcfg1);
12859 }
12860
6ff6f81d 12861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
63c3a66f 12862 tg3_flag(tp, 5780_CLASS)) {
1da177e4 12863 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8590a603
MC
12864 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12865 tp->nvram_jedecnum = JEDEC_ATMEL;
12866 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
63c3a66f 12867 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12868 break;
12869 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12870 tp->nvram_jedecnum = JEDEC_ATMEL;
12871 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12872 break;
12873 case FLASH_VENDOR_ATMEL_EEPROM:
12874 tp->nvram_jedecnum = JEDEC_ATMEL;
12875 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
63c3a66f 12876 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12877 break;
12878 case FLASH_VENDOR_ST:
12879 tp->nvram_jedecnum = JEDEC_ST;
12880 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
63c3a66f 12881 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12882 break;
12883 case FLASH_VENDOR_SAIFUN:
12884 tp->nvram_jedecnum = JEDEC_SAIFUN;
12885 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12886 break;
12887 case FLASH_VENDOR_SST_SMALL:
12888 case FLASH_VENDOR_SST_LARGE:
12889 tp->nvram_jedecnum = JEDEC_SST;
12890 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12891 break;
1da177e4 12892 }
8590a603 12893 } else {
1da177e4
LT
12894 tp->nvram_jedecnum = JEDEC_ATMEL;
12895 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
63c3a66f 12896 tg3_flag_set(tp, NVRAM_BUFFERED);
1da177e4
LT
12897 }
12898}
12899
a1b950d5
MC
12900static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12901{
12902 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12903 case FLASH_5752PAGE_SIZE_256:
12904 tp->nvram_pagesize = 256;
12905 break;
12906 case FLASH_5752PAGE_SIZE_512:
12907 tp->nvram_pagesize = 512;
12908 break;
12909 case FLASH_5752PAGE_SIZE_1K:
12910 tp->nvram_pagesize = 1024;
12911 break;
12912 case FLASH_5752PAGE_SIZE_2K:
12913 tp->nvram_pagesize = 2048;
12914 break;
12915 case FLASH_5752PAGE_SIZE_4K:
12916 tp->nvram_pagesize = 4096;
12917 break;
12918 case FLASH_5752PAGE_SIZE_264:
12919 tp->nvram_pagesize = 264;
12920 break;
12921 case FLASH_5752PAGE_SIZE_528:
12922 tp->nvram_pagesize = 528;
12923 break;
12924 }
12925}
12926
361b4ac2
MC
12927static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12928{
12929 u32 nvcfg1;
12930
12931 nvcfg1 = tr32(NVRAM_CFG1);
12932
e6af301b
MC
12933 /* NVRAM protection for TPM */
12934 if (nvcfg1 & (1 << 27))
63c3a66f 12935 tg3_flag_set(tp, PROTECTED_NVRAM);
e6af301b 12936
361b4ac2 12937 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8590a603
MC
12938 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12939 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12940 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 12941 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12942 break;
12943 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12944 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
12945 tg3_flag_set(tp, NVRAM_BUFFERED);
12946 tg3_flag_set(tp, FLASH);
8590a603
MC
12947 break;
12948 case FLASH_5752VENDOR_ST_M45PE10:
12949 case FLASH_5752VENDOR_ST_M45PE20:
12950 case FLASH_5752VENDOR_ST_M45PE40:
12951 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
12952 tg3_flag_set(tp, NVRAM_BUFFERED);
12953 tg3_flag_set(tp, FLASH);
8590a603 12954 break;
361b4ac2
MC
12955 }
12956
63c3a66f 12957 if (tg3_flag(tp, FLASH)) {
a1b950d5 12958 tg3_nvram_get_pagesize(tp, nvcfg1);
8590a603 12959 } else {
361b4ac2
MC
12960 /* For eeprom, set pagesize to maximum eeprom size */
12961 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12962
12963 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12964 tw32(NVRAM_CFG1, nvcfg1);
12965 }
12966}
12967
d3c7b886
MC
/* Probe NVRAM geometry for 5755-class devices.  Besides decoding the
 * vendor/part from NVRAM_CFG1, the total device size is derived from the
 * part number; when the TPM protection bit is set, the reported size is
 * reduced (0x3e200/0x1f200 are presumably the usable sizes below the
 * protected region — TODO confirm against the chip documentation).
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* Atmel parts here use 264-byte pages. */
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* ST M45PExx parts use 256-byte pages. */
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
13023
1b27777a
MC
13024static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
13025{
13026 u32 nvcfg1;
13027
13028 nvcfg1 = tr32(NVRAM_CFG1);
13029
13030 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8590a603
MC
13031 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13032 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13033 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13034 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13035 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13036 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603 13037 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
1b27777a 13038
8590a603
MC
13039 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13040 tw32(NVRAM_CFG1, nvcfg1);
13041 break;
13042 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13043 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13044 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13045 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13046 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
13047 tg3_flag_set(tp, NVRAM_BUFFERED);
13048 tg3_flag_set(tp, FLASH);
8590a603
MC
13049 tp->nvram_pagesize = 264;
13050 break;
13051 case FLASH_5752VENDOR_ST_M45PE10:
13052 case FLASH_5752VENDOR_ST_M45PE20:
13053 case FLASH_5752VENDOR_ST_M45PE40:
13054 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
13055 tg3_flag_set(tp, NVRAM_BUFFERED);
13056 tg3_flag_set(tp, FLASH);
8590a603
MC
13057 tp->nvram_pagesize = 256;
13058 break;
1b27777a
MC
13059 }
13060}
13061
6b91fa02
MC
/* Probe NVRAM geometry for 5761-class devices.  Vendor/part decoding
 * sets the JEDEC id and flags; the device size is then either read back
 * from the NVRAM_ADDR_LOCKOUT register (when TPM protection reserves
 * part of the device) or derived from the part number.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* Atmel parts on 5761 need no NVRAM address translation. */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* TPM owns the top of the device; the accessible size is
		 * whatever the lockout register reports.
		 */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
13136
b5d3772c
MC
13137static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13138{
13139 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13140 tg3_flag_set(tp, NVRAM_BUFFERED);
b5d3772c
MC
13141 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13142}
13143
321d32a0
MC
/* Probe NVRAM geometry for 57780-class devices (also 57765-class via
 * tg3_nvram_init).  EEPROM parts return early after clearing
 * COMPAT_BYPASS; flash parts fall through to the common page-size and
 * address-translation setup at the bottom.  Unknown vendors mark the
 * device as having no NVRAM at all.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		/* For eeprom, use the maximum eeprom size as page size. */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific ST part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use NVRAM address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13215
13216
/* Probe NVRAM geometry for 5717/5719-class devices.  Same structure as
 * the 57780 probe: EEPROM parts return early, flash parts decode a size
 * (or defer to tg3_nvram_get_size() for parts whose size is detected
 * later), and unknown vendors mark the device as having no NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		/* For eeprom, use the maximum eeprom size as page size. */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use NVRAM address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13294
9b91b5f1
MC
/* Probe NVRAM geometry for 5720-class devices.  The vendor/strap field
 * of NVRAM_CFG1 selects between EEPROM parts (early return after
 * clearing COMPAT_BYPASS), Atmel flash, and ST flash; for flash the
 * total size is decoded from the part number and the common page-size /
 * address-translation setup runs at the bottom.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		/* HD parts use the large Atmel chip size as the page
		 * size, LD parts the small one.
		 */
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use NVRAM address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13406
1da177e4
LT
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Reset the EEPROM state machine, enable seeprom access, then dispatch
 * to the per-ASIC NVRAM probe under the NVRAM hardware lock.  5700/5701
 * have no NVRAM block and fall through to plain EEPROM sizing.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* All NVRAM probing below must hold the hardware lock. */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Probes either set nvram_size or leave it 0 so that
		 * tg3_get_nvram_size() runs afterwards.
		 */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
13472
1da177e4
LT
/* Maps a PCI subsystem (vendor, device) pair to a known PHY id for
 * boards whose PHY cannot be identified directly.  A phy_id of 0
 * presumably means "no hard-coded PHY for this board" — confirm against
 * how tg3_phy_probe consumes the lookup result.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13541
24daf2b0 13542static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
1da177e4
LT
13543{
13544 int i;
13545
13546 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13547 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13548 tp->pdev->subsystem_vendor) &&
13549 (subsys_id_to_phy_id[i].subsys_devid ==
13550 tp->pdev->subsystem_device))
13551 return &subsys_id_to_phy_id[i];
13552 }
13553 return NULL;
13554}
13555
7d0c41ef 13556static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 13557{
1da177e4 13558 u32 val;
f49639e6 13559
79eb6904 13560 tp->phy_id = TG3_PHY_ID_INVALID;
7d0c41ef
MC
13561 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13562
a85feb8c 13563 /* Assume an onboard device and WOL capable by default. */
63c3a66f
JP
13564 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13565 tg3_flag_set(tp, WOL_CAP);
72b845e0 13566
b5d3772c 13567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9d26e213 13568 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
63c3a66f
JP
13569 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13570 tg3_flag_set(tp, IS_NIC);
9d26e213 13571 }
0527ba35
MC
13572 val = tr32(VCPU_CFGSHDW);
13573 if (val & VCPU_CFGSHDW_ASPM_DBNC)
63c3a66f 13574 tg3_flag_set(tp, ASPM_WORKAROUND);
0527ba35 13575 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
6fdbab9d 13576 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
63c3a66f 13577 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
13578 device_set_wakeup_enable(&tp->pdev->dev, true);
13579 }
05ac4cb7 13580 goto done;
b5d3772c
MC
13581 }
13582
1da177e4
LT
13583 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13584 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13585 u32 nic_cfg, led_cfg;
a9daf367 13586 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
7d0c41ef 13587 int eeprom_phy_serdes = 0;
1da177e4
LT
13588
13589 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13590 tp->nic_sram_data_cfg = nic_cfg;
13591
13592 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13593 ver >>= NIC_SRAM_DATA_VER_SHIFT;
6ff6f81d
MC
13594 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13595 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13596 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
1da177e4
LT
13597 (ver > 0) && (ver < 0x100))
13598 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13599
a9daf367
MC
13600 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13601 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13602
1da177e4
LT
13603 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13604 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13605 eeprom_phy_serdes = 1;
13606
13607 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13608 if (nic_phy_id != 0) {
13609 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13610 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13611
13612 eeprom_phy_id = (id1 >> 16) << 10;
13613 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13614 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13615 } else
13616 eeprom_phy_id = 0;
13617
7d0c41ef 13618 tp->phy_id = eeprom_phy_id;
747e8f8b 13619 if (eeprom_phy_serdes) {
63c3a66f 13620 if (!tg3_flag(tp, 5705_PLUS))
f07e9af3 13621 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
a50d0796 13622 else
f07e9af3 13623 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
747e8f8b 13624 }
7d0c41ef 13625
63c3a66f 13626 if (tg3_flag(tp, 5750_PLUS))
1da177e4
LT
13627 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13628 SHASTA_EXT_LED_MODE_MASK);
cbf46853 13629 else
1da177e4
LT
13630 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13631
13632 switch (led_cfg) {
13633 default:
13634 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13635 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13636 break;
13637
13638 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13639 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13640 break;
13641
13642 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13643 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
13644
13645 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13646 * read on some older 5700/5701 bootcode.
13647 */
13648 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13649 ASIC_REV_5700 ||
13650 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13651 ASIC_REV_5701)
13652 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13653
1da177e4
LT
13654 break;
13655
13656 case SHASTA_EXT_LED_SHARED:
13657 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13658 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13659 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13660 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13661 LED_CTRL_MODE_PHY_2);
13662 break;
13663
13664 case SHASTA_EXT_LED_MAC:
13665 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13666 break;
13667
13668 case SHASTA_EXT_LED_COMBO:
13669 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13670 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13671 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13672 LED_CTRL_MODE_PHY_2);
13673 break;
13674
855e1111 13675 }
1da177e4
LT
13676
13677 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13678 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13679 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13680 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13681
b2a5c19c
MC
13682 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13683 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
5f60891b 13684
9d26e213 13685 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
63c3a66f 13686 tg3_flag_set(tp, EEPROM_WRITE_PROT);
9d26e213
MC
13687 if ((tp->pdev->subsystem_vendor ==
13688 PCI_VENDOR_ID_ARIMA) &&
13689 (tp->pdev->subsystem_device == 0x205a ||
13690 tp->pdev->subsystem_device == 0x2063))
63c3a66f 13691 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
9d26e213 13692 } else {
63c3a66f
JP
13693 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13694 tg3_flag_set(tp, IS_NIC);
9d26e213 13695 }
1da177e4
LT
13696
13697 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
63c3a66f
JP
13698 tg3_flag_set(tp, ENABLE_ASF);
13699 if (tg3_flag(tp, 5750_PLUS))
13700 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
1da177e4 13701 }
b2b98d4a
MC
13702
13703 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
63c3a66f
JP
13704 tg3_flag(tp, 5750_PLUS))
13705 tg3_flag_set(tp, ENABLE_APE);
b2b98d4a 13706
f07e9af3 13707 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
a85feb8c 13708 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
63c3a66f 13709 tg3_flag_clear(tp, WOL_CAP);
1da177e4 13710
63c3a66f 13711 if (tg3_flag(tp, WOL_CAP) &&
6fdbab9d 13712 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
63c3a66f 13713 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
13714 device_set_wakeup_enable(&tp->pdev->dev, true);
13715 }
0527ba35 13716
1da177e4 13717 if (cfg2 & (1 << 17))
f07e9af3 13718 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
1da177e4
LT
13719
13720 /* serdes signal pre-emphasis in register 0x590 set by */
13721 /* bootcode if bit 18 is set */
13722 if (cfg2 & (1 << 18))
f07e9af3 13723 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
8ed5d97e 13724
63c3a66f
JP
13725 if ((tg3_flag(tp, 57765_PLUS) ||
13726 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13727 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
6833c043 13728 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
f07e9af3 13729 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
6833c043 13730
63c3a66f 13731 if (tg3_flag(tp, PCI_EXPRESS) &&
8c69b1e7 13732 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 13733 !tg3_flag(tp, 57765_PLUS)) {
8ed5d97e
MC
13734 u32 cfg3;
13735
13736 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13737 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
63c3a66f 13738 tg3_flag_set(tp, ASPM_WORKAROUND);
8ed5d97e 13739 }
a9daf367 13740
14417063 13741 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
63c3a66f 13742 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
a9daf367 13743 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
63c3a66f 13744 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
a9daf367 13745 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
63c3a66f 13746 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
1da177e4 13747 }
05ac4cb7 13748done:
63c3a66f 13749 if (tg3_flag(tp, WOL_CAP))
43067ed8 13750 device_set_wakeup_enable(&tp->pdev->dev,
63c3a66f 13751 tg3_flag(tp, WOL_ENABLE));
43067ed8
RW
13752 else
13753 device_set_wakeup_capable(&tp->pdev->dev, false);
7d0c41ef
MC
13754}
13755
b2a5c19c
MC
13756static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13757{
13758 int i;
13759 u32 val;
13760
13761 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13762 tw32(OTP_CTRL, cmd);
13763
13764 /* Wait for up to 1 ms for command to execute. */
13765 for (i = 0; i < 100; i++) {
13766 val = tr32(OTP_STATUS);
13767 if (val & OTP_STATUS_CMD_DONE)
13768 break;
13769 udelay(10);
13770 }
13771
13772 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13773}
13774
13775/* Read the gphy configuration from the OTP region of the chip. The gphy
13776 * configuration is a 32-bit value that straddles the alignment boundary.
13777 * We do two 32-bit reads and then shift and merge the results.
13778 */
13779static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13780{
13781 u32 bhalf_otp, thalf_otp;
13782
13783 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13784
13785 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13786 return 0;
13787
13788 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13789
13790 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13791 return 0;
13792
13793 thalf_otp = tr32(OTP_READ_DATA);
13794
13795 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13796
13797 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13798 return 0;
13799
13800 bhalf_otp = tr32(OTP_READ_DATA);
13801
13802 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13803}
13804
e256f8a3
MC
13805static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13806{
202ff1c2 13807 u32 adv = ADVERTISED_Autoneg;
e256f8a3
MC
13808
13809 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13810 adv |= ADVERTISED_1000baseT_Half |
13811 ADVERTISED_1000baseT_Full;
13812
13813 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13814 adv |= ADVERTISED_100baseT_Half |
13815 ADVERTISED_100baseT_Full |
13816 ADVERTISED_10baseT_Half |
13817 ADVERTISED_10baseT_Full |
13818 ADVERTISED_TP;
13819 else
13820 adv |= ADVERTISED_FIBRE;
13821
13822 tp->link_config.advertising = adv;
e740522e
MC
13823 tp->link_config.speed = SPEED_UNKNOWN;
13824 tp->link_config.duplex = DUPLEX_UNKNOWN;
e256f8a3 13825 tp->link_config.autoneg = AUTONEG_ENABLE;
e740522e
MC
13826 tp->link_config.active_speed = SPEED_UNKNOWN;
13827 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
34655ad6
MC
13828
13829 tp->old_link = -1;
e256f8a3
MC
13830}
13831
7d0c41ef
MC
13832static int __devinit tg3_phy_probe(struct tg3 *tp)
13833{
13834 u32 hw_phy_id_1, hw_phy_id_2;
13835 u32 hw_phy_id, hw_phy_id_masked;
13836 int err;
1da177e4 13837
e256f8a3 13838 /* flow control autonegotiation is default behavior */
63c3a66f 13839 tg3_flag_set(tp, PAUSE_AUTONEG);
e256f8a3
MC
13840 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13841
8151ad57
MC
13842 if (tg3_flag(tp, ENABLE_APE)) {
13843 switch (tp->pci_fn) {
13844 case 0:
13845 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13846 break;
13847 case 1:
13848 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13849 break;
13850 case 2:
13851 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13852 break;
13853 case 3:
13854 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13855 break;
13856 }
13857 }
13858
63c3a66f 13859 if (tg3_flag(tp, USE_PHYLIB))
b02fd9e3
MC
13860 return tg3_phy_init(tp);
13861
1da177e4 13862 /* Reading the PHY ID register can conflict with ASF
877d0310 13863 * firmware access to the PHY hardware.
1da177e4
LT
13864 */
13865 err = 0;
63c3a66f 13866 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
79eb6904 13867 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
1da177e4
LT
13868 } else {
13869 /* Now read the physical PHY_ID from the chip and verify
13870 * that it is sane. If it doesn't look good, we fall back
13871 * to either the hard-coded table based PHY_ID and failing
13872 * that the value found in the eeprom area.
13873 */
13874 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13875 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13876
13877 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13878 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13879 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13880
79eb6904 13881 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
1da177e4
LT
13882 }
13883
79eb6904 13884 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
1da177e4 13885 tp->phy_id = hw_phy_id;
79eb6904 13886 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
f07e9af3 13887 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
da6b2d01 13888 else
f07e9af3 13889 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
1da177e4 13890 } else {
79eb6904 13891 if (tp->phy_id != TG3_PHY_ID_INVALID) {
7d0c41ef
MC
13892 /* Do nothing, phy ID already set up in
13893 * tg3_get_eeprom_hw_cfg().
13894 */
1da177e4
LT
13895 } else {
13896 struct subsys_tbl_ent *p;
13897
13898 /* No eeprom signature? Try the hardcoded
13899 * subsys device table.
13900 */
24daf2b0 13901 p = tg3_lookup_by_subsys(tp);
1da177e4
LT
13902 if (!p)
13903 return -ENODEV;
13904
13905 tp->phy_id = p->phy_id;
13906 if (!tp->phy_id ||
79eb6904 13907 tp->phy_id == TG3_PHY_ID_BCM8002)
f07e9af3 13908 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
1da177e4
LT
13909 }
13910 }
13911
a6b68dab 13912 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
5baa5e9a
MC
13913 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13914 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13915 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
a6b68dab
MC
13916 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13917 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13918 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
52b02d04
MC
13919 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13920
e256f8a3
MC
13921 tg3_phy_init_link_config(tp);
13922
f07e9af3 13923 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
63c3a66f
JP
13924 !tg3_flag(tp, ENABLE_APE) &&
13925 !tg3_flag(tp, ENABLE_ASF)) {
e2bf73e7 13926 u32 bmsr, dummy;
1da177e4
LT
13927
13928 tg3_readphy(tp, MII_BMSR, &bmsr);
13929 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13930 (bmsr & BMSR_LSTATUS))
13931 goto skip_phy_reset;
6aa20a22 13932
1da177e4
LT
13933 err = tg3_phy_reset(tp);
13934 if (err)
13935 return err;
13936
42b64a45 13937 tg3_phy_set_wirespeed(tp);
1da177e4 13938
e2bf73e7 13939 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
42b64a45
MC
13940 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13941 tp->link_config.flowctrl);
1da177e4
LT
13942
13943 tg3_writephy(tp, MII_BMCR,
13944 BMCR_ANENABLE | BMCR_ANRESTART);
13945 }
1da177e4
LT
13946 }
13947
13948skip_phy_reset:
79eb6904 13949 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4
LT
13950 err = tg3_init_5401phy_dsp(tp);
13951 if (err)
13952 return err;
1da177e4 13953
1da177e4
LT
13954 err = tg3_init_5401phy_dsp(tp);
13955 }
13956
1da177e4
LT
13957 return err;
13958}
13959
184b8904 13960static void __devinit tg3_read_vpd(struct tg3 *tp)
1da177e4 13961{
a4a8bb15 13962 u8 *vpd_data;
4181b2c8 13963 unsigned int block_end, rosize, len;
535a490e 13964 u32 vpdlen;
184b8904 13965 int j, i = 0;
a4a8bb15 13966
535a490e 13967 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
a4a8bb15
MC
13968 if (!vpd_data)
13969 goto out_no_vpd;
1da177e4 13970
535a490e 13971 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
4181b2c8
MC
13972 if (i < 0)
13973 goto out_not_found;
1da177e4 13974
4181b2c8
MC
13975 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13976 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13977 i += PCI_VPD_LRDT_TAG_SIZE;
1da177e4 13978
535a490e 13979 if (block_end > vpdlen)
4181b2c8 13980 goto out_not_found;
af2c6a4a 13981
184b8904
MC
13982 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13983 PCI_VPD_RO_KEYWORD_MFR_ID);
13984 if (j > 0) {
13985 len = pci_vpd_info_field_size(&vpd_data[j]);
13986
13987 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13988 if (j + len > block_end || len != 4 ||
13989 memcmp(&vpd_data[j], "1028", 4))
13990 goto partno;
13991
13992 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13993 PCI_VPD_RO_KEYWORD_VENDOR0);
13994 if (j < 0)
13995 goto partno;
13996
13997 len = pci_vpd_info_field_size(&vpd_data[j]);
13998
13999 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14000 if (j + len > block_end)
14001 goto partno;
14002
14003 memcpy(tp->fw_ver, &vpd_data[j], len);
535a490e 14004 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
184b8904
MC
14005 }
14006
14007partno:
4181b2c8
MC
14008 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14009 PCI_VPD_RO_KEYWORD_PARTNO);
14010 if (i < 0)
14011 goto out_not_found;
af2c6a4a 14012
4181b2c8 14013 len = pci_vpd_info_field_size(&vpd_data[i]);
1da177e4 14014
4181b2c8
MC
14015 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14016 if (len > TG3_BPN_SIZE ||
535a490e 14017 (len + i) > vpdlen)
4181b2c8 14018 goto out_not_found;
1da177e4 14019
4181b2c8 14020 memcpy(tp->board_part_number, &vpd_data[i], len);
1da177e4 14021
1da177e4 14022out_not_found:
a4a8bb15 14023 kfree(vpd_data);
37a949c5 14024 if (tp->board_part_number[0])
a4a8bb15
MC
14025 return;
14026
14027out_no_vpd:
37a949c5
MC
14028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14029 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
14030 strcpy(tp->board_part_number, "BCM5717");
14031 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14032 strcpy(tp->board_part_number, "BCM5718");
14033 else
14034 goto nomatch;
14035 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14036 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14037 strcpy(tp->board_part_number, "BCM57780");
14038 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14039 strcpy(tp->board_part_number, "BCM57760");
14040 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14041 strcpy(tp->board_part_number, "BCM57790");
14042 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14043 strcpy(tp->board_part_number, "BCM57788");
14044 else
14045 goto nomatch;
14046 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14047 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14048 strcpy(tp->board_part_number, "BCM57761");
14049 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14050 strcpy(tp->board_part_number, "BCM57765");
14051 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14052 strcpy(tp->board_part_number, "BCM57781");
14053 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14054 strcpy(tp->board_part_number, "BCM57785");
14055 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14056 strcpy(tp->board_part_number, "BCM57791");
14057 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14058 strcpy(tp->board_part_number, "BCM57795");
14059 else
14060 goto nomatch;
55086ad9
MC
14061 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14062 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14063 strcpy(tp->board_part_number, "BCM57762");
14064 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14065 strcpy(tp->board_part_number, "BCM57766");
14066 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14067 strcpy(tp->board_part_number, "BCM57782");
14068 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14069 strcpy(tp->board_part_number, "BCM57786");
14070 else
14071 goto nomatch;
37a949c5 14072 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
b5d3772c 14073 strcpy(tp->board_part_number, "BCM95906");
37a949c5
MC
14074 } else {
14075nomatch:
b5d3772c 14076 strcpy(tp->board_part_number, "none");
37a949c5 14077 }
1da177e4
LT
14078}
14079
9c8a620e
MC
14080static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14081{
14082 u32 val;
14083
e4f34110 14084 if (tg3_nvram_read(tp, offset, &val) ||
9c8a620e 14085 (val & 0xfc000000) != 0x0c000000 ||
e4f34110 14086 tg3_nvram_read(tp, offset + 4, &val) ||
9c8a620e
MC
14087 val != 0)
14088 return 0;
14089
14090 return 1;
14091}
14092
acd9c119
MC
14093static void __devinit tg3_read_bc_ver(struct tg3 *tp)
14094{
ff3a7cb2 14095 u32 val, offset, start, ver_offset;
75f9936e 14096 int i, dst_off;
ff3a7cb2 14097 bool newver = false;
acd9c119
MC
14098
14099 if (tg3_nvram_read(tp, 0xc, &offset) ||
14100 tg3_nvram_read(tp, 0x4, &start))
14101 return;
14102
14103 offset = tg3_nvram_logical_addr(tp, offset);
14104
ff3a7cb2 14105 if (tg3_nvram_read(tp, offset, &val))
acd9c119
MC
14106 return;
14107
ff3a7cb2
MC
14108 if ((val & 0xfc000000) == 0x0c000000) {
14109 if (tg3_nvram_read(tp, offset + 4, &val))
acd9c119
MC
14110 return;
14111
ff3a7cb2
MC
14112 if (val == 0)
14113 newver = true;
14114 }
14115
75f9936e
MC
14116 dst_off = strlen(tp->fw_ver);
14117
ff3a7cb2 14118 if (newver) {
75f9936e
MC
14119 if (TG3_VER_SIZE - dst_off < 16 ||
14120 tg3_nvram_read(tp, offset + 8, &ver_offset))
ff3a7cb2
MC
14121 return;
14122
14123 offset = offset + ver_offset - start;
14124 for (i = 0; i < 16; i += 4) {
14125 __be32 v;
14126 if (tg3_nvram_read_be32(tp, offset + i, &v))
14127 return;
14128
75f9936e 14129 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
ff3a7cb2
MC
14130 }
14131 } else {
14132 u32 major, minor;
14133
14134 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14135 return;
14136
14137 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14138 TG3_NVM_BCVER_MAJSFT;
14139 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
75f9936e
MC
14140 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14141 "v%d.%02d", major, minor);
acd9c119
MC
14142 }
14143}
14144
a6f6cb1c
MC
14145static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14146{
14147 u32 val, major, minor;
14148
14149 /* Use native endian representation */
14150 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14151 return;
14152
14153 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14154 TG3_NVM_HWSB_CFG1_MAJSFT;
14155 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14156 TG3_NVM_HWSB_CFG1_MINSFT;
14157
14158 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14159}
14160
dfe00d7d
MC
14161static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14162{
14163 u32 offset, major, minor, build;
14164
75f9936e 14165 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
dfe00d7d
MC
14166
14167 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14168 return;
14169
14170 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14171 case TG3_EEPROM_SB_REVISION_0:
14172 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14173 break;
14174 case TG3_EEPROM_SB_REVISION_2:
14175 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14176 break;
14177 case TG3_EEPROM_SB_REVISION_3:
14178 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14179 break;
a4153d40
MC
14180 case TG3_EEPROM_SB_REVISION_4:
14181 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14182 break;
14183 case TG3_EEPROM_SB_REVISION_5:
14184 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14185 break;
bba226ac
MC
14186 case TG3_EEPROM_SB_REVISION_6:
14187 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14188 break;
dfe00d7d
MC
14189 default:
14190 return;
14191 }
14192
e4f34110 14193 if (tg3_nvram_read(tp, offset, &val))
dfe00d7d
MC
14194 return;
14195
14196 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14197 TG3_EEPROM_SB_EDH_BLD_SHFT;
14198 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14199 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14200 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14201
14202 if (minor > 99 || build > 26)
14203 return;
14204
75f9936e
MC
14205 offset = strlen(tp->fw_ver);
14206 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14207 " v%d.%02d", major, minor);
dfe00d7d
MC
14208
14209 if (build > 0) {
75f9936e
MC
14210 offset = strlen(tp->fw_ver);
14211 if (offset < TG3_VER_SIZE - 1)
14212 tp->fw_ver[offset] = 'a' + build - 1;
dfe00d7d
MC
14213 }
14214}
14215
acd9c119 14216static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
c4e6575c
MC
14217{
14218 u32 val, offset, start;
acd9c119 14219 int i, vlen;
9c8a620e
MC
14220
14221 for (offset = TG3_NVM_DIR_START;
14222 offset < TG3_NVM_DIR_END;
14223 offset += TG3_NVM_DIRENT_SIZE) {
e4f34110 14224 if (tg3_nvram_read(tp, offset, &val))
c4e6575c
MC
14225 return;
14226
9c8a620e
MC
14227 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14228 break;
14229 }
14230
14231 if (offset == TG3_NVM_DIR_END)
14232 return;
14233
63c3a66f 14234 if (!tg3_flag(tp, 5705_PLUS))
9c8a620e 14235 start = 0x08000000;
e4f34110 14236 else if (tg3_nvram_read(tp, offset - 4, &start))
9c8a620e
MC
14237 return;
14238
e4f34110 14239 if (tg3_nvram_read(tp, offset + 4, &offset) ||
9c8a620e 14240 !tg3_fw_img_is_valid(tp, offset) ||
e4f34110 14241 tg3_nvram_read(tp, offset + 8, &val))
9c8a620e
MC
14242 return;
14243
14244 offset += val - start;
14245
acd9c119 14246 vlen = strlen(tp->fw_ver);
9c8a620e 14247
acd9c119
MC
14248 tp->fw_ver[vlen++] = ',';
14249 tp->fw_ver[vlen++] = ' ';
9c8a620e
MC
14250
14251 for (i = 0; i < 4; i++) {
a9dc529d
MC
14252 __be32 v;
14253 if (tg3_nvram_read_be32(tp, offset, &v))
c4e6575c
MC
14254 return;
14255
b9fc7dc5 14256 offset += sizeof(v);
c4e6575c 14257
acd9c119
MC
14258 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14259 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
9c8a620e 14260 break;
c4e6575c 14261 }
9c8a620e 14262
acd9c119
MC
14263 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14264 vlen += sizeof(v);
c4e6575c 14265 }
acd9c119
MC
14266}
14267
165f4d1c 14268static void __devinit tg3_probe_ncsi(struct tg3 *tp)
7fd76445 14269{
7fd76445 14270 u32 apedata;
7fd76445
MC
14271
14272 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14273 if (apedata != APE_SEG_SIG_MAGIC)
14274 return;
14275
14276 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14277 if (!(apedata & APE_FW_STATUS_READY))
14278 return;
14279
165f4d1c
MC
14280 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14281 tg3_flag_set(tp, APE_HAS_NCSI);
14282}
14283
14284static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14285{
14286 int vlen;
14287 u32 apedata;
14288 char *fwtype;
14289
7fd76445
MC
14290 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14291
165f4d1c 14292 if (tg3_flag(tp, APE_HAS_NCSI))
ecc79648 14293 fwtype = "NCSI";
165f4d1c 14294 else
ecc79648
MC
14295 fwtype = "DASH";
14296
7fd76445
MC
14297 vlen = strlen(tp->fw_ver);
14298
ecc79648
MC
14299 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14300 fwtype,
7fd76445
MC
14301 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14302 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14303 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14304 (apedata & APE_FW_VERSION_BLDMSK));
14305}
14306
acd9c119
MC
14307static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14308{
14309 u32 val;
75f9936e 14310 bool vpd_vers = false;
acd9c119 14311
75f9936e
MC
14312 if (tp->fw_ver[0] != 0)
14313 vpd_vers = true;
df259d8c 14314
63c3a66f 14315 if (tg3_flag(tp, NO_NVRAM)) {
75f9936e 14316 strcat(tp->fw_ver, "sb");
df259d8c
MC
14317 return;
14318 }
14319
acd9c119
MC
14320 if (tg3_nvram_read(tp, 0, &val))
14321 return;
14322
14323 if (val == TG3_EEPROM_MAGIC)
14324 tg3_read_bc_ver(tp);
14325 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14326 tg3_read_sb_ver(tp, val);
a6f6cb1c
MC
14327 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14328 tg3_read_hwsb_ver(tp);
acd9c119 14329
165f4d1c
MC
14330 if (tg3_flag(tp, ENABLE_ASF)) {
14331 if (tg3_flag(tp, ENABLE_APE)) {
14332 tg3_probe_ncsi(tp);
14333 if (!vpd_vers)
14334 tg3_read_dash_ver(tp);
14335 } else if (!vpd_vers) {
14336 tg3_read_mgmtfw_ver(tp);
14337 }
c9cab24e 14338 }
9c8a620e
MC
14339
14340 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
c4e6575c
MC
14341}
14342
7cb32cf2
MC
14343static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14344{
63c3a66f 14345 if (tg3_flag(tp, LRG_PROD_RING_CAP))
de9f5230 14346 return TG3_RX_RET_MAX_SIZE_5717;
63c3a66f 14347 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
de9f5230 14348 return TG3_RX_RET_MAX_SIZE_5700;
7cb32cf2 14349 else
de9f5230 14350 return TG3_RX_RET_MAX_SIZE_5705;
7cb32cf2
MC
14351}
14352
4143470c 14353static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
895950c2
JP
14354 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14355 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14356 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14357 { },
14358};
14359
16c7fa7d
MC
14360static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14361{
14362 struct pci_dev *peer;
14363 unsigned int func, devnr = tp->pdev->devfn & ~7;
14364
14365 for (func = 0; func < 8; func++) {
14366 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14367 if (peer && peer != tp->pdev)
14368 break;
14369 pci_dev_put(peer);
14370 }
14371 /* 5704 can be configured in single-port mode, set peer to
14372 * tp->pdev in that case.
14373 */
14374 if (!peer) {
14375 peer = tp->pdev;
14376 return peer;
14377 }
14378
14379 /*
14380 * We don't need to keep the refcount elevated; there's no way
14381 * to remove one half of this device without removing the other
14382 */
14383 pci_dev_put(peer);
14384
14385 return peer;
14386}
14387
42b123b1
MC
14388static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14389{
14390 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14392 u32 reg;
14393
14394 /* All devices that use the alternate
14395 * ASIC REV location have a CPMU.
14396 */
14397 tg3_flag_set(tp, CPMU_PRESENT);
14398
14399 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14400 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14401 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14402 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14403 reg = TG3PCI_GEN2_PRODID_ASICREV;
14404 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14407 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14408 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14409 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14410 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14411 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14412 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14413 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14414 reg = TG3PCI_GEN15_PRODID_ASICREV;
14415 else
14416 reg = TG3PCI_PRODID_ASICREV;
14417
14418 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14419 }
14420
14421 /* Wrong chip ID in 5752 A0. This code can be removed later
14422 * as A0 is not in production.
14423 */
14424 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14425 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14426
14427 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14428 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14430 tg3_flag_set(tp, 5717_PLUS);
14431
14432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14433 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14434 tg3_flag_set(tp, 57765_CLASS);
14435
14436 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14437 tg3_flag_set(tp, 57765_PLUS);
14438
14439 /* Intentionally exclude ASIC_REV_5906 */
14440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14446 tg3_flag(tp, 57765_PLUS))
14447 tg3_flag_set(tp, 5755_PLUS);
14448
14449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14451 tg3_flag_set(tp, 5780_CLASS);
14452
14453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14456 tg3_flag(tp, 5755_PLUS) ||
14457 tg3_flag(tp, 5780_CLASS))
14458 tg3_flag_set(tp, 5750_PLUS);
14459
14460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14461 tg3_flag(tp, 5750_PLUS))
14462 tg3_flag_set(tp, 5705_PLUS);
14463}
14464
1da177e4
LT
14465static int __devinit tg3_get_invariants(struct tg3 *tp)
14466{
1da177e4 14467 u32 misc_ctrl_reg;
1da177e4
LT
14468 u32 pci_state_reg, grc_misc_cfg;
14469 u32 val;
14470 u16 pci_cmd;
5e7dfd0f 14471 int err;
1da177e4 14472
1da177e4
LT
14473 /* Force memory write invalidate off. If we leave it on,
14474 * then on 5700_BX chips we have to enable a workaround.
14475 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14476 * to match the cacheline size. The Broadcom driver have this
14477 * workaround but turns MWI off all the times so never uses
14478 * it. This seems to suggest that the workaround is insufficient.
14479 */
14480 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14481 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14482 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14483
16821285
MC
14484 /* Important! -- Make sure register accesses are byteswapped
14485 * correctly. Also, for those chips that require it, make
14486 * sure that indirect register accesses are enabled before
14487 * the first operation.
1da177e4
LT
14488 */
14489 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14490 &misc_ctrl_reg);
16821285
MC
14491 tp->misc_host_ctrl |= (misc_ctrl_reg &
14492 MISC_HOST_CTRL_CHIPREV);
14493 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14494 tp->misc_host_ctrl);
1da177e4 14495
42b123b1 14496 tg3_detect_asic_rev(tp, misc_ctrl_reg);
ff645bec 14497
6892914f
MC
14498 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14499 * we need to disable memory and use config. cycles
14500 * only to access all registers. The 5702/03 chips
14501 * can mistakenly decode the special cycles from the
14502 * ICH chipsets as memory write cycles, causing corruption
14503 * of register and memory space. Only certain ICH bridges
14504 * will drive special cycles with non-zero data during the
14505 * address phase which can fall within the 5703's address
14506 * range. This is not an ICH bug as the PCI spec allows
14507 * non-zero address during special cycles. However, only
14508 * these ICH bridges are known to drive non-zero addresses
14509 * during special cycles.
14510 *
14511 * Since special cycles do not cross PCI bridges, we only
14512 * enable this workaround if the 5703 is on the secondary
14513 * bus of these ICH bridges.
14514 */
14515 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14516 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14517 static struct tg3_dev_id {
14518 u32 vendor;
14519 u32 device;
14520 u32 rev;
14521 } ich_chipsets[] = {
14522 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14523 PCI_ANY_ID },
14524 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14525 PCI_ANY_ID },
14526 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14527 0xa },
14528 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14529 PCI_ANY_ID },
14530 { },
14531 };
14532 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14533 struct pci_dev *bridge = NULL;
14534
14535 while (pci_id->vendor != 0) {
14536 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14537 bridge);
14538 if (!bridge) {
14539 pci_id++;
14540 continue;
14541 }
14542 if (pci_id->rev != PCI_ANY_ID) {
44c10138 14543 if (bridge->revision > pci_id->rev)
6892914f
MC
14544 continue;
14545 }
14546 if (bridge->subordinate &&
14547 (bridge->subordinate->number ==
14548 tp->pdev->bus->number)) {
63c3a66f 14549 tg3_flag_set(tp, ICH_WORKAROUND);
6892914f
MC
14550 pci_dev_put(bridge);
14551 break;
14552 }
14553 }
14554 }
14555
6ff6f81d 14556 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
41588ba1
MC
14557 static struct tg3_dev_id {
14558 u32 vendor;
14559 u32 device;
14560 } bridge_chipsets[] = {
14561 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14562 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14563 { },
14564 };
14565 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14566 struct pci_dev *bridge = NULL;
14567
14568 while (pci_id->vendor != 0) {
14569 bridge = pci_get_device(pci_id->vendor,
14570 pci_id->device,
14571 bridge);
14572 if (!bridge) {
14573 pci_id++;
14574 continue;
14575 }
14576 if (bridge->subordinate &&
14577 (bridge->subordinate->number <=
14578 tp->pdev->bus->number) &&
b918c62e 14579 (bridge->subordinate->busn_res.end >=
41588ba1 14580 tp->pdev->bus->number)) {
63c3a66f 14581 tg3_flag_set(tp, 5701_DMA_BUG);
41588ba1
MC
14582 pci_dev_put(bridge);
14583 break;
14584 }
14585 }
14586 }
14587
4a29cc2e
MC
14588 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14589 * DMA addresses > 40-bit. This bridge may have other additional
14590 * 57xx devices behind it in some 4-port NIC designs for example.
14591 * Any tg3 device found behind the bridge will also need the 40-bit
14592 * DMA workaround.
14593 */
42b123b1 14594 if (tg3_flag(tp, 5780_CLASS)) {
63c3a66f 14595 tg3_flag_set(tp, 40BIT_DMA_BUG);
4cf78e4f 14596 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
859a5887 14597 } else {
4a29cc2e
MC
14598 struct pci_dev *bridge = NULL;
14599
14600 do {
14601 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14602 PCI_DEVICE_ID_SERVERWORKS_EPB,
14603 bridge);
14604 if (bridge && bridge->subordinate &&
14605 (bridge->subordinate->number <=
14606 tp->pdev->bus->number) &&
b918c62e 14607 (bridge->subordinate->busn_res.end >=
4a29cc2e 14608 tp->pdev->bus->number)) {
63c3a66f 14609 tg3_flag_set(tp, 40BIT_DMA_BUG);
4a29cc2e
MC
14610 pci_dev_put(bridge);
14611 break;
14612 }
14613 } while (bridge);
14614 }
4cf78e4f 14615
f6eb9b1f 14616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3a1e19d3 14617 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
7544b097
MC
14618 tp->pdev_peer = tg3_find_peer(tp);
14619
507399f1 14620 /* Determine TSO capabilities */
a0512944 14621 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
4d163b75 14622 ; /* Do nothing. HW bug. */
63c3a66f
JP
14623 else if (tg3_flag(tp, 57765_PLUS))
14624 tg3_flag_set(tp, HW_TSO_3);
14625 else if (tg3_flag(tp, 5755_PLUS) ||
e849cdc3 14626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
63c3a66f
JP
14627 tg3_flag_set(tp, HW_TSO_2);
14628 else if (tg3_flag(tp, 5750_PLUS)) {
14629 tg3_flag_set(tp, HW_TSO_1);
14630 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
14631 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14632 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
63c3a66f 14633 tg3_flag_clear(tp, TSO_BUG);
507399f1
MC
14634 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14635 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14636 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 14637 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
14638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14639 tp->fw_needed = FIRMWARE_TG3TSO5;
14640 else
14641 tp->fw_needed = FIRMWARE_TG3TSO;
14642 }
14643
dabc5c67 14644 /* Selectively allow TSO based on operating conditions */
6ff6f81d
MC
14645 if (tg3_flag(tp, HW_TSO_1) ||
14646 tg3_flag(tp, HW_TSO_2) ||
14647 tg3_flag(tp, HW_TSO_3) ||
cf9ecf4b
MC
14648 tp->fw_needed) {
14649 /* For firmware TSO, assume ASF is disabled.
14650 * We'll disable TSO later if we discover ASF
14651 * is enabled in tg3_get_eeprom_hw_cfg().
14652 */
dabc5c67 14653 tg3_flag_set(tp, TSO_CAPABLE);
cf9ecf4b 14654 } else {
dabc5c67
MC
14655 tg3_flag_clear(tp, TSO_CAPABLE);
14656 tg3_flag_clear(tp, TSO_BUG);
14657 tp->fw_needed = NULL;
14658 }
14659
14660 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14661 tp->fw_needed = FIRMWARE_TG3;
14662
507399f1
MC
14663 tp->irq_max = 1;
14664
63c3a66f
JP
14665 if (tg3_flag(tp, 5750_PLUS)) {
14666 tg3_flag_set(tp, SUPPORT_MSI);
7544b097
MC
14667 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14668 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14669 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14670 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14671 tp->pdev_peer == tp->pdev))
63c3a66f 14672 tg3_flag_clear(tp, SUPPORT_MSI);
7544b097 14673
63c3a66f 14674 if (tg3_flag(tp, 5755_PLUS) ||
b5d3772c 14675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
63c3a66f 14676 tg3_flag_set(tp, 1SHOT_MSI);
52c0fd83 14677 }
4f125f42 14678
63c3a66f
JP
14679 if (tg3_flag(tp, 57765_PLUS)) {
14680 tg3_flag_set(tp, SUPPORT_MSIX);
507399f1
MC
14681 tp->irq_max = TG3_IRQ_MAX_VECS;
14682 }
f6eb9b1f 14683 }
0e1406dd 14684
9102426a
MC
14685 tp->txq_max = 1;
14686 tp->rxq_max = 1;
14687 if (tp->irq_max > 1) {
14688 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14689 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14690
14691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14692 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14693 tp->txq_max = tp->irq_max - 1;
14694 }
14695
b7abee6e
MC
14696 if (tg3_flag(tp, 5755_PLUS) ||
14697 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
63c3a66f 14698 tg3_flag_set(tp, SHORT_DMA_BUG);
f6eb9b1f 14699
e31aa987 14700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
a4cb428d 14701 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
e31aa987 14702
fa6b2aae
MC
14703 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14705 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
63c3a66f 14706 tg3_flag_set(tp, LRG_PROD_RING_CAP);
de9f5230 14707
63c3a66f 14708 if (tg3_flag(tp, 57765_PLUS) &&
a0512944 14709 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
63c3a66f 14710 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
b703df6f 14711
63c3a66f
JP
14712 if (!tg3_flag(tp, 5705_PLUS) ||
14713 tg3_flag(tp, 5780_CLASS) ||
14714 tg3_flag(tp, USE_JUMBO_BDFLAG))
14715 tg3_flag_set(tp, JUMBO_CAPABLE);
0f893dc6 14716
52f4490c
MC
14717 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14718 &pci_state_reg);
14719
708ebb3a 14720 if (pci_is_pcie(tp->pdev)) {
5e7dfd0f
MC
14721 u16 lnkctl;
14722
63c3a66f 14723 tg3_flag_set(tp, PCI_EXPRESS);
5f5c51e3 14724
0f49bfbd 14725 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
5e7dfd0f 14726 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
7196cd6c
MC
14727 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14728 ASIC_REV_5906) {
63c3a66f 14729 tg3_flag_clear(tp, HW_TSO_2);
dabc5c67 14730 tg3_flag_clear(tp, TSO_CAPABLE);
7196cd6c 14731 }
5e7dfd0f 14732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0 14733 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9cf74ebb
MC
14734 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14735 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
63c3a66f 14736 tg3_flag_set(tp, CLKREQ_BUG);
614b0590 14737 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
63c3a66f 14738 tg3_flag_set(tp, L1PLLPD_EN);
c7835a77 14739 }
52f4490c 14740 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
708ebb3a
JM
14741 /* BCM5785 devices are effectively PCIe devices, and should
14742 * follow PCIe codepaths, but do not have a PCIe capabilities
14743 * section.
93a700a9 14744 */
63c3a66f
JP
14745 tg3_flag_set(tp, PCI_EXPRESS);
14746 } else if (!tg3_flag(tp, 5705_PLUS) ||
14747 tg3_flag(tp, 5780_CLASS)) {
52f4490c
MC
14748 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14749 if (!tp->pcix_cap) {
2445e461
MC
14750 dev_err(&tp->pdev->dev,
14751 "Cannot find PCI-X capability, aborting\n");
52f4490c
MC
14752 return -EIO;
14753 }
14754
14755 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
63c3a66f 14756 tg3_flag_set(tp, PCIX_MODE);
52f4490c 14757 }
1da177e4 14758
399de50b
MC
14759 /* If we have an AMD 762 or VIA K8T800 chipset, write
14760 * reordering to the mailbox registers done by the host
14761 * controller can cause major troubles. We read back from
14762 * every mailbox register write to force the writes to be
14763 * posted to the chip in order.
14764 */
4143470c 14765 if (pci_dev_present(tg3_write_reorder_chipsets) &&
63c3a66f
JP
14766 !tg3_flag(tp, PCI_EXPRESS))
14767 tg3_flag_set(tp, MBOX_WRITE_REORDER);
399de50b 14768
69fc4053
MC
14769 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14770 &tp->pci_cacheline_sz);
14771 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14772 &tp->pci_lat_timer);
1da177e4
LT
14773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14774 tp->pci_lat_timer < 64) {
14775 tp->pci_lat_timer = 64;
69fc4053
MC
14776 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14777 tp->pci_lat_timer);
1da177e4
LT
14778 }
14779
16821285
MC
14780 /* Important! -- It is critical that the PCI-X hw workaround
14781 * situation is decided before the first MMIO register access.
14782 */
52f4490c
MC
14783 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14784 /* 5700 BX chips need to have their TX producer index
14785 * mailboxes written twice to workaround a bug.
14786 */
63c3a66f 14787 tg3_flag_set(tp, TXD_MBOX_HWBUG);
1da177e4 14788
52f4490c 14789 /* If we are in PCI-X mode, enable register write workaround.
1da177e4
LT
14790 *
14791 * The workaround is to use indirect register accesses
14792 * for all chip writes not to mailbox registers.
14793 */
63c3a66f 14794 if (tg3_flag(tp, PCIX_MODE)) {
1da177e4 14795 u32 pm_reg;
1da177e4 14796
63c3a66f 14797 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
1da177e4
LT
14798
14799 /* The chip can have it's power management PCI config
14800 * space registers clobbered due to this bug.
14801 * So explicitly force the chip into D0 here.
14802 */
9974a356
MC
14803 pci_read_config_dword(tp->pdev,
14804 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
14805 &pm_reg);
14806 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14807 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
14808 pci_write_config_dword(tp->pdev,
14809 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
14810 pm_reg);
14811
14812 /* Also, force SERR#/PERR# in PCI command. */
14813 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14814 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14815 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14816 }
14817 }
14818
1da177e4 14819 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
63c3a66f 14820 tg3_flag_set(tp, PCI_HIGH_SPEED);
1da177e4 14821 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
63c3a66f 14822 tg3_flag_set(tp, PCI_32BIT);
1da177e4
LT
14823
14824 /* Chip-specific fixup from Broadcom driver */
14825 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14826 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14827 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14828 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14829 }
14830
1ee582d8 14831 /* Default fast path register access methods */
20094930 14832 tp->read32 = tg3_read32;
1ee582d8 14833 tp->write32 = tg3_write32;
09ee929c 14834 tp->read32_mbox = tg3_read32;
20094930 14835 tp->write32_mbox = tg3_write32;
1ee582d8
MC
14836 tp->write32_tx_mbox = tg3_write32;
14837 tp->write32_rx_mbox = tg3_write32;
14838
14839 /* Various workaround register access methods */
63c3a66f 14840 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
1ee582d8 14841 tp->write32 = tg3_write_indirect_reg32;
98efd8a6 14842 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
63c3a66f 14843 (tg3_flag(tp, PCI_EXPRESS) &&
98efd8a6
MC
14844 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14845 /*
14846 * Back to back register writes can cause problems on these
14847 * chips, the workaround is to read back all reg writes
14848 * except those to mailbox regs.
14849 *
14850 * See tg3_write_indirect_reg32().
14851 */
1ee582d8 14852 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
14853 }
14854
63c3a66f 14855 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
1ee582d8 14856 tp->write32_tx_mbox = tg3_write32_tx_mbox;
63c3a66f 14857 if (tg3_flag(tp, MBOX_WRITE_REORDER))
1ee582d8
MC
14858 tp->write32_rx_mbox = tg3_write_flush_reg32;
14859 }
20094930 14860
63c3a66f 14861 if (tg3_flag(tp, ICH_WORKAROUND)) {
6892914f
MC
14862 tp->read32 = tg3_read_indirect_reg32;
14863 tp->write32 = tg3_write_indirect_reg32;
14864 tp->read32_mbox = tg3_read_indirect_mbox;
14865 tp->write32_mbox = tg3_write_indirect_mbox;
14866 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14867 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14868
14869 iounmap(tp->regs);
22abe310 14870 tp->regs = NULL;
6892914f
MC
14871
14872 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14873 pci_cmd &= ~PCI_COMMAND_MEMORY;
14874 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14875 }
b5d3772c
MC
14876 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14877 tp->read32_mbox = tg3_read32_mbox_5906;
14878 tp->write32_mbox = tg3_write32_mbox_5906;
14879 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14880 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14881 }
6892914f 14882
bbadf503 14883 if (tp->write32 == tg3_write_indirect_reg32 ||
63c3a66f 14884 (tg3_flag(tp, PCIX_MODE) &&
bbadf503 14885 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 14886 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
63c3a66f 14887 tg3_flag_set(tp, SRAM_USE_CONFIG);
bbadf503 14888
16821285
MC
14889 /* The memory arbiter has to be enabled in order for SRAM accesses
14890 * to succeed. Normally on powerup the tg3 chip firmware will make
14891 * sure it is enabled, but other entities such as system netboot
14892 * code might disable it.
14893 */
14894 val = tr32(MEMARB_MODE);
14895 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14896
9dc5e342
MC
14897 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14899 tg3_flag(tp, 5780_CLASS)) {
14900 if (tg3_flag(tp, PCIX_MODE)) {
14901 pci_read_config_dword(tp->pdev,
14902 tp->pcix_cap + PCI_X_STATUS,
14903 &val);
14904 tp->pci_fn = val & 0x7;
14905 }
14906 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14907 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14908 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14909 NIC_SRAM_CPMUSTAT_SIG) {
14910 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14911 tp->pci_fn = tp->pci_fn ? 1 : 0;
14912 }
14913 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14914 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14915 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14916 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14917 NIC_SRAM_CPMUSTAT_SIG) {
14918 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14919 TG3_CPMU_STATUS_FSHFT_5719;
14920 }
69f11c99
MC
14921 }
14922
7d0c41ef 14923 /* Get eeprom hw config before calling tg3_set_power_state().
63c3a66f 14924 * In particular, the TG3_FLAG_IS_NIC flag must be
7d0c41ef
MC
14925 * determined before calling tg3_set_power_state() so that
14926 * we know whether or not to switch out of Vaux power.
14927 * When the flag is set, it means that GPIO1 is used for eeprom
14928 * write protect and also implies that it is a LOM where GPIOs
14929 * are not used to switch power.
6aa20a22 14930 */
7d0c41ef
MC
14931 tg3_get_eeprom_hw_cfg(tp);
14932
cf9ecf4b
MC
14933 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14934 tg3_flag_clear(tp, TSO_CAPABLE);
14935 tg3_flag_clear(tp, TSO_BUG);
14936 tp->fw_needed = NULL;
14937 }
14938
63c3a66f 14939 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
14940 /* Allow reads and writes to the
14941 * APE register and memory space.
14942 */
14943 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
14944 PCISTATE_ALLOW_APE_SHMEM_WR |
14945 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
14946 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14947 pci_state_reg);
c9cab24e
MC
14948
14949 tg3_ape_lock_init(tp);
0d3031d9
MC
14950 }
14951
16821285
MC
14952 /* Set up tp->grc_local_ctrl before calling
14953 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14954 * will bring 5700's external PHY out of reset.
314fba34
MC
14955 * It is also used as eeprom write protect on LOMs.
14956 */
14957 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
6ff6f81d 14958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
63c3a66f 14959 tg3_flag(tp, EEPROM_WRITE_PROT))
314fba34
MC
14960 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14961 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
14962 /* Unused GPIO3 must be driven as output on 5752 because there
14963 * are no pull-up resistors on unused GPIO pins.
14964 */
14965 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14966 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 14967
321d32a0 14968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
cb4ed1fd 14969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
55086ad9 14970 tg3_flag(tp, 57765_CLASS))
af36e6b6
MC
14971 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14972
8d519ab2
MC
14973 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14974 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
5f0c4a3c
MC
14975 /* Turn off the debug UART. */
14976 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
63c3a66f 14977 if (tg3_flag(tp, IS_NIC))
5f0c4a3c
MC
14978 /* Keep VMain power. */
14979 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14980 GRC_LCLCTRL_GPIO_OUTPUT0;
14981 }
14982
16821285
MC
14983 /* Switch out of Vaux if it is a NIC */
14984 tg3_pwrsrc_switch_to_vmain(tp);
1da177e4 14985
1da177e4
LT
14986 /* Derive initial jumbo mode from MTU assigned in
14987 * ether_setup() via the alloc_etherdev() call
14988 */
63c3a66f
JP
14989 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14990 tg3_flag_set(tp, JUMBO_RING_ENABLE);
1da177e4
LT
14991
14992 /* Determine WakeOnLan speed to use. */
14993 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14994 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14995 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14996 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
63c3a66f 14997 tg3_flag_clear(tp, WOL_SPEED_100MB);
1da177e4 14998 } else {
63c3a66f 14999 tg3_flag_set(tp, WOL_SPEED_100MB);
1da177e4
LT
15000 }
15001
7f97a4bd 15002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
f07e9af3 15003 tp->phy_flags |= TG3_PHYFLG_IS_FET;
7f97a4bd 15004
1da177e4 15005 /* A few boards don't want Ethernet@WireSpeed phy feature */
6ff6f81d
MC
15006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15007 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
1da177e4 15008 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 15009 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
f07e9af3
MC
15010 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15011 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15012 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
1da177e4
LT
15013
15014 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15015 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
f07e9af3 15016 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
1da177e4 15017 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
f07e9af3 15018 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
1da177e4 15019
63c3a66f 15020 if (tg3_flag(tp, 5705_PLUS) &&
f07e9af3 15021 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
321d32a0 15022 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
f6eb9b1f 15023 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
63c3a66f 15024 !tg3_flag(tp, 57765_PLUS)) {
c424cb24 15025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 15026 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
15027 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15028 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
d4011ada
MC
15029 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15030 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
f07e9af3 15031 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
c1d2a196 15032 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
f07e9af3 15033 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
321d32a0 15034 } else
f07e9af3 15035 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
c424cb24 15036 }
1da177e4 15037
b2a5c19c
MC
15038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15039 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15040 tp->phy_otp = tg3_read_otp_phycfg(tp);
15041 if (tp->phy_otp == 0)
15042 tp->phy_otp = TG3_OTP_DEFAULT;
15043 }
15044
63c3a66f 15045 if (tg3_flag(tp, CPMU_PRESENT))
8ef21428
MC
15046 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15047 else
15048 tp->mi_mode = MAC_MI_MODE_BASE;
15049
1da177e4 15050 tp->coalesce_mode = 0;
1da177e4
LT
15051 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15052 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15053 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15054
4d958473
MC
15055 /* Set these bits to enable statistics workaround. */
15056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15057 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15058 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15059 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15060 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15061 }
15062
321d32a0
MC
15063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
63c3a66f 15065 tg3_flag_set(tp, USE_PHYLIB);
57e6983c 15066
158d7abd
MC
15067 err = tg3_mdio_init(tp);
15068 if (err)
15069 return err;
1da177e4
LT
15070
15071 /* Initialize data/descriptor byte/word swapping. */
15072 val = tr32(GRC_MODE);
f2096f94
MC
15073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15074 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15075 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15076 GRC_MODE_B2HRX_ENABLE |
15077 GRC_MODE_HTX2B_ENABLE |
15078 GRC_MODE_HOST_STACKUP);
15079 else
15080 val &= GRC_MODE_HOST_STACKUP;
15081
1da177e4
LT
15082 tw32(GRC_MODE, val | tp->grc_mode);
15083
15084 tg3_switch_clocks(tp);
15085
15086 /* Clear this out for sanity. */
15087 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15088
15089 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15090 &pci_state_reg);
15091 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
63c3a66f 15092 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
1da177e4
LT
15093 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15094
15095 if (chiprevid == CHIPREV_ID_5701_A0 ||
15096 chiprevid == CHIPREV_ID_5701_B0 ||
15097 chiprevid == CHIPREV_ID_5701_B2 ||
15098 chiprevid == CHIPREV_ID_5701_B5) {
15099 void __iomem *sram_base;
15100
15101 /* Write some dummy words into the SRAM status block
15102 * area, see if it reads back correctly. If the return
15103 * value is bad, force enable the PCIX workaround.
15104 */
15105 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15106
15107 writel(0x00000000, sram_base);
15108 writel(0x00000000, sram_base + 4);
15109 writel(0xffffffff, sram_base + 4);
15110 if (readl(sram_base) != 0x00000000)
63c3a66f 15111 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
1da177e4
LT
15112 }
15113 }
15114
15115 udelay(50);
15116 tg3_nvram_init(tp);
15117
15118 grc_misc_cfg = tr32(GRC_MISC_CFG);
15119 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15120
1da177e4
LT
15121 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15122 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15123 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
63c3a66f 15124 tg3_flag_set(tp, IS_5788);
1da177e4 15125
63c3a66f 15126 if (!tg3_flag(tp, IS_5788) &&
6ff6f81d 15127 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
63c3a66f
JP
15128 tg3_flag_set(tp, TAGGED_STATUS);
15129 if (tg3_flag(tp, TAGGED_STATUS)) {
fac9b83e
DM
15130 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15131 HOSTCC_MODE_CLRTICK_TXBD);
15132
15133 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15134 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15135 tp->misc_host_ctrl);
15136 }
15137
3bda1258 15138 /* Preserve the APE MAC_MODE bits */
63c3a66f 15139 if (tg3_flag(tp, ENABLE_APE))
d2394e6b 15140 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
3bda1258 15141 else
6e01b20b 15142 tp->mac_mode = 0;
3bda1258 15143
1da177e4
LT
15144 /* these are limited to 10/100 only */
15145 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15146 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15147 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15148 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15149 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
15150 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
15151 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
15152 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15153 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
676917d4
MC
15154 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
15155 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
321d32a0 15156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
d1101142
MC
15157 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15158 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
f07e9af3
MC
15159 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15160 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
1da177e4
LT
15161
15162 err = tg3_phy_probe(tp);
15163 if (err) {
2445e461 15164 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
1da177e4 15165 /* ... but do not return immediately ... */
b02fd9e3 15166 tg3_mdio_fini(tp);
1da177e4
LT
15167 }
15168
184b8904 15169 tg3_read_vpd(tp);
c4e6575c 15170 tg3_read_fw_ver(tp);
1da177e4 15171
f07e9af3
MC
15172 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15173 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4
LT
15174 } else {
15175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
f07e9af3 15176 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4 15177 else
f07e9af3 15178 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4
LT
15179 }
15180
15181 /* 5700 {AX,BX} chips have a broken status block link
15182 * change bit implementation, so we must use the
15183 * status register in those cases.
15184 */
15185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
63c3a66f 15186 tg3_flag_set(tp, USE_LINKCHG_REG);
1da177e4 15187 else
63c3a66f 15188 tg3_flag_clear(tp, USE_LINKCHG_REG);
1da177e4
LT
15189
15190 /* The led_ctrl is set during tg3_phy_probe, here we might
15191 * have to force the link status polling mechanism based
15192 * upon subsystem IDs.
15193 */
15194 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 15195 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
f07e9af3
MC
15196 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15197 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
63c3a66f 15198 tg3_flag_set(tp, USE_LINKCHG_REG);
1da177e4
LT
15199 }
15200
15201 /* For all SERDES we poll the MAC status register. */
f07e9af3 15202 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
63c3a66f 15203 tg3_flag_set(tp, POLL_SERDES);
1da177e4 15204 else
63c3a66f 15205 tg3_flag_clear(tp, POLL_SERDES);
1da177e4 15206
9205fd9c 15207 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
d2757fc4 15208 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
1da177e4 15209 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
63c3a66f 15210 tg3_flag(tp, PCIX_MODE)) {
9205fd9c 15211 tp->rx_offset = NET_SKB_PAD;
d2757fc4 15212#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
9dc7a113 15213 tp->rx_copy_thresh = ~(u16)0;
d2757fc4
MC
15214#endif
15215 }
1da177e4 15216
2c49a44d
MC
15217 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15218 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
7cb32cf2
MC
15219 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15220
2c49a44d 15221 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
f92905de
MC
15222
15223 /* Increment the rx prod index on the rx std ring by at most
15224 * 8 for these chips to workaround hw errata.
15225 */
15226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15229 tp->rx_std_max_post = 8;
15230
63c3a66f 15231 if (tg3_flag(tp, ASPM_WORKAROUND))
8ed5d97e
MC
15232 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15233 PCIE_PWR_MGMT_L1_THRESH_MSK;
15234
1da177e4
LT
15235 return err;
15236}
15237
49b6e95f 15238#ifdef CONFIG_SPARC
1da177e4
LT
15239static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15240{
15241 struct net_device *dev = tp->dev;
15242 struct pci_dev *pdev = tp->pdev;
49b6e95f 15243 struct device_node *dp = pci_device_to_OF_node(pdev);
374d4cac 15244 const unsigned char *addr;
49b6e95f
DM
15245 int len;
15246
15247 addr = of_get_property(dp, "local-mac-address", &len);
15248 if (addr && len == 6) {
15249 memcpy(dev->dev_addr, addr, 6);
15250 memcpy(dev->perm_addr, dev->dev_addr, 6);
15251 return 0;
1da177e4
LT
15252 }
15253 return -ENODEV;
15254}
15255
15256static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15257{
15258 struct net_device *dev = tp->dev;
15259
15260 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 15261 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
15262 return 0;
15263}
15264#endif
15265
15266static int __devinit tg3_get_device_address(struct tg3 *tp)
15267{
15268 struct net_device *dev = tp->dev;
15269 u32 hi, lo, mac_offset;
008652b3 15270 int addr_ok = 0;
1da177e4 15271
49b6e95f 15272#ifdef CONFIG_SPARC
1da177e4
LT
15273 if (!tg3_get_macaddr_sparc(tp))
15274 return 0;
15275#endif
15276
15277 mac_offset = 0x7c;
6ff6f81d 15278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
63c3a66f 15279 tg3_flag(tp, 5780_CLASS)) {
1da177e4
LT
15280 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15281 mac_offset = 0xcc;
15282 if (tg3_nvram_lock(tp))
15283 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15284 else
15285 tg3_nvram_unlock(tp);
63c3a66f 15286 } else if (tg3_flag(tp, 5717_PLUS)) {
69f11c99 15287 if (tp->pci_fn & 1)
a1b950d5 15288 mac_offset = 0xcc;
69f11c99 15289 if (tp->pci_fn > 1)
a50d0796 15290 mac_offset += 0x18c;
a1b950d5 15291 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
b5d3772c 15292 mac_offset = 0x10;
1da177e4
LT
15293
15294 /* First try to get it from MAC address mailbox. */
15295 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15296 if ((hi >> 16) == 0x484b) {
15297 dev->dev_addr[0] = (hi >> 8) & 0xff;
15298 dev->dev_addr[1] = (hi >> 0) & 0xff;
15299
15300 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15301 dev->dev_addr[2] = (lo >> 24) & 0xff;
15302 dev->dev_addr[3] = (lo >> 16) & 0xff;
15303 dev->dev_addr[4] = (lo >> 8) & 0xff;
15304 dev->dev_addr[5] = (lo >> 0) & 0xff;
1da177e4 15305
008652b3
MC
15306 /* Some old bootcode may report a 0 MAC address in SRAM */
15307 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15308 }
15309 if (!addr_ok) {
15310 /* Next, try NVRAM. */
63c3a66f 15311 if (!tg3_flag(tp, NO_NVRAM) &&
df259d8c 15312 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
6d348f2c 15313 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
62cedd11
MC
15314 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15315 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
008652b3
MC
15316 }
15317 /* Finally just fetch it out of the MAC control regs. */
15318 else {
15319 hi = tr32(MAC_ADDR_0_HIGH);
15320 lo = tr32(MAC_ADDR_0_LOW);
15321
15322 dev->dev_addr[5] = lo & 0xff;
15323 dev->dev_addr[4] = (lo >> 8) & 0xff;
15324 dev->dev_addr[3] = (lo >> 16) & 0xff;
15325 dev->dev_addr[2] = (lo >> 24) & 0xff;
15326 dev->dev_addr[1] = hi & 0xff;
15327 dev->dev_addr[0] = (hi >> 8) & 0xff;
15328 }
1da177e4
LT
15329 }
15330
15331 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7582a335 15332#ifdef CONFIG_SPARC
1da177e4
LT
15333 if (!tg3_get_default_macaddr_sparc(tp))
15334 return 0;
15335#endif
15336 return -EINVAL;
15337 }
2ff43697 15338 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
15339 return 0;
15340}
15341
59e6b434
DM
15342#define BOUNDARY_SINGLE_CACHELINE 1
15343#define BOUNDARY_MULTI_CACHELINE 2
15344
15345static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15346{
15347 int cacheline_size;
15348 u8 byte;
15349 int goal;
15350
15351 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15352 if (byte == 0)
15353 cacheline_size = 1024;
15354 else
15355 cacheline_size = (int) byte * 4;
15356
15357 /* On 5703 and later chips, the boundary bits have no
15358 * effect.
15359 */
15360 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15361 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
63c3a66f 15362 !tg3_flag(tp, PCI_EXPRESS))
59e6b434
DM
15363 goto out;
15364
15365#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15366 goal = BOUNDARY_MULTI_CACHELINE;
15367#else
15368#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15369 goal = BOUNDARY_SINGLE_CACHELINE;
15370#else
15371 goal = 0;
15372#endif
15373#endif
15374
63c3a66f 15375 if (tg3_flag(tp, 57765_PLUS)) {
cbf9ca6c
MC
15376 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15377 goto out;
15378 }
15379
59e6b434
DM
15380 if (!goal)
15381 goto out;
15382
15383 /* PCI controllers on most RISC systems tend to disconnect
15384 * when a device tries to burst across a cache-line boundary.
15385 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15386 *
15387 * Unfortunately, for PCI-E there are only limited
15388 * write-side controls for this, and thus for reads
15389 * we will still get the disconnects. We'll also waste
15390 * these PCI cycles for both read and write for chips
15391 * other than 5700 and 5701 which do not implement the
15392 * boundary bits.
15393 */
63c3a66f 15394 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
59e6b434
DM
15395 switch (cacheline_size) {
15396 case 16:
15397 case 32:
15398 case 64:
15399 case 128:
15400 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15401 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15402 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15403 } else {
15404 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15405 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15406 }
15407 break;
15408
15409 case 256:
15410 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15411 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15412 break;
15413
15414 default:
15415 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15416 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15417 break;
855e1111 15418 }
63c3a66f 15419 } else if (tg3_flag(tp, PCI_EXPRESS)) {
59e6b434
DM
15420 switch (cacheline_size) {
15421 case 16:
15422 case 32:
15423 case 64:
15424 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15425 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15426 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15427 break;
15428 }
15429 /* fallthrough */
15430 case 128:
15431 default:
15432 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15433 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15434 break;
855e1111 15435 }
59e6b434
DM
15436 } else {
15437 switch (cacheline_size) {
15438 case 16:
15439 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15440 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15441 DMA_RWCTRL_WRITE_BNDRY_16);
15442 break;
15443 }
15444 /* fallthrough */
15445 case 32:
15446 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15447 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15448 DMA_RWCTRL_WRITE_BNDRY_32);
15449 break;
15450 }
15451 /* fallthrough */
15452 case 64:
15453 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15454 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15455 DMA_RWCTRL_WRITE_BNDRY_64);
15456 break;
15457 }
15458 /* fallthrough */
15459 case 128:
15460 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15461 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15462 DMA_RWCTRL_WRITE_BNDRY_128);
15463 break;
15464 }
15465 /* fallthrough */
15466 case 256:
15467 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15468 DMA_RWCTRL_WRITE_BNDRY_256);
15469 break;
15470 case 512:
15471 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15472 DMA_RWCTRL_WRITE_BNDRY_512);
15473 break;
15474 case 1024:
15475 default:
15476 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15477 DMA_RWCTRL_WRITE_BNDRY_1024);
15478 break;
855e1111 15479 }
59e6b434
DM
15480 }
15481
15482out:
15483 return val;
15484}
15485
1da177e4
LT
15486static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15487{
15488 struct tg3_internal_buffer_desc test_desc;
15489 u32 sram_dma_descs;
15490 int i, ret;
15491
15492 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15493
15494 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15495 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15496 tw32(RDMAC_STATUS, 0);
15497 tw32(WDMAC_STATUS, 0);
15498
15499 tw32(BUFMGR_MODE, 0);
15500 tw32(FTQ_RESET, 0);
15501
15502 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15503 test_desc.addr_lo = buf_dma & 0xffffffff;
15504 test_desc.nic_mbuf = 0x00002100;
15505 test_desc.len = size;
15506
15507 /*
15508 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15509 * the *second* time the tg3 driver was getting loaded after an
15510 * initial scan.
15511 *
15512 * Broadcom tells me:
15513 * ...the DMA engine is connected to the GRC block and a DMA
15514 * reset may affect the GRC block in some unpredictable way...
15515 * The behavior of resets to individual blocks has not been tested.
15516 *
15517 * Broadcom noted the GRC reset will also reset all sub-components.
15518 */
15519 if (to_device) {
15520 test_desc.cqid_sqid = (13 << 8) | 2;
15521
15522 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15523 udelay(40);
15524 } else {
15525 test_desc.cqid_sqid = (16 << 8) | 7;
15526
15527 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15528 udelay(40);
15529 }
15530 test_desc.flags = 0x00000005;
15531
15532 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15533 u32 val;
15534
15535 val = *(((u32 *)&test_desc) + i);
15536 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15537 sram_dma_descs + (i * sizeof(u32)));
15538 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15539 }
15540 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15541
859a5887 15542 if (to_device)
1da177e4 15543 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
859a5887 15544 else
1da177e4 15545 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
1da177e4
LT
15546
15547 ret = -ENODEV;
15548 for (i = 0; i < 40; i++) {
15549 u32 val;
15550
15551 if (to_device)
15552 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15553 else
15554 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15555 if ((val & 0xffff) == sram_dma_descs) {
15556 ret = 0;
15557 break;
15558 }
15559
15560 udelay(100);
15561 }
15562
15563 return ret;
15564}
15565
/* Size of the host buffer used by the tg3_test_dma() loopback test. */
#define TEST_BUFFER_SIZE	0x2000

/* Host bridges known to expose the 5700/5701 write-DMA bug even when
 * the DMA self test passes; if one is present, tg3_test_dma() forces
 * the conservative 16-byte write boundary anyway.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
15572
1da177e4
LT
/* Program TG3PCI_DMA_RW_CTRL with per-chip watermarks/workarounds and,
 * on 5700/5701 parts only, run a host<->device DMA loopback test to
 * detect the write-DMA hardware bug.  On failure at the aggressive
 * write boundary the boundary is tightened to 16 bytes and the test is
 * retried.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	/* Coherent buffer used as the source/destination of the test DMA. */
	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765-class chips take the value computed above as-is. */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* Conventional PCI-X: watermarks depend on ASIC rev. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loopback test for the write-DMA bug. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption seen: fall back to the 16-byte write
			 * boundary once; a second failure is fatal.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15762
1da177e4
LT
/* Select default buffer-manager watermarks by chip family.
 *
 * NOTE(review): families are tested newest-first; a 57765-class chip
 * presumably also carries the 5705_PLUS flag, so the ordering of the
 * else-if chain appears load-bearing -- confirm against
 * tg3_get_invariants() before reordering.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 overrides two of the 5705 defaults. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	/* DMA descriptor watermarks are family-independent. */
	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
15818
15819static char * __devinit tg3_phy_string(struct tg3 *tp)
15820{
79eb6904
MC
15821 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15822 case TG3_PHY_ID_BCM5400: return "5400";
15823 case TG3_PHY_ID_BCM5401: return "5401";
15824 case TG3_PHY_ID_BCM5411: return "5411";
15825 case TG3_PHY_ID_BCM5701: return "5701";
15826 case TG3_PHY_ID_BCM5703: return "5703";
15827 case TG3_PHY_ID_BCM5704: return "5704";
15828 case TG3_PHY_ID_BCM5705: return "5705";
15829 case TG3_PHY_ID_BCM5750: return "5750";
15830 case TG3_PHY_ID_BCM5752: return "5752";
15831 case TG3_PHY_ID_BCM5714: return "5714";
15832 case TG3_PHY_ID_BCM5780: return "5780";
15833 case TG3_PHY_ID_BCM5755: return "5755";
15834 case TG3_PHY_ID_BCM5787: return "5787";
15835 case TG3_PHY_ID_BCM5784: return "5784";
15836 case TG3_PHY_ID_BCM5756: return "5722/5756";
15837 case TG3_PHY_ID_BCM5906: return "5906";
15838 case TG3_PHY_ID_BCM5761: return "5761";
15839 case TG3_PHY_ID_BCM5718C: return "5718C";
15840 case TG3_PHY_ID_BCM5718S: return "5718S";
15841 case TG3_PHY_ID_BCM57765: return "57765";
302b500b 15842 case TG3_PHY_ID_BCM5719C: return "5719C";
6418f2c1 15843 case TG3_PHY_ID_BCM5720C: return "5720C";
79eb6904 15844 case TG3_PHY_ID_BCM8002: return "8002/serdes";
1da177e4
LT
15845 case 0: return "serdes";
15846 default: return "unknown";
855e1111 15847 }
1da177e4
LT
15848}
15849
f9804ddb
MC
15850static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15851{
63c3a66f 15852 if (tg3_flag(tp, PCI_EXPRESS)) {
f9804ddb
MC
15853 strcpy(str, "PCI Express");
15854 return str;
63c3a66f 15855 } else if (tg3_flag(tp, PCIX_MODE)) {
f9804ddb
MC
15856 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15857
15858 strcpy(str, "PCIX:");
15859
15860 if ((clock_ctrl == 7) ||
15861 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15862 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15863 strcat(str, "133MHz");
15864 else if (clock_ctrl == 0)
15865 strcat(str, "33MHz");
15866 else if (clock_ctrl == 2)
15867 strcat(str, "50MHz");
15868 else if (clock_ctrl == 4)
15869 strcat(str, "66MHz");
15870 else if (clock_ctrl == 6)
15871 strcat(str, "100MHz");
f9804ddb
MC
15872 } else {
15873 strcpy(str, "PCI:");
63c3a66f 15874 if (tg3_flag(tp, PCI_HIGH_SPEED))
f9804ddb
MC
15875 strcat(str, "66MHz");
15876 else
15877 strcat(str, "33MHz");
15878 }
63c3a66f 15879 if (tg3_flag(tp, PCI_32BIT))
f9804ddb
MC
15880 strcat(str, ":32-bit");
15881 else
15882 strcat(str, ":64-bit");
15883 return str;
15884}
15885
15f9850d
DM
/* Fill tp->coal with the driver's default interrupt-coalescing
 * parameters, adjusted for the chip's coalesce mode; 5705+ chips do
 * not use the per-IRQ or statistics coalescing fields.
 */
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	/* CLRTICK modes use different tick defaults. */
	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
15916
1da177e4
LT
/* PCI probe callback: bring up one Tigon3 device.
 *
 * Stages: enable/claim the PCI device, map BARs (plus the APE BAR on
 * chips that have one), read chip invariants, choose DMA masks,
 * compute netdev feature flags, obtain the MAC address, sanity-test
 * the DMA engine, set up per-vector mailboxes, and finally register
 * the net device.  Errors unwind through the goto ladder in reverse
 * order of acquisition.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These device ids carry an APE (management processor); map its
	 * register BAR as well.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign interrupt/consumer/producer mailbox addresses to each
	 * NAPI context.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

	/* Error unwind: each label undoes everything acquired above it. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
16295
/* PCI remove callback: unwind everything tg3_init_one() set up, in
 * reverse order of acquisition.
 */
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		/* release_firmware() accepts a NULL pointer. */
		release_firmware(tp->fw);

		/* Make sure no reset work runs during/after teardown. */
		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
16327
aa6027ca 16328#ifdef CONFIG_PM_SLEEP
c866b7ea 16329static int tg3_suspend(struct device *device)
1da177e4 16330{
c866b7ea 16331 struct pci_dev *pdev = to_pci_dev(device);
1da177e4
LT
16332 struct net_device *dev = pci_get_drvdata(pdev);
16333 struct tg3 *tp = netdev_priv(dev);
16334 int err;
16335
16336 if (!netif_running(dev))
16337 return 0;
16338
db219973 16339 tg3_reset_task_cancel(tp);
b02fd9e3 16340 tg3_phy_stop(tp);
1da177e4
LT
16341 tg3_netif_stop(tp);
16342
21f7638e 16343 tg3_timer_stop(tp);
1da177e4 16344
f47c11ee 16345 tg3_full_lock(tp, 1);
1da177e4 16346 tg3_disable_ints(tp);
f47c11ee 16347 tg3_full_unlock(tp);
1da177e4
LT
16348
16349 netif_device_detach(dev);
16350
f47c11ee 16351 tg3_full_lock(tp, 0);
944d980e 16352 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
63c3a66f 16353 tg3_flag_clear(tp, INIT_COMPLETE);
f47c11ee 16354 tg3_full_unlock(tp);
1da177e4 16355
c866b7ea 16356 err = tg3_power_down_prepare(tp);
1da177e4 16357 if (err) {
b02fd9e3
MC
16358 int err2;
16359
f47c11ee 16360 tg3_full_lock(tp, 0);
1da177e4 16361
63c3a66f 16362 tg3_flag_set(tp, INIT_COMPLETE);
b02fd9e3
MC
16363 err2 = tg3_restart_hw(tp, 1);
16364 if (err2)
b9ec6c1b 16365 goto out;
1da177e4 16366
21f7638e 16367 tg3_timer_start(tp);
1da177e4
LT
16368
16369 netif_device_attach(dev);
16370 tg3_netif_start(tp);
16371
b9ec6c1b 16372out:
f47c11ee 16373 tg3_full_unlock(tp);
b02fd9e3
MC
16374
16375 if (!err2)
16376 tg3_phy_start(tp);
1da177e4
LT
16377 }
16378
16379 return err;
16380}
16381
/* dev_pm_ops .resume hook: restart the hardware and networking state
 * that tg3_suspend() quiesced.  Returns 0 or the tg3_restart_hw() error.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
16413
/* Wire the suspend/resume hooks into dev_pm_ops when PM sleep support
 * is configured; otherwise register no PM callbacks at all.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
c866b7ea 16422
b45aa2f6
MC
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It quiesces the interface and
 * asks the PCI core for a slot reset, or for disconnection if the
 * failure is permanent.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
16471
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	/* Restore config space saved at probe, then re-save it so a
	 * subsequent error recovery starts from this state.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
16515
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.  It restarts the
 * hardware and re-attaches the interface.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
16554
/* PCI AER (advanced error recovery) callbacks. */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
16560
1da177e4
LT
/* PCI driver descriptor tying the probe/remove/error/PM callbacks to
 * the tg3 device id table.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
16569
/* Module entry point: register the PCI driver. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
16574
/* Module exit point: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
16579
16580module_init(tg3_init);
16581module_exit(tg3_cleanup);
This page took 5.420902 seconds and 5 git commands to generate.