tg3: Set 10_100_ONLY flag for additional 10/100 Mbps devices
[deliverable/linux.git] / drivers / net / ethernet / broadcom / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
9e056c03 7 * Copyright (C) 2005-2012 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
6867c843 21#include <linux/stringify.h>
1da177e4
LT
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
14c85021 27#include <linux/in.h>
1da177e4 28#include <linux/init.h>
a6b7a407 29#include <linux/interrupt.h>
1da177e4
LT
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/ethtool.h>
3110f5f5 36#include <linux/mdio.h>
1da177e4 37#include <linux/mii.h>
158d7abd 38#include <linux/phy.h>
a9daf367 39#include <linux/brcmphy.h>
1da177e4
LT
40#include <linux/if_vlan.h>
41#include <linux/ip.h>
42#include <linux/tcp.h>
43#include <linux/workqueue.h>
61487480 44#include <linux/prefetch.h>
f9a5f7d3 45#include <linux/dma-mapping.h>
077f849d 46#include <linux/firmware.h>
aed93e0b
MC
47#include <linux/hwmon.h>
48#include <linux/hwmon-sysfs.h>
1da177e4
LT
49
50#include <net/checksum.h>
c9bdd4b5 51#include <net/ip.h>
1da177e4 52
27fd9de8 53#include <linux/io.h>
1da177e4 54#include <asm/byteorder.h>
27fd9de8 55#include <linux/uaccess.h>
1da177e4 56
49b6e95f 57#ifdef CONFIG_SPARC
1da177e4 58#include <asm/idprom.h>
49b6e95f 59#include <asm/prom.h>
1da177e4
LT
60#endif
61
63532394
MC
62#define BAR_0 0
63#define BAR_2 2
64
1da177e4
LT
65#include "tg3.h"
66
63c3a66f
JP
67/* Functions & macros to verify TG3_FLAGS types */
68
69static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70{
71 return test_bit(flag, bits);
72}
73
74static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
75{
76 set_bit(flag, bits);
77}
78
79static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80{
81 clear_bit(flag, bits);
82}
83
84#define tg3_flag(tp, flag) \
85 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
86#define tg3_flag_set(tp, flag) \
87 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
88#define tg3_flag_clear(tp, flag) \
89 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
90
1da177e4 91#define DRV_MODULE_NAME "tg3"
6867c843 92#define TG3_MAJ_NUM 3
bd473da3 93#define TG3_MIN_NUM 126
6867c843
MC
94#define DRV_MODULE_VERSION \
95 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
bd473da3 96#define DRV_MODULE_RELDATE "November 05, 2012"
1da177e4 97
fd6d3f0e
MC
98#define RESET_KIND_SHUTDOWN 0
99#define RESET_KIND_INIT 1
100#define RESET_KIND_SUSPEND 2
101
1da177e4
LT
102#define TG3_DEF_RX_MODE 0
103#define TG3_DEF_TX_MODE 0
104#define TG3_DEF_MSG_ENABLE \
105 (NETIF_MSG_DRV | \
106 NETIF_MSG_PROBE | \
107 NETIF_MSG_LINK | \
108 NETIF_MSG_TIMER | \
109 NETIF_MSG_IFDOWN | \
110 NETIF_MSG_IFUP | \
111 NETIF_MSG_RX_ERR | \
112 NETIF_MSG_TX_ERR)
113
520b2756
MC
114#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
115
1da177e4
LT
116/* length of time before we decide the hardware is borked,
117 * and dev->tx_timeout() should be called to fix the problem
118 */
63c3a66f 119
1da177e4
LT
120#define TG3_TX_TIMEOUT (5 * HZ)
121
122/* hardware minimum and maximum for a single frame's data payload */
123#define TG3_MIN_MTU 60
124#define TG3_MAX_MTU(tp) \
63c3a66f 125 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
126
127/* These numbers seem to be hard coded in the NIC firmware somehow.
128 * You can't change the ring sizes, but you can change where you place
129 * them in the NIC onboard memory.
130 */
7cb32cf2 131#define TG3_RX_STD_RING_SIZE(tp) \
63c3a66f 132 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
de9f5230 133 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
1da177e4 134#define TG3_DEF_RX_RING_PENDING 200
7cb32cf2 135#define TG3_RX_JMB_RING_SIZE(tp) \
63c3a66f 136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
de9f5230 137 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
1da177e4
LT
138#define TG3_DEF_RX_JUMBO_RING_PENDING 100
139
140/* Do not place this n-ring entries value into the tp struct itself,
141 * we really want to expose these constants to GCC so that modulo et
142 * al. operations are done with shifts and masks instead of with
143 * hw multiply/modulo instructions. Another solution would be to
144 * replace things like '% foo' with '& (foo - 1)'.
145 */
1da177e4
LT
146
147#define TG3_TX_RING_SIZE 512
148#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
149
2c49a44d
MC
150#define TG3_RX_STD_RING_BYTES(tp) \
151 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152#define TG3_RX_JMB_RING_BYTES(tp) \
153 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154#define TG3_RX_RCB_RING_BYTES(tp) \
7cb32cf2 155 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
1da177e4
LT
156#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
157 TG3_TX_RING_SIZE)
1da177e4
LT
158#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
159
287be12e
MC
160#define TG3_DMA_BYTE_ENAB 64
161
162#define TG3_RX_STD_DMA_SZ 1536
163#define TG3_RX_JMB_DMA_SZ 9046
164
165#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
166
167#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
1da177e4 169
2c49a44d
MC
170#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
2b2cdb65 172
2c49a44d
MC
173#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
2b2cdb65 175
d2757fc4
MC
176/* Due to a hardware bug, the 5701 can only DMA to memory addresses
177 * that are at least dword aligned when used in PCIX mode. The driver
178 * works around this bug by double copying the packet. This workaround
179 * is built into the normal double copy length check for efficiency.
180 *
181 * However, the double copy is only necessary on those architectures
182 * where unaligned memory accesses are inefficient. For those architectures
183 * where unaligned memory accesses incur little penalty, we can reintegrate
184 * the 5701 in the normal rx path. Doing so saves a device structure
185 * dereference by hardcoding the double copy threshold in place.
186 */
187#define TG3_RX_COPY_THRESHOLD 256
188#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
190#else
191 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
192#endif
193
81389f57
MC
194#if (NET_IP_ALIGN != 0)
195#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
196#else
9205fd9c 197#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
81389f57
MC
198#endif
199
1da177e4 200/* minimum number of free TX descriptors required to wake up TX process */
f3f3f27e 201#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
55086ad9 202#define TG3_TX_BD_DMA_MAX_2K 2048
a4cb428d 203#define TG3_TX_BD_DMA_MAX_4K 4096
1da177e4 204
ad829268
MC
205#define TG3_RAW_IP_ALIGN 2
206
c6cdf436 207#define TG3_FW_UPDATE_TIMEOUT_SEC 5
21f7638e 208#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
c6cdf436 209
077f849d
JSR
210#define FIRMWARE_TG3 "tigon/tg3.bin"
211#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
212#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
213
1da177e4 214static char version[] __devinitdata =
05dbe005 215 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
1da177e4
LT
216
217MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
218MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
219MODULE_LICENSE("GPL");
220MODULE_VERSION(DRV_MODULE_VERSION);
077f849d
JSR
221MODULE_FIRMWARE(FIRMWARE_TG3);
222MODULE_FIRMWARE(FIRMWARE_TG3TSO);
223MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
224
1da177e4
LT
225static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
226module_param(tg3_debug, int, 0);
227MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
228
3d567e0e
NNS
229#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
230#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
231
a3aa1884 232static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
13185217
HK
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
3d567e0e
NNS
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
252 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
253 TG3_DRV_DATA_FLAG_5705_10_100},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
255 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
256 TG3_DRV_DATA_FLAG_5705_10_100},
13185217 257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
3d567e0e
NNS
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
259 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 TG3_DRV_DATA_FLAG_5705_10_100},
13185217 261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217 263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
13185217 264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
3d567e0e
NNS
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
266 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
13185217
HK
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
3d567e0e
NNS
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
13185217
HK
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
3d567e0e
NNS
280 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
281 PCI_VENDOR_ID_LENOVO,
282 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
283 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
13185217 284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
3d567e0e
NNS
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
286 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
13185217
HK
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
6c7af27c 298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
9936bcf6
MC
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
c88e668b
MC
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
2befdcea
MC
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
3d567e0e
NNS
305 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
306 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
307 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
308 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
309 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
310 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321d32a0
MC
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
3d567e0e
NNS
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
314 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
5e7ccf20 315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
5001e2f6 316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
79d49695 317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
5001e2f6 318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
b0f75221
MC
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
3d567e0e
NNS
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
324 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
326 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
302b500b 327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
ba1f3c76 328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
02eca3f5 329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
13185217
HK
330 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
331 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
332 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
333 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
334 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
335 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
336 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
1dcb14d9 337 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
13185217 338 {}
1da177e4
LT
339};
340
341MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
342
50da859d 343static const struct {
1da177e4 344 const char string[ETH_GSTRING_LEN];
48fa55a0 345} ethtool_stats_keys[] = {
1da177e4
LT
346 { "rx_octets" },
347 { "rx_fragments" },
348 { "rx_ucast_packets" },
349 { "rx_mcast_packets" },
350 { "rx_bcast_packets" },
351 { "rx_fcs_errors" },
352 { "rx_align_errors" },
353 { "rx_xon_pause_rcvd" },
354 { "rx_xoff_pause_rcvd" },
355 { "rx_mac_ctrl_rcvd" },
356 { "rx_xoff_entered" },
357 { "rx_frame_too_long_errors" },
358 { "rx_jabbers" },
359 { "rx_undersize_packets" },
360 { "rx_in_length_errors" },
361 { "rx_out_length_errors" },
362 { "rx_64_or_less_octet_packets" },
363 { "rx_65_to_127_octet_packets" },
364 { "rx_128_to_255_octet_packets" },
365 { "rx_256_to_511_octet_packets" },
366 { "rx_512_to_1023_octet_packets" },
367 { "rx_1024_to_1522_octet_packets" },
368 { "rx_1523_to_2047_octet_packets" },
369 { "rx_2048_to_4095_octet_packets" },
370 { "rx_4096_to_8191_octet_packets" },
371 { "rx_8192_to_9022_octet_packets" },
372
373 { "tx_octets" },
374 { "tx_collisions" },
375
376 { "tx_xon_sent" },
377 { "tx_xoff_sent" },
378 { "tx_flow_control" },
379 { "tx_mac_errors" },
380 { "tx_single_collisions" },
381 { "tx_mult_collisions" },
382 { "tx_deferred" },
383 { "tx_excessive_collisions" },
384 { "tx_late_collisions" },
385 { "tx_collide_2times" },
386 { "tx_collide_3times" },
387 { "tx_collide_4times" },
388 { "tx_collide_5times" },
389 { "tx_collide_6times" },
390 { "tx_collide_7times" },
391 { "tx_collide_8times" },
392 { "tx_collide_9times" },
393 { "tx_collide_10times" },
394 { "tx_collide_11times" },
395 { "tx_collide_12times" },
396 { "tx_collide_13times" },
397 { "tx_collide_14times" },
398 { "tx_collide_15times" },
399 { "tx_ucast_packets" },
400 { "tx_mcast_packets" },
401 { "tx_bcast_packets" },
402 { "tx_carrier_sense_errors" },
403 { "tx_discards" },
404 { "tx_errors" },
405
406 { "dma_writeq_full" },
407 { "dma_write_prioq_full" },
408 { "rxbds_empty" },
409 { "rx_discards" },
410 { "rx_errors" },
411 { "rx_threshold_hit" },
412
413 { "dma_readq_full" },
414 { "dma_read_prioq_full" },
415 { "tx_comp_queue_full" },
416
417 { "ring_set_send_prod_index" },
418 { "ring_status_update" },
419 { "nic_irqs" },
420 { "nic_avoided_irqs" },
4452d099
MC
421 { "nic_tx_threshold_hit" },
422
423 { "mbuf_lwm_thresh_hit" },
1da177e4
LT
424};
425
48fa55a0
MC
426#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
427
428
50da859d 429static const struct {
4cafd3f5 430 const char string[ETH_GSTRING_LEN];
48fa55a0 431} ethtool_test_keys[] = {
28a45957
MC
432 { "nvram test (online) " },
433 { "link test (online) " },
434 { "register test (offline)" },
435 { "memory test (offline)" },
436 { "mac loopback test (offline)" },
437 { "phy loopback test (offline)" },
941ec90f 438 { "ext loopback test (offline)" },
28a45957 439 { "interrupt test (offline)" },
4cafd3f5
MC
440};
441
48fa55a0
MC
442#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
443
444
b401e9e2
MC
445static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
446{
447 writel(val, tp->regs + off);
448}
449
450static u32 tg3_read32(struct tg3 *tp, u32 off)
451{
de6f31eb 452 return readl(tp->regs + off);
b401e9e2
MC
453}
454
0d3031d9
MC
455static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
456{
457 writel(val, tp->aperegs + off);
458}
459
460static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
461{
de6f31eb 462 return readl(tp->aperegs + off);
0d3031d9
MC
463}
464
1da177e4
LT
465static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
466{
6892914f
MC
467 unsigned long flags;
468
469 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
470 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
471 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 472 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
473}
474
475static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
476{
477 writel(val, tp->regs + off);
478 readl(tp->regs + off);
1da177e4
LT
479}
480
6892914f 481static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 482{
6892914f
MC
483 unsigned long flags;
484 u32 val;
485
486 spin_lock_irqsave(&tp->indirect_lock, flags);
487 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
488 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
489 spin_unlock_irqrestore(&tp->indirect_lock, flags);
490 return val;
491}
492
493static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
494{
495 unsigned long flags;
496
497 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
498 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
499 TG3_64BIT_REG_LOW, val);
500 return;
501 }
66711e66 502 if (off == TG3_RX_STD_PROD_IDX_REG) {
6892914f
MC
503 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
504 TG3_64BIT_REG_LOW, val);
505 return;
1da177e4 506 }
6892914f
MC
507
508 spin_lock_irqsave(&tp->indirect_lock, flags);
509 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
510 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
511 spin_unlock_irqrestore(&tp->indirect_lock, flags);
512
513 /* In indirect mode when disabling interrupts, we also need
514 * to clear the interrupt bit in the GRC local ctrl register.
515 */
516 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
517 (val == 0x1)) {
518 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
519 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
520 }
521}
522
523static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
524{
525 unsigned long flags;
526 u32 val;
527
528 spin_lock_irqsave(&tp->indirect_lock, flags);
529 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
530 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
531 spin_unlock_irqrestore(&tp->indirect_lock, flags);
532 return val;
533}
534
b401e9e2
MC
535/* usec_wait specifies the wait time in usec when writing to certain registers
536 * where it is unsafe to read back the register without some delay.
537 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
538 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
539 */
540static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
6892914f 541{
63c3a66f 542 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
b401e9e2
MC
543 /* Non-posted methods */
544 tp->write32(tp, off, val);
545 else {
546 /* Posted method */
547 tg3_write32(tp, off, val);
548 if (usec_wait)
549 udelay(usec_wait);
550 tp->read32(tp, off);
551 }
552 /* Wait again after the read for the posted method to guarantee that
553 * the wait time is met.
554 */
555 if (usec_wait)
556 udelay(usec_wait);
1da177e4
LT
557}
558
09ee929c
MC
559static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
560{
561 tp->write32_mbox(tp, off, val);
63c3a66f 562 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
6892914f 563 tp->read32_mbox(tp, off);
09ee929c
MC
564}
565
20094930 566static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
567{
568 void __iomem *mbox = tp->regs + off;
569 writel(val, mbox);
63c3a66f 570 if (tg3_flag(tp, TXD_MBOX_HWBUG))
1da177e4 571 writel(val, mbox);
63c3a66f 572 if (tg3_flag(tp, MBOX_WRITE_REORDER))
1da177e4
LT
573 readl(mbox);
574}
575
b5d3772c
MC
576static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
577{
de6f31eb 578 return readl(tp->regs + off + GRCMBOX_BASE);
b5d3772c
MC
579}
580
581static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
582{
583 writel(val, tp->regs + off + GRCMBOX_BASE);
584}
585
c6cdf436 586#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 587#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
c6cdf436
MC
588#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
589#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
590#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930 591
c6cdf436
MC
592#define tw32(reg, val) tp->write32(tp, reg, val)
593#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
594#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
595#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
596
597static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
598{
6892914f
MC
599 unsigned long flags;
600
6ff6f81d 601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
b5d3772c
MC
602 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
603 return;
604
6892914f 605 spin_lock_irqsave(&tp->indirect_lock, flags);
63c3a66f 606 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
bbadf503
MC
607 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
608 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 609
bbadf503
MC
610 /* Always leave this as zero. */
611 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
612 } else {
613 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
614 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 615
bbadf503
MC
616 /* Always leave this as zero. */
617 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
618 }
619 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
620}
621
1da177e4
LT
622static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
623{
6892914f
MC
624 unsigned long flags;
625
6ff6f81d 626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
b5d3772c
MC
627 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
628 *val = 0;
629 return;
630 }
631
6892914f 632 spin_lock_irqsave(&tp->indirect_lock, flags);
63c3a66f 633 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
bbadf503
MC
634 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
635 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 636
bbadf503
MC
637 /* Always leave this as zero. */
638 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
639 } else {
640 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
641 *val = tr32(TG3PCI_MEM_WIN_DATA);
642
643 /* Always leave this as zero. */
644 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
645 }
6892914f 646 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
647}
648
0d3031d9
MC
649static void tg3_ape_lock_init(struct tg3 *tp)
650{
651 int i;
6f5c8f83 652 u32 regbase, bit;
f92d9dc1
MC
653
654 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
655 regbase = TG3_APE_LOCK_GRANT;
656 else
657 regbase = TG3_APE_PER_LOCK_GRANT;
0d3031d9
MC
658
659 /* Make sure the driver hasn't any stale locks. */
78f94dc7
MC
660 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
661 switch (i) {
662 case TG3_APE_LOCK_PHY0:
663 case TG3_APE_LOCK_PHY1:
664 case TG3_APE_LOCK_PHY2:
665 case TG3_APE_LOCK_PHY3:
666 bit = APE_LOCK_GRANT_DRIVER;
667 break;
668 default:
669 if (!tp->pci_fn)
670 bit = APE_LOCK_GRANT_DRIVER;
671 else
672 bit = 1 << tp->pci_fn;
673 }
674 tg3_ape_write32(tp, regbase + 4 * i, bit);
6f5c8f83
MC
675 }
676
0d3031d9
MC
677}
678
679static int tg3_ape_lock(struct tg3 *tp, int locknum)
680{
681 int i, off;
682 int ret = 0;
6f5c8f83 683 u32 status, req, gnt, bit;
0d3031d9 684
63c3a66f 685 if (!tg3_flag(tp, ENABLE_APE))
0d3031d9
MC
686 return 0;
687
688 switch (locknum) {
6f5c8f83
MC
689 case TG3_APE_LOCK_GPIO:
690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
691 return 0;
33f401ae
MC
692 case TG3_APE_LOCK_GRC:
693 case TG3_APE_LOCK_MEM:
78f94dc7
MC
694 if (!tp->pci_fn)
695 bit = APE_LOCK_REQ_DRIVER;
696 else
697 bit = 1 << tp->pci_fn;
33f401ae 698 break;
8151ad57
MC
699 case TG3_APE_LOCK_PHY0:
700 case TG3_APE_LOCK_PHY1:
701 case TG3_APE_LOCK_PHY2:
702 case TG3_APE_LOCK_PHY3:
703 bit = APE_LOCK_REQ_DRIVER;
704 break;
33f401ae
MC
705 default:
706 return -EINVAL;
0d3031d9
MC
707 }
708
f92d9dc1
MC
709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
710 req = TG3_APE_LOCK_REQ;
711 gnt = TG3_APE_LOCK_GRANT;
712 } else {
713 req = TG3_APE_PER_LOCK_REQ;
714 gnt = TG3_APE_PER_LOCK_GRANT;
715 }
716
0d3031d9
MC
717 off = 4 * locknum;
718
6f5c8f83 719 tg3_ape_write32(tp, req + off, bit);
0d3031d9
MC
720
721 /* Wait for up to 1 millisecond to acquire lock. */
722 for (i = 0; i < 100; i++) {
f92d9dc1 723 status = tg3_ape_read32(tp, gnt + off);
6f5c8f83 724 if (status == bit)
0d3031d9
MC
725 break;
726 udelay(10);
727 }
728
6f5c8f83 729 if (status != bit) {
0d3031d9 730 /* Revoke the lock request. */
6f5c8f83 731 tg3_ape_write32(tp, gnt + off, bit);
0d3031d9
MC
732 ret = -EBUSY;
733 }
734
735 return ret;
736}
737
738static void tg3_ape_unlock(struct tg3 *tp, int locknum)
739{
6f5c8f83 740 u32 gnt, bit;
0d3031d9 741
63c3a66f 742 if (!tg3_flag(tp, ENABLE_APE))
0d3031d9
MC
743 return;
744
745 switch (locknum) {
6f5c8f83
MC
746 case TG3_APE_LOCK_GPIO:
747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
748 return;
33f401ae
MC
749 case TG3_APE_LOCK_GRC:
750 case TG3_APE_LOCK_MEM:
78f94dc7
MC
751 if (!tp->pci_fn)
752 bit = APE_LOCK_GRANT_DRIVER;
753 else
754 bit = 1 << tp->pci_fn;
33f401ae 755 break;
8151ad57
MC
756 case TG3_APE_LOCK_PHY0:
757 case TG3_APE_LOCK_PHY1:
758 case TG3_APE_LOCK_PHY2:
759 case TG3_APE_LOCK_PHY3:
760 bit = APE_LOCK_GRANT_DRIVER;
761 break;
33f401ae
MC
762 default:
763 return;
0d3031d9
MC
764 }
765
f92d9dc1
MC
766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
767 gnt = TG3_APE_LOCK_GRANT;
768 else
769 gnt = TG3_APE_PER_LOCK_GRANT;
770
6f5c8f83 771 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
0d3031d9
MC
772}
773
b65a372b 774static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
fd6d3f0e 775{
fd6d3f0e
MC
776 u32 apedata;
777
b65a372b
MC
778 while (timeout_us) {
779 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
780 return -EBUSY;
781
782 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
783 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
784 break;
785
786 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
787
788 udelay(10);
789 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
790 }
791
792 return timeout_us ? 0 : -EBUSY;
793}
794
cf8d55ae
MC
795static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
796{
797 u32 i, apedata;
798
799 for (i = 0; i < timeout_us / 10; i++) {
800 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
801
802 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
803 break;
804
805 udelay(10);
806 }
807
808 return i == timeout_us / 10;
809}
810
86449944
MC
811static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
812 u32 len)
cf8d55ae
MC
813{
814 int err;
815 u32 i, bufoff, msgoff, maxlen, apedata;
816
817 if (!tg3_flag(tp, APE_HAS_NCSI))
818 return 0;
819
820 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
821 if (apedata != APE_SEG_SIG_MAGIC)
822 return -ENODEV;
823
824 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
825 if (!(apedata & APE_FW_STATUS_READY))
826 return -EAGAIN;
827
828 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
829 TG3_APE_SHMEM_BASE;
830 msgoff = bufoff + 2 * sizeof(u32);
831 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
832
833 while (len) {
834 u32 length;
835
836 /* Cap xfer sizes to scratchpad limits. */
837 length = (len > maxlen) ? maxlen : len;
838 len -= length;
839
840 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
841 if (!(apedata & APE_FW_STATUS_READY))
842 return -EAGAIN;
843
844 /* Wait for up to 1 msec for APE to service previous event. */
845 err = tg3_ape_event_lock(tp, 1000);
846 if (err)
847 return err;
848
849 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
850 APE_EVENT_STATUS_SCRTCHPD_READ |
851 APE_EVENT_STATUS_EVENT_PENDING;
852 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
853
854 tg3_ape_write32(tp, bufoff, base_off);
855 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
856
857 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
858 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
859
860 base_off += length;
861
862 if (tg3_ape_wait_for_event(tp, 30000))
863 return -EAGAIN;
864
865 for (i = 0; length; i += 4, length -= 4) {
866 u32 val = tg3_ape_read32(tp, msgoff + i);
867 memcpy(data, &val, sizeof(u32));
868 data++;
869 }
870 }
871
872 return 0;
873}
874
b65a372b
MC
875static int tg3_ape_send_event(struct tg3 *tp, u32 event)
876{
877 int err;
878 u32 apedata;
fd6d3f0e
MC
879
880 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
881 if (apedata != APE_SEG_SIG_MAGIC)
b65a372b 882 return -EAGAIN;
fd6d3f0e
MC
883
884 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
885 if (!(apedata & APE_FW_STATUS_READY))
b65a372b 886 return -EAGAIN;
fd6d3f0e
MC
887
888 /* Wait for up to 1 millisecond for APE to service previous event. */
b65a372b
MC
889 err = tg3_ape_event_lock(tp, 1000);
890 if (err)
891 return err;
fd6d3f0e 892
b65a372b
MC
893 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
894 event | APE_EVENT_STATUS_EVENT_PENDING);
fd6d3f0e 895
b65a372b
MC
896 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
897 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
fd6d3f0e 898
b65a372b 899 return 0;
fd6d3f0e
MC
900}
901
902static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
903{
904 u32 event;
905 u32 apedata;
906
907 if (!tg3_flag(tp, ENABLE_APE))
908 return;
909
910 switch (kind) {
911 case RESET_KIND_INIT:
912 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
913 APE_HOST_SEG_SIG_MAGIC);
914 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
915 APE_HOST_SEG_LEN_MAGIC);
916 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
917 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
918 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
919 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
920 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
921 APE_HOST_BEHAV_NO_PHYLOCK);
922 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
923 TG3_APE_HOST_DRVR_STATE_START);
924
925 event = APE_EVENT_STATUS_STATE_START;
926 break;
927 case RESET_KIND_SHUTDOWN:
928 /* With the interface we are currently using,
929 * APE does not track driver state. Wiping
930 * out the HOST SEGMENT SIGNATURE forces
931 * the APE to assume OS absent status.
932 */
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
934
935 if (device_may_wakeup(&tp->pdev->dev) &&
936 tg3_flag(tp, WOL_ENABLE)) {
937 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
938 TG3_APE_HOST_WOL_SPEED_AUTO);
939 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
940 } else
941 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
942
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
944
945 event = APE_EVENT_STATUS_STATE_UNLOAD;
946 break;
947 case RESET_KIND_SUSPEND:
948 event = APE_EVENT_STATUS_STATE_SUSPEND;
949 break;
950 default:
951 return;
952 }
953
954 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
955
956 tg3_ape_send_event(tp, event);
957}
958
1da177e4
LT
959static void tg3_disable_ints(struct tg3 *tp)
960{
89aeb3bc
MC
961 int i;
962
1da177e4
LT
963 tw32(TG3PCI_MISC_HOST_CTRL,
964 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
89aeb3bc
MC
965 for (i = 0; i < tp->irq_max; i++)
966 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1da177e4
LT
967}
968
1da177e4
LT
969static void tg3_enable_ints(struct tg3 *tp)
970{
89aeb3bc 971 int i;
89aeb3bc 972
bbe832c0
MC
973 tp->irq_sync = 0;
974 wmb();
975
1da177e4
LT
976 tw32(TG3PCI_MISC_HOST_CTRL,
977 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
89aeb3bc 978
f89f38b8 979 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
89aeb3bc
MC
980 for (i = 0; i < tp->irq_cnt; i++) {
981 struct tg3_napi *tnapi = &tp->napi[i];
c6cdf436 982
898a56f8 983 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
63c3a66f 984 if (tg3_flag(tp, 1SHOT_MSI))
89aeb3bc 985 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
f19af9c2 986
f89f38b8 987 tp->coal_now |= tnapi->coal_now;
89aeb3bc 988 }
f19af9c2
MC
989
990 /* Force an initial interrupt */
63c3a66f 991 if (!tg3_flag(tp, TAGGED_STATUS) &&
f19af9c2
MC
992 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
993 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
994 else
f89f38b8
MC
995 tw32(HOSTCC_MODE, tp->coal_now);
996
997 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1da177e4
LT
998}
999
17375d25 1000static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
04237ddd 1001{
17375d25 1002 struct tg3 *tp = tnapi->tp;
898a56f8 1003 struct tg3_hw_status *sblk = tnapi->hw_status;
04237ddd
MC
1004 unsigned int work_exists = 0;
1005
1006 /* check for phy events */
63c3a66f 1007 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
04237ddd
MC
1008 if (sblk->status & SD_STATUS_LINK_CHG)
1009 work_exists = 1;
1010 }
f891ea16
MC
1011
1012 /* check for TX work to do */
1013 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1014 work_exists = 1;
1015
1016 /* check for RX work to do */
1017 if (tnapi->rx_rcb_prod_idx &&
8d9d7cfc 1018 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
04237ddd
MC
1019 work_exists = 1;
1020
1021 return work_exists;
1022}
1023
17375d25 1024/* tg3_int_reenable
04237ddd
MC
1025 * similar to tg3_enable_ints, but it accurately determines whether there
1026 * is new work pending and can return without flushing the PIO write
6aa20a22 1027 * which reenables interrupts
1da177e4 1028 */
17375d25 1029static void tg3_int_reenable(struct tg3_napi *tnapi)
1da177e4 1030{
17375d25
MC
1031 struct tg3 *tp = tnapi->tp;
1032
898a56f8 1033 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1da177e4
LT
1034 mmiowb();
1035
fac9b83e
DM
1036 /* When doing tagged status, this work check is unnecessary.
1037 * The last_tag we write above tells the chip which piece of
1038 * work we've completed.
1039 */
63c3a66f 1040 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
04237ddd 1041 tw32(HOSTCC_MODE, tp->coalesce_mode |
fd2ce37f 1042 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1da177e4
LT
1043}
1044
1da177e4
LT
1045static void tg3_switch_clocks(struct tg3 *tp)
1046{
f6eb9b1f 1047 u32 clock_ctrl;
1da177e4
LT
1048 u32 orig_clock_ctrl;
1049
63c3a66f 1050 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
4cf78e4f
MC
1051 return;
1052
f6eb9b1f
MC
1053 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1054
1da177e4
LT
1055 orig_clock_ctrl = clock_ctrl;
1056 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1057 CLOCK_CTRL_CLKRUN_OENABLE |
1058 0x1f);
1059 tp->pci_clock_ctrl = clock_ctrl;
1060
63c3a66f 1061 if (tg3_flag(tp, 5705_PLUS)) {
1da177e4 1062 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
b401e9e2
MC
1063 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1064 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1da177e4
LT
1065 }
1066 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
b401e9e2
MC
1067 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1068 clock_ctrl |
1069 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1070 40);
1071 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1072 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1073 40);
1da177e4 1074 }
b401e9e2 1075 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1da177e4
LT
1076}
1077
1078#define PHY_BUSY_LOOPS 5000
1079
1080static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1081{
1082 u32 frame_val;
1083 unsigned int loops;
1084 int ret;
1085
1086 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1087 tw32_f(MAC_MI_MODE,
1088 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1089 udelay(80);
1090 }
1091
8151ad57
MC
1092 tg3_ape_lock(tp, tp->phy_ape_lock);
1093
1da177e4
LT
1094 *val = 0x0;
1095
882e9793 1096 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1da177e4
LT
1097 MI_COM_PHY_ADDR_MASK);
1098 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1099 MI_COM_REG_ADDR_MASK);
1100 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
6aa20a22 1101
1da177e4
LT
1102 tw32_f(MAC_MI_COM, frame_val);
1103
1104 loops = PHY_BUSY_LOOPS;
1105 while (loops != 0) {
1106 udelay(10);
1107 frame_val = tr32(MAC_MI_COM);
1108
1109 if ((frame_val & MI_COM_BUSY) == 0) {
1110 udelay(5);
1111 frame_val = tr32(MAC_MI_COM);
1112 break;
1113 }
1114 loops -= 1;
1115 }
1116
1117 ret = -EBUSY;
1118 if (loops != 0) {
1119 *val = frame_val & MI_COM_DATA_MASK;
1120 ret = 0;
1121 }
1122
1123 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1124 tw32_f(MAC_MI_MODE, tp->mi_mode);
1125 udelay(80);
1126 }
1127
8151ad57
MC
1128 tg3_ape_unlock(tp, tp->phy_ape_lock);
1129
1da177e4
LT
1130 return ret;
1131}
1132
1133static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1134{
1135 u32 frame_val;
1136 unsigned int loops;
1137 int ret;
1138
f07e9af3 1139 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
221c5637 1140 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
b5d3772c
MC
1141 return 0;
1142
1da177e4
LT
1143 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1144 tw32_f(MAC_MI_MODE,
1145 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1146 udelay(80);
1147 }
1148
8151ad57
MC
1149 tg3_ape_lock(tp, tp->phy_ape_lock);
1150
882e9793 1151 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1da177e4
LT
1152 MI_COM_PHY_ADDR_MASK);
1153 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1154 MI_COM_REG_ADDR_MASK);
1155 frame_val |= (val & MI_COM_DATA_MASK);
1156 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
6aa20a22 1157
1da177e4
LT
1158 tw32_f(MAC_MI_COM, frame_val);
1159
1160 loops = PHY_BUSY_LOOPS;
1161 while (loops != 0) {
1162 udelay(10);
1163 frame_val = tr32(MAC_MI_COM);
1164 if ((frame_val & MI_COM_BUSY) == 0) {
1165 udelay(5);
1166 frame_val = tr32(MAC_MI_COM);
1167 break;
1168 }
1169 loops -= 1;
1170 }
1171
1172 ret = -EBUSY;
1173 if (loops != 0)
1174 ret = 0;
1175
1176 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1177 tw32_f(MAC_MI_MODE, tp->mi_mode);
1178 udelay(80);
1179 }
1180
8151ad57
MC
1181 tg3_ape_unlock(tp, tp->phy_ape_lock);
1182
1da177e4
LT
1183 return ret;
1184}
1185
b0988c15
MC
1186static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1187{
1188 int err;
1189
1190 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1191 if (err)
1192 goto done;
1193
1194 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1195 if (err)
1196 goto done;
1197
1198 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1199 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1200 if (err)
1201 goto done;
1202
1203 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1204
1205done:
1206 return err;
1207}
1208
1209static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1210{
1211 int err;
1212
1213 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1214 if (err)
1215 goto done;
1216
1217 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1218 if (err)
1219 goto done;
1220
1221 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1222 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1223 if (err)
1224 goto done;
1225
1226 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1227
1228done:
1229 return err;
1230}
1231
1232static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1233{
1234 int err;
1235
1236 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1237 if (!err)
1238 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1239
1240 return err;
1241}
1242
1243static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1244{
1245 int err;
1246
1247 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1248 if (!err)
1249 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1250
1251 return err;
1252}
1253
15ee95c3
MC
1254static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1255{
1256 int err;
1257
1258 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1259 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1260 MII_TG3_AUXCTL_SHDWSEL_MISC);
1261 if (!err)
1262 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1263
1264 return err;
1265}
1266
b4bd2929
MC
1267static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1268{
1269 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1270 set |= MII_TG3_AUXCTL_MISC_WREN;
1271
1272 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1273}
1274
1d36ba45
MC
/* Enable/disable the PHY SM DSP via the auxiliary-control shadow
 * register.  Both macros expand to an expression that yields the
 * tg3_phy_auxctl_write() status, so callers may check for failure.
 * (The DISABLE variant previously ended in a stray semicolon, which
 * made it unusable as an expression and hazardous in if/else bodies.)
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1283
95e2869a
MC
1284static int tg3_bmcr_reset(struct tg3 *tp)
1285{
1286 u32 phy_control;
1287 int limit, err;
1288
1289 /* OK, reset it, and poll the BMCR_RESET bit until it
1290 * clears or we time out.
1291 */
1292 phy_control = BMCR_RESET;
1293 err = tg3_writephy(tp, MII_BMCR, phy_control);
1294 if (err != 0)
1295 return -EBUSY;
1296
1297 limit = 5000;
1298 while (limit--) {
1299 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1300 if (err != 0)
1301 return -EBUSY;
1302
1303 if ((phy_control & BMCR_RESET) == 0) {
1304 udelay(40);
1305 break;
1306 }
1307 udelay(10);
1308 }
d4675b52 1309 if (limit < 0)
95e2869a
MC
1310 return -EBUSY;
1311
1312 return 0;
1313}
1314
158d7abd
MC
1315static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1316{
3d16543d 1317 struct tg3 *tp = bp->priv;
158d7abd
MC
1318 u32 val;
1319
24bb4fb6 1320 spin_lock_bh(&tp->lock);
158d7abd
MC
1321
1322 if (tg3_readphy(tp, reg, &val))
24bb4fb6
MC
1323 val = -EIO;
1324
1325 spin_unlock_bh(&tp->lock);
158d7abd
MC
1326
1327 return val;
1328}
1329
1330static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1331{
3d16543d 1332 struct tg3 *tp = bp->priv;
24bb4fb6 1333 u32 ret = 0;
158d7abd 1334
24bb4fb6 1335 spin_lock_bh(&tp->lock);
158d7abd
MC
1336
1337 if (tg3_writephy(tp, reg, val))
24bb4fb6 1338 ret = -EIO;
158d7abd 1339
24bb4fb6
MC
1340 spin_unlock_bh(&tp->lock);
1341
1342 return ret;
158d7abd
MC
1343}
1344
/* mii_bus ->reset hook: nothing to do for tg3. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1349
9c61d6bc 1350static void tg3_mdio_config_5785(struct tg3 *tp)
a9daf367
MC
1351{
1352 u32 val;
fcb389df 1353 struct phy_device *phydev;
a9daf367 1354
3f0e3ad7 1355 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
fcb389df 1356 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
6a443a0f
MC
1357 case PHY_ID_BCM50610:
1358 case PHY_ID_BCM50610M:
fcb389df
MC
1359 val = MAC_PHYCFG2_50610_LED_MODES;
1360 break;
6a443a0f 1361 case PHY_ID_BCMAC131:
fcb389df
MC
1362 val = MAC_PHYCFG2_AC131_LED_MODES;
1363 break;
6a443a0f 1364 case PHY_ID_RTL8211C:
fcb389df
MC
1365 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1366 break;
6a443a0f 1367 case PHY_ID_RTL8201E:
fcb389df
MC
1368 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1369 break;
1370 default:
a9daf367 1371 return;
fcb389df
MC
1372 }
1373
1374 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1375 tw32(MAC_PHYCFG2, val);
1376
1377 val = tr32(MAC_PHYCFG1);
bb85fbb6
MC
1378 val &= ~(MAC_PHYCFG1_RGMII_INT |
1379 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1380 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
fcb389df
MC
1381 tw32(MAC_PHYCFG1, val);
1382
1383 return;
1384 }
1385
63c3a66f 1386 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
fcb389df
MC
1387 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1388 MAC_PHYCFG2_FMODE_MASK_MASK |
1389 MAC_PHYCFG2_GMODE_MASK_MASK |
1390 MAC_PHYCFG2_ACT_MASK_MASK |
1391 MAC_PHYCFG2_QUAL_MASK_MASK |
1392 MAC_PHYCFG2_INBAND_ENABLE;
1393
1394 tw32(MAC_PHYCFG2, val);
a9daf367 1395
bb85fbb6
MC
1396 val = tr32(MAC_PHYCFG1);
1397 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1398 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
63c3a66f
JP
1399 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1400 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
a9daf367 1401 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
63c3a66f 1402 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
a9daf367
MC
1403 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1404 }
bb85fbb6
MC
1405 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1406 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1407 tw32(MAC_PHYCFG1, val);
a9daf367 1408
a9daf367
MC
1409 val = tr32(MAC_EXT_RGMII_MODE);
1410 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1411 MAC_RGMII_MODE_RX_QUALITY |
1412 MAC_RGMII_MODE_RX_ACTIVITY |
1413 MAC_RGMII_MODE_RX_ENG_DET |
1414 MAC_RGMII_MODE_TX_ENABLE |
1415 MAC_RGMII_MODE_TX_LOWPWR |
1416 MAC_RGMII_MODE_TX_RESET);
63c3a66f
JP
1417 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1418 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
a9daf367
MC
1419 val |= MAC_RGMII_MODE_RX_INT_B |
1420 MAC_RGMII_MODE_RX_QUALITY |
1421 MAC_RGMII_MODE_RX_ACTIVITY |
1422 MAC_RGMII_MODE_RX_ENG_DET;
63c3a66f 1423 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
a9daf367
MC
1424 val |= MAC_RGMII_MODE_TX_ENABLE |
1425 MAC_RGMII_MODE_TX_LOWPWR |
1426 MAC_RGMII_MODE_TX_RESET;
1427 }
1428 tw32(MAC_EXT_RGMII_MODE, val);
1429}
1430
158d7abd
MC
1431static void tg3_mdio_start(struct tg3 *tp)
1432{
158d7abd
MC
1433 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1434 tw32_f(MAC_MI_MODE, tp->mi_mode);
1435 udelay(80);
a9daf367 1436
63c3a66f 1437 if (tg3_flag(tp, MDIOBUS_INITED) &&
9ea4818d
MC
1438 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1439 tg3_mdio_config_5785(tp);
1440}
1441
1442static int tg3_mdio_init(struct tg3 *tp)
1443{
1444 int i;
1445 u32 reg;
1446 struct phy_device *phydev;
1447
63c3a66f 1448 if (tg3_flag(tp, 5717_PLUS)) {
9c7df915 1449 u32 is_serdes;
882e9793 1450
69f11c99 1451 tp->phy_addr = tp->pci_fn + 1;
882e9793 1452
d1ec96af
MC
1453 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1454 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1455 else
1456 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1457 TG3_CPMU_PHY_STRAP_IS_SERDES;
882e9793
MC
1458 if (is_serdes)
1459 tp->phy_addr += 7;
1460 } else
3f0e3ad7 1461 tp->phy_addr = TG3_PHY_MII_ADDR;
882e9793 1462
158d7abd
MC
1463 tg3_mdio_start(tp);
1464
63c3a66f 1465 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
158d7abd
MC
1466 return 0;
1467
298cf9be
LB
1468 tp->mdio_bus = mdiobus_alloc();
1469 if (tp->mdio_bus == NULL)
1470 return -ENOMEM;
158d7abd 1471
298cf9be
LB
1472 tp->mdio_bus->name = "tg3 mdio bus";
1473 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
158d7abd 1474 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
298cf9be
LB
1475 tp->mdio_bus->priv = tp;
1476 tp->mdio_bus->parent = &tp->pdev->dev;
1477 tp->mdio_bus->read = &tg3_mdio_read;
1478 tp->mdio_bus->write = &tg3_mdio_write;
1479 tp->mdio_bus->reset = &tg3_mdio_reset;
3f0e3ad7 1480 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
298cf9be 1481 tp->mdio_bus->irq = &tp->mdio_irq[0];
158d7abd
MC
1482
1483 for (i = 0; i < PHY_MAX_ADDR; i++)
298cf9be 1484 tp->mdio_bus->irq[i] = PHY_POLL;
158d7abd
MC
1485
1486 /* The bus registration will look for all the PHYs on the mdio bus.
1487 * Unfortunately, it does not ensure the PHY is powered up before
1488 * accessing the PHY ID registers. A chip reset is the
1489 * quickest way to bring the device back to an operational state..
1490 */
1491 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1492 tg3_bmcr_reset(tp);
1493
298cf9be 1494 i = mdiobus_register(tp->mdio_bus);
a9daf367 1495 if (i) {
ab96b241 1496 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
9c61d6bc 1497 mdiobus_free(tp->mdio_bus);
a9daf367
MC
1498 return i;
1499 }
158d7abd 1500
3f0e3ad7 1501 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
a9daf367 1502
9c61d6bc 1503 if (!phydev || !phydev->drv) {
ab96b241 1504 dev_warn(&tp->pdev->dev, "No PHY devices\n");
9c61d6bc
MC
1505 mdiobus_unregister(tp->mdio_bus);
1506 mdiobus_free(tp->mdio_bus);
1507 return -ENODEV;
1508 }
1509
1510 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
6a443a0f 1511 case PHY_ID_BCM57780:
321d32a0 1512 phydev->interface = PHY_INTERFACE_MODE_GMII;
c704dc23 1513 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
321d32a0 1514 break;
6a443a0f
MC
1515 case PHY_ID_BCM50610:
1516 case PHY_ID_BCM50610M:
32e5a8d6 1517 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
c704dc23 1518 PHY_BRCM_RX_REFCLK_UNUSED |
52fae083 1519 PHY_BRCM_DIS_TXCRXC_NOENRGY |
c704dc23 1520 PHY_BRCM_AUTO_PWRDWN_ENABLE;
63c3a66f 1521 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
a9daf367 1522 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
63c3a66f 1523 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
a9daf367 1524 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
63c3a66f 1525 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
a9daf367 1526 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
fcb389df 1527 /* fallthru */
6a443a0f 1528 case PHY_ID_RTL8211C:
fcb389df 1529 phydev->interface = PHY_INTERFACE_MODE_RGMII;
a9daf367 1530 break;
6a443a0f
MC
1531 case PHY_ID_RTL8201E:
1532 case PHY_ID_BCMAC131:
a9daf367 1533 phydev->interface = PHY_INTERFACE_MODE_MII;
cdd4e09d 1534 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
f07e9af3 1535 tp->phy_flags |= TG3_PHYFLG_IS_FET;
a9daf367
MC
1536 break;
1537 }
1538
63c3a66f 1539 tg3_flag_set(tp, MDIOBUS_INITED);
9c61d6bc
MC
1540
1541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1542 tg3_mdio_config_5785(tp);
a9daf367
MC
1543
1544 return 0;
158d7abd
MC
1545}
1546
1547static void tg3_mdio_fini(struct tg3 *tp)
1548{
63c3a66f
JP
1549 if (tg3_flag(tp, MDIOBUS_INITED)) {
1550 tg3_flag_clear(tp, MDIOBUS_INITED);
298cf9be
LB
1551 mdiobus_unregister(tp->mdio_bus);
1552 mdiobus_free(tp->mdio_bus);
158d7abd
MC
1553 }
1554}
1555
4ba526ce
MC
1556/* tp->lock is held. */
1557static inline void tg3_generate_fw_event(struct tg3 *tp)
1558{
1559 u32 val;
1560
1561 val = tr32(GRC_RX_CPU_EVENT);
1562 val |= GRC_RX_CPU_DRIVER_EVENT;
1563 tw32_f(GRC_RX_CPU_EVENT, val);
1564
1565 tp->last_event_jiffies = jiffies;
1566}
1567
1568#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1569
95e2869a
MC
1570/* tp->lock is held. */
1571static void tg3_wait_for_event_ack(struct tg3 *tp)
1572{
1573 int i;
4ba526ce
MC
1574 unsigned int delay_cnt;
1575 long time_remain;
1576
1577 /* If enough time has passed, no wait is necessary. */
1578 time_remain = (long)(tp->last_event_jiffies + 1 +
1579 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1580 (long)jiffies;
1581 if (time_remain < 0)
1582 return;
1583
1584 /* Check if we can shorten the wait time. */
1585 delay_cnt = jiffies_to_usecs(time_remain);
1586 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1587 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1588 delay_cnt = (delay_cnt >> 3) + 1;
95e2869a 1589
4ba526ce 1590 for (i = 0; i < delay_cnt; i++) {
95e2869a
MC
1591 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1592 break;
4ba526ce 1593 udelay(8);
95e2869a
MC
1594 }
1595}
1596
1597/* tp->lock is held. */
b28f389d 1598static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
95e2869a 1599{
b28f389d 1600 u32 reg, val;
95e2869a
MC
1601
1602 val = 0;
1603 if (!tg3_readphy(tp, MII_BMCR, &reg))
1604 val = reg << 16;
1605 if (!tg3_readphy(tp, MII_BMSR, &reg))
1606 val |= (reg & 0xffff);
b28f389d 1607 *data++ = val;
95e2869a
MC
1608
1609 val = 0;
1610 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1611 val = reg << 16;
1612 if (!tg3_readphy(tp, MII_LPA, &reg))
1613 val |= (reg & 0xffff);
b28f389d 1614 *data++ = val;
95e2869a
MC
1615
1616 val = 0;
f07e9af3 1617 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
95e2869a
MC
1618 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1619 val = reg << 16;
1620 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1621 val |= (reg & 0xffff);
1622 }
b28f389d 1623 *data++ = val;
95e2869a
MC
1624
1625 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1626 val = reg << 16;
1627 else
1628 val = 0;
b28f389d
MC
1629 *data++ = val;
1630}
1631
1632/* tp->lock is held. */
1633static void tg3_ump_link_report(struct tg3 *tp)
1634{
1635 u32 data[4];
1636
1637 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1638 return;
1639
1640 tg3_phy_gather_ump_data(tp, data);
1641
1642 tg3_wait_for_event_ack(tp);
1643
1644 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1645 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1646 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1647 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1648 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1649 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
95e2869a 1650
4ba526ce 1651 tg3_generate_fw_event(tp);
95e2869a
MC
1652}
1653
8d5a89b3
MC
1654/* tp->lock is held. */
1655static void tg3_stop_fw(struct tg3 *tp)
1656{
1657 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1658 /* Wait for RX cpu to ACK the previous event. */
1659 tg3_wait_for_event_ack(tp);
1660
1661 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1662
1663 tg3_generate_fw_event(tp);
1664
1665 /* Wait for RX cpu to ACK this event. */
1666 tg3_wait_for_event_ack(tp);
1667 }
1668}
1669
fd6d3f0e
MC
1670/* tp->lock is held. */
1671static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1672{
1673 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1674 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1675
1676 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1677 switch (kind) {
1678 case RESET_KIND_INIT:
1679 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1680 DRV_STATE_START);
1681 break;
1682
1683 case RESET_KIND_SHUTDOWN:
1684 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1685 DRV_STATE_UNLOAD);
1686 break;
1687
1688 case RESET_KIND_SUSPEND:
1689 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1690 DRV_STATE_SUSPEND);
1691 break;
1692
1693 default:
1694 break;
1695 }
1696 }
1697
1698 if (kind == RESET_KIND_INIT ||
1699 kind == RESET_KIND_SUSPEND)
1700 tg3_ape_driver_state_change(tp, kind);
1701}
1702
1703/* tp->lock is held. */
1704static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1705{
1706 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1707 switch (kind) {
1708 case RESET_KIND_INIT:
1709 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1710 DRV_STATE_START_DONE);
1711 break;
1712
1713 case RESET_KIND_SHUTDOWN:
1714 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1715 DRV_STATE_UNLOAD_DONE);
1716 break;
1717
1718 default:
1719 break;
1720 }
1721 }
1722
1723 if (kind == RESET_KIND_SHUTDOWN)
1724 tg3_ape_driver_state_change(tp, kind);
1725}
1726
1727/* tp->lock is held. */
1728static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1729{
1730 if (tg3_flag(tp, ENABLE_ASF)) {
1731 switch (kind) {
1732 case RESET_KIND_INIT:
1733 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734 DRV_STATE_START);
1735 break;
1736
1737 case RESET_KIND_SHUTDOWN:
1738 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1739 DRV_STATE_UNLOAD);
1740 break;
1741
1742 case RESET_KIND_SUSPEND:
1743 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1744 DRV_STATE_SUSPEND);
1745 break;
1746
1747 default:
1748 break;
1749 }
1750 }
1751}
1752
1753static int tg3_poll_fw(struct tg3 *tp)
1754{
1755 int i;
1756 u32 val;
1757
1758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1759 /* Wait up to 20ms for init done. */
1760 for (i = 0; i < 200; i++) {
1761 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1762 return 0;
1763 udelay(100);
1764 }
1765 return -ENODEV;
1766 }
1767
1768 /* Wait for firmware initialization to complete. */
1769 for (i = 0; i < 100000; i++) {
1770 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1771 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1772 break;
1773 udelay(10);
1774 }
1775
1776 /* Chip might not be fitted with firmware. Some Sun onboard
1777 * parts are configured like that. So don't signal the timeout
1778 * of the above loop as an error, but do report the lack of
1779 * running firmware once.
1780 */
1781 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1782 tg3_flag_set(tp, NO_FWARE_REPORTED);
1783
1784 netdev_info(tp->dev, "No firmware running\n");
1785 }
1786
1787 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1788 /* The 57765 A0 needs a little more
1789 * time to do some important work.
1790 */
1791 mdelay(10);
1792 }
1793
1794 return 0;
1795}
1796
95e2869a
MC
1797static void tg3_link_report(struct tg3 *tp)
1798{
1799 if (!netif_carrier_ok(tp->dev)) {
05dbe005 1800 netif_info(tp, link, tp->dev, "Link is down\n");
95e2869a
MC
1801 tg3_ump_link_report(tp);
1802 } else if (netif_msg_link(tp)) {
05dbe005
JP
1803 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1804 (tp->link_config.active_speed == SPEED_1000 ?
1805 1000 :
1806 (tp->link_config.active_speed == SPEED_100 ?
1807 100 : 10)),
1808 (tp->link_config.active_duplex == DUPLEX_FULL ?
1809 "full" : "half"));
1810
1811 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1812 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1813 "on" : "off",
1814 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1815 "on" : "off");
47007831
MC
1816
1817 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1818 netdev_info(tp->dev, "EEE is %s\n",
1819 tp->setlpicnt ? "enabled" : "disabled");
1820
95e2869a
MC
1821 tg3_ump_link_report(tp);
1822 }
1823}
1824
95e2869a
MC
1825static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1826{
1827 u16 miireg;
1828
e18ce346 1829 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
95e2869a 1830 miireg = ADVERTISE_1000XPAUSE;
e18ce346 1831 else if (flow_ctrl & FLOW_CTRL_TX)
95e2869a 1832 miireg = ADVERTISE_1000XPSE_ASYM;
e18ce346 1833 else if (flow_ctrl & FLOW_CTRL_RX)
95e2869a
MC
1834 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1835 else
1836 miireg = 0;
1837
1838 return miireg;
1839}
1840
95e2869a
MC
1841static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1842{
1843 u8 cap = 0;
1844
f3791cdf
MC
1845 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1846 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1847 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1848 if (lcladv & ADVERTISE_1000XPAUSE)
1849 cap = FLOW_CTRL_RX;
1850 if (rmtadv & ADVERTISE_1000XPAUSE)
e18ce346 1851 cap = FLOW_CTRL_TX;
95e2869a
MC
1852 }
1853
1854 return cap;
1855}
1856
f51f3562 1857static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
95e2869a 1858{
b02fd9e3 1859 u8 autoneg;
f51f3562 1860 u8 flowctrl = 0;
95e2869a
MC
1861 u32 old_rx_mode = tp->rx_mode;
1862 u32 old_tx_mode = tp->tx_mode;
1863
63c3a66f 1864 if (tg3_flag(tp, USE_PHYLIB))
3f0e3ad7 1865 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
b02fd9e3
MC
1866 else
1867 autoneg = tp->link_config.autoneg;
1868
63c3a66f 1869 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
f07e9af3 1870 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
f51f3562 1871 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
95e2869a 1872 else
bc02ff95 1873 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
f51f3562
MC
1874 } else
1875 flowctrl = tp->link_config.flowctrl;
95e2869a 1876
f51f3562 1877 tp->link_config.active_flowctrl = flowctrl;
95e2869a 1878
e18ce346 1879 if (flowctrl & FLOW_CTRL_RX)
95e2869a
MC
1880 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1881 else
1882 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1883
f51f3562 1884 if (old_rx_mode != tp->rx_mode)
95e2869a 1885 tw32_f(MAC_RX_MODE, tp->rx_mode);
95e2869a 1886
e18ce346 1887 if (flowctrl & FLOW_CTRL_TX)
95e2869a
MC
1888 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1889 else
1890 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1891
f51f3562 1892 if (old_tx_mode != tp->tx_mode)
95e2869a 1893 tw32_f(MAC_TX_MODE, tp->tx_mode);
95e2869a
MC
1894}
1895
b02fd9e3
MC
1896static void tg3_adjust_link(struct net_device *dev)
1897{
1898 u8 oldflowctrl, linkmesg = 0;
1899 u32 mac_mode, lcl_adv, rmt_adv;
1900 struct tg3 *tp = netdev_priv(dev);
3f0e3ad7 1901 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 1902
24bb4fb6 1903 spin_lock_bh(&tp->lock);
b02fd9e3
MC
1904
1905 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1906 MAC_MODE_HALF_DUPLEX);
1907
1908 oldflowctrl = tp->link_config.active_flowctrl;
1909
1910 if (phydev->link) {
1911 lcl_adv = 0;
1912 rmt_adv = 0;
1913
1914 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1915 mac_mode |= MAC_MODE_PORT_MODE_MII;
c3df0748
MC
1916 else if (phydev->speed == SPEED_1000 ||
1917 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
b02fd9e3 1918 mac_mode |= MAC_MODE_PORT_MODE_GMII;
c3df0748
MC
1919 else
1920 mac_mode |= MAC_MODE_PORT_MODE_MII;
b02fd9e3
MC
1921
1922 if (phydev->duplex == DUPLEX_HALF)
1923 mac_mode |= MAC_MODE_HALF_DUPLEX;
1924 else {
f88788f0 1925 lcl_adv = mii_advertise_flowctrl(
b02fd9e3
MC
1926 tp->link_config.flowctrl);
1927
1928 if (phydev->pause)
1929 rmt_adv = LPA_PAUSE_CAP;
1930 if (phydev->asym_pause)
1931 rmt_adv |= LPA_PAUSE_ASYM;
1932 }
1933
1934 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1935 } else
1936 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1937
1938 if (mac_mode != tp->mac_mode) {
1939 tp->mac_mode = mac_mode;
1940 tw32_f(MAC_MODE, tp->mac_mode);
1941 udelay(40);
1942 }
1943
fcb389df
MC
1944 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1945 if (phydev->speed == SPEED_10)
1946 tw32(MAC_MI_STAT,
1947 MAC_MI_STAT_10MBPS_MODE |
1948 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1949 else
1950 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1951 }
1952
b02fd9e3
MC
1953 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1954 tw32(MAC_TX_LENGTHS,
1955 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1956 (6 << TX_LENGTHS_IPG_SHIFT) |
1957 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1958 else
1959 tw32(MAC_TX_LENGTHS,
1960 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1961 (6 << TX_LENGTHS_IPG_SHIFT) |
1962 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1963
34655ad6 1964 if (phydev->link != tp->old_link ||
b02fd9e3
MC
1965 phydev->speed != tp->link_config.active_speed ||
1966 phydev->duplex != tp->link_config.active_duplex ||
1967 oldflowctrl != tp->link_config.active_flowctrl)
c6cdf436 1968 linkmesg = 1;
b02fd9e3 1969
34655ad6 1970 tp->old_link = phydev->link;
b02fd9e3
MC
1971 tp->link_config.active_speed = phydev->speed;
1972 tp->link_config.active_duplex = phydev->duplex;
1973
24bb4fb6 1974 spin_unlock_bh(&tp->lock);
b02fd9e3
MC
1975
1976 if (linkmesg)
1977 tg3_link_report(tp);
1978}
1979
1980static int tg3_phy_init(struct tg3 *tp)
1981{
1982 struct phy_device *phydev;
1983
f07e9af3 1984 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
b02fd9e3
MC
1985 return 0;
1986
1987 /* Bring the PHY back to a known state. */
1988 tg3_bmcr_reset(tp);
1989
3f0e3ad7 1990 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3
MC
1991
1992 /* Attach the MAC to the PHY. */
fb28ad35 1993 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
a9daf367 1994 phydev->dev_flags, phydev->interface);
b02fd9e3 1995 if (IS_ERR(phydev)) {
ab96b241 1996 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
b02fd9e3
MC
1997 return PTR_ERR(phydev);
1998 }
1999
b02fd9e3 2000 /* Mask with MAC supported features. */
9c61d6bc
MC
2001 switch (phydev->interface) {
2002 case PHY_INTERFACE_MODE_GMII:
2003 case PHY_INTERFACE_MODE_RGMII:
f07e9af3 2004 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
321d32a0
MC
2005 phydev->supported &= (PHY_GBIT_FEATURES |
2006 SUPPORTED_Pause |
2007 SUPPORTED_Asym_Pause);
2008 break;
2009 }
2010 /* fallthru */
9c61d6bc
MC
2011 case PHY_INTERFACE_MODE_MII:
2012 phydev->supported &= (PHY_BASIC_FEATURES |
2013 SUPPORTED_Pause |
2014 SUPPORTED_Asym_Pause);
2015 break;
2016 default:
3f0e3ad7 2017 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9c61d6bc
MC
2018 return -EINVAL;
2019 }
2020
f07e9af3 2021 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
b02fd9e3
MC
2022
2023 phydev->advertising = phydev->supported;
2024
b02fd9e3
MC
2025 return 0;
2026}
2027
2028static void tg3_phy_start(struct tg3 *tp)
2029{
2030 struct phy_device *phydev;
2031
f07e9af3 2032 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3
MC
2033 return;
2034
3f0e3ad7 2035 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 2036
80096068
MC
2037 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2038 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
c6700ce2
MC
2039 phydev->speed = tp->link_config.speed;
2040 phydev->duplex = tp->link_config.duplex;
2041 phydev->autoneg = tp->link_config.autoneg;
2042 phydev->advertising = tp->link_config.advertising;
b02fd9e3
MC
2043 }
2044
2045 phy_start(phydev);
2046
2047 phy_start_aneg(phydev);
2048}
2049
2050static void tg3_phy_stop(struct tg3 *tp)
2051{
f07e9af3 2052 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3
MC
2053 return;
2054
3f0e3ad7 2055 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
b02fd9e3
MC
2056}
2057
2058static void tg3_phy_fini(struct tg3 *tp)
2059{
f07e9af3 2060 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
3f0e3ad7 2061 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
f07e9af3 2062 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
b02fd9e3
MC
2063 }
2064}
2065
941ec90f
MC
2066static int tg3_phy_set_extloopbk(struct tg3 *tp)
2067{
2068 int err;
2069 u32 val;
2070
2071 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2072 return 0;
2073
2074 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2075 /* Cannot do read-modify-write on 5401 */
2076 err = tg3_phy_auxctl_write(tp,
2077 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2078 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2079 0x4c20);
2080 goto done;
2081 }
2082
2083 err = tg3_phy_auxctl_read(tp,
2084 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2085 if (err)
2086 return err;
2087
2088 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2089 err = tg3_phy_auxctl_write(tp,
2090 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2091
2092done:
2093 return err;
2094}
2095
7f97a4bd
MC
2096static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2097{
2098 u32 phytest;
2099
2100 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2101 u32 phy;
2102
2103 tg3_writephy(tp, MII_TG3_FET_TEST,
2104 phytest | MII_TG3_FET_SHADOW_EN);
2105 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2106 if (enable)
2107 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2108 else
2109 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2110 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2111 }
2112 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2113 }
2114}
2115
6833c043
MC
2116static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2117{
2118 u32 reg;
2119
63c3a66f
JP
2120 if (!tg3_flag(tp, 5705_PLUS) ||
2121 (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 2122 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
6833c043
MC
2123 return;
2124
f07e9af3 2125 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7f97a4bd
MC
2126 tg3_phy_fet_toggle_apd(tp, enable);
2127 return;
2128 }
2129
6833c043
MC
2130 reg = MII_TG3_MISC_SHDW_WREN |
2131 MII_TG3_MISC_SHDW_SCR5_SEL |
2132 MII_TG3_MISC_SHDW_SCR5_LPED |
2133 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2134 MII_TG3_MISC_SHDW_SCR5_SDTL |
2135 MII_TG3_MISC_SHDW_SCR5_C125OE;
2136 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2137 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2138
2139 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2140
2141
2142 reg = MII_TG3_MISC_SHDW_WREN |
2143 MII_TG3_MISC_SHDW_APD_SEL |
2144 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2145 if (enable)
2146 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2147
2148 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2149}
2150
9ef8ca99
MC
2151static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2152{
2153 u32 phy;
2154
63c3a66f 2155 if (!tg3_flag(tp, 5705_PLUS) ||
f07e9af3 2156 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9ef8ca99
MC
2157 return;
2158
f07e9af3 2159 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
9ef8ca99
MC
2160 u32 ephy;
2161
535ef6e1
MC
2162 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2163 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2164
2165 tg3_writephy(tp, MII_TG3_FET_TEST,
2166 ephy | MII_TG3_FET_SHADOW_EN);
2167 if (!tg3_readphy(tp, reg, &phy)) {
9ef8ca99 2168 if (enable)
535ef6e1 2169 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
9ef8ca99 2170 else
535ef6e1
MC
2171 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2172 tg3_writephy(tp, reg, phy);
9ef8ca99 2173 }
535ef6e1 2174 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
9ef8ca99
MC
2175 }
2176 } else {
15ee95c3
MC
2177 int ret;
2178
2179 ret = tg3_phy_auxctl_read(tp,
2180 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2181 if (!ret) {
9ef8ca99
MC
2182 if (enable)
2183 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2184 else
2185 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
b4bd2929
MC
2186 tg3_phy_auxctl_write(tp,
2187 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
9ef8ca99
MC
2188 }
2189 }
2190}
2191
1da177e4
LT
2192static void tg3_phy_set_wirespeed(struct tg3 *tp)
2193{
15ee95c3 2194 int ret;
1da177e4
LT
2195 u32 val;
2196
f07e9af3 2197 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1da177e4
LT
2198 return;
2199
15ee95c3
MC
2200 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2201 if (!ret)
b4bd2929
MC
2202 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2203 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1da177e4
LT
2204}
2205
b2a5c19c
MC
2206static void tg3_phy_apply_otp(struct tg3 *tp)
2207{
2208 u32 otp, phy;
2209
2210 if (!tp->phy_otp)
2211 return;
2212
2213 otp = tp->phy_otp;
2214
1d36ba45
MC
2215 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2216 return;
b2a5c19c
MC
2217
2218 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2219 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2220 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2221
2222 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2223 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2224 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2225
2226 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2227 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2228 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2229
2230 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2231 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2232
2233 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2234 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2235
2236 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2237 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2238 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2239
1d36ba45 2240 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
b2a5c19c
MC
2241}
2242
52b02d04
MC
2243static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2244{
2245 u32 val;
2246
2247 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2248 return;
2249
2250 tp->setlpicnt = 0;
2251
2252 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2253 current_link_up == 1 &&
a6b68dab
MC
2254 tp->link_config.active_duplex == DUPLEX_FULL &&
2255 (tp->link_config.active_speed == SPEED_100 ||
2256 tp->link_config.active_speed == SPEED_1000)) {
52b02d04
MC
2257 u32 eeectl;
2258
2259 if (tp->link_config.active_speed == SPEED_1000)
2260 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2261 else
2262 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2263
2264 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2265
3110f5f5
MC
2266 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2267 TG3_CL45_D7_EEERES_STAT, &val);
52b02d04 2268
b0c5943f
MC
2269 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2270 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
52b02d04
MC
2271 tp->setlpicnt = 2;
2272 }
2273
2274 if (!tp->setlpicnt) {
b715ce94
MC
2275 if (current_link_up == 1 &&
2276 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2277 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2278 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2279 }
2280
52b02d04
MC
2281 val = tr32(TG3_CPMU_EEE_MODE);
2282 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2283 }
2284}
2285
b0c5943f
MC
2286static void tg3_phy_eee_enable(struct tg3 *tp)
2287{
2288 u32 val;
2289
2290 if (tp->link_config.active_speed == SPEED_1000 &&
2291 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
55086ad9 2293 tg3_flag(tp, 57765_CLASS)) &&
b0c5943f 2294 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
b715ce94
MC
2295 val = MII_TG3_DSP_TAP26_ALNOKO |
2296 MII_TG3_DSP_TAP26_RMRXSTO;
2297 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
b0c5943f
MC
2298 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2299 }
2300
2301 val = tr32(TG3_CPMU_EEE_MODE);
2302 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2303}
2304
1da177e4
LT
2305static int tg3_wait_macro_done(struct tg3 *tp)
2306{
2307 int limit = 100;
2308
2309 while (limit--) {
2310 u32 tmp32;
2311
f08aa1a8 2312 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1da177e4
LT
2313 if ((tmp32 & 0x1000) == 0)
2314 break;
2315 }
2316 }
d4675b52 2317 if (limit < 0)
1da177e4
LT
2318 return -EBUSY;
2319
2320 return 0;
2321}
2322
2323static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2324{
2325 static const u32 test_pat[4][6] = {
2326 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2327 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2328 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2329 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2330 };
2331 int chan;
2332
2333 for (chan = 0; chan < 4; chan++) {
2334 int i;
2335
2336 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2337 (chan * 0x2000) | 0x0200);
f08aa1a8 2338 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1da177e4
LT
2339
2340 for (i = 0; i < 6; i++)
2341 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2342 test_pat[chan][i]);
2343
f08aa1a8 2344 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1da177e4
LT
2345 if (tg3_wait_macro_done(tp)) {
2346 *resetp = 1;
2347 return -EBUSY;
2348 }
2349
2350 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2351 (chan * 0x2000) | 0x0200);
f08aa1a8 2352 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1da177e4
LT
2353 if (tg3_wait_macro_done(tp)) {
2354 *resetp = 1;
2355 return -EBUSY;
2356 }
2357
f08aa1a8 2358 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1da177e4
LT
2359 if (tg3_wait_macro_done(tp)) {
2360 *resetp = 1;
2361 return -EBUSY;
2362 }
2363
2364 for (i = 0; i < 6; i += 2) {
2365 u32 low, high;
2366
2367 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2368 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2369 tg3_wait_macro_done(tp)) {
2370 *resetp = 1;
2371 return -EBUSY;
2372 }
2373 low &= 0x7fff;
2374 high &= 0x000f;
2375 if (low != test_pat[chan][i] ||
2376 high != test_pat[chan][i+1]) {
2377 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2378 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2379 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2380
2381 return -EBUSY;
2382 }
2383 }
2384 }
2385
2386 return 0;
2387}
2388
2389static int tg3_phy_reset_chanpat(struct tg3 *tp)
2390{
2391 int chan;
2392
2393 for (chan = 0; chan < 4; chan++) {
2394 int i;
2395
2396 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2397 (chan * 0x2000) | 0x0200);
f08aa1a8 2398 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1da177e4
LT
2399 for (i = 0; i < 6; i++)
2400 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
f08aa1a8 2401 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1da177e4
LT
2402 if (tg3_wait_macro_done(tp))
2403 return -EBUSY;
2404 }
2405
2406 return 0;
2407}
2408
2409static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2410{
2411 u32 reg32, phy9_orig;
2412 int retries, do_phy_reset, err;
2413
2414 retries = 10;
2415 do_phy_reset = 1;
2416 do {
2417 if (do_phy_reset) {
2418 err = tg3_bmcr_reset(tp);
2419 if (err)
2420 return err;
2421 do_phy_reset = 0;
2422 }
2423
2424 /* Disable transmitter and interrupt. */
2425 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2426 continue;
2427
2428 reg32 |= 0x3000;
2429 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2430
2431 /* Set full-duplex, 1000 mbps. */
2432 tg3_writephy(tp, MII_BMCR,
221c5637 2433 BMCR_FULLDPLX | BMCR_SPEED1000);
1da177e4
LT
2434
2435 /* Set to master mode. */
221c5637 2436 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
1da177e4
LT
2437 continue;
2438
221c5637
MC
2439 tg3_writephy(tp, MII_CTRL1000,
2440 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1da177e4 2441
1d36ba45
MC
2442 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2443 if (err)
2444 return err;
1da177e4
LT
2445
2446 /* Block the PHY control access. */
6ee7c0a0 2447 tg3_phydsp_write(tp, 0x8005, 0x0800);
1da177e4
LT
2448
2449 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2450 if (!err)
2451 break;
2452 } while (--retries);
2453
2454 err = tg3_phy_reset_chanpat(tp);
2455 if (err)
2456 return err;
2457
6ee7c0a0 2458 tg3_phydsp_write(tp, 0x8005, 0x0000);
1da177e4
LT
2459
2460 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
f08aa1a8 2461 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1da177e4 2462
1d36ba45 2463 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1da177e4 2464
221c5637 2465 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
1da177e4
LT
2466
2467 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2468 reg32 &= ~0x3000;
2469 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2470 } else if (!err)
2471 err = -EBUSY;
2472
2473 return err;
2474}
2475
2476/* This will reset the tigon3 PHY if there is no valid
2477 * link unless the FORCE argument is non-zero.
2478 */
2479static int tg3_phy_reset(struct tg3 *tp)
2480{
f833c4c1 2481 u32 val, cpmuctrl;
1da177e4
LT
2482 int err;
2483
60189ddf 2484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
2485 val = tr32(GRC_MISC_CFG);
2486 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2487 udelay(40);
2488 }
f833c4c1
MC
2489 err = tg3_readphy(tp, MII_BMSR, &val);
2490 err |= tg3_readphy(tp, MII_BMSR, &val);
1da177e4
LT
2491 if (err != 0)
2492 return -EBUSY;
2493
c8e1e82b
MC
2494 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2495 netif_carrier_off(tp->dev);
2496 tg3_link_report(tp);
2497 }
2498
1da177e4
LT
2499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2500 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2501 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2502 err = tg3_phy_reset_5703_4_5(tp);
2503 if (err)
2504 return err;
2505 goto out;
2506 }
2507
b2a5c19c
MC
2508 cpmuctrl = 0;
2509 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2510 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2511 cpmuctrl = tr32(TG3_CPMU_CTRL);
2512 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2513 tw32(TG3_CPMU_CTRL,
2514 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2515 }
2516
1da177e4
LT
2517 err = tg3_bmcr_reset(tp);
2518 if (err)
2519 return err;
2520
b2a5c19c 2521 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
f833c4c1
MC
2522 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2523 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
b2a5c19c
MC
2524
2525 tw32(TG3_CPMU_CTRL, cpmuctrl);
2526 }
2527
bcb37f6c
MC
2528 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2529 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
ce057f01
MC
2530 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2531 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2532 CPMU_LSPD_1000MB_MACCLK_12_5) {
2533 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2534 udelay(40);
2535 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2536 }
2537 }
2538
63c3a66f 2539 if (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 2540 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
ecf1410b
MC
2541 return 0;
2542
b2a5c19c
MC
2543 tg3_phy_apply_otp(tp);
2544
f07e9af3 2545 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
6833c043
MC
2546 tg3_phy_toggle_apd(tp, true);
2547 else
2548 tg3_phy_toggle_apd(tp, false);
2549
1da177e4 2550out:
1d36ba45
MC
2551 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2552 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
6ee7c0a0
MC
2553 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2554 tg3_phydsp_write(tp, 0x000a, 0x0323);
1d36ba45 2555 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1da177e4 2556 }
1d36ba45 2557
f07e9af3 2558 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
f08aa1a8
MC
2559 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2560 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1da177e4 2561 }
1d36ba45 2562
f07e9af3 2563 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
1d36ba45
MC
2564 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2565 tg3_phydsp_write(tp, 0x000a, 0x310b);
2566 tg3_phydsp_write(tp, 0x201f, 0x9506);
2567 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2568 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2569 }
f07e9af3 2570 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
1d36ba45
MC
2571 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2572 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2573 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2574 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2575 tg3_writephy(tp, MII_TG3_TEST1,
2576 MII_TG3_TEST1_TRIM_EN | 0x4);
2577 } else
2578 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2579
2580 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2581 }
c424cb24 2582 }
1d36ba45 2583
1da177e4
LT
2584 /* Set Extended packet length bit (bit 14) on all chips that */
2585 /* support jumbo frames */
79eb6904 2586 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4 2587 /* Cannot do read-modify-write on 5401 */
b4bd2929 2588 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
63c3a66f 2589 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
1da177e4 2590 /* Set bit 14 with read-modify-write to preserve other bits */
15ee95c3
MC
2591 err = tg3_phy_auxctl_read(tp,
2592 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2593 if (!err)
b4bd2929
MC
2594 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2595 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
1da177e4
LT
2596 }
2597
2598 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2599 * jumbo frames transmission.
2600 */
63c3a66f 2601 if (tg3_flag(tp, JUMBO_CAPABLE)) {
f833c4c1 2602 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
c6cdf436 2603 tg3_writephy(tp, MII_TG3_EXT_CTRL,
f833c4c1 2604 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1da177e4
LT
2605 }
2606
715116a1 2607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1 2608 /* adjust output voltage */
535ef6e1 2609 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
715116a1
MC
2610 }
2611
9ef8ca99 2612 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
2613 tg3_phy_set_wirespeed(tp);
2614 return 0;
2615}
2616
3a1e19d3
MC
2617#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2618#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2619#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2620 TG3_GPIO_MSG_NEED_VAUX)
2621#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2622 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2623 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2624 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2625 (TG3_GPIO_MSG_DRVR_PRES << 12))
2626
2627#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2628 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2629 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2630 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2631 (TG3_GPIO_MSG_NEED_VAUX << 12))
2632
2633static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2634{
2635 u32 status, shift;
2636
2637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2639 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2640 else
2641 status = tr32(TG3_CPMU_DRV_STATUS);
2642
2643 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2644 status &= ~(TG3_GPIO_MSG_MASK << shift);
2645 status |= (newstat << shift);
2646
2647 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2648 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2649 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2650 else
2651 tw32(TG3_CPMU_DRV_STATUS, status);
2652
2653 return status >> TG3_APE_GPIO_MSG_SHIFT;
2654}
2655
520b2756
MC
2656static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2657{
2658 if (!tg3_flag(tp, IS_NIC))
2659 return 0;
2660
3a1e19d3
MC
2661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2663 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2664 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2665 return -EIO;
520b2756 2666
3a1e19d3
MC
2667 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2668
2669 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2670 TG3_GRC_LCLCTL_PWRSW_DELAY);
2671
2672 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2673 } else {
2674 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2675 TG3_GRC_LCLCTL_PWRSW_DELAY);
2676 }
6f5c8f83 2677
520b2756
MC
2678 return 0;
2679}
2680
2681static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2682{
2683 u32 grc_local_ctrl;
2684
2685 if (!tg3_flag(tp, IS_NIC) ||
2686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2688 return;
2689
2690 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2691
2692 tw32_wait_f(GRC_LOCAL_CTRL,
2693 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2694 TG3_GRC_LCLCTL_PWRSW_DELAY);
2695
2696 tw32_wait_f(GRC_LOCAL_CTRL,
2697 grc_local_ctrl,
2698 TG3_GRC_LCLCTL_PWRSW_DELAY);
2699
2700 tw32_wait_f(GRC_LOCAL_CTRL,
2701 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2702 TG3_GRC_LCLCTL_PWRSW_DELAY);
2703}
2704
2705static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2706{
2707 if (!tg3_flag(tp, IS_NIC))
2708 return;
2709
2710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2711 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2712 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2713 (GRC_LCLCTRL_GPIO_OE0 |
2714 GRC_LCLCTRL_GPIO_OE1 |
2715 GRC_LCLCTRL_GPIO_OE2 |
2716 GRC_LCLCTRL_GPIO_OUTPUT0 |
2717 GRC_LCLCTRL_GPIO_OUTPUT1),
2718 TG3_GRC_LCLCTL_PWRSW_DELAY);
2719 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2720 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2721 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2722 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2723 GRC_LCLCTRL_GPIO_OE1 |
2724 GRC_LCLCTRL_GPIO_OE2 |
2725 GRC_LCLCTRL_GPIO_OUTPUT0 |
2726 GRC_LCLCTRL_GPIO_OUTPUT1 |
2727 tp->grc_local_ctrl;
2728 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2729 TG3_GRC_LCLCTL_PWRSW_DELAY);
2730
2731 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2732 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2733 TG3_GRC_LCLCTL_PWRSW_DELAY);
2734
2735 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2736 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2737 TG3_GRC_LCLCTL_PWRSW_DELAY);
2738 } else {
2739 u32 no_gpio2;
2740 u32 grc_local_ctrl = 0;
2741
2742 /* Workaround to prevent overdrawing Amps. */
2743 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2744 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2745 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2746 grc_local_ctrl,
2747 TG3_GRC_LCLCTL_PWRSW_DELAY);
2748 }
2749
2750 /* On 5753 and variants, GPIO2 cannot be used. */
2751 no_gpio2 = tp->nic_sram_data_cfg &
2752 NIC_SRAM_DATA_CFG_NO_GPIO2;
2753
2754 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2755 GRC_LCLCTRL_GPIO_OE1 |
2756 GRC_LCLCTRL_GPIO_OE2 |
2757 GRC_LCLCTRL_GPIO_OUTPUT1 |
2758 GRC_LCLCTRL_GPIO_OUTPUT2;
2759 if (no_gpio2) {
2760 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2761 GRC_LCLCTRL_GPIO_OUTPUT2);
2762 }
2763 tw32_wait_f(GRC_LOCAL_CTRL,
2764 tp->grc_local_ctrl | grc_local_ctrl,
2765 TG3_GRC_LCLCTL_PWRSW_DELAY);
2766
2767 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2768
2769 tw32_wait_f(GRC_LOCAL_CTRL,
2770 tp->grc_local_ctrl | grc_local_ctrl,
2771 TG3_GRC_LCLCTL_PWRSW_DELAY);
2772
2773 if (!no_gpio2) {
2774 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2775 tw32_wait_f(GRC_LOCAL_CTRL,
2776 tp->grc_local_ctrl | grc_local_ctrl,
2777 TG3_GRC_LCLCTL_PWRSW_DELAY);
2778 }
2779 }
3a1e19d3
MC
2780}
2781
cd0d7228 2782static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
3a1e19d3
MC
2783{
2784 u32 msg = 0;
2785
2786 /* Serialize power state transitions */
2787 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2788 return;
2789
cd0d7228 2790 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
3a1e19d3
MC
2791 msg = TG3_GPIO_MSG_NEED_VAUX;
2792
2793 msg = tg3_set_function_status(tp, msg);
2794
2795 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2796 goto done;
6f5c8f83 2797
3a1e19d3
MC
2798 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2799 tg3_pwrsrc_switch_to_vaux(tp);
2800 else
2801 tg3_pwrsrc_die_with_vmain(tp);
2802
2803done:
6f5c8f83 2804 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
520b2756
MC
2805}
2806
cd0d7228 2807static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
1da177e4 2808{
683644b7 2809 bool need_vaux = false;
1da177e4 2810
334355aa 2811 /* The GPIOs do something completely different on 57765. */
55086ad9 2812 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
1da177e4
LT
2813 return;
2814
3a1e19d3
MC
2815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2816 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
cd0d7228
MC
2818 tg3_frob_aux_power_5717(tp, include_wol ?
2819 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
3a1e19d3
MC
2820 return;
2821 }
2822
2823 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
8c2dc7e1
MC
2824 struct net_device *dev_peer;
2825
2826 dev_peer = pci_get_drvdata(tp->pdev_peer);
683644b7 2827
bc1c7567 2828 /* remove_one() may have been run on the peer. */
683644b7
MC
2829 if (dev_peer) {
2830 struct tg3 *tp_peer = netdev_priv(dev_peer);
2831
63c3a66f 2832 if (tg3_flag(tp_peer, INIT_COMPLETE))
683644b7
MC
2833 return;
2834
cd0d7228 2835 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
63c3a66f 2836 tg3_flag(tp_peer, ENABLE_ASF))
683644b7
MC
2837 need_vaux = true;
2838 }
1da177e4
LT
2839 }
2840
cd0d7228
MC
2841 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2842 tg3_flag(tp, ENABLE_ASF))
683644b7
MC
2843 need_vaux = true;
2844
520b2756
MC
2845 if (need_vaux)
2846 tg3_pwrsrc_switch_to_vaux(tp);
2847 else
2848 tg3_pwrsrc_die_with_vmain(tp);
1da177e4
LT
2849}
2850
e8f3f6ca
MC
2851static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2852{
2853 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2854 return 1;
79eb6904 2855 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
e8f3f6ca
MC
2856 if (speed != SPEED_10)
2857 return 1;
2858 } else if (speed == SPEED_10)
2859 return 1;
2860
2861 return 0;
2862}
2863
0a459aac 2864static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
15c3b696 2865{
ce057f01
MC
2866 u32 val;
2867
f07e9af3 2868 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
5129724a
MC
2869 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2870 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2871 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2872
2873 sg_dig_ctrl |=
2874 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2875 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2876 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2877 }
3f7045c1 2878 return;
5129724a 2879 }
3f7045c1 2880
60189ddf 2881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
2882 tg3_bmcr_reset(tp);
2883 val = tr32(GRC_MISC_CFG);
2884 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2885 udelay(40);
2886 return;
f07e9af3 2887 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
0e5f784c
MC
2888 u32 phytest;
2889 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2890 u32 phy;
2891
2892 tg3_writephy(tp, MII_ADVERTISE, 0);
2893 tg3_writephy(tp, MII_BMCR,
2894 BMCR_ANENABLE | BMCR_ANRESTART);
2895
2896 tg3_writephy(tp, MII_TG3_FET_TEST,
2897 phytest | MII_TG3_FET_SHADOW_EN);
2898 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2899 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2900 tg3_writephy(tp,
2901 MII_TG3_FET_SHDW_AUXMODE4,
2902 phy);
2903 }
2904 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2905 }
2906 return;
0a459aac 2907 } else if (do_low_power) {
715116a1
MC
2908 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2909 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
0a459aac 2910
b4bd2929
MC
2911 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2912 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2913 MII_TG3_AUXCTL_PCTL_VREG_11V;
2914 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
715116a1 2915 }
3f7045c1 2916
15c3b696
MC
2917 /* The PHY should not be powered down on some chips because
2918 * of bugs.
2919 */
2920 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2921 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2922 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
085f1afc
MC
2923 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2924 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2925 !tp->pci_fn))
15c3b696 2926 return;
ce057f01 2927
bcb37f6c
MC
2928 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2929 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
ce057f01
MC
2930 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2931 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2932 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2933 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2934 }
2935
15c3b696
MC
2936 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2937}
2938
ffbcfed4
MC
2939/* tp->lock is held. */
2940static int tg3_nvram_lock(struct tg3 *tp)
2941{
63c3a66f 2942 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
2943 int i;
2944
2945 if (tp->nvram_lock_cnt == 0) {
2946 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2947 for (i = 0; i < 8000; i++) {
2948 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2949 break;
2950 udelay(20);
2951 }
2952 if (i == 8000) {
2953 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2954 return -ENODEV;
2955 }
2956 }
2957 tp->nvram_lock_cnt++;
2958 }
2959 return 0;
2960}
2961
2962/* tp->lock is held. */
2963static void tg3_nvram_unlock(struct tg3 *tp)
2964{
63c3a66f 2965 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
2966 if (tp->nvram_lock_cnt > 0)
2967 tp->nvram_lock_cnt--;
2968 if (tp->nvram_lock_cnt == 0)
2969 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2970 }
2971}
2972
2973/* tp->lock is held. */
2974static void tg3_enable_nvram_access(struct tg3 *tp)
2975{
63c3a66f 2976 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
2977 u32 nvaccess = tr32(NVRAM_ACCESS);
2978
2979 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2980 }
2981}
2982
2983/* tp->lock is held. */
2984static void tg3_disable_nvram_access(struct tg3 *tp)
2985{
63c3a66f 2986 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
2987 u32 nvaccess = tr32(NVRAM_ACCESS);
2988
2989 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2990 }
2991}
2992
2993static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2994 u32 offset, u32 *val)
2995{
2996 u32 tmp;
2997 int i;
2998
2999 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3000 return -EINVAL;
3001
3002 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3003 EEPROM_ADDR_DEVID_MASK |
3004 EEPROM_ADDR_READ);
3005 tw32(GRC_EEPROM_ADDR,
3006 tmp |
3007 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3008 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3009 EEPROM_ADDR_ADDR_MASK) |
3010 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3011
3012 for (i = 0; i < 1000; i++) {
3013 tmp = tr32(GRC_EEPROM_ADDR);
3014
3015 if (tmp & EEPROM_ADDR_COMPLETE)
3016 break;
3017 msleep(1);
3018 }
3019 if (!(tmp & EEPROM_ADDR_COMPLETE))
3020 return -EBUSY;
3021
62cedd11
MC
3022 tmp = tr32(GRC_EEPROM_DATA);
3023
3024 /*
3025 * The data will always be opposite the native endian
3026 * format. Perform a blind byteswap to compensate.
3027 */
3028 *val = swab32(tmp);
3029
ffbcfed4
MC
3030 return 0;
3031}
3032
3033#define NVRAM_CMD_TIMEOUT 10000
3034
3035static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3036{
3037 int i;
3038
3039 tw32(NVRAM_CMD, nvram_cmd);
3040 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3041 udelay(10);
3042 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3043 udelay(10);
3044 break;
3045 }
3046 }
3047
3048 if (i == NVRAM_CMD_TIMEOUT)
3049 return -EBUSY;
3050
3051 return 0;
3052}
3053
3054static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3055{
63c3a66f
JP
3056 if (tg3_flag(tp, NVRAM) &&
3057 tg3_flag(tp, NVRAM_BUFFERED) &&
3058 tg3_flag(tp, FLASH) &&
3059 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
3060 (tp->nvram_jedecnum == JEDEC_ATMEL))
3061
3062 addr = ((addr / tp->nvram_pagesize) <<
3063 ATMEL_AT45DB0X1B_PAGE_POS) +
3064 (addr % tp->nvram_pagesize);
3065
3066 return addr;
3067}
3068
3069static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3070{
63c3a66f
JP
3071 if (tg3_flag(tp, NVRAM) &&
3072 tg3_flag(tp, NVRAM_BUFFERED) &&
3073 tg3_flag(tp, FLASH) &&
3074 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
3075 (tp->nvram_jedecnum == JEDEC_ATMEL))
3076
3077 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3078 tp->nvram_pagesize) +
3079 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3080
3081 return addr;
3082}
3083
e4f34110
MC
3084/* NOTE: Data read in from NVRAM is byteswapped according to
3085 * the byteswapping settings for all other register accesses.
3086 * tg3 devices are BE devices, so on a BE machine, the data
3087 * returned will be exactly as it is seen in NVRAM. On a LE
3088 * machine, the 32-bit value will be byteswapped.
3089 */
ffbcfed4
MC
3090static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3091{
3092 int ret;
3093
63c3a66f 3094 if (!tg3_flag(tp, NVRAM))
ffbcfed4
MC
3095 return tg3_nvram_read_using_eeprom(tp, offset, val);
3096
3097 offset = tg3_nvram_phys_addr(tp, offset);
3098
3099 if (offset > NVRAM_ADDR_MSK)
3100 return -EINVAL;
3101
3102 ret = tg3_nvram_lock(tp);
3103 if (ret)
3104 return ret;
3105
3106 tg3_enable_nvram_access(tp);
3107
3108 tw32(NVRAM_ADDR, offset);
3109 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3110 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3111
3112 if (ret == 0)
e4f34110 3113 *val = tr32(NVRAM_RDDATA);
ffbcfed4
MC
3114
3115 tg3_disable_nvram_access(tp);
3116
3117 tg3_nvram_unlock(tp);
3118
3119 return ret;
3120}
3121
a9dc529d
MC
3122/* Ensures NVRAM data is in bytestream format. */
3123static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
ffbcfed4
MC
3124{
3125 u32 v;
a9dc529d 3126 int res = tg3_nvram_read(tp, offset, &v);
ffbcfed4 3127 if (!res)
a9dc529d 3128 *val = cpu_to_be32(v);
ffbcfed4
MC
3129 return res;
3130}
3131
dbe9b92a
MC
3132static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3133 u32 offset, u32 len, u8 *buf)
3134{
3135 int i, j, rc = 0;
3136 u32 val;
3137
3138 for (i = 0; i < len; i += 4) {
3139 u32 addr;
3140 __be32 data;
3141
3142 addr = offset + i;
3143
3144 memcpy(&data, buf + i, 4);
3145
3146 /*
3147 * The SEEPROM interface expects the data to always be opposite
3148 * the native endian format. We accomplish this by reversing
3149 * all the operations that would have been performed on the
3150 * data from a call to tg3_nvram_read_be32().
3151 */
3152 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3153
3154 val = tr32(GRC_EEPROM_ADDR);
3155 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3156
3157 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3158 EEPROM_ADDR_READ);
3159 tw32(GRC_EEPROM_ADDR, val |
3160 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3161 (addr & EEPROM_ADDR_ADDR_MASK) |
3162 EEPROM_ADDR_START |
3163 EEPROM_ADDR_WRITE);
3164
3165 for (j = 0; j < 1000; j++) {
3166 val = tr32(GRC_EEPROM_ADDR);
3167
3168 if (val & EEPROM_ADDR_COMPLETE)
3169 break;
3170 msleep(1);
3171 }
3172 if (!(val & EEPROM_ADDR_COMPLETE)) {
3173 rc = -EBUSY;
3174 break;
3175 }
3176 }
3177
3178 return rc;
3179}
3180
3181/* offset and length are dword aligned */
3182static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3183 u8 *buf)
3184{
3185 int ret = 0;
3186 u32 pagesize = tp->nvram_pagesize;
3187 u32 pagemask = pagesize - 1;
3188 u32 nvram_cmd;
3189 u8 *tmp;
3190
3191 tmp = kmalloc(pagesize, GFP_KERNEL);
3192 if (tmp == NULL)
3193 return -ENOMEM;
3194
3195 while (len) {
3196 int j;
3197 u32 phy_addr, page_off, size;
3198
3199 phy_addr = offset & ~pagemask;
3200
3201 for (j = 0; j < pagesize; j += 4) {
3202 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3203 (__be32 *) (tmp + j));
3204 if (ret)
3205 break;
3206 }
3207 if (ret)
3208 break;
3209
3210 page_off = offset & pagemask;
3211 size = pagesize;
3212 if (len < size)
3213 size = len;
3214
3215 len -= size;
3216
3217 memcpy(tmp + page_off, buf, size);
3218
3219 offset = offset + (pagesize - page_off);
3220
3221 tg3_enable_nvram_access(tp);
3222
3223 /*
3224 * Before we can erase the flash page, we need
3225 * to issue a special "write enable" command.
3226 */
3227 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3228
3229 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3230 break;
3231
3232 /* Erase the target page */
3233 tw32(NVRAM_ADDR, phy_addr);
3234
3235 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3236 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3237
3238 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3239 break;
3240
3241 /* Issue another write enable to start the write. */
3242 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3243
3244 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3245 break;
3246
3247 for (j = 0; j < pagesize; j += 4) {
3248 __be32 data;
3249
3250 data = *((__be32 *) (tmp + j));
3251
3252 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3253
3254 tw32(NVRAM_ADDR, phy_addr + j);
3255
3256 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3257 NVRAM_CMD_WR;
3258
3259 if (j == 0)
3260 nvram_cmd |= NVRAM_CMD_FIRST;
3261 else if (j == (pagesize - 4))
3262 nvram_cmd |= NVRAM_CMD_LAST;
3263
3264 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3265 if (ret)
3266 break;
3267 }
3268 if (ret)
3269 break;
3270 }
3271
3272 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3273 tg3_nvram_exec_cmd(tp, nvram_cmd);
3274
3275 kfree(tmp);
3276
3277 return ret;
3278}
3279
3280/* offset and length are dword aligned */
3281static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3282 u8 *buf)
3283{
3284 int i, ret = 0;
3285
3286 for (i = 0; i < len; i += 4, offset += 4) {
3287 u32 page_off, phy_addr, nvram_cmd;
3288 __be32 data;
3289
3290 memcpy(&data, buf + i, 4);
3291 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3292
3293 page_off = offset % tp->nvram_pagesize;
3294
3295 phy_addr = tg3_nvram_phys_addr(tp, offset);
3296
dbe9b92a
MC
3297 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3298
3299 if (page_off == 0 || i == 0)
3300 nvram_cmd |= NVRAM_CMD_FIRST;
3301 if (page_off == (tp->nvram_pagesize - 4))
3302 nvram_cmd |= NVRAM_CMD_LAST;
3303
3304 if (i == (len - 4))
3305 nvram_cmd |= NVRAM_CMD_LAST;
3306
42278224
MC
3307 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3308 !tg3_flag(tp, FLASH) ||
3309 !tg3_flag(tp, 57765_PLUS))
3310 tw32(NVRAM_ADDR, phy_addr);
3311
dbe9b92a
MC
3312 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3313 !tg3_flag(tp, 5755_PLUS) &&
3314 (tp->nvram_jedecnum == JEDEC_ST) &&
3315 (nvram_cmd & NVRAM_CMD_FIRST)) {
3316 u32 cmd;
3317
3318 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3319 ret = tg3_nvram_exec_cmd(tp, cmd);
3320 if (ret)
3321 break;
3322 }
3323 if (!tg3_flag(tp, FLASH)) {
3324 /* We always do complete word writes to eeprom. */
3325 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3326 }
3327
3328 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3329 if (ret)
3330 break;
3331 }
3332 return ret;
3333}
3334
3335/* offset and length are dword aligned */
3336static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3337{
3338 int ret;
3339
3340 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3341 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3342 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3343 udelay(40);
3344 }
3345
3346 if (!tg3_flag(tp, NVRAM)) {
3347 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3348 } else {
3349 u32 grc_mode;
3350
3351 ret = tg3_nvram_lock(tp);
3352 if (ret)
3353 return ret;
3354
3355 tg3_enable_nvram_access(tp);
3356 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3357 tw32(NVRAM_WRITE1, 0x406);
3358
3359 grc_mode = tr32(GRC_MODE);
3360 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3361
3362 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3363 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3364 buf);
3365 } else {
3366 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3367 buf);
3368 }
3369
3370 grc_mode = tr32(GRC_MODE);
3371 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3372
3373 tg3_disable_nvram_access(tp);
3374 tg3_nvram_unlock(tp);
3375 }
3376
3377 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3378 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3379 udelay(40);
3380 }
3381
3382 return ret;
3383}
3384
997b4f13
MC
3385#define RX_CPU_SCRATCH_BASE 0x30000
3386#define RX_CPU_SCRATCH_SIZE 0x04000
3387#define TX_CPU_SCRATCH_BASE 0x34000
3388#define TX_CPU_SCRATCH_SIZE 0x04000
3389
3390/* tp->lock is held. */
3391static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3392{
3393 int i;
3394
3395 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3396
3397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3398 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3399
3400 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3401 return 0;
3402 }
3403 if (offset == RX_CPU_BASE) {
3404 for (i = 0; i < 10000; i++) {
3405 tw32(offset + CPU_STATE, 0xffffffff);
3406 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3407 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3408 break;
3409 }
3410
3411 tw32(offset + CPU_STATE, 0xffffffff);
3412 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3413 udelay(10);
3414 } else {
3415 for (i = 0; i < 10000; i++) {
3416 tw32(offset + CPU_STATE, 0xffffffff);
3417 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3418 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3419 break;
3420 }
3421 }
3422
3423 if (i >= 10000) {
3424 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3425 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3426 return -ENODEV;
3427 }
3428
3429 /* Clear firmware's nvram arbitration. */
3430 if (tg3_flag(tp, NVRAM))
3431 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3432 return 0;
3433}
3434
3435struct fw_info {
3436 unsigned int fw_base;
3437 unsigned int fw_len;
3438 const __be32 *fw_data;
3439};
3440
3441/* tp->lock is held. */
3442static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3443 u32 cpu_scratch_base, int cpu_scratch_size,
3444 struct fw_info *info)
3445{
3446 int err, lock_err, i;
3447 void (*write_op)(struct tg3 *, u32, u32);
3448
3449 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3450 netdev_err(tp->dev,
3451 "%s: Trying to load TX cpu firmware which is 5705\n",
3452 __func__);
3453 return -EINVAL;
3454 }
3455
3456 if (tg3_flag(tp, 5705_PLUS))
3457 write_op = tg3_write_mem;
3458 else
3459 write_op = tg3_write_indirect_reg32;
3460
3461 /* It is possible that bootcode is still loading at this point.
3462 * Get the nvram lock first before halting the cpu.
3463 */
3464 lock_err = tg3_nvram_lock(tp);
3465 err = tg3_halt_cpu(tp, cpu_base);
3466 if (!lock_err)
3467 tg3_nvram_unlock(tp);
3468 if (err)
3469 goto out;
3470
3471 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3472 write_op(tp, cpu_scratch_base + i, 0);
3473 tw32(cpu_base + CPU_STATE, 0xffffffff);
3474 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3475 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3476 write_op(tp, (cpu_scratch_base +
3477 (info->fw_base & 0xffff) +
3478 (i * sizeof(u32))),
3479 be32_to_cpu(info->fw_data[i]));
3480
3481 err = 0;
3482
3483out:
3484 return err;
3485}
3486
3487/* tp->lock is held. */
3488static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3489{
3490 struct fw_info info;
3491 const __be32 *fw_data;
3492 int err, i;
3493
3494 fw_data = (void *)tp->fw->data;
3495
3496 /* Firmware blob starts with version numbers, followed by
3497 start address and length. We are setting complete length.
3498 length = end_address_of_bss - start_address_of_text.
3499 Remainder is the blob to be loaded contiguously
3500 from start address. */
3501
3502 info.fw_base = be32_to_cpu(fw_data[1]);
3503 info.fw_len = tp->fw->size - 12;
3504 info.fw_data = &fw_data[3];
3505
3506 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3507 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3508 &info);
3509 if (err)
3510 return err;
3511
3512 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3513 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3514 &info);
3515 if (err)
3516 return err;
3517
3518 /* Now startup only the RX cpu. */
3519 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3520 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3521
3522 for (i = 0; i < 5; i++) {
3523 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3524 break;
3525 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3526 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3527 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3528 udelay(1000);
3529 }
3530 if (i >= 5) {
3531 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3532 "should be %08x\n", __func__,
3533 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3534 return -ENODEV;
3535 }
3536 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3537 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3538
3539 return 0;
3540}
3541
3542/* tp->lock is held. */
3543static int tg3_load_tso_firmware(struct tg3 *tp)
3544{
3545 struct fw_info info;
3546 const __be32 *fw_data;
3547 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3548 int err, i;
3549
3550 if (tg3_flag(tp, HW_TSO_1) ||
3551 tg3_flag(tp, HW_TSO_2) ||
3552 tg3_flag(tp, HW_TSO_3))
3553 return 0;
3554
3555 fw_data = (void *)tp->fw->data;
3556
3557 /* Firmware blob starts with version numbers, followed by
3558 start address and length. We are setting complete length.
3559 length = end_address_of_bss - start_address_of_text.
3560 Remainder is the blob to be loaded contiguously
3561 from start address. */
3562
3563 info.fw_base = be32_to_cpu(fw_data[1]);
3564 cpu_scratch_size = tp->fw_len;
3565 info.fw_len = tp->fw->size - 12;
3566 info.fw_data = &fw_data[3];
3567
3568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3569 cpu_base = RX_CPU_BASE;
3570 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3571 } else {
3572 cpu_base = TX_CPU_BASE;
3573 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3574 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3575 }
3576
3577 err = tg3_load_firmware_cpu(tp, cpu_base,
3578 cpu_scratch_base, cpu_scratch_size,
3579 &info);
3580 if (err)
3581 return err;
3582
3583 /* Now startup the cpu. */
3584 tw32(cpu_base + CPU_STATE, 0xffffffff);
3585 tw32_f(cpu_base + CPU_PC, info.fw_base);
3586
3587 for (i = 0; i < 5; i++) {
3588 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3589 break;
3590 tw32(cpu_base + CPU_STATE, 0xffffffff);
3591 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3592 tw32_f(cpu_base + CPU_PC, info.fw_base);
3593 udelay(1000);
3594 }
3595 if (i >= 5) {
3596 netdev_err(tp->dev,
3597 "%s fails to set CPU PC, is %08x should be %08x\n",
3598 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3599 return -ENODEV;
3600 }
3601 tw32(cpu_base + CPU_STATE, 0xffffffff);
3602 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3603 return 0;
3604}
3605
3606
3f007891
MC
3607/* tp->lock is held. */
3608static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3609{
3610 u32 addr_high, addr_low;
3611 int i;
3612
3613 addr_high = ((tp->dev->dev_addr[0] << 8) |
3614 tp->dev->dev_addr[1]);
3615 addr_low = ((tp->dev->dev_addr[2] << 24) |
3616 (tp->dev->dev_addr[3] << 16) |
3617 (tp->dev->dev_addr[4] << 8) |
3618 (tp->dev->dev_addr[5] << 0));
3619 for (i = 0; i < 4; i++) {
3620 if (i == 1 && skip_mac_1)
3621 continue;
3622 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3623 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3624 }
3625
3626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3628 for (i = 0; i < 12; i++) {
3629 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3630 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3631 }
3632 }
3633
3634 addr_high = (tp->dev->dev_addr[0] +
3635 tp->dev->dev_addr[1] +
3636 tp->dev->dev_addr[2] +
3637 tp->dev->dev_addr[3] +
3638 tp->dev->dev_addr[4] +
3639 tp->dev->dev_addr[5]) &
3640 TX_BACKOFF_SEED_MASK;
3641 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3642}
3643
c866b7ea 3644static void tg3_enable_register_access(struct tg3 *tp)
1da177e4 3645{
c866b7ea
RW
3646 /*
3647 * Make sure register accesses (indirect or otherwise) will function
3648 * correctly.
1da177e4
LT
3649 */
3650 pci_write_config_dword(tp->pdev,
c866b7ea
RW
3651 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3652}
1da177e4 3653
c866b7ea
RW
3654static int tg3_power_up(struct tg3 *tp)
3655{
bed9829f 3656 int err;
8c6bda1a 3657
bed9829f 3658 tg3_enable_register_access(tp);
1da177e4 3659
bed9829f
MC
3660 err = pci_set_power_state(tp->pdev, PCI_D0);
3661 if (!err) {
3662 /* Switch out of Vaux if it is a NIC */
3663 tg3_pwrsrc_switch_to_vmain(tp);
3664 } else {
3665 netdev_err(tp->dev, "Transition to D0 failed\n");
3666 }
1da177e4 3667
bed9829f 3668 return err;
c866b7ea 3669}
1da177e4 3670
4b409522
MC
3671static int tg3_setup_phy(struct tg3 *, int);
3672
c866b7ea
RW
/* Prepare the chip for entry into a low-power state: mask PCI
 * interrupts, reprogram the PHY advertisement for Wake-on-LAN,
 * configure the MAC for magic-packet reception, gate unneeded
 * clocks, power down the PHY when nothing needs it, and signal
 * shutdown to the firmware.  Returns 0.
 * NOTE(review): callers appear to invoke this from suspend/close
 * paths — confirm the required locking context.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Mask PCI interrupts while powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib-managed PHY: restrict advertisement to the WoL
		 * speeds and restart autoneg through the phy_device.
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current link settings for resume. */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHYs take the low-power
			 * programming sequence below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for firmware to report ready;
		 * NOTE(review): the result is deliberately ignored.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				/* Enable PHY wake-on-LAN power saving. */
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate clocks as aggressively as the chip family allows. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step programming with settle delays. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down when neither WoL nor ASF needs it. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
12dac075 3902
c866b7ea
RW
3903static void tg3_power_down(struct tg3 *tp)
3904{
3905 tg3_power_down_prepare(tp);
1da177e4 3906
63c3a66f 3907 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
c866b7ea 3908 pci_set_power_state(tp->pdev, PCI_D3hot);
1da177e4
LT
3909}
3910
1da177e4
LT
3911static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3912{
3913 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3914 case MII_TG3_AUX_STAT_10HALF:
3915 *speed = SPEED_10;
3916 *duplex = DUPLEX_HALF;
3917 break;
3918
3919 case MII_TG3_AUX_STAT_10FULL:
3920 *speed = SPEED_10;
3921 *duplex = DUPLEX_FULL;
3922 break;
3923
3924 case MII_TG3_AUX_STAT_100HALF:
3925 *speed = SPEED_100;
3926 *duplex = DUPLEX_HALF;
3927 break;
3928
3929 case MII_TG3_AUX_STAT_100FULL:
3930 *speed = SPEED_100;
3931 *duplex = DUPLEX_FULL;
3932 break;
3933
3934 case MII_TG3_AUX_STAT_1000HALF:
3935 *speed = SPEED_1000;
3936 *duplex = DUPLEX_HALF;
3937 break;
3938
3939 case MII_TG3_AUX_STAT_1000FULL:
3940 *speed = SPEED_1000;
3941 *duplex = DUPLEX_FULL;
3942 break;
3943
3944 default:
f07e9af3 3945 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
715116a1
MC
3946 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3947 SPEED_10;
3948 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3949 DUPLEX_HALF;
3950 break;
3951 }
e740522e
MC
3952 *speed = SPEED_UNKNOWN;
3953 *duplex = DUPLEX_UNKNOWN;
1da177e4 3954 break;
855e1111 3955 }
1da177e4
LT
3956}
3957
/* Program the PHY's autonegotiation advertisement registers from the
 * ethtool-style @advertise mask and @flowctrl flags, then configure
 * EEE advertisement and per-ASIC EEE DSP workarounds where the PHY
 * supports EEE.  Returns 0 on success or the first PHY write error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	/* Gigabit advertisement, unless the port is 10/100 only. */
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 require forced-master mode for 1000BASE-T. */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI requests while reprogramming EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			/* 5720 (and the chips above) also need the
			 * CH34TP2 high-bandwidth bit set.
			 */
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Report the SMDSP-disable error only if nothing else
		 * failed first.
		 */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4030
4031static void tg3_phy_copper_begin(struct tg3 *tp)
4032{
d13ba512
MC
4033 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4034 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4035 u32 adv, fc;
4036
4037 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4038 adv = ADVERTISED_10baseT_Half |
4039 ADVERTISED_10baseT_Full;
4040 if (tg3_flag(tp, WOL_SPEED_100MB))
4041 adv |= ADVERTISED_100baseT_Half |
4042 ADVERTISED_100baseT_Full;
4043
4044 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
42b64a45 4045 } else {
d13ba512
MC
4046 adv = tp->link_config.advertising;
4047 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4048 adv &= ~(ADVERTISED_1000baseT_Half |
4049 ADVERTISED_1000baseT_Full);
4050
4051 fc = tp->link_config.flowctrl;
52b02d04 4052 }
52b02d04 4053
d13ba512 4054 tg3_phy_autoneg_cfg(tp, adv, fc);
52b02d04 4055
d13ba512
MC
4056 tg3_writephy(tp, MII_BMCR,
4057 BMCR_ANENABLE | BMCR_ANRESTART);
4058 } else {
4059 int i;
1da177e4
LT
4060 u32 bmcr, orig_bmcr;
4061
4062 tp->link_config.active_speed = tp->link_config.speed;
4063 tp->link_config.active_duplex = tp->link_config.duplex;
4064
4065 bmcr = 0;
4066 switch (tp->link_config.speed) {
4067 default:
4068 case SPEED_10:
4069 break;
4070
4071 case SPEED_100:
4072 bmcr |= BMCR_SPEED100;
4073 break;
4074
4075 case SPEED_1000:
221c5637 4076 bmcr |= BMCR_SPEED1000;
1da177e4 4077 break;
855e1111 4078 }
1da177e4
LT
4079
4080 if (tp->link_config.duplex == DUPLEX_FULL)
4081 bmcr |= BMCR_FULLDPLX;
4082
4083 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4084 (bmcr != orig_bmcr)) {
4085 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4086 for (i = 0; i < 1500; i++) {
4087 u32 tmp;
4088
4089 udelay(10);
4090 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4091 tg3_readphy(tp, MII_BMSR, &tmp))
4092 continue;
4093 if (!(tmp & BMSR_LSTATUS)) {
4094 udelay(40);
4095 break;
4096 }
4097 }
4098 tg3_writephy(tp, MII_BMCR, bmcr);
4099 udelay(40);
4100 }
1da177e4
LT
4101 }
4102}
4103
4104static int tg3_init_5401phy_dsp(struct tg3 *tp)
4105{
4106 int err;
4107
4108 /* Turn off tap power management. */
4109 /* Set Extended packet length bit */
b4bd2929 4110 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
1da177e4 4111
6ee7c0a0
MC
4112 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4113 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4114 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4115 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4116 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
1da177e4
LT
4117
4118 udelay(40);
4119
4120 return err;
4121}
4122
e2bf73e7 4123static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
1da177e4 4124{
e2bf73e7 4125 u32 advmsk, tgtadv, advertising;
3600d918 4126
e2bf73e7
MC
4127 advertising = tp->link_config.advertising;
4128 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
1da177e4 4129
e2bf73e7
MC
4130 advmsk = ADVERTISE_ALL;
4131 if (tp->link_config.active_duplex == DUPLEX_FULL) {
f88788f0 4132 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
e2bf73e7
MC
4133 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4134 }
1da177e4 4135
e2bf73e7
MC
4136 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4137 return false;
4138
4139 if ((*lcladv & advmsk) != tgtadv)
4140 return false;
b99d2a57 4141
f07e9af3 4142 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1da177e4
LT
4143 u32 tg3_ctrl;
4144
e2bf73e7 4145 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3600d918 4146
221c5637 4147 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
e2bf73e7 4148 return false;
1da177e4 4149
3198e07f
MC
4150 if (tgtadv &&
4151 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4152 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4153 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4154 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4155 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4156 } else {
4157 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4158 }
4159
e2bf73e7
MC
4160 if (tg3_ctrl != tgtadv)
4161 return false;
ef167e27
MC
4162 }
4163
e2bf73e7 4164 return true;
ef167e27
MC
4165}
4166
859edb26
MC
4167static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4168{
4169 u32 lpeth = 0;
4170
4171 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4172 u32 val;
4173
4174 if (tg3_readphy(tp, MII_STAT1000, &val))
4175 return false;
4176
4177 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4178 }
4179
4180 if (tg3_readphy(tp, MII_LPA, rmtadv))
4181 return false;
4182
4183 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4184 tp->link_config.rmt_adv = lpeth;
4185
4186 return true;
4187}
4188
1da177e4
LT
4189static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4190{
4191 int current_link_up;
f833c4c1 4192 u32 bmsr, val;
ef167e27 4193 u32 lcl_adv, rmt_adv;
1da177e4
LT
4194 u16 current_speed;
4195 u8 current_duplex;
4196 int i, err;
4197
4198 tw32(MAC_EVENT, 0);
4199
4200 tw32_f(MAC_STATUS,
4201 (MAC_STATUS_SYNC_CHANGED |
4202 MAC_STATUS_CFG_CHANGED |
4203 MAC_STATUS_MI_COMPLETION |
4204 MAC_STATUS_LNKSTATE_CHANGED));
4205 udelay(40);
4206
8ef21428
MC
4207 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4208 tw32_f(MAC_MI_MODE,
4209 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4210 udelay(80);
4211 }
1da177e4 4212
b4bd2929 4213 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
1da177e4
LT
4214
4215 /* Some third-party PHYs need to be reset on link going
4216 * down.
4217 */
4218 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4221 netif_carrier_ok(tp->dev)) {
4222 tg3_readphy(tp, MII_BMSR, &bmsr);
4223 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4224 !(bmsr & BMSR_LSTATUS))
4225 force_reset = 1;
4226 }
4227 if (force_reset)
4228 tg3_phy_reset(tp);
4229
79eb6904 4230 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4
LT
4231 tg3_readphy(tp, MII_BMSR, &bmsr);
4232 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
63c3a66f 4233 !tg3_flag(tp, INIT_COMPLETE))
1da177e4
LT
4234 bmsr = 0;
4235
4236 if (!(bmsr & BMSR_LSTATUS)) {
4237 err = tg3_init_5401phy_dsp(tp);
4238 if (err)
4239 return err;
4240
4241 tg3_readphy(tp, MII_BMSR, &bmsr);
4242 for (i = 0; i < 1000; i++) {
4243 udelay(10);
4244 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4245 (bmsr & BMSR_LSTATUS)) {
4246 udelay(40);
4247 break;
4248 }
4249 }
4250
79eb6904
MC
4251 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4252 TG3_PHY_REV_BCM5401_B0 &&
1da177e4
LT
4253 !(bmsr & BMSR_LSTATUS) &&
4254 tp->link_config.active_speed == SPEED_1000) {
4255 err = tg3_phy_reset(tp);
4256 if (!err)
4257 err = tg3_init_5401phy_dsp(tp);
4258 if (err)
4259 return err;
4260 }
4261 }
4262 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4263 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4264 /* 5701 {A0,B0} CRC bug workaround */
4265 tg3_writephy(tp, 0x15, 0x0a75);
f08aa1a8
MC
4266 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4267 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4268 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
1da177e4
LT
4269 }
4270
4271 /* Clear pending interrupts... */
f833c4c1
MC
4272 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4273 tg3_readphy(tp, MII_TG3_ISTAT, &val);
1da177e4 4274
f07e9af3 4275 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
1da177e4 4276 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
f07e9af3 4277 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
1da177e4
LT
4278 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4279
4280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4281 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4282 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4283 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4284 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4285 else
4286 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4287 }
4288
4289 current_link_up = 0;
e740522e
MC
4290 current_speed = SPEED_UNKNOWN;
4291 current_duplex = DUPLEX_UNKNOWN;
e348c5e7 4292 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
859edb26 4293 tp->link_config.rmt_adv = 0;
1da177e4 4294
f07e9af3 4295 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
15ee95c3
MC
4296 err = tg3_phy_auxctl_read(tp,
4297 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4298 &val);
4299 if (!err && !(val & (1 << 10))) {
b4bd2929
MC
4300 tg3_phy_auxctl_write(tp,
4301 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4302 val | (1 << 10));
1da177e4
LT
4303 goto relink;
4304 }
4305 }
4306
4307 bmsr = 0;
4308 for (i = 0; i < 100; i++) {
4309 tg3_readphy(tp, MII_BMSR, &bmsr);
4310 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4311 (bmsr & BMSR_LSTATUS))
4312 break;
4313 udelay(40);
4314 }
4315
4316 if (bmsr & BMSR_LSTATUS) {
4317 u32 aux_stat, bmcr;
4318
4319 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4320 for (i = 0; i < 2000; i++) {
4321 udelay(10);
4322 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4323 aux_stat)
4324 break;
4325 }
4326
4327 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4328 &current_speed,
4329 &current_duplex);
4330
4331 bmcr = 0;
4332 for (i = 0; i < 200; i++) {
4333 tg3_readphy(tp, MII_BMCR, &bmcr);
4334 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4335 continue;
4336 if (bmcr && bmcr != 0x7fff)
4337 break;
4338 udelay(10);
4339 }
4340
ef167e27
MC
4341 lcl_adv = 0;
4342 rmt_adv = 0;
1da177e4 4343
ef167e27
MC
4344 tp->link_config.active_speed = current_speed;
4345 tp->link_config.active_duplex = current_duplex;
4346
4347 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4348 if ((bmcr & BMCR_ANENABLE) &&
e2bf73e7 4349 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
859edb26 4350 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
e2bf73e7 4351 current_link_up = 1;
1da177e4
LT
4352 } else {
4353 if (!(bmcr & BMCR_ANENABLE) &&
4354 tp->link_config.speed == current_speed &&
ef167e27
MC
4355 tp->link_config.duplex == current_duplex &&
4356 tp->link_config.flowctrl ==
4357 tp->link_config.active_flowctrl) {
1da177e4 4358 current_link_up = 1;
1da177e4
LT
4359 }
4360 }
4361
ef167e27 4362 if (current_link_up == 1 &&
e348c5e7
MC
4363 tp->link_config.active_duplex == DUPLEX_FULL) {
4364 u32 reg, bit;
4365
4366 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4367 reg = MII_TG3_FET_GEN_STAT;
4368 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4369 } else {
4370 reg = MII_TG3_EXT_STAT;
4371 bit = MII_TG3_EXT_STAT_MDIX;
4372 }
4373
4374 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4375 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4376
ef167e27 4377 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
e348c5e7 4378 }
1da177e4
LT
4379 }
4380
1da177e4 4381relink:
80096068 4382 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
1da177e4
LT
4383 tg3_phy_copper_begin(tp);
4384
f833c4c1 4385 tg3_readphy(tp, MII_BMSR, &bmsr);
06c03c02
MB
4386 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4387 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
1da177e4
LT
4388 current_link_up = 1;
4389 }
4390
4391 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4392 if (current_link_up == 1) {
4393 if (tp->link_config.active_speed == SPEED_100 ||
4394 tp->link_config.active_speed == SPEED_10)
4395 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4396 else
4397 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
f07e9af3 4398 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7f97a4bd
MC
4399 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4400 else
1da177e4
LT
4401 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4402
4403 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4404 if (tp->link_config.active_duplex == DUPLEX_HALF)
4405 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4406
1da177e4 4407 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
4408 if (current_link_up == 1 &&
4409 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 4410 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
4411 else
4412 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
4413 }
4414
4415 /* ??? Without this setting Netgear GA302T PHY does not
4416 * ??? send/receive packets...
4417 */
79eb6904 4418 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
1da177e4
LT
4419 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4420 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4421 tw32_f(MAC_MI_MODE, tp->mi_mode);
4422 udelay(80);
4423 }
4424
4425 tw32_f(MAC_MODE, tp->mac_mode);
4426 udelay(40);
4427
52b02d04
MC
4428 tg3_phy_eee_adjust(tp, current_link_up);
4429
63c3a66f 4430 if (tg3_flag(tp, USE_LINKCHG_REG)) {
1da177e4
LT
4431 /* Polled via timer. */
4432 tw32_f(MAC_EVENT, 0);
4433 } else {
4434 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4435 }
4436 udelay(40);
4437
4438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4439 current_link_up == 1 &&
4440 tp->link_config.active_speed == SPEED_1000 &&
63c3a66f 4441 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
1da177e4
LT
4442 udelay(120);
4443 tw32_f(MAC_STATUS,
4444 (MAC_STATUS_SYNC_CHANGED |
4445 MAC_STATUS_CFG_CHANGED));
4446 udelay(40);
4447 tg3_write_mem(tp,
4448 NIC_SRAM_FIRMWARE_MBOX,
4449 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4450 }
4451
5e7dfd0f 4452 /* Prevent send BD corruption. */
63c3a66f 4453 if (tg3_flag(tp, CLKREQ_BUG)) {
5e7dfd0f
MC
4454 if (tp->link_config.active_speed == SPEED_100 ||
4455 tp->link_config.active_speed == SPEED_10)
0f49bfbd
JL
4456 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4457 PCI_EXP_LNKCTL_CLKREQ_EN);
5e7dfd0f 4458 else
0f49bfbd
JL
4459 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4460 PCI_EXP_LNKCTL_CLKREQ_EN);
5e7dfd0f
MC
4461 }
4462
1da177e4
LT
4463 if (current_link_up != netif_carrier_ok(tp->dev)) {
4464 if (current_link_up)
4465 netif_carrier_on(tp->dev);
4466 else
4467 netif_carrier_off(tp->dev);
4468 tg3_link_report(tp);
4469 }
4470
4471 return 0;
4472}
4473
4474struct tg3_fiber_aneginfo {
4475 int state;
4476#define ANEG_STATE_UNKNOWN 0
4477#define ANEG_STATE_AN_ENABLE 1
4478#define ANEG_STATE_RESTART_INIT 2
4479#define ANEG_STATE_RESTART 3
4480#define ANEG_STATE_DISABLE_LINK_OK 4
4481#define ANEG_STATE_ABILITY_DETECT_INIT 5
4482#define ANEG_STATE_ABILITY_DETECT 6
4483#define ANEG_STATE_ACK_DETECT_INIT 7
4484#define ANEG_STATE_ACK_DETECT 8
4485#define ANEG_STATE_COMPLETE_ACK_INIT 9
4486#define ANEG_STATE_COMPLETE_ACK 10
4487#define ANEG_STATE_IDLE_DETECT_INIT 11
4488#define ANEG_STATE_IDLE_DETECT 12
4489#define ANEG_STATE_LINK_OK 13
4490#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4491#define ANEG_STATE_NEXT_PAGE_WAIT 15
4492
4493 u32 flags;
4494#define MR_AN_ENABLE 0x00000001
4495#define MR_RESTART_AN 0x00000002
4496#define MR_AN_COMPLETE 0x00000004
4497#define MR_PAGE_RX 0x00000008
4498#define MR_NP_LOADED 0x00000010
4499#define MR_TOGGLE_TX 0x00000020
4500#define MR_LP_ADV_FULL_DUPLEX 0x00000040
4501#define MR_LP_ADV_HALF_DUPLEX 0x00000080
4502#define MR_LP_ADV_SYM_PAUSE 0x00000100
4503#define MR_LP_ADV_ASYM_PAUSE 0x00000200
4504#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4505#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4506#define MR_LP_ADV_NEXT_PAGE 0x00001000
4507#define MR_TOGGLE_RX 0x00002000
4508#define MR_NP_RX 0x00004000
4509
4510#define MR_LINK_OK 0x80000000
4511
4512 unsigned long link_time, cur_time;
4513
4514 u32 ability_match_cfg;
4515 int ability_match_count;
4516
4517 char ability_match, idle_match, ack_match;
4518
4519 u32 txconfig, rxconfig;
4520#define ANEG_CFG_NP 0x00000080
4521#define ANEG_CFG_ACK 0x00000040
4522#define ANEG_CFG_RF2 0x00000020
4523#define ANEG_CFG_RF1 0x00000010
4524#define ANEG_CFG_PS2 0x00000001
4525#define ANEG_CFG_PS1 0x00008000
4526#define ANEG_CFG_HD 0x00004000
4527#define ANEG_CFG_FD 0x00002000
4528#define ANEG_CFG_INVAL 0x00001f06
4529
4530};
4531#define ANEG_OK 0
4532#define ANEG_DONE 1
4533#define ANEG_TIMER_ENAB 2
4534#define ANEG_FAILED -1
4535
4536#define ANEG_STATE_SETTLE_TIME 10000
4537
4538static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4539 struct tg3_fiber_aneginfo *ap)
4540{
5be73b47 4541 u16 flowctrl;
1da177e4
LT
4542 unsigned long delta;
4543 u32 rx_cfg_reg;
4544 int ret;
4545
4546 if (ap->state == ANEG_STATE_UNKNOWN) {
4547 ap->rxconfig = 0;
4548 ap->link_time = 0;
4549 ap->cur_time = 0;
4550 ap->ability_match_cfg = 0;
4551 ap->ability_match_count = 0;
4552 ap->ability_match = 0;
4553 ap->idle_match = 0;
4554 ap->ack_match = 0;
4555 }
4556 ap->cur_time++;
4557
4558 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4559 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4560
4561 if (rx_cfg_reg != ap->ability_match_cfg) {
4562 ap->ability_match_cfg = rx_cfg_reg;
4563 ap->ability_match = 0;
4564 ap->ability_match_count = 0;
4565 } else {
4566 if (++ap->ability_match_count > 1) {
4567 ap->ability_match = 1;
4568 ap->ability_match_cfg = rx_cfg_reg;
4569 }
4570 }
4571 if (rx_cfg_reg & ANEG_CFG_ACK)
4572 ap->ack_match = 1;
4573 else
4574 ap->ack_match = 0;
4575
4576 ap->idle_match = 0;
4577 } else {
4578 ap->idle_match = 1;
4579 ap->ability_match_cfg = 0;
4580 ap->ability_match_count = 0;
4581 ap->ability_match = 0;
4582 ap->ack_match = 0;
4583
4584 rx_cfg_reg = 0;
4585 }
4586
4587 ap->rxconfig = rx_cfg_reg;
4588 ret = ANEG_OK;
4589
33f401ae 4590 switch (ap->state) {
1da177e4
LT
4591 case ANEG_STATE_UNKNOWN:
4592 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4593 ap->state = ANEG_STATE_AN_ENABLE;
4594
4595 /* fallthru */
4596 case ANEG_STATE_AN_ENABLE:
4597 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4598 if (ap->flags & MR_AN_ENABLE) {
4599 ap->link_time = 0;
4600 ap->cur_time = 0;
4601 ap->ability_match_cfg = 0;
4602 ap->ability_match_count = 0;
4603 ap->ability_match = 0;
4604 ap->idle_match = 0;
4605 ap->ack_match = 0;
4606
4607 ap->state = ANEG_STATE_RESTART_INIT;
4608 } else {
4609 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4610 }
4611 break;
4612
4613 case ANEG_STATE_RESTART_INIT:
4614 ap->link_time = ap->cur_time;
4615 ap->flags &= ~(MR_NP_LOADED);
4616 ap->txconfig = 0;
4617 tw32(MAC_TX_AUTO_NEG, 0);
4618 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4619 tw32_f(MAC_MODE, tp->mac_mode);
4620 udelay(40);
4621
4622 ret = ANEG_TIMER_ENAB;
4623 ap->state = ANEG_STATE_RESTART;
4624
4625 /* fallthru */
4626 case ANEG_STATE_RESTART:
4627 delta = ap->cur_time - ap->link_time;
859a5887 4628 if (delta > ANEG_STATE_SETTLE_TIME)
1da177e4 4629 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
859a5887 4630 else
1da177e4 4631 ret = ANEG_TIMER_ENAB;
1da177e4
LT
4632 break;
4633
4634 case ANEG_STATE_DISABLE_LINK_OK:
4635 ret = ANEG_DONE;
4636 break;
4637
4638 case ANEG_STATE_ABILITY_DETECT_INIT:
4639 ap->flags &= ~(MR_TOGGLE_TX);
5be73b47
MC
4640 ap->txconfig = ANEG_CFG_FD;
4641 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4642 if (flowctrl & ADVERTISE_1000XPAUSE)
4643 ap->txconfig |= ANEG_CFG_PS1;
4644 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4645 ap->txconfig |= ANEG_CFG_PS2;
1da177e4
LT
4646 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4647 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4648 tw32_f(MAC_MODE, tp->mac_mode);
4649 udelay(40);
4650
4651 ap->state = ANEG_STATE_ABILITY_DETECT;
4652 break;
4653
4654 case ANEG_STATE_ABILITY_DETECT:
859a5887 4655 if (ap->ability_match != 0 && ap->rxconfig != 0)
1da177e4 4656 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1da177e4
LT
4657 break;
4658
4659 case ANEG_STATE_ACK_DETECT_INIT:
4660 ap->txconfig |= ANEG_CFG_ACK;
4661 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4662 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4663 tw32_f(MAC_MODE, tp->mac_mode);
4664 udelay(40);
4665
4666 ap->state = ANEG_STATE_ACK_DETECT;
4667
4668 /* fallthru */
4669 case ANEG_STATE_ACK_DETECT:
4670 if (ap->ack_match != 0) {
4671 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4672 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4673 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4674 } else {
4675 ap->state = ANEG_STATE_AN_ENABLE;
4676 }
4677 } else if (ap->ability_match != 0 &&
4678 ap->rxconfig == 0) {
4679 ap->state = ANEG_STATE_AN_ENABLE;
4680 }
4681 break;
4682
4683 case ANEG_STATE_COMPLETE_ACK_INIT:
4684 if (ap->rxconfig & ANEG_CFG_INVAL) {
4685 ret = ANEG_FAILED;
4686 break;
4687 }
4688 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4689 MR_LP_ADV_HALF_DUPLEX |
4690 MR_LP_ADV_SYM_PAUSE |
4691 MR_LP_ADV_ASYM_PAUSE |
4692 MR_LP_ADV_REMOTE_FAULT1 |
4693 MR_LP_ADV_REMOTE_FAULT2 |
4694 MR_LP_ADV_NEXT_PAGE |
4695 MR_TOGGLE_RX |
4696 MR_NP_RX);
4697 if (ap->rxconfig & ANEG_CFG_FD)
4698 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4699 if (ap->rxconfig & ANEG_CFG_HD)
4700 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4701 if (ap->rxconfig & ANEG_CFG_PS1)
4702 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4703 if (ap->rxconfig & ANEG_CFG_PS2)
4704 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4705 if (ap->rxconfig & ANEG_CFG_RF1)
4706 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4707 if (ap->rxconfig & ANEG_CFG_RF2)
4708 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4709 if (ap->rxconfig & ANEG_CFG_NP)
4710 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4711
4712 ap->link_time = ap->cur_time;
4713
4714 ap->flags ^= (MR_TOGGLE_TX);
4715 if (ap->rxconfig & 0x0008)
4716 ap->flags |= MR_TOGGLE_RX;
4717 if (ap->rxconfig & ANEG_CFG_NP)
4718 ap->flags |= MR_NP_RX;
4719 ap->flags |= MR_PAGE_RX;
4720
4721 ap->state = ANEG_STATE_COMPLETE_ACK;
4722 ret = ANEG_TIMER_ENAB;
4723 break;
4724
4725 case ANEG_STATE_COMPLETE_ACK:
4726 if (ap->ability_match != 0 &&
4727 ap->rxconfig == 0) {
4728 ap->state = ANEG_STATE_AN_ENABLE;
4729 break;
4730 }
4731 delta = ap->cur_time - ap->link_time;
4732 if (delta > ANEG_STATE_SETTLE_TIME) {
4733 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4734 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4735 } else {
4736 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4737 !(ap->flags & MR_NP_RX)) {
4738 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4739 } else {
4740 ret = ANEG_FAILED;
4741 }
4742 }
4743 }
4744 break;
4745
4746 case ANEG_STATE_IDLE_DETECT_INIT:
4747 ap->link_time = ap->cur_time;
4748 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4749 tw32_f(MAC_MODE, tp->mac_mode);
4750 udelay(40);
4751
4752 ap->state = ANEG_STATE_IDLE_DETECT;
4753 ret = ANEG_TIMER_ENAB;
4754 break;
4755
4756 case ANEG_STATE_IDLE_DETECT:
4757 if (ap->ability_match != 0 &&
4758 ap->rxconfig == 0) {
4759 ap->state = ANEG_STATE_AN_ENABLE;
4760 break;
4761 }
4762 delta = ap->cur_time - ap->link_time;
4763 if (delta > ANEG_STATE_SETTLE_TIME) {
4764 /* XXX another gem from the Broadcom driver :( */
4765 ap->state = ANEG_STATE_LINK_OK;
4766 }
4767 break;
4768
4769 case ANEG_STATE_LINK_OK:
4770 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4771 ret = ANEG_DONE;
4772 break;
4773
4774 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4775 /* ??? unimplemented */
4776 break;
4777
4778 case ANEG_STATE_NEXT_PAGE_WAIT:
4779 /* ??? unimplemented */
4780 break;
4781
4782 default:
4783 ret = ANEG_FAILED;
4784 break;
855e1111 4785 }
1da177e4
LT
4786
4787 return ret;
4788}
4789
5be73b47 4790static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
1da177e4
LT
4791{
4792 int res = 0;
4793 struct tg3_fiber_aneginfo aninfo;
4794 int status = ANEG_FAILED;
4795 unsigned int tick;
4796 u32 tmp;
4797
4798 tw32_f(MAC_TX_AUTO_NEG, 0);
4799
4800 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4801 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4802 udelay(40);
4803
4804 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4805 udelay(40);
4806
4807 memset(&aninfo, 0, sizeof(aninfo));
4808 aninfo.flags |= MR_AN_ENABLE;
4809 aninfo.state = ANEG_STATE_UNKNOWN;
4810 aninfo.cur_time = 0;
4811 tick = 0;
4812 while (++tick < 195000) {
4813 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4814 if (status == ANEG_DONE || status == ANEG_FAILED)
4815 break;
4816
4817 udelay(1);
4818 }
4819
4820 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4821 tw32_f(MAC_MODE, tp->mac_mode);
4822 udelay(40);
4823
5be73b47
MC
4824 *txflags = aninfo.txconfig;
4825 *rxflags = aninfo.flags;
1da177e4
LT
4826
4827 if (status == ANEG_DONE &&
4828 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4829 MR_LP_ADV_FULL_DUPLEX)))
4830 res = 1;
4831
4832 return res;
4833}
4834
4835static void tg3_init_bcm8002(struct tg3 *tp)
4836{
4837 u32 mac_status = tr32(MAC_STATUS);
4838 int i;
4839
4840 /* Reset when initting first time or we have a link. */
63c3a66f 4841 if (tg3_flag(tp, INIT_COMPLETE) &&
1da177e4
LT
4842 !(mac_status & MAC_STATUS_PCS_SYNCED))
4843 return;
4844
4845 /* Set PLL lock range. */
4846 tg3_writephy(tp, 0x16, 0x8007);
4847
4848 /* SW reset */
4849 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4850
4851 /* Wait for reset to complete. */
4852 /* XXX schedule_timeout() ... */
4853 for (i = 0; i < 500; i++)
4854 udelay(10);
4855
4856 /* Config mode; select PMA/Ch 1 regs. */
4857 tg3_writephy(tp, 0x10, 0x8411);
4858
4859 /* Enable auto-lock and comdet, select txclk for tx. */
4860 tg3_writephy(tp, 0x11, 0x0a10);
4861
4862 tg3_writephy(tp, 0x18, 0x00a0);
4863 tg3_writephy(tp, 0x16, 0x41ff);
4864
4865 /* Assert and deassert POR. */
4866 tg3_writephy(tp, 0x13, 0x0400);
4867 udelay(40);
4868 tg3_writephy(tp, 0x13, 0x0000);
4869
4870 tg3_writephy(tp, 0x11, 0x0a50);
4871 udelay(40);
4872 tg3_writephy(tp, 0x11, 0x0a10);
4873
4874 /* Wait for signal to stabilize */
4875 /* XXX schedule_timeout() ... */
4876 for (i = 0; i < 15000; i++)
4877 udelay(10);
4878
4879 /* Deselect the channel register so we can read the PHYID
4880 * later.
4881 */
4882 tg3_writephy(tp, 0x10, 0x8011);
4883}
4884
4885static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4886{
82cd3d11 4887 u16 flowctrl;
1da177e4
LT
4888 u32 sg_dig_ctrl, sg_dig_status;
4889 u32 serdes_cfg, expected_sg_dig_ctrl;
4890 int workaround, port_a;
4891 int current_link_up;
4892
4893 serdes_cfg = 0;
4894 expected_sg_dig_ctrl = 0;
4895 workaround = 0;
4896 port_a = 1;
4897 current_link_up = 0;
4898
4899 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4900 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4901 workaround = 1;
4902 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4903 port_a = 0;
4904
4905 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4906 /* preserve bits 20-23 for voltage regulator */
4907 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4908 }
4909
4910 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4911
4912 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
c98f6e3b 4913 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
1da177e4
LT
4914 if (workaround) {
4915 u32 val = serdes_cfg;
4916
4917 if (port_a)
4918 val |= 0xc010000;
4919 else
4920 val |= 0x4010000;
4921 tw32_f(MAC_SERDES_CFG, val);
4922 }
c98f6e3b
MC
4923
4924 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
4925 }
4926 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4927 tg3_setup_flow_control(tp, 0, 0);
4928 current_link_up = 1;
4929 }
4930 goto out;
4931 }
4932
4933 /* Want auto-negotiation. */
c98f6e3b 4934 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
1da177e4 4935
82cd3d11
MC
4936 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4937 if (flowctrl & ADVERTISE_1000XPAUSE)
4938 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4939 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4940 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
1da177e4
LT
4941
4942 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
f07e9af3 4943 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3d3ebe74
MC
4944 tp->serdes_counter &&
4945 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4946 MAC_STATUS_RCVD_CFG)) ==
4947 MAC_STATUS_PCS_SYNCED)) {
4948 tp->serdes_counter--;
4949 current_link_up = 1;
4950 goto out;
4951 }
4952restart_autoneg:
1da177e4
LT
4953 if (workaround)
4954 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
c98f6e3b 4955 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
1da177e4
LT
4956 udelay(5);
4957 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4958
3d3ebe74 4959 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 4960 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
4961 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4962 MAC_STATUS_SIGNAL_DET)) {
3d3ebe74 4963 sg_dig_status = tr32(SG_DIG_STATUS);
1da177e4
LT
4964 mac_status = tr32(MAC_STATUS);
4965
c98f6e3b 4966 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
1da177e4 4967 (mac_status & MAC_STATUS_PCS_SYNCED)) {
82cd3d11
MC
4968 u32 local_adv = 0, remote_adv = 0;
4969
4970 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4971 local_adv |= ADVERTISE_1000XPAUSE;
4972 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4973 local_adv |= ADVERTISE_1000XPSE_ASYM;
1da177e4 4974
c98f6e3b 4975 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
82cd3d11 4976 remote_adv |= LPA_1000XPAUSE;
c98f6e3b 4977 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
82cd3d11 4978 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4 4979
859edb26
MC
4980 tp->link_config.rmt_adv =
4981 mii_adv_to_ethtool_adv_x(remote_adv);
4982
1da177e4
LT
4983 tg3_setup_flow_control(tp, local_adv, remote_adv);
4984 current_link_up = 1;
3d3ebe74 4985 tp->serdes_counter = 0;
f07e9af3 4986 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c98f6e3b 4987 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3d3ebe74
MC
4988 if (tp->serdes_counter)
4989 tp->serdes_counter--;
1da177e4
LT
4990 else {
4991 if (workaround) {
4992 u32 val = serdes_cfg;
4993
4994 if (port_a)
4995 val |= 0xc010000;
4996 else
4997 val |= 0x4010000;
4998
4999 tw32_f(MAC_SERDES_CFG, val);
5000 }
5001
c98f6e3b 5002 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
5003 udelay(40);
5004
5005 /* Link parallel detection - link is up */
5006 /* only if we have PCS_SYNC and not */
5007 /* receiving config code words */
5008 mac_status = tr32(MAC_STATUS);
5009 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5010 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5011 tg3_setup_flow_control(tp, 0, 0);
5012 current_link_up = 1;
f07e9af3
MC
5013 tp->phy_flags |=
5014 TG3_PHYFLG_PARALLEL_DETECT;
3d3ebe74
MC
5015 tp->serdes_counter =
5016 SERDES_PARALLEL_DET_TIMEOUT;
5017 } else
5018 goto restart_autoneg;
1da177e4
LT
5019 }
5020 }
3d3ebe74
MC
5021 } else {
5022 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 5023 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
5024 }
5025
5026out:
5027 return current_link_up;
5028}
5029
5030static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5031{
5032 int current_link_up = 0;
5033
5cf64b8a 5034 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 5035 goto out;
1da177e4
LT
5036
5037 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5be73b47 5038 u32 txflags, rxflags;
1da177e4 5039 int i;
6aa20a22 5040
5be73b47
MC
5041 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5042 u32 local_adv = 0, remote_adv = 0;
1da177e4 5043
5be73b47
MC
5044 if (txflags & ANEG_CFG_PS1)
5045 local_adv |= ADVERTISE_1000XPAUSE;
5046 if (txflags & ANEG_CFG_PS2)
5047 local_adv |= ADVERTISE_1000XPSE_ASYM;
5048
5049 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5050 remote_adv |= LPA_1000XPAUSE;
5051 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5052 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4 5053
859edb26
MC
5054 tp->link_config.rmt_adv =
5055 mii_adv_to_ethtool_adv_x(remote_adv);
5056
1da177e4
LT
5057 tg3_setup_flow_control(tp, local_adv, remote_adv);
5058
1da177e4
LT
5059 current_link_up = 1;
5060 }
5061 for (i = 0; i < 30; i++) {
5062 udelay(20);
5063 tw32_f(MAC_STATUS,
5064 (MAC_STATUS_SYNC_CHANGED |
5065 MAC_STATUS_CFG_CHANGED));
5066 udelay(40);
5067 if ((tr32(MAC_STATUS) &
5068 (MAC_STATUS_SYNC_CHANGED |
5069 MAC_STATUS_CFG_CHANGED)) == 0)
5070 break;
5071 }
5072
5073 mac_status = tr32(MAC_STATUS);
5074 if (current_link_up == 0 &&
5075 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5076 !(mac_status & MAC_STATUS_RCVD_CFG))
5077 current_link_up = 1;
5078 } else {
5be73b47
MC
5079 tg3_setup_flow_control(tp, 0, 0);
5080
1da177e4
LT
5081 /* Forcing 1000FD link up. */
5082 current_link_up = 1;
1da177e4
LT
5083
5084 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5085 udelay(40);
e8f3f6ca
MC
5086
5087 tw32_f(MAC_MODE, tp->mac_mode);
5088 udelay(40);
1da177e4
LT
5089 }
5090
5091out:
5092 return current_link_up;
5093}
5094
/* Bring up / re-check the link on a TBI (fiber) PHY.
 *
 * Programs the MAC for TBI port mode, runs either hardware or by-hand
 * fiber autonegotiation, then reconciles carrier state and LED control
 * with the result.  Reports link changes via tg3_link_report().
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-call link parameters so we can report only
	 * genuine changes at the end.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: if we are not using HW autoneg, the carrier is up and
	 * init has completed, and the MAC shows a clean synced link with no
	 * pending config events, just ack the change bits and keep the link.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change indication in the status block. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config/link-state change bits until they stay clear
	 * (bounded to 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* If autoneg timed out, pulse SEND_CONFIGS to restart the
		 * link partner's negotiation.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber link is always 1000 Mbps full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a link transition, or a parameter change while the link
	 * state itself stayed the same.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5203
747e8f8b
MC
/* Bring up / re-check the link on a fiber PHY driven through the MII
 * interface (e.g. 5714S-class serdes).
 *
 * Configures the MAC for GMII port mode, (re)starts 1000BASE-X
 * autonegotiation or forces the duplex as requested, and derives the
 * resulting speed/duplex/flow-control settings.  Returns the OR of the
 * PHY read error results accumulated along the way.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending MAC status change events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* BMSR link status is latched-low; read twice to get the current
	 * value rather than a stale latched one.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC's TX status is the authoritative link
		 * indication; override the BMSR bit with it.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement word from the
		 * requested flow control and advertising masks.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): write it
			 * and restart autonegotiation, then let the serdes
			 * timeout machinery poll for completion.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable AN and set the requested duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
					     BMCR_ANRESTART |
					     BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low link bit: read BMSR twice again. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the link partner's.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5375
/* Periodic serdes parallel-detection state machine.
 *
 * While autoneg is still counting down (tp->serdes_counter), do nothing.
 * If the link is down with autoneg enabled, probe the PHY's shadow and
 * DSP expansion registers: signal detect without incoming config code
 * words means the partner is not autonegotiating, so force the link up
 * at 1000/full ("parallel detect").  Conversely, if a parallel-detected
 * link later starts receiving config code words, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice: the first read clears latched state. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5435
1da177e4
LT
/* Top-level PHY setup: dispatch to the fiber, fiber-MII, or copper
 * variant according to the PHY flags, then apply post-link MAC tweaks
 * (clock prescaler on 5784_AX, TX slot time, statistics coalescing,
 * ASPM L1 threshold workaround).  Returns the dispatched helper's
 * error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Scale the GRC timer prescaler to the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		/* 5720 keeps extra fields in MAC_TX_LENGTHS; preserve them. */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit needs the longer slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only collect hardware statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Relax the L1 entry threshold while the link is down. */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5500
66cfd1bd
MC
5501static inline int tg3_irq_sync(struct tg3 *tp)
5502{
5503 return tp->irq_sync;
5504}
5505
97bd8e49
MC
/* Copy @len bytes of device registers starting at register offset @off
 * into the dump buffer @dst, one 32-bit word at a time.
 */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	/* Advance dst by the register offset so every register lands at
	 * the same byte offset in the dump buffer as it occupies in the
	 * device's register space.
	 */
	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
5514
/* Populate @regs with the register windows of interest for a debug dump
 * on non-PCI-Express devices.  Each tg3_rd32_loop() call names a block's
 * starting register and its length in bytes; some blocks are gated on
 * device capability flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers only exist with MSI-X support. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The dedicated TX CPU is absent on 5705 and later chips. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5564
/* Dump device registers and per-vector status/NAPI state to the kernel
 * log for debugging.  Allocates a temporary buffer with GFP_ATOMIC since
 * this may run from error paths in atomic context; silently returns
 * after logging if the allocation fails.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping all-zero groups to keep the
	 * log output compact.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5622
df3e6548
MC
5623/* This is called whenever we suspect that the system chipset is re-
5624 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5625 * is bogus tx completions. We try to recover by setting the
5626 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5627 * in the workqueue.
5628 */
/* Flag the device for a reset after a suspected MMIO write-reordering
 * problem was detected in the TX completion path.  The actual chip
 * reset happens later from the workqueue once TX_RECOVERY_PENDING is
 * seen set.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If we already write the mailbox indirectly, reordering cannot be
	 * the cause, so this path should be unreachable.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* tp->lock protects the flag word against concurrent updates. */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5644
f3f3f27e 5645static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
1b2a7205 5646{
f65aac16
MC
5647 /* Tell compiler to fetch tx indices from memory. */
5648 barrier();
f3f3f27e
MC
5649 return tnapi->tx_pending -
5650 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
1b2a7205
MC
5651}
5652
1da177e4
LT
5653/* Tigon3 never reports partial packet sends. So we do not
5654 * need special logic to handle SKBs that have not had all
5655 * of their frags sent yet, like SunGEM does.
5656 */
/* Reclaim completed TX descriptors for one NAPI vector.
 *
 * Walks the ring from the software consumer index up to the hardware
 * consumer index reported in the status block, unmapping DMA and freeing
 * each completed skb, then wakes the TX queue if it was stopped and
 * enough space has been freed.  On an inconsistent ring (NULL skb or a
 * frag slot already owned), hands off to tg3_tx_recover() and returns.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, vector 0 carries no TX ring, so queue numbering is
	 * shifted down by one relative to the vector index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the ring state is
		 * corrupt; trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over any continuation slots of a split mapping. */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* A frag slot owning an skb, or running past the
			 * hardware index, indicates bogus completions.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work to the byte-queue-limit machinery. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX lock to avoid racing with a concurrent
	 * tg3_start_xmit() that is about to stop the queue.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5746
8d4057a9
ED
5747static void tg3_frag_free(bool is_frag, void *data)
5748{
5749 if (is_frag)
5750 put_page(virt_to_head_page(data));
5751 else
5752 kfree(data);
5753}
5754
/* Unmap and free one RX producer-ring buffer described by @ri.
 * @map_sz is the DMA-mapped length; the full allocation size is
 * recomputed here to decide whether the buffer came from the page-frag
 * allocator or kmalloc (must mirror the logic in tg3_alloc_rx_data).
 * No-op if the slot holds no buffer.
 */
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
5768
8d4057a9 5769
1da177e4
LT
5770/* Returns size of skb allocated or < 0 on error.
5771 *
5772 * We only need to fill in the address because the other members
5773 * of the RX descriptor are invariant, see tg3_init_rings.
5774 *
5775 * Note the purposeful assymetry of cpu vs. chip accesses. For
5776 * posting buffers we only dirty the first cache line of the RX
5777 * descriptor (containing the address). Whereas for the RX status
5778 * buffers the cpu only reads the last cacheline of the RX descriptor
5779 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5780 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked,
			    unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Pick the descriptor, bookkeeping slot, and buffer size for the
	 * target ring (standard or jumbo).
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Total allocation: payload + headroom + shared-info tail, each
	 * rounded for build_skb().  Allocations that fit a page come from
	 * the page-frag allocator (*frag_size != 0); larger ones from
	 * kmalloc (*frag_size == 0).
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	/* Only the address fields of the descriptor are (re)written; the
	 * rest are invariant — see tg3_init_rings.
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
5845
5846/* We only need to move over in the address because the other
5847 * members of the RX descriptor are invariant. See notes above
9205fd9c 5848 * tg3_alloc_rx_data for full details.
1da177e4 5849 */
a3896167
MC
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers always originate from NAPI vector 0's producer ring. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Move the buffer pointer and DMA address to the destination slot;
	 * only the address fields of the descriptor need copying.
	 */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
5895
1da177e4
LT
5896/* The RX ring scheme is composed of multiple rings which post fresh
5897 * buffers to the chip, and one special ring the chip uses to report
5898 * status back to the host.
5899 *
5900 * The special ring reports the status of received packets to the
5901 * host. The chip does not write into the original descriptor the
5902 * RX buffer was obtained from. The chip simply takes the original
5903 * descriptor as provided by the host, updates the status and length
5904 * field, then writes this into the next status ring entry.
5905 *
5906 * Each ring the host uses to post buffers to the chip is described
5907 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5908 * it is first placed into the on-chip ram. When the packet's length
5909 * is known, it walks down the TG3_BDINFO entries to select the ring.
5910 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5911 * which is within the range of the new packet's length is chosen.
5912 *
5913 * The "separate ring for rx status" scheme may sound queer, but it makes
5914 * sense from a cache coherency perspective. If only the host writes
5915 * to the buffer post rings, and only the chip writes to the rx status
5916 * rings, then cache lines never move beyond shared-modified state.
5917 * If both the host and chip were to write into the same ring, cache line
5918 * eviction could occur since both entities want it in an exclusive state.
5919 */
/* NAPI receive handler: drain up to @budget packets from this vector's
 * RX return ring.
 *
 * Large packets get a fresh replacement buffer and the received buffer
 * is turned into an skb with build_skb(); small packets are copied into
 * a new skb and the original buffer recycled back to the producer ring.
 * Finishes by acking the return ring and replenishing the producer
 * ring(s) — directly in the non-RSS case, or by updating the indices
 * and kicking napi[1] when RSS is enabled.  Returns the number of
 * packets delivered to the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie identifies the producer ring and slot
		 * the buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Post a replacement buffer before taking this one
			 * out of the ring; on failure, recycle and drop.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small packet: keep the DMA buffer in the ring and
			 * copy the payload into a freshly allocated skb.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* A checksum of 0xffff from the chip means the TCP/UDP
		 * checksum verified correct.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they carry a VLAN header. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush the standard producer index so the
		 * chip never runs dry during a long poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] owns the producer rings; schedule it
		 * to perform the actual refill.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6112
/* Poll for PHY link-change events when the chip reports them via the
 * status block (i.e. neither USE_LINKCHG_REG nor POLL_SERDES is set).
 * Clears SD_STATUS_LINK_CHG and lets either phylib or tg3_setup_phy()
 * handle the new link state under tp->lock.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit but keep UPDATED set so the
			 * status block is still seen as valid.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns the PHY; just ack the MAC-level
				 * attention bits and let phylib do the rest.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6136
/* Transfer refilled RX buffers from a source producer-ring set @spr (owned
 * by an RSS vector) into the destination set @dpr (the hardware-visible
 * ring owned by napi[0]).  Both the standard and jumbo rings are drained.
 *
 * Returns 0 on success, or -ENOSPC when a destination slot is still
 * occupied (the transfer is then truncated at that slot).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Standard-sized ring first. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of contiguous entries available before the source
		 * index wraps.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also clamp to the contiguous room left in the destination. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Truncate at the first destination slot still holding a
		 * buffer; signal the caller so it can kick the coalescer.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Copy the DMA addresses into the destination descriptors. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Now the jumbo ring, same algorithm. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6262
/* Core per-vector NAPI work: reap TX completions, process up to
 * (budget - work_done) RX packets, and — on the RSS master vector
 * (napi[1]) — funnel the per-vector RX refills back into the single
 * hardware producer ring via tg3_rx_prodring_xfer().
 *
 * Returns the updated work_done count; the caller compares it against
 * budget and checks TX_RECOVERY_PENDING.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vector has no RX return ring (TX-only vector). */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Cleared before the transfer; tg3_rx() on other vectors may
		 * set it again to request another refill pass.
		 */
		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* BD updates must be visible before the mailbox writes. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A destination slot was busy; nudge the coalescer so we get
		 * another interrupt and can retry the transfer soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
6313
db219973
MC
6314static inline void tg3_reset_task_schedule(struct tg3 *tp)
6315{
6316 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6317 schedule_work(&tp->reset_task);
6318}
6319
/* Synchronously cancel any queued/running reset task, then clear the
 * pending flags so a later tg3_reset_task_schedule() can queue a fresh
 * one.  The flags are cleared only after cancel_work_sync() returns, so
 * a concurrently running reset task has finished before they drop.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6326
/* NAPI poll handler for MSI-X vectors other than vector 0 (no link-change
 * or error processing here; that is tg3_poll()'s job on vector 0).
 * Loops until the budget is exhausted or no RX/TX work remains, then
 * re-arms the vector's interrupt via its mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				/* Force an immediate coalescing pass so the
				 * missed refill request is not lost.
				 */
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6386
e64de4e6
MC
6387static void tg3_process_error(struct tg3 *tp)
6388{
6389 u32 val;
6390 bool real_error = false;
6391
63c3a66f 6392 if (tg3_flag(tp, ERROR_PROCESSED))
e64de4e6
MC
6393 return;
6394
6395 /* Check Flow Attention register */
6396 val = tr32(HOSTCC_FLOW_ATTN);
6397 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6398 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6399 real_error = true;
6400 }
6401
6402 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6403 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6404 real_error = true;
6405 }
6406
6407 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6408 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6409 real_error = true;
6410 }
6411
6412 if (!real_error)
6413 return;
6414
6415 tg3_dump_state(tp);
6416
63c3a66f 6417 tg3_flag_set(tp, ERROR_PROCESSED);
db219973 6418 tg3_reset_task_schedule(tp);
e64de4e6
MC
6419}
6420
/* NAPI poll handler for vector 0 (and the only handler in INTx/MSI mode).
 * In addition to the RX/TX work done by tg3_poll_work(), this vector also
 * handles chip error status and PHY link changes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6468
66cfd1bd
MC
6469static void tg3_napi_disable(struct tg3 *tp)
6470{
6471 int i;
6472
6473 for (i = tp->irq_cnt - 1; i >= 0; i--)
6474 napi_disable(&tp->napi[i].napi);
6475}
6476
6477static void tg3_napi_enable(struct tg3 *tp)
6478{
6479 int i;
6480
6481 for (i = 0; i < tp->irq_cnt; i++)
6482 napi_enable(&tp->napi[i].napi);
6483}
6484
6485static void tg3_napi_init(struct tg3 *tp)
6486{
6487 int i;
6488
6489 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6490 for (i = 1; i < tp->irq_cnt; i++)
6491 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6492}
6493
6494static void tg3_napi_fini(struct tg3 *tp)
6495{
6496 int i;
6497
6498 for (i = 0; i < tp->irq_cnt; i++)
6499 netif_napi_del(&tp->napi[i].napi);
6500}
6501
6502static inline void tg3_netif_stop(struct tg3 *tp)
6503{
6504 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6505 tg3_napi_disable(tp);
6506 netif_tx_disable(tp->dev);
6507}
6508
6509static inline void tg3_netif_start(struct tg3 *tp)
6510{
6511 /* NOTE: unconditional netif_tx_wake_all_queues is only
6512 * appropriate so long as all callers are assured to
6513 * have free tx slots (such as after tg3_init_hw)
6514 */
6515 netif_tx_wake_all_queues(tp->dev);
6516
6517 tg3_napi_enable(tp);
6518 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6519 tg3_enable_ints(tp);
6520}
6521
f47c11ee
DM
6522static void tg3_irq_quiesce(struct tg3 *tp)
6523{
4f125f42
MC
6524 int i;
6525
f47c11ee
DM
6526 BUG_ON(tp->irq_sync);
6527
6528 tp->irq_sync = 1;
6529 smp_mb();
6530
4f125f42
MC
6531 for (i = 0; i < tp->irq_cnt; i++)
6532 synchronize_irq(tp->napi[i].irq_vec);
f47c11ee
DM
6533}
6534
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6551
fcfa0a32
MC
6552/* One-shot MSI handler - Chip automatically disables interrupt
6553 * after sending MSI so driver doesn't have to do it.
6554 */
7d12e780 6555static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
fcfa0a32 6556{
09943a18
MC
6557 struct tg3_napi *tnapi = dev_id;
6558 struct tg3 *tp = tnapi->tp;
fcfa0a32 6559
898a56f8 6560 prefetch(tnapi->hw_status);
0c1d0e2b
MC
6561 if (tnapi->rx_rcb)
6562 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
fcfa0a32
MC
6563
6564 if (likely(!tg3_irq_sync(tp)))
09943a18 6565 napi_schedule(&tnapi->napi);
fcfa0a32
MC
6566
6567 return IRQ_HANDLED;
6568}
6569
88b06bc2
MC
6570/* MSI ISR - No need to check for interrupt sharing and no need to
6571 * flush status block and interrupt mailbox. PCI ordering rules
6572 * guarantee that MSI will arrive after the status block.
6573 */
7d12e780 6574static irqreturn_t tg3_msi(int irq, void *dev_id)
88b06bc2 6575{
09943a18
MC
6576 struct tg3_napi *tnapi = dev_id;
6577 struct tg3 *tp = tnapi->tp;
88b06bc2 6578
898a56f8 6579 prefetch(tnapi->hw_status);
0c1d0e2b
MC
6580 if (tnapi->rx_rcb)
6581 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
88b06bc2 6582 /*
fac9b83e 6583 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 6584 * chip-internal interrupt pending events.
fac9b83e 6585 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
6586 * NIC to stop sending us irqs, engaging "in-intr-handler"
6587 * event coalescing.
6588 */
5b39de91 6589 tw32_mailbox(tnapi->int_mbox, 0x00000001);
61487480 6590 if (likely(!tg3_irq_sync(tp)))
09943a18 6591 napi_schedule(&tnapi->napi);
61487480 6592
88b06bc2
MC
6593 return IRQ_RETVAL(1);
6594}
6595
/* Legacy INTx interrupt handler (non-tagged status mode).  Must cope
 * with a shared interrupt line: the status block is consulted (and the
 * PCI State register read, which also flushes the status block DMA) to
 * decide whether the interrupt was ours at all.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6644
/* Legacy INTx interrupt handler for tagged-status mode.  Instead of the
 * SD_STATUS_UPDATED bit, the status-block tag is compared against the
 * last tag we acknowledged to decide whether there is new work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream. We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled. Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
6696
7938109f 6697/* ISR for interrupt test */
7d12e780 6698static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7938109f 6699{
09943a18
MC
6700 struct tg3_napi *tnapi = dev_id;
6701 struct tg3 *tp = tnapi->tp;
898a56f8 6702 struct tg3_hw_status *sblk = tnapi->hw_status;
7938109f 6703
f9804ddb
MC
6704 if ((sblk->status & SD_STATUS_UPDATED) ||
6705 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
b16250e3 6706 tg3_disable_ints(tp);
7938109f
MC
6707 return IRQ_RETVAL(1);
6708 }
6709 return IRQ_RETVAL(0);
6710}
6711
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: drive every vector's interrupt handler by hand. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
6722
/* net_device watchdog hook: the stack has detected a stalled TX queue.
 * Optionally dump chip state for debugging, then schedule a full reset.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
6734
c58ec932
MC
6735/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6736static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6737{
6738 u32 base = (u32) mapping & 0xffffffff;
6739
807540ba 6740 return (base > 0xffffdcc0) && (base + len + 8 < base);
c58ec932
MC
6741}
6742
72f2afb8
MC
6743/* Test for DMA addresses > 40-bit */
6744static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6745 int len)
6746{
6747#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
63c3a66f 6748 if (tg3_flag(tp, 40BIT_DMA_BUG))
807540ba 6749 return ((u64) mapping + len) > DMA_BIT_MASK(40);
72f2afb8
MC
6750 return 0;
6751#else
6752 return 0;
6753#endif
6754}
6755
d1a3b737 6756static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
92cd3a17
MC
6757 dma_addr_t mapping, u32 len, u32 flags,
6758 u32 mss, u32 vlan)
2ffcc981 6759{
92cd3a17
MC
6760 txbd->addr_hi = ((u64) mapping >> 32);
6761 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6762 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6763 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
2ffcc981 6764}
1da177e4 6765
/* Place one DMA segment onto the TX ring at *entry, splitting it into
 * multiple descriptors when the chip has a per-BD DMA length limit
 * (tp->dma_limit).  *entry and *budget are advanced in place.
 *
 * Returns true ("hwbug") when the mapping trips one of the hardware DMA
 * bugs (short-DMA, 4GB-crossing, >40-bit) or the ring budget runs out,
 * in which case the caller must fall back to the bounce-buffer path.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Only the very last sub-descriptor may carry TXD_FLAG_END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				/* Split the remainder evenly so neither
				 * piece is <= 8 bytes.
				 */
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark the slot as a synthetic split so unmap can
			 * skip it (it shares the skb's DMA mapping).
			 */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors: undo the "fragmented"
				 * mark on the last slot we consumed.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
6824}
6825
/* Unmap the DMA buffers of the skb that begins at TX ring slot @entry
 * and drop the driver's reference to it.  @last is the index of the
 * skb's final page fragment (-1 when only the linear head was mapped).
 * Slots flagged "fragmented" were synthesized by tg3_tx_frag_set() and
 * share the previous slot's mapping, so they are walked over without
 * an unmap of their own.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip split descriptors belonging to the linear head. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip split descriptors belonging to this fragment. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
6863
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Bounce the skb into a freshly allocated linear copy (4-byte aligned on
 * 5701), map it, and queue it with tg3_tx_frag_set().  On success *pskb
 * is replaced with the new skb and the original is freed; the original
 * is freed on failure too.  Returns 0 on success, -1 on any failure
 * (allocation, DMA mapping, or the copy itself still tripping a bug).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs the payload 4-byte aligned. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Even the bounce copy hit a DMA bug or ran
				 * out of descriptors: unwind from the slot
				 * we started at.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	/* The original skb is consumed on every path. */
	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
6918
2ffcc981 6919static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * The skb is software-segmented and each resulting segment is fed back
 * through tg3_start_xmit().  Always consumes @skb (freed on every path
 * except the NETDEV_TX_BUSY requeue).
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	/* Transmit each segment individually; the list links are severed
	 * so each nskb is a standalone packet.
	 */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
52c0fd83 6961
5a6f3074 6962/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
63c3a66f 6963 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5a6f3074 6964 */
2ffcc981 6965static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
6966{
6967 struct tg3 *tp = netdev_priv(dev);
92cd3a17 6968 u32 len, entry, base_flags, mss, vlan = 0;
84b67b27 6969 u32 budget;
432aa7ed 6970 int i = -1, would_hit_hwbug;
90079ce8 6971 dma_addr_t mapping;
24f4efd4
MC
6972 struct tg3_napi *tnapi;
6973 struct netdev_queue *txq;
432aa7ed 6974 unsigned int last;
f4188d8a 6975
24f4efd4
MC
6976 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6977 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
63c3a66f 6978 if (tg3_flag(tp, ENABLE_TSS))
24f4efd4 6979 tnapi++;
1da177e4 6980
84b67b27
MC
6981 budget = tg3_tx_avail(tnapi);
6982
00b70504 6983 /* We are running in BH disabled context with netif_tx_lock
bea3348e 6984 * and TX reclaim runs via tp->napi.poll inside of a software
f47c11ee
DM
6985 * interrupt. Furthermore, IRQ processing runs lockless so we have
6986 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 6987 */
84b67b27 6988 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
24f4efd4
MC
6989 if (!netif_tx_queue_stopped(txq)) {
6990 netif_tx_stop_queue(txq);
1f064a87
SH
6991
6992 /* This is a hard error, log it. */
5129c3a3
MC
6993 netdev_err(dev,
6994 "BUG! Tx Ring full when queue awake!\n");
1f064a87 6995 }
1da177e4
LT
6996 return NETDEV_TX_BUSY;
6997 }
6998
f3f3f27e 6999 entry = tnapi->tx_prod;
1da177e4 7000 base_flags = 0;
84fa7933 7001 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4 7002 base_flags |= TXD_FLAG_TCPUDP_CSUM;
24f4efd4 7003
be98da6a
MC
7004 mss = skb_shinfo(skb)->gso_size;
7005 if (mss) {
eddc9ec5 7006 struct iphdr *iph;
34195c3d 7007 u32 tcp_opt_len, hdr_len;
1da177e4
LT
7008
7009 if (skb_header_cloned(skb) &&
48855432
ED
7010 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7011 goto drop;
1da177e4 7012
34195c3d 7013 iph = ip_hdr(skb);
ab6a5bb6 7014 tcp_opt_len = tcp_optlen(skb);
1da177e4 7015
a5a11955 7016 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
34195c3d 7017
a5a11955 7018 if (!skb_is_gso_v6(skb)) {
34195c3d
MC
7019 iph->check = 0;
7020 iph->tot_len = htons(mss + hdr_len);
7021 }
7022
52c0fd83 7023 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
63c3a66f 7024 tg3_flag(tp, TSO_BUG))
de6f31eb 7025 return tg3_tso_bug(tp, skb);
52c0fd83 7026
1da177e4
LT
7027 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7028 TXD_FLAG_CPU_POST_DMA);
7029
63c3a66f
JP
7030 if (tg3_flag(tp, HW_TSO_1) ||
7031 tg3_flag(tp, HW_TSO_2) ||
7032 tg3_flag(tp, HW_TSO_3)) {
aa8223c7 7033 tcp_hdr(skb)->check = 0;
1da177e4 7034 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
aa8223c7
ACM
7035 } else
7036 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7037 iph->daddr, 0,
7038 IPPROTO_TCP,
7039 0);
1da177e4 7040
63c3a66f 7041 if (tg3_flag(tp, HW_TSO_3)) {
615774fe
MC
7042 mss |= (hdr_len & 0xc) << 12;
7043 if (hdr_len & 0x10)
7044 base_flags |= 0x00000010;
7045 base_flags |= (hdr_len & 0x3e0) << 5;
63c3a66f 7046 } else if (tg3_flag(tp, HW_TSO_2))
92c6b8d1 7047 mss |= hdr_len << 9;
63c3a66f 7048 else if (tg3_flag(tp, HW_TSO_1) ||
92c6b8d1 7049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
eddc9ec5 7050 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
7051 int tsflags;
7052
eddc9ec5 7053 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
7054 mss |= (tsflags << 11);
7055 }
7056 } else {
eddc9ec5 7057 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
7058 int tsflags;
7059
eddc9ec5 7060 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
7061 base_flags |= tsflags << 12;
7062 }
7063 }
7064 }
bf933c80 7065
93a700a9
MC
7066 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7067 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7068 base_flags |= TXD_FLAG_JMB_PKT;
7069
92cd3a17
MC
7070 if (vlan_tx_tag_present(skb)) {
7071 base_flags |= TXD_FLAG_VLAN;
7072 vlan = vlan_tx_tag_get(skb);
7073 }
1da177e4 7074
f4188d8a
AD
7075 len = skb_headlen(skb);
7076
7077 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
48855432
ED
7078 if (pci_dma_mapping_error(tp->pdev, mapping))
7079 goto drop;
7080
90079ce8 7081
f3f3f27e 7082 tnapi->tx_buffers[entry].skb = skb;
4e5e4f0d 7083 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
1da177e4
LT
7084
7085 would_hit_hwbug = 0;
7086
63c3a66f 7087 if (tg3_flag(tp, 5701_DMA_BUG))
c58ec932 7088 would_hit_hwbug = 1;
1da177e4 7089
84b67b27 7090 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
d1a3b737 7091 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
ba1142e4 7092 mss, vlan)) {
d1a3b737 7093 would_hit_hwbug = 1;
ba1142e4 7094 } else if (skb_shinfo(skb)->nr_frags > 0) {
92cd3a17
MC
7095 u32 tmp_mss = mss;
7096
7097 if (!tg3_flag(tp, HW_TSO_1) &&
7098 !tg3_flag(tp, HW_TSO_2) &&
7099 !tg3_flag(tp, HW_TSO_3))
7100 tmp_mss = 0;
7101
c5665a53
MC
7102 /* Now loop through additional data
7103 * fragments, and queue them.
7104 */
1da177e4
LT
7105 last = skb_shinfo(skb)->nr_frags - 1;
7106 for (i = 0; i <= last; i++) {
7107 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7108
9e903e08 7109 len = skb_frag_size(frag);
dc234d0b 7110 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
5d6bcdfe 7111 len, DMA_TO_DEVICE);
1da177e4 7112
f3f3f27e 7113 tnapi->tx_buffers[entry].skb = NULL;
4e5e4f0d 7114 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
f4188d8a 7115 mapping);
5d6bcdfe 7116 if (dma_mapping_error(&tp->pdev->dev, mapping))
f4188d8a 7117 goto dma_error;
1da177e4 7118
b9e45482
MC
7119 if (!budget ||
7120 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
84b67b27
MC
7121 len, base_flags |
7122 ((i == last) ? TXD_FLAG_END : 0),
b9e45482 7123 tmp_mss, vlan)) {
72f2afb8 7124 would_hit_hwbug = 1;
b9e45482
MC
7125 break;
7126 }
1da177e4
LT
7127 }
7128 }
7129
7130 if (would_hit_hwbug) {
0d681b27 7131 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
1da177e4
LT
7132
7133 /* If the workaround fails due to memory/mapping
7134 * failure, silently drop this packet.
7135 */
84b67b27
MC
7136 entry = tnapi->tx_prod;
7137 budget = tg3_tx_avail(tnapi);
f7ff1987 7138 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
84b67b27 7139 base_flags, mss, vlan))
48855432 7140 goto drop_nofree;
1da177e4
LT
7141 }
7142
d515b450 7143 skb_tx_timestamp(skb);
5cb917bc 7144 netdev_tx_sent_queue(txq, skb->len);
d515b450 7145
6541b806
MC
7146 /* Sync BD data before updating mailbox */
7147 wmb();
7148
1da177e4 7149 /* Packets are ready, update Tx producer idx local and on card. */
24f4efd4 7150 tw32_tx_mbox(tnapi->prodmbox, entry);
1da177e4 7151
f3f3f27e
MC
7152 tnapi->tx_prod = entry;
7153 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
24f4efd4 7154 netif_tx_stop_queue(txq);
f65aac16
MC
7155
7156 /* netif_tx_stop_queue() must be done before checking
7157 * checking tx index in tg3_tx_avail() below, because in
7158 * tg3_tx(), we update tx index before checking for
7159 * netif_tx_queue_stopped().
7160 */
7161 smp_mb();
f3f3f27e 7162 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
24f4efd4 7163 netif_tx_wake_queue(txq);
51b91468 7164 }
1da177e4 7165
cdd0db05 7166 mmiowb();
1da177e4 7167 return NETDEV_TX_OK;
f4188d8a
AD
7168
7169dma_error:
ba1142e4 7170 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
432aa7ed 7171 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
48855432
ED
7172drop:
7173 dev_kfree_skb(skb);
7174drop_nofree:
7175 tp->tx_dropped++;
f4188d8a 7176 return NETDEV_TX_OK;
1da177e4
LT
7177}
7178
6e01b20b
MC
7179static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7180{
7181 if (enable) {
7182 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7183 MAC_MODE_PORT_MODE_MASK);
7184
7185 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7186
7187 if (!tg3_flag(tp, 5705_PLUS))
7188 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7189
7190 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7191 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7192 else
7193 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7194 } else {
7195 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7196
7197 if (tg3_flag(tp, 5705_PLUS) ||
7198 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7200 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7201 }
7202
7203 tw32(MAC_MODE, tp->mac_mode);
7204 udelay(40);
7205}
7206
941ec90f 7207static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
5e5a7f37 7208{
941ec90f 7209 u32 val, bmcr, mac_mode, ptest = 0;
5e5a7f37
MC
7210
7211 tg3_phy_toggle_apd(tp, false);
7212 tg3_phy_toggle_automdix(tp, 0);
7213
941ec90f
MC
7214 if (extlpbk && tg3_phy_set_extloopbk(tp))
7215 return -EIO;
7216
7217 bmcr = BMCR_FULLDPLX;
5e5a7f37
MC
7218 switch (speed) {
7219 case SPEED_10:
7220 break;
7221 case SPEED_100:
7222 bmcr |= BMCR_SPEED100;
7223 break;
7224 case SPEED_1000:
7225 default:
7226 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7227 speed = SPEED_100;
7228 bmcr |= BMCR_SPEED100;
7229 } else {
7230 speed = SPEED_1000;
7231 bmcr |= BMCR_SPEED1000;
7232 }
7233 }
7234
941ec90f
MC
7235 if (extlpbk) {
7236 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7237 tg3_readphy(tp, MII_CTRL1000, &val);
7238 val |= CTL1000_AS_MASTER |
7239 CTL1000_ENABLE_MASTER;
7240 tg3_writephy(tp, MII_CTRL1000, val);
7241 } else {
7242 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7243 MII_TG3_FET_PTEST_TRIM_2;
7244 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7245 }
7246 } else
7247 bmcr |= BMCR_LOOPBACK;
7248
5e5a7f37
MC
7249 tg3_writephy(tp, MII_BMCR, bmcr);
7250
7251 /* The write needs to be flushed for the FETs */
7252 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7253 tg3_readphy(tp, MII_BMCR, &bmcr);
7254
7255 udelay(40);
7256
7257 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7258 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
941ec90f 7259 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
5e5a7f37
MC
7260 MII_TG3_FET_PTEST_FRC_TX_LINK |
7261 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7262
7263 /* The write needs to be flushed for the AC131 */
7264 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7265 }
7266
7267 /* Reset to prevent losing 1st rx packet intermittently */
7268 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7269 tg3_flag(tp, 5780_CLASS)) {
7270 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7271 udelay(10);
7272 tw32_f(MAC_RX_MODE, tp->rx_mode);
7273 }
7274
7275 mac_mode = tp->mac_mode &
7276 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7277 if (speed == SPEED_1000)
7278 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7279 else
7280 mac_mode |= MAC_MODE_PORT_MODE_MII;
7281
7282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7283 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7284
7285 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7286 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7287 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7288 mac_mode |= MAC_MODE_LINK_POLARITY;
7289
7290 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7291 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7292 }
7293
7294 tw32(MAC_MODE, mac_mode);
7295 udelay(40);
941ec90f
MC
7296
7297 return 0;
5e5a7f37
MC
7298}
7299
c8f44aff 7300static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
06c03c02
MB
7301{
7302 struct tg3 *tp = netdev_priv(dev);
7303
7304 if (features & NETIF_F_LOOPBACK) {
7305 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7306 return;
7307
06c03c02 7308 spin_lock_bh(&tp->lock);
6e01b20b 7309 tg3_mac_loopback(tp, true);
06c03c02
MB
7310 netif_carrier_on(tp->dev);
7311 spin_unlock_bh(&tp->lock);
7312 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7313 } else {
7314 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7315 return;
7316
06c03c02 7317 spin_lock_bh(&tp->lock);
6e01b20b 7318 tg3_mac_loopback(tp, false);
06c03c02
MB
7319 /* Force link status check */
7320 tg3_setup_phy(tp, 1);
7321 spin_unlock_bh(&tp->lock);
7322 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7323 }
7324}
7325
c8f44aff
MM
7326static netdev_features_t tg3_fix_features(struct net_device *dev,
7327 netdev_features_t features)
dc668910
MM
7328{
7329 struct tg3 *tp = netdev_priv(dev);
7330
63c3a66f 7331 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
dc668910
MM
7332 features &= ~NETIF_F_ALL_TSO;
7333
7334 return features;
7335}
7336
c8f44aff 7337static int tg3_set_features(struct net_device *dev, netdev_features_t features)
06c03c02 7338{
c8f44aff 7339 netdev_features_t changed = dev->features ^ features;
06c03c02
MB
7340
7341 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7342 tg3_set_loopback(dev, features);
7343
7344 return 0;
7345}
7346
21f581a5
MC
7347static void tg3_rx_prodring_free(struct tg3 *tp,
7348 struct tg3_rx_prodring_set *tpr)
1da177e4 7349{
1da177e4
LT
7350 int i;
7351
8fea32b9 7352 if (tpr != &tp->napi[0].prodring) {
b196c7e4 7353 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
2c49a44d 7354 i = (i + 1) & tp->rx_std_ring_mask)
9205fd9c 7355 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
b196c7e4
MC
7356 tp->rx_pkt_map_sz);
7357
63c3a66f 7358 if (tg3_flag(tp, JUMBO_CAPABLE)) {
b196c7e4
MC
7359 for (i = tpr->rx_jmb_cons_idx;
7360 i != tpr->rx_jmb_prod_idx;
2c49a44d 7361 i = (i + 1) & tp->rx_jmb_ring_mask) {
9205fd9c 7362 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
b196c7e4
MC
7363 TG3_RX_JMB_MAP_SZ);
7364 }
7365 }
7366
2b2cdb65 7367 return;
b196c7e4 7368 }
1da177e4 7369
2c49a44d 7370 for (i = 0; i <= tp->rx_std_ring_mask; i++)
9205fd9c 7371 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
2b2cdb65 7372 tp->rx_pkt_map_sz);
1da177e4 7373
63c3a66f 7374 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
2c49a44d 7375 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
9205fd9c 7376 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
2b2cdb65 7377 TG3_RX_JMB_MAP_SZ);
1da177e4
LT
7378 }
7379}
7380
c6cdf436 7381/* Initialize rx rings for packet processing.
1da177e4
LT
7382 *
7383 * The chip has been shut down and the driver detached from
7384 * the networking, so no interrupts or new tx packets will
7385 * end up in the driver. tp->{tx,}lock are held and thus
7386 * we may not sleep.
7387 */
21f581a5
MC
7388static int tg3_rx_prodring_alloc(struct tg3 *tp,
7389 struct tg3_rx_prodring_set *tpr)
1da177e4 7390{
287be12e 7391 u32 i, rx_pkt_dma_sz;
1da177e4 7392
b196c7e4
MC
7393 tpr->rx_std_cons_idx = 0;
7394 tpr->rx_std_prod_idx = 0;
7395 tpr->rx_jmb_cons_idx = 0;
7396 tpr->rx_jmb_prod_idx = 0;
7397
8fea32b9 7398 if (tpr != &tp->napi[0].prodring) {
2c49a44d
MC
7399 memset(&tpr->rx_std_buffers[0], 0,
7400 TG3_RX_STD_BUFF_RING_SIZE(tp));
48035728 7401 if (tpr->rx_jmb_buffers)
2b2cdb65 7402 memset(&tpr->rx_jmb_buffers[0], 0,
2c49a44d 7403 TG3_RX_JMB_BUFF_RING_SIZE(tp));
2b2cdb65
MC
7404 goto done;
7405 }
7406
1da177e4 7407 /* Zero out all descriptors. */
2c49a44d 7408 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
1da177e4 7409
287be12e 7410 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
63c3a66f 7411 if (tg3_flag(tp, 5780_CLASS) &&
287be12e
MC
7412 tp->dev->mtu > ETH_DATA_LEN)
7413 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7414 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7e72aad4 7415
1da177e4
LT
7416 /* Initialize invariants of the rings, we only set this
7417 * stuff once. This works because the card does not
7418 * write into the rx buffer posting rings.
7419 */
2c49a44d 7420 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
1da177e4
LT
7421 struct tg3_rx_buffer_desc *rxd;
7422
21f581a5 7423 rxd = &tpr->rx_std[i];
287be12e 7424 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
1da177e4
LT
7425 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7426 rxd->opaque = (RXD_OPAQUE_RING_STD |
7427 (i << RXD_OPAQUE_INDEX_SHIFT));
7428 }
7429
1da177e4
LT
7430 /* Now allocate fresh SKBs for each rx ring. */
7431 for (i = 0; i < tp->rx_pending; i++) {
8d4057a9
ED
7432 unsigned int frag_size;
7433
7434 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7435 &frag_size) < 0) {
5129c3a3
MC
7436 netdev_warn(tp->dev,
7437 "Using a smaller RX standard ring. Only "
7438 "%d out of %d buffers were allocated "
7439 "successfully\n", i, tp->rx_pending);
32d8c572 7440 if (i == 0)
cf7a7298 7441 goto initfail;
32d8c572 7442 tp->rx_pending = i;
1da177e4 7443 break;
32d8c572 7444 }
1da177e4
LT
7445 }
7446
63c3a66f 7447 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
cf7a7298
MC
7448 goto done;
7449
2c49a44d 7450 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
cf7a7298 7451
63c3a66f 7452 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
0d86df80 7453 goto done;
cf7a7298 7454
2c49a44d 7455 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
0d86df80
MC
7456 struct tg3_rx_buffer_desc *rxd;
7457
7458 rxd = &tpr->rx_jmb[i].std;
7459 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7460 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7461 RXD_FLAG_JUMBO;
7462 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7463 (i << RXD_OPAQUE_INDEX_SHIFT));
7464 }
7465
7466 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8d4057a9
ED
7467 unsigned int frag_size;
7468
7469 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7470 &frag_size) < 0) {
5129c3a3
MC
7471 netdev_warn(tp->dev,
7472 "Using a smaller RX jumbo ring. Only %d "
7473 "out of %d buffers were allocated "
7474 "successfully\n", i, tp->rx_jumbo_pending);
0d86df80
MC
7475 if (i == 0)
7476 goto initfail;
7477 tp->rx_jumbo_pending = i;
7478 break;
1da177e4
LT
7479 }
7480 }
cf7a7298
MC
7481
7482done:
32d8c572 7483 return 0;
cf7a7298
MC
7484
7485initfail:
21f581a5 7486 tg3_rx_prodring_free(tp, tpr);
cf7a7298 7487 return -ENOMEM;
1da177e4
LT
7488}
7489
21f581a5
MC
7490static void tg3_rx_prodring_fini(struct tg3 *tp,
7491 struct tg3_rx_prodring_set *tpr)
1da177e4 7492{
21f581a5
MC
7493 kfree(tpr->rx_std_buffers);
7494 tpr->rx_std_buffers = NULL;
7495 kfree(tpr->rx_jmb_buffers);
7496 tpr->rx_jmb_buffers = NULL;
7497 if (tpr->rx_std) {
4bae65c8
MC
7498 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7499 tpr->rx_std, tpr->rx_std_mapping);
21f581a5 7500 tpr->rx_std = NULL;
1da177e4 7501 }
21f581a5 7502 if (tpr->rx_jmb) {
4bae65c8
MC
7503 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7504 tpr->rx_jmb, tpr->rx_jmb_mapping);
21f581a5 7505 tpr->rx_jmb = NULL;
1da177e4 7506 }
cf7a7298
MC
7507}
7508
21f581a5
MC
7509static int tg3_rx_prodring_init(struct tg3 *tp,
7510 struct tg3_rx_prodring_set *tpr)
cf7a7298 7511{
2c49a44d
MC
7512 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7513 GFP_KERNEL);
21f581a5 7514 if (!tpr->rx_std_buffers)
cf7a7298
MC
7515 return -ENOMEM;
7516
4bae65c8
MC
7517 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7518 TG3_RX_STD_RING_BYTES(tp),
7519 &tpr->rx_std_mapping,
7520 GFP_KERNEL);
21f581a5 7521 if (!tpr->rx_std)
cf7a7298
MC
7522 goto err_out;
7523
63c3a66f 7524 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
2c49a44d 7525 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
21f581a5
MC
7526 GFP_KERNEL);
7527 if (!tpr->rx_jmb_buffers)
cf7a7298
MC
7528 goto err_out;
7529
4bae65c8
MC
7530 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7531 TG3_RX_JMB_RING_BYTES(tp),
7532 &tpr->rx_jmb_mapping,
7533 GFP_KERNEL);
21f581a5 7534 if (!tpr->rx_jmb)
cf7a7298
MC
7535 goto err_out;
7536 }
7537
7538 return 0;
7539
7540err_out:
21f581a5 7541 tg3_rx_prodring_fini(tp, tpr);
cf7a7298
MC
7542 return -ENOMEM;
7543}
7544
7545/* Free up pending packets in all rx/tx rings.
7546 *
7547 * The chip has been shut down and the driver detached from
7548 * the networking, so no interrupts or new tx packets will
7549 * end up in the driver. tp->{tx,}lock is not held and we are not
7550 * in an interrupt context and thus may sleep.
7551 */
7552static void tg3_free_rings(struct tg3 *tp)
7553{
f77a6a8e 7554 int i, j;
cf7a7298 7555
f77a6a8e
MC
7556 for (j = 0; j < tp->irq_cnt; j++) {
7557 struct tg3_napi *tnapi = &tp->napi[j];
cf7a7298 7558
8fea32b9 7559 tg3_rx_prodring_free(tp, &tnapi->prodring);
b28f6428 7560
0c1d0e2b
MC
7561 if (!tnapi->tx_buffers)
7562 continue;
7563
0d681b27
MC
7564 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7565 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
cf7a7298 7566
0d681b27 7567 if (!skb)
f77a6a8e 7568 continue;
cf7a7298 7569
ba1142e4
MC
7570 tg3_tx_skb_unmap(tnapi, i,
7571 skb_shinfo(skb)->nr_frags - 1);
f77a6a8e
MC
7572
7573 dev_kfree_skb_any(skb);
7574 }
5cb917bc 7575 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
2b2cdb65 7576 }
cf7a7298
MC
7577}
7578
7579/* Initialize tx/rx rings for packet processing.
7580 *
7581 * The chip has been shut down and the driver detached from
7582 * the networking, so no interrupts or new tx packets will
7583 * end up in the driver. tp->{tx,}lock are held and thus
7584 * we may not sleep.
7585 */
7586static int tg3_init_rings(struct tg3 *tp)
7587{
f77a6a8e 7588 int i;
72334482 7589
cf7a7298
MC
7590 /* Free up all the SKBs. */
7591 tg3_free_rings(tp);
7592
f77a6a8e
MC
7593 for (i = 0; i < tp->irq_cnt; i++) {
7594 struct tg3_napi *tnapi = &tp->napi[i];
7595
7596 tnapi->last_tag = 0;
7597 tnapi->last_irq_tag = 0;
7598 tnapi->hw_status->status = 0;
7599 tnapi->hw_status->status_tag = 0;
7600 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
cf7a7298 7601
f77a6a8e
MC
7602 tnapi->tx_prod = 0;
7603 tnapi->tx_cons = 0;
0c1d0e2b
MC
7604 if (tnapi->tx_ring)
7605 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
f77a6a8e
MC
7606
7607 tnapi->rx_rcb_ptr = 0;
0c1d0e2b
MC
7608 if (tnapi->rx_rcb)
7609 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
2b2cdb65 7610
8fea32b9 7611 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
e4af1af9 7612 tg3_free_rings(tp);
2b2cdb65 7613 return -ENOMEM;
e4af1af9 7614 }
f77a6a8e 7615 }
72334482 7616
2b2cdb65 7617 return 0;
cf7a7298
MC
7618}
7619
49a359e3 7620static void tg3_mem_tx_release(struct tg3 *tp)
cf7a7298 7621{
f77a6a8e 7622 int i;
898a56f8 7623
49a359e3 7624 for (i = 0; i < tp->irq_max; i++) {
f77a6a8e
MC
7625 struct tg3_napi *tnapi = &tp->napi[i];
7626
7627 if (tnapi->tx_ring) {
4bae65c8 7628 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
f77a6a8e
MC
7629 tnapi->tx_ring, tnapi->tx_desc_mapping);
7630 tnapi->tx_ring = NULL;
7631 }
7632
7633 kfree(tnapi->tx_buffers);
7634 tnapi->tx_buffers = NULL;
49a359e3
MC
7635 }
7636}
f77a6a8e 7637
49a359e3
MC
7638static int tg3_mem_tx_acquire(struct tg3 *tp)
7639{
7640 int i;
7641 struct tg3_napi *tnapi = &tp->napi[0];
7642
7643 /* If multivector TSS is enabled, vector 0 does not handle
7644 * tx interrupts. Don't allocate any resources for it.
7645 */
7646 if (tg3_flag(tp, ENABLE_TSS))
7647 tnapi++;
7648
7649 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7650 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7651 TG3_TX_RING_SIZE, GFP_KERNEL);
7652 if (!tnapi->tx_buffers)
7653 goto err_out;
7654
7655 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7656 TG3_TX_RING_BYTES,
7657 &tnapi->tx_desc_mapping,
7658 GFP_KERNEL);
7659 if (!tnapi->tx_ring)
7660 goto err_out;
7661 }
7662
7663 return 0;
7664
7665err_out:
7666 tg3_mem_tx_release(tp);
7667 return -ENOMEM;
7668}
7669
7670static void tg3_mem_rx_release(struct tg3 *tp)
7671{
7672 int i;
7673
7674 for (i = 0; i < tp->irq_max; i++) {
7675 struct tg3_napi *tnapi = &tp->napi[i];
f77a6a8e 7676
8fea32b9
MC
7677 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7678
49a359e3
MC
7679 if (!tnapi->rx_rcb)
7680 continue;
7681
7682 dma_free_coherent(&tp->pdev->dev,
7683 TG3_RX_RCB_RING_BYTES(tp),
7684 tnapi->rx_rcb,
7685 tnapi->rx_rcb_mapping);
7686 tnapi->rx_rcb = NULL;
7687 }
7688}
7689
7690static int tg3_mem_rx_acquire(struct tg3 *tp)
7691{
7692 unsigned int i, limit;
7693
7694 limit = tp->rxq_cnt;
7695
7696 /* If RSS is enabled, we need a (dummy) producer ring
7697 * set on vector zero. This is the true hw prodring.
7698 */
7699 if (tg3_flag(tp, ENABLE_RSS))
7700 limit++;
7701
7702 for (i = 0; i < limit; i++) {
7703 struct tg3_napi *tnapi = &tp->napi[i];
7704
7705 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7706 goto err_out;
7707
7708 /* If multivector RSS is enabled, vector 0
7709 * does not handle rx or tx interrupts.
7710 * Don't allocate any resources for it.
7711 */
7712 if (!i && tg3_flag(tp, ENABLE_RSS))
7713 continue;
7714
7715 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7716 TG3_RX_RCB_RING_BYTES(tp),
7717 &tnapi->rx_rcb_mapping,
7718 GFP_KERNEL);
7719 if (!tnapi->rx_rcb)
7720 goto err_out;
7721
7722 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7723 }
7724
7725 return 0;
7726
7727err_out:
7728 tg3_mem_rx_release(tp);
7729 return -ENOMEM;
7730}
7731
7732/*
7733 * Must not be invoked with interrupt sources disabled and
7734 * the hardware shutdown down.
7735 */
7736static void tg3_free_consistent(struct tg3 *tp)
7737{
7738 int i;
7739
7740 for (i = 0; i < tp->irq_cnt; i++) {
7741 struct tg3_napi *tnapi = &tp->napi[i];
7742
f77a6a8e 7743 if (tnapi->hw_status) {
4bae65c8
MC
7744 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7745 tnapi->hw_status,
7746 tnapi->status_mapping);
f77a6a8e
MC
7747 tnapi->hw_status = NULL;
7748 }
1da177e4 7749 }
f77a6a8e 7750
49a359e3
MC
7751 tg3_mem_rx_release(tp);
7752 tg3_mem_tx_release(tp);
7753
1da177e4 7754 if (tp->hw_stats) {
4bae65c8
MC
7755 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7756 tp->hw_stats, tp->stats_mapping);
1da177e4
LT
7757 tp->hw_stats = NULL;
7758 }
7759}
7760
7761/*
7762 * Must not be invoked with interrupt sources disabled and
7763 * the hardware shutdown down. Can sleep.
7764 */
7765static int tg3_alloc_consistent(struct tg3 *tp)
7766{
f77a6a8e 7767 int i;
898a56f8 7768
4bae65c8
MC
7769 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7770 sizeof(struct tg3_hw_stats),
7771 &tp->stats_mapping,
7772 GFP_KERNEL);
f77a6a8e 7773 if (!tp->hw_stats)
1da177e4
LT
7774 goto err_out;
7775
f77a6a8e 7776 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
1da177e4 7777
f77a6a8e
MC
7778 for (i = 0; i < tp->irq_cnt; i++) {
7779 struct tg3_napi *tnapi = &tp->napi[i];
8d9d7cfc 7780 struct tg3_hw_status *sblk;
1da177e4 7781
4bae65c8
MC
7782 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7783 TG3_HW_STATUS_SIZE,
7784 &tnapi->status_mapping,
7785 GFP_KERNEL);
f77a6a8e
MC
7786 if (!tnapi->hw_status)
7787 goto err_out;
898a56f8 7788
f77a6a8e 7789 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8d9d7cfc
MC
7790 sblk = tnapi->hw_status;
7791
49a359e3 7792 if (tg3_flag(tp, ENABLE_RSS)) {
86449944 7793 u16 *prodptr = NULL;
8fea32b9 7794
49a359e3
MC
7795 /*
7796 * When RSS is enabled, the status block format changes
7797 * slightly. The "rx_jumbo_consumer", "reserved",
7798 * and "rx_mini_consumer" members get mapped to the
7799 * other three rx return ring producer indexes.
7800 */
7801 switch (i) {
7802 case 1:
7803 prodptr = &sblk->idx[0].rx_producer;
7804 break;
7805 case 2:
7806 prodptr = &sblk->rx_jumbo_consumer;
7807 break;
7808 case 3:
7809 prodptr = &sblk->reserved;
7810 break;
7811 case 4:
7812 prodptr = &sblk->rx_mini_consumer;
f891ea16
MC
7813 break;
7814 }
49a359e3
MC
7815 tnapi->rx_rcb_prod_idx = prodptr;
7816 } else {
8d9d7cfc 7817 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8d9d7cfc 7818 }
f77a6a8e 7819 }
1da177e4 7820
49a359e3
MC
7821 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7822 goto err_out;
7823
1da177e4
LT
7824 return 0;
7825
7826err_out:
7827 tg3_free_consistent(tp);
7828 return -ENOMEM;
7829}
7830
7831#define MAX_WAIT_CNT 1000
7832
7833/* To stop a block, clear the enable bit and poll till it
7834 * clears. tp->lock is held.
7835 */
b3b7d6be 7836static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
7837{
7838 unsigned int i;
7839 u32 val;
7840
63c3a66f 7841 if (tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
7842 switch (ofs) {
7843 case RCVLSC_MODE:
7844 case DMAC_MODE:
7845 case MBFREE_MODE:
7846 case BUFMGR_MODE:
7847 case MEMARB_MODE:
7848 /* We can't enable/disable these bits of the
7849 * 5705/5750, just say success.
7850 */
7851 return 0;
7852
7853 default:
7854 break;
855e1111 7855 }
1da177e4
LT
7856 }
7857
7858 val = tr32(ofs);
7859 val &= ~enable_bit;
7860 tw32_f(ofs, val);
7861
7862 for (i = 0; i < MAX_WAIT_CNT; i++) {
7863 udelay(100);
7864 val = tr32(ofs);
7865 if ((val & enable_bit) == 0)
7866 break;
7867 }
7868
b3b7d6be 7869 if (i == MAX_WAIT_CNT && !silent) {
2445e461
MC
7870 dev_err(&tp->pdev->dev,
7871 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7872 ofs, enable_bit);
1da177e4
LT
7873 return -ENODEV;
7874 }
7875
7876 return 0;
7877}
7878
7879/* tp->lock is held. */
b3b7d6be 7880static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
7881{
7882 int i, err;
7883
7884 tg3_disable_ints(tp);
7885
7886 tp->rx_mode &= ~RX_MODE_ENABLE;
7887 tw32_f(MAC_RX_MODE, tp->rx_mode);
7888 udelay(10);
7889
b3b7d6be
DM
7890 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7891 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7892 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7893 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7894 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7895 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7896
7897 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7898 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7899 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7900 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7901 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7902 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7903 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
7904
7905 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7906 tw32_f(MAC_MODE, tp->mac_mode);
7907 udelay(40);
7908
7909 tp->tx_mode &= ~TX_MODE_ENABLE;
7910 tw32_f(MAC_TX_MODE, tp->tx_mode);
7911
7912 for (i = 0; i < MAX_WAIT_CNT; i++) {
7913 udelay(100);
7914 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7915 break;
7916 }
7917 if (i >= MAX_WAIT_CNT) {
ab96b241
MC
7918 dev_err(&tp->pdev->dev,
7919 "%s timed out, TX_MODE_ENABLE will not clear "
7920 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
e6de8ad1 7921 err |= -ENODEV;
1da177e4
LT
7922 }
7923
e6de8ad1 7924 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
7925 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7926 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
7927
7928 tw32(FTQ_RESET, 0xffffffff);
7929 tw32(FTQ_RESET, 0x00000000);
7930
b3b7d6be
DM
7931 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7932 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4 7933
f77a6a8e
MC
7934 for (i = 0; i < tp->irq_cnt; i++) {
7935 struct tg3_napi *tnapi = &tp->napi[i];
7936 if (tnapi->hw_status)
7937 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7938 }
1da177e4 7939
1da177e4
LT
7940 return err;
7941}
7942
ee6a99b5
MC
7943/* Save PCI command register before chip reset */
7944static void tg3_save_pci_state(struct tg3 *tp)
7945{
8a6eac90 7946 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
ee6a99b5
MC
7947}
7948
7949/* Restore PCI state after chip reset */
7950static void tg3_restore_pci_state(struct tg3 *tp)
7951{
7952 u32 val;
7953
7954 /* Re-enable indirect register accesses. */
7955 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7956 tp->misc_host_ctrl);
7957
7958 /* Set MAX PCI retry to zero. */
7959 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7960 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
63c3a66f 7961 tg3_flag(tp, PCIX_MODE))
ee6a99b5 7962 val |= PCISTATE_RETRY_SAME_DMA;
0d3031d9 7963 /* Allow reads and writes to the APE register and memory space. */
63c3a66f 7964 if (tg3_flag(tp, ENABLE_APE))
0d3031d9 7965 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
7966 PCISTATE_ALLOW_APE_SHMEM_WR |
7967 PCISTATE_ALLOW_APE_PSPACE_WR;
ee6a99b5
MC
7968 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7969
8a6eac90 7970 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
ee6a99b5 7971
2c55a3d0
MC
7972 if (!tg3_flag(tp, PCI_EXPRESS)) {
7973 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7974 tp->pci_cacheline_sz);
7975 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7976 tp->pci_lat_timer);
114342f2 7977 }
5f5c51e3 7978
ee6a99b5 7979 /* Make sure PCI-X relaxed ordering bit is clear. */
63c3a66f 7980 if (tg3_flag(tp, PCIX_MODE)) {
9974a356
MC
7981 u16 pcix_cmd;
7982
7983 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7984 &pcix_cmd);
7985 pcix_cmd &= ~PCI_X_CMD_ERO;
7986 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7987 pcix_cmd);
7988 }
ee6a99b5 7989
63c3a66f 7990 if (tg3_flag(tp, 5780_CLASS)) {
ee6a99b5
MC
7991
7992 /* Chip reset on 5780 will reset MSI enable bit,
7993 * so need to restore it.
7994 */
63c3a66f 7995 if (tg3_flag(tp, USING_MSI)) {
ee6a99b5
MC
7996 u16 ctrl;
7997
7998 pci_read_config_word(tp->pdev,
7999 tp->msi_cap + PCI_MSI_FLAGS,
8000 &ctrl);
8001 pci_write_config_word(tp->pdev,
8002 tp->msi_cap + PCI_MSI_FLAGS,
8003 ctrl | PCI_MSI_FLAGS_ENABLE);
8004 val = tr32(MSGINT_MODE);
8005 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8006 }
8007 }
8008}
8009
1da177e4
LT
8010/* tp->lock is held. */
8011static int tg3_chip_reset(struct tg3 *tp)
8012{
8013 u32 val;
1ee582d8 8014 void (*write_op)(struct tg3 *, u32, u32);
4f125f42 8015 int i, err;
1da177e4 8016
f49639e6
DM
8017 tg3_nvram_lock(tp);
8018
77b483f1
MC
8019 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8020
f49639e6
DM
8021 /* No matching tg3_nvram_unlock() after this because
8022 * chip reset below will undo the nvram lock.
8023 */
8024 tp->nvram_lock_cnt = 0;
1da177e4 8025
ee6a99b5
MC
8026 /* GRC_MISC_CFG core clock reset will clear the memory
8027 * enable bit in PCI register 4 and the MSI enable bit
8028 * on some chips, so we save relevant registers here.
8029 */
8030 tg3_save_pci_state(tp);
8031
d9ab5ad1 8032 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
63c3a66f 8033 tg3_flag(tp, 5755_PLUS))
d9ab5ad1
MC
8034 tw32(GRC_FASTBOOT_PC, 0);
8035
1da177e4
LT
8036 /*
8037 * We must avoid the readl() that normally takes place.
8038 * It locks machines, causes machine checks, and other
8039 * fun things. So, temporarily disable the 5701
8040 * hardware workaround, while we do the reset.
8041 */
1ee582d8
MC
8042 write_op = tp->write32;
8043 if (write_op == tg3_write_flush_reg32)
8044 tp->write32 = tg3_write32;
1da177e4 8045
d18edcb2
MC
8046 /* Prevent the irq handler from reading or writing PCI registers
8047 * during chip reset when the memory enable bit in the PCI command
8048 * register may be cleared. The chip does not generate interrupt
8049 * at this time, but the irq handler may still be called due to irq
8050 * sharing or irqpoll.
8051 */
63c3a66f 8052 tg3_flag_set(tp, CHIP_RESETTING);
f77a6a8e
MC
8053 for (i = 0; i < tp->irq_cnt; i++) {
8054 struct tg3_napi *tnapi = &tp->napi[i];
8055 if (tnapi->hw_status) {
8056 tnapi->hw_status->status = 0;
8057 tnapi->hw_status->status_tag = 0;
8058 }
8059 tnapi->last_tag = 0;
8060 tnapi->last_irq_tag = 0;
b8fa2f3a 8061 }
d18edcb2 8062 smp_mb();
4f125f42
MC
8063
8064 for (i = 0; i < tp->irq_cnt; i++)
8065 synchronize_irq(tp->napi[i].irq_vec);
d18edcb2 8066
255ca311
MC
8067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8068 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8069 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8070 }
8071
1da177e4
LT
8072 /* do the reset */
8073 val = GRC_MISC_CFG_CORECLK_RESET;
8074
63c3a66f 8075 if (tg3_flag(tp, PCI_EXPRESS)) {
88075d91
MC
8076 /* Force PCIe 1.0a mode */
8077 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 8078 !tg3_flag(tp, 57765_PLUS) &&
88075d91
MC
8079 tr32(TG3_PCIE_PHY_TSTCTL) ==
8080 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8081 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8082
1da177e4
LT
8083 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8084 tw32(GRC_MISC_CFG, (1 << 29));
8085 val |= (1 << 29);
8086 }
8087 }
8088
b5d3772c
MC
8089 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8090 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8091 tw32(GRC_VCPU_EXT_CTRL,
8092 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8093 }
8094
f37500d3 8095 /* Manage gphy power for all CPMU absent PCIe devices. */
63c3a66f 8096 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
1da177e4 8097 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
f37500d3 8098
1da177e4
LT
8099 tw32(GRC_MISC_CFG, val);
8100
1ee582d8
MC
8101 /* restore 5701 hardware bug workaround write method */
8102 tp->write32 = write_op;
1da177e4
LT
8103
8104 /* Unfortunately, we have to delay before the PCI read back.
8105 * Some 575X chips even will not respond to a PCI cfg access
8106 * when the reset command is given to the chip.
8107 *
8108 * How do these hardware designers expect things to work
8109 * properly if the PCI write is posted for a long period
8110 * of time? It is always necessary to have some method by
8111 * which a register read back can occur to push the write
8112 * out which does the reset.
8113 *
8114 * For most tg3 variants the trick below was working.
8115 * Ho hum...
8116 */
8117 udelay(120);
8118
8119 /* Flush PCI posted writes. The normal MMIO registers
8120 * are inaccessible at this time so this is the only
8121 * way to make this reliably (actually, this is no longer
8122 * the case, see above). I tried to use indirect
8123 * register read/write but this upset some 5701 variants.
8124 */
8125 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8126
8127 udelay(120);
8128
0f49bfbd 8129 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
e7126997
MC
8130 u16 val16;
8131
1da177e4 8132 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
86449944 8133 int j;
1da177e4
LT
8134 u32 cfg_val;
8135
8136 /* Wait for link training to complete. */
86449944 8137 for (j = 0; j < 5000; j++)
1da177e4
LT
8138 udelay(100);
8139
8140 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8141 pci_write_config_dword(tp->pdev, 0xc4,
8142 cfg_val | (1 << 15));
8143 }
5e7dfd0f 8144
e7126997 8145 /* Clear the "no snoop" and "relaxed ordering" bits. */
0f49bfbd 8146 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
e7126997
MC
8147 /*
8148 * Older PCIe devices only support the 128 byte
8149 * MPS setting. Enforce the restriction.
5e7dfd0f 8150 */
63c3a66f 8151 if (!tg3_flag(tp, CPMU_PRESENT))
0f49bfbd
JL
8152 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8153 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
5e7dfd0f 8154
5e7dfd0f 8155 /* Clear error status */
0f49bfbd 8156 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
5e7dfd0f
MC
8157 PCI_EXP_DEVSTA_CED |
8158 PCI_EXP_DEVSTA_NFED |
8159 PCI_EXP_DEVSTA_FED |
8160 PCI_EXP_DEVSTA_URD);
1da177e4
LT
8161 }
8162
ee6a99b5 8163 tg3_restore_pci_state(tp);
1da177e4 8164
63c3a66f
JP
8165 tg3_flag_clear(tp, CHIP_RESETTING);
8166 tg3_flag_clear(tp, ERROR_PROCESSED);
d18edcb2 8167
ee6a99b5 8168 val = 0;
63c3a66f 8169 if (tg3_flag(tp, 5780_CLASS))
4cf78e4f 8170 val = tr32(MEMARB_MODE);
ee6a99b5 8171 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1da177e4
LT
8172
8173 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8174 tg3_stop_fw(tp);
8175 tw32(0x5000, 0x400);
8176 }
8177
8178 tw32(GRC_MODE, tp->grc_mode);
8179
8180 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
ab0049b4 8181 val = tr32(0xc4);
1da177e4
LT
8182
8183 tw32(0xc4, val | (1 << 15));
8184 }
8185
8186 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8187 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8188 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8189 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8190 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8191 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8192 }
8193
f07e9af3 8194 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9e975cc2 8195 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
d2394e6b 8196 val = tp->mac_mode;
f07e9af3 8197 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9e975cc2 8198 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
d2394e6b 8199 val = tp->mac_mode;
1da177e4 8200 } else
d2394e6b
MC
8201 val = 0;
8202
8203 tw32_f(MAC_MODE, val);
1da177e4
LT
8204 udelay(40);
8205
77b483f1
MC
8206 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8207
7a6f4369
MC
8208 err = tg3_poll_fw(tp);
8209 if (err)
8210 return err;
1da177e4 8211
0a9140cf
MC
8212 tg3_mdio_start(tp);
8213
63c3a66f 8214 if (tg3_flag(tp, PCI_EXPRESS) &&
f6eb9b1f
MC
8215 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8216 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 8217 !tg3_flag(tp, 57765_PLUS)) {
ab0049b4 8218 val = tr32(0x7c00);
1da177e4
LT
8219
8220 tw32(0x7c00, val | (1 << 25));
8221 }
8222
d78b59f5
MC
8223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8224 val = tr32(TG3_CPMU_CLCK_ORIDE);
8225 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8226 }
8227
1da177e4 8228 /* Reprobe ASF enable state. */
63c3a66f
JP
8229 tg3_flag_clear(tp, ENABLE_ASF);
8230 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
1da177e4
LT
8231 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8232 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8233 u32 nic_cfg;
8234
8235 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8236 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
63c3a66f 8237 tg3_flag_set(tp, ENABLE_ASF);
4ba526ce 8238 tp->last_event_jiffies = jiffies;
63c3a66f
JP
8239 if (tg3_flag(tp, 5750_PLUS))
8240 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
1da177e4
LT
8241 }
8242 }
8243
8244 return 0;
8245}
8246
65ec698d
MC
8247static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8248static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
92feeabf 8249
1da177e4 8250/* tp->lock is held. */
944d980e 8251static int tg3_halt(struct tg3 *tp, int kind, int silent)
1da177e4
LT
8252{
8253 int err;
8254
8255 tg3_stop_fw(tp);
8256
944d980e 8257 tg3_write_sig_pre_reset(tp, kind);
1da177e4 8258
b3b7d6be 8259 tg3_abort_hw(tp, silent);
1da177e4
LT
8260 err = tg3_chip_reset(tp);
8261
daba2a63
MC
8262 __tg3_set_mac_addr(tp, 0);
8263
944d980e
MC
8264 tg3_write_sig_legacy(tp, kind);
8265 tg3_write_sig_post_reset(tp, kind);
1da177e4 8266
92feeabf
MC
8267 if (tp->hw_stats) {
8268 /* Save the stats across chip resets... */
b4017c53 8269 tg3_get_nstats(tp, &tp->net_stats_prev);
92feeabf
MC
8270 tg3_get_estats(tp, &tp->estats_prev);
8271
8272 /* And make sure the next sample is new data */
8273 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8274 }
8275
1da177e4
LT
8276 if (err)
8277 return err;
8278
8279 return 0;
8280}
8281
1da177e4
LT
8282static int tg3_set_mac_addr(struct net_device *dev, void *p)
8283{
8284 struct tg3 *tp = netdev_priv(dev);
8285 struct sockaddr *addr = p;
986e0aeb 8286 int err = 0, skip_mac_1 = 0;
1da177e4 8287
f9804ddb 8288 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 8289 return -EADDRNOTAVAIL;
f9804ddb 8290
1da177e4
LT
8291 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8292
e75f7c90
MC
8293 if (!netif_running(dev))
8294 return 0;
8295
63c3a66f 8296 if (tg3_flag(tp, ENABLE_ASF)) {
986e0aeb 8297 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 8298
986e0aeb
MC
8299 addr0_high = tr32(MAC_ADDR_0_HIGH);
8300 addr0_low = tr32(MAC_ADDR_0_LOW);
8301 addr1_high = tr32(MAC_ADDR_1_HIGH);
8302 addr1_low = tr32(MAC_ADDR_1_LOW);
8303
8304 /* Skip MAC addr 1 if ASF is using it. */
8305 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8306 !(addr1_high == 0 && addr1_low == 0))
8307 skip_mac_1 = 1;
58712ef9 8308 }
986e0aeb
MC
8309 spin_lock_bh(&tp->lock);
8310 __tg3_set_mac_addr(tp, skip_mac_1);
8311 spin_unlock_bh(&tp->lock);
1da177e4 8312
b9ec6c1b 8313 return err;
1da177e4
LT
8314}
8315
8316/* tp->lock is held. */
8317static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8318 dma_addr_t mapping, u32 maxlen_flags,
8319 u32 nic_addr)
8320{
8321 tg3_write_mem(tp,
8322 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8323 ((u64) mapping >> 32));
8324 tg3_write_mem(tp,
8325 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8326 ((u64) mapping & 0xffffffff));
8327 tg3_write_mem(tp,
8328 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8329 maxlen_flags);
8330
63c3a66f 8331 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
8332 tg3_write_mem(tp,
8333 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8334 nic_addr);
8335}
8336
a489b6d9
MC
8337
8338static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d 8339{
a489b6d9 8340 int i = 0;
b6080e12 8341
63c3a66f 8342 if (!tg3_flag(tp, ENABLE_TSS)) {
b6080e12
MC
8343 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8344 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8345 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
b6080e12
MC
8346 } else {
8347 tw32(HOSTCC_TXCOL_TICKS, 0);
8348 tw32(HOSTCC_TXMAX_FRAMES, 0);
8349 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
a489b6d9
MC
8350
8351 for (; i < tp->txq_cnt; i++) {
8352 u32 reg;
8353
8354 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8355 tw32(reg, ec->tx_coalesce_usecs);
8356 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8357 tw32(reg, ec->tx_max_coalesced_frames);
8358 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8359 tw32(reg, ec->tx_max_coalesced_frames_irq);
8360 }
19cfaecc 8361 }
b6080e12 8362
a489b6d9
MC
8363 for (; i < tp->irq_max - 1; i++) {
8364 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8365 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8366 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8367 }
8368}
8369
8370static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8371{
8372 int i = 0;
8373 u32 limit = tp->rxq_cnt;
8374
63c3a66f 8375 if (!tg3_flag(tp, ENABLE_RSS)) {
19cfaecc
MC
8376 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8377 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8378 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
a489b6d9 8379 limit--;
19cfaecc 8380 } else {
b6080e12
MC
8381 tw32(HOSTCC_RXCOL_TICKS, 0);
8382 tw32(HOSTCC_RXMAX_FRAMES, 0);
8383 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
15f9850d 8384 }
b6080e12 8385
a489b6d9 8386 for (; i < limit; i++) {
b6080e12
MC
8387 u32 reg;
8388
8389 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8390 tw32(reg, ec->rx_coalesce_usecs);
b6080e12
MC
8391 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8392 tw32(reg, ec->rx_max_coalesced_frames);
b6080e12
MC
8393 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8394 tw32(reg, ec->rx_max_coalesced_frames_irq);
b6080e12
MC
8395 }
8396
8397 for (; i < tp->irq_max - 1; i++) {
8398 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
b6080e12 8399 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
b6080e12 8400 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
a489b6d9
MC
8401 }
8402}
19cfaecc 8403
a489b6d9
MC
8404static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8405{
8406 tg3_coal_tx_init(tp, ec);
8407 tg3_coal_rx_init(tp, ec);
8408
8409 if (!tg3_flag(tp, 5705_PLUS)) {
8410 u32 val = ec->stats_block_coalesce_usecs;
8411
8412 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8413 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8414
8415 if (!netif_carrier_ok(tp->dev))
8416 val = 0;
8417
8418 tw32(HOSTCC_STAT_COAL_TICKS, val);
b6080e12 8419 }
15f9850d 8420}
1da177e4 8421
2d31ecaf
MC
8422/* tp->lock is held. */
8423static void tg3_rings_reset(struct tg3 *tp)
8424{
8425 int i;
f77a6a8e 8426 u32 stblk, txrcb, rxrcb, limit;
2d31ecaf
MC
8427 struct tg3_napi *tnapi = &tp->napi[0];
8428
8429 /* Disable all transmit rings but the first. */
63c3a66f 8430 if (!tg3_flag(tp, 5705_PLUS))
2d31ecaf 8431 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
63c3a66f 8432 else if (tg3_flag(tp, 5717_PLUS))
3d37728b 8433 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
55086ad9 8434 else if (tg3_flag(tp, 57765_CLASS))
b703df6f 8435 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
2d31ecaf
MC
8436 else
8437 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8438
8439 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8440 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8441 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8442 BDINFO_FLAGS_DISABLED);
8443
8444
8445 /* Disable all receive return rings but the first. */
63c3a66f 8446 if (tg3_flag(tp, 5717_PLUS))
f6eb9b1f 8447 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
63c3a66f 8448 else if (!tg3_flag(tp, 5705_PLUS))
2d31ecaf 8449 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
b703df6f 8450 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
55086ad9 8451 tg3_flag(tp, 57765_CLASS))
2d31ecaf
MC
8452 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8453 else
8454 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8455
8456 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8457 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8458 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8459 BDINFO_FLAGS_DISABLED);
8460
8461 /* Disable interrupts */
8462 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
0e6cf6a9
MC
8463 tp->napi[0].chk_msi_cnt = 0;
8464 tp->napi[0].last_rx_cons = 0;
8465 tp->napi[0].last_tx_cons = 0;
2d31ecaf
MC
8466
8467 /* Zero mailbox registers. */
63c3a66f 8468 if (tg3_flag(tp, SUPPORT_MSIX)) {
6fd45cb8 8469 for (i = 1; i < tp->irq_max; i++) {
f77a6a8e
MC
8470 tp->napi[i].tx_prod = 0;
8471 tp->napi[i].tx_cons = 0;
63c3a66f 8472 if (tg3_flag(tp, ENABLE_TSS))
c2353a32 8473 tw32_mailbox(tp->napi[i].prodmbox, 0);
f77a6a8e
MC
8474 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8475 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7f230735 8476 tp->napi[i].chk_msi_cnt = 0;
0e6cf6a9
MC
8477 tp->napi[i].last_rx_cons = 0;
8478 tp->napi[i].last_tx_cons = 0;
f77a6a8e 8479 }
63c3a66f 8480 if (!tg3_flag(tp, ENABLE_TSS))
c2353a32 8481 tw32_mailbox(tp->napi[0].prodmbox, 0);
f77a6a8e
MC
8482 } else {
8483 tp->napi[0].tx_prod = 0;
8484 tp->napi[0].tx_cons = 0;
8485 tw32_mailbox(tp->napi[0].prodmbox, 0);
8486 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8487 }
2d31ecaf
MC
8488
8489 /* Make sure the NIC-based send BD rings are disabled. */
63c3a66f 8490 if (!tg3_flag(tp, 5705_PLUS)) {
2d31ecaf
MC
8491 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8492 for (i = 0; i < 16; i++)
8493 tw32_tx_mbox(mbox + i * 8, 0);
8494 }
8495
8496 txrcb = NIC_SRAM_SEND_RCB;
8497 rxrcb = NIC_SRAM_RCV_RET_RCB;
8498
8499 /* Clear status block in ram. */
8500 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8501
8502 /* Set status block DMA address */
8503 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8504 ((u64) tnapi->status_mapping >> 32));
8505 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8506 ((u64) tnapi->status_mapping & 0xffffffff));
8507
f77a6a8e
MC
8508 if (tnapi->tx_ring) {
8509 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8510 (TG3_TX_RING_SIZE <<
8511 BDINFO_FLAGS_MAXLEN_SHIFT),
8512 NIC_SRAM_TX_BUFFER_DESC);
8513 txrcb += TG3_BDINFO_SIZE;
8514 }
8515
8516 if (tnapi->rx_rcb) {
8517 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7cb32cf2
MC
8518 (tp->rx_ret_ring_mask + 1) <<
8519 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
f77a6a8e
MC
8520 rxrcb += TG3_BDINFO_SIZE;
8521 }
8522
8523 stblk = HOSTCC_STATBLCK_RING1;
2d31ecaf 8524
f77a6a8e
MC
8525 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8526 u64 mapping = (u64)tnapi->status_mapping;
8527 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8528 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8529
8530 /* Clear status block in ram. */
8531 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8532
19cfaecc
MC
8533 if (tnapi->tx_ring) {
8534 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8535 (TG3_TX_RING_SIZE <<
8536 BDINFO_FLAGS_MAXLEN_SHIFT),
8537 NIC_SRAM_TX_BUFFER_DESC);
8538 txrcb += TG3_BDINFO_SIZE;
8539 }
f77a6a8e
MC
8540
8541 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7cb32cf2 8542 ((tp->rx_ret_ring_mask + 1) <<
f77a6a8e
MC
8543 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8544
8545 stblk += 8;
f77a6a8e
MC
8546 rxrcb += TG3_BDINFO_SIZE;
8547 }
2d31ecaf
MC
8548}
8549
eb07a940
MC
8550static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8551{
8552 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8553
63c3a66f
JP
8554 if (!tg3_flag(tp, 5750_PLUS) ||
8555 tg3_flag(tp, 5780_CLASS) ||
eb07a940 8556 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
513aa6ea
MC
8557 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8558 tg3_flag(tp, 57765_PLUS))
eb07a940
MC
8559 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8560 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8561 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8562 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8563 else
8564 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8565
8566 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8567 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8568
8569 val = min(nic_rep_thresh, host_rep_thresh);
8570 tw32(RCVBDI_STD_THRESH, val);
8571
63c3a66f 8572 if (tg3_flag(tp, 57765_PLUS))
eb07a940
MC
8573 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8574
63c3a66f 8575 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
eb07a940
MC
8576 return;
8577
513aa6ea 8578 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
eb07a940
MC
8579
8580 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8581
8582 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8583 tw32(RCVBDI_JUMBO_THRESH, val);
8584
63c3a66f 8585 if (tg3_flag(tp, 57765_PLUS))
eb07a940
MC
8586 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8587}
8588
ccd5ba9d
MC
8589static inline u32 calc_crc(unsigned char *buf, int len)
8590{
8591 u32 reg;
8592 u32 tmp;
8593 int j, k;
8594
8595 reg = 0xffffffff;
8596
8597 for (j = 0; j < len; j++) {
8598 reg ^= buf[j];
8599
8600 for (k = 0; k < 8; k++) {
8601 tmp = reg & 0x01;
8602
8603 reg >>= 1;
8604
8605 if (tmp)
8606 reg ^= 0xedb88320;
8607 }
8608 }
8609
8610 return ~reg;
8611}
8612
8613static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8614{
8615 /* accept or reject all multicast frames */
8616 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8617 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8618 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8619 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8620}
8621
8622static void __tg3_set_rx_mode(struct net_device *dev)
8623{
8624 struct tg3 *tp = netdev_priv(dev);
8625 u32 rx_mode;
8626
8627 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8628 RX_MODE_KEEP_VLAN_TAG);
8629
8630#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8631 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8632 * flag clear.
8633 */
8634 if (!tg3_flag(tp, ENABLE_ASF))
8635 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8636#endif
8637
8638 if (dev->flags & IFF_PROMISC) {
8639 /* Promiscuous mode. */
8640 rx_mode |= RX_MODE_PROMISC;
8641 } else if (dev->flags & IFF_ALLMULTI) {
8642 /* Accept all multicast. */
8643 tg3_set_multi(tp, 1);
8644 } else if (netdev_mc_empty(dev)) {
8645 /* Reject all multicast. */
8646 tg3_set_multi(tp, 0);
8647 } else {
8648 /* Accept one or more multicast(s). */
8649 struct netdev_hw_addr *ha;
8650 u32 mc_filter[4] = { 0, };
8651 u32 regidx;
8652 u32 bit;
8653 u32 crc;
8654
8655 netdev_for_each_mc_addr(ha, dev) {
8656 crc = calc_crc(ha->addr, ETH_ALEN);
8657 bit = ~crc & 0x7f;
8658 regidx = (bit & 0x60) >> 5;
8659 bit &= 0x1f;
8660 mc_filter[regidx] |= (1 << bit);
8661 }
8662
8663 tw32(MAC_HASH_REG_0, mc_filter[0]);
8664 tw32(MAC_HASH_REG_1, mc_filter[1]);
8665 tw32(MAC_HASH_REG_2, mc_filter[2]);
8666 tw32(MAC_HASH_REG_3, mc_filter[3]);
8667 }
8668
8669 if (rx_mode != tp->rx_mode) {
8670 tp->rx_mode = rx_mode;
8671 tw32_f(MAC_RX_MODE, rx_mode);
8672 udelay(10);
8673 }
8674}
8675
9102426a 8676static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
90415477
MC
8677{
8678 int i;
8679
8680 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9102426a 8681 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
90415477
MC
8682}
8683
8684static void tg3_rss_check_indir_tbl(struct tg3 *tp)
bcebcc46
MC
8685{
8686 int i;
8687
8688 if (!tg3_flag(tp, SUPPORT_MSIX))
8689 return;
8690
90415477 8691 if (tp->irq_cnt <= 2) {
bcebcc46 8692 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
90415477
MC
8693 return;
8694 }
8695
8696 /* Validate table against current IRQ count */
8697 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8698 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8699 break;
8700 }
8701
8702 if (i != TG3_RSS_INDIR_TBL_SIZE)
9102426a 8703 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
bcebcc46
MC
8704}
8705
90415477 8706static void tg3_rss_write_indir_tbl(struct tg3 *tp)
bcebcc46
MC
8707{
8708 int i = 0;
8709 u32 reg = MAC_RSS_INDIR_TBL_0;
8710
8711 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8712 u32 val = tp->rss_ind_tbl[i];
8713 i++;
8714 for (; i % 8; i++) {
8715 val <<= 4;
8716 val |= tp->rss_ind_tbl[i];
8717 }
8718 tw32(reg, val);
8719 reg += 4;
8720 }
8721}
8722
1da177e4 8723/* tp->lock is held. */
8e7a22e3 8724static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
8725{
8726 u32 val, rdmac_mode;
8727 int i, err, limit;
8fea32b9 8728 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
1da177e4
LT
8729
8730 tg3_disable_ints(tp);
8731
8732 tg3_stop_fw(tp);
8733
8734 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8735
63c3a66f 8736 if (tg3_flag(tp, INIT_COMPLETE))
e6de8ad1 8737 tg3_abort_hw(tp, 1);
1da177e4 8738
699c0193
MC
8739 /* Enable MAC control of LPI */
8740 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8741 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8742 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8743 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8744
8745 tw32_f(TG3_CPMU_EEE_CTRL,
8746 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8747
a386b901
MC
8748 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8749 TG3_CPMU_EEEMD_LPI_IN_TX |
8750 TG3_CPMU_EEEMD_LPI_IN_RX |
8751 TG3_CPMU_EEEMD_EEE_ENABLE;
8752
8753 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8754 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8755
63c3a66f 8756 if (tg3_flag(tp, ENABLE_APE))
a386b901
MC
8757 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8758
8759 tw32_f(TG3_CPMU_EEE_MODE, val);
8760
8761 tw32_f(TG3_CPMU_EEE_DBTMR1,
8762 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8763 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8764
8765 tw32_f(TG3_CPMU_EEE_DBTMR2,
d7f2ab20 8766 TG3_CPMU_DBTMR2_APE_TX_2047US |
a386b901 8767 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
699c0193
MC
8768 }
8769
603f1173 8770 if (reset_phy)
d4d2c558
MC
8771 tg3_phy_reset(tp);
8772
1da177e4
LT
8773 err = tg3_chip_reset(tp);
8774 if (err)
8775 return err;
8776
8777 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8778
bcb37f6c 8779 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
d30cdd28
MC
8780 val = tr32(TG3_CPMU_CTRL);
8781 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8782 tw32(TG3_CPMU_CTRL, val);
9acb961e
MC
8783
8784 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8785 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8786 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8787 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8788
8789 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8790 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8791 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8792 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8793
8794 val = tr32(TG3_CPMU_HST_ACC);
8795 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8796 val |= CPMU_HST_ACC_MACCLK_6_25;
8797 tw32(TG3_CPMU_HST_ACC, val);
d30cdd28
MC
8798 }
8799
33466d93
MC
8800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8801 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8802 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8803 PCIE_PWR_MGMT_L1_THRESH_4MS;
8804 tw32(PCIE_PWR_MGMT_THRESH, val);
521e6b90
MC
8805
8806 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8807 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8808
8809 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
33466d93 8810
f40386c8
MC
8811 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8812 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
255ca311
MC
8813 }
8814
63c3a66f 8815 if (tg3_flag(tp, L1PLLPD_EN)) {
614b0590
MC
8816 u32 grc_mode = tr32(GRC_MODE);
8817
8818 /* Access the lower 1K of PL PCIE block registers. */
8819 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8820 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8821
8822 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8823 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8824 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8825
8826 tw32(GRC_MODE, grc_mode);
8827 }
8828
55086ad9 8829 if (tg3_flag(tp, 57765_CLASS)) {
5093eedc
MC
8830 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8831 u32 grc_mode = tr32(GRC_MODE);
cea46462 8832
5093eedc
MC
8833 /* Access the lower 1K of PL PCIE block registers. */
8834 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8835 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
cea46462 8836
5093eedc
MC
8837 val = tr32(TG3_PCIE_TLDLPL_PORT +
8838 TG3_PCIE_PL_LO_PHYCTL5);
8839 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8840 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
cea46462 8841
5093eedc
MC
8842 tw32(GRC_MODE, grc_mode);
8843 }
a977dbe8 8844
1ff30a59
MC
8845 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8846 u32 grc_mode = tr32(GRC_MODE);
8847
8848 /* Access the lower 1K of DL PCIE block registers. */
8849 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8850 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8851
8852 val = tr32(TG3_PCIE_TLDLPL_PORT +
8853 TG3_PCIE_DL_LO_FTSMAX);
8854 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8855 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8856 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8857
8858 tw32(GRC_MODE, grc_mode);
8859 }
8860
a977dbe8
MC
8861 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8862 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8863 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8864 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
cea46462
MC
8865 }
8866
1da177e4
LT
8867 /* This works around an issue with Athlon chipsets on
8868 * B3 tigon3 silicon. This bit has no effect on any
8869 * other revision. But do not set this on PCI Express
795d01c5 8870 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 8871 */
63c3a66f
JP
8872 if (!tg3_flag(tp, CPMU_PRESENT)) {
8873 if (!tg3_flag(tp, PCI_EXPRESS))
795d01c5
MC
8874 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8875 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8876 }
1da177e4
LT
8877
8878 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
63c3a66f 8879 tg3_flag(tp, PCIX_MODE)) {
1da177e4
LT
8880 val = tr32(TG3PCI_PCISTATE);
8881 val |= PCISTATE_RETRY_SAME_DMA;
8882 tw32(TG3PCI_PCISTATE, val);
8883 }
8884
63c3a66f 8885 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
8886 /* Allow reads and writes to the
8887 * APE register and memory space.
8888 */
8889 val = tr32(TG3PCI_PCISTATE);
8890 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
8891 PCISTATE_ALLOW_APE_SHMEM_WR |
8892 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
8893 tw32(TG3PCI_PCISTATE, val);
8894 }
8895
1da177e4
LT
8896 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8897 /* Enable some hw fixes. */
8898 val = tr32(TG3PCI_MSI_DATA);
8899 val |= (1 << 26) | (1 << 28) | (1 << 29);
8900 tw32(TG3PCI_MSI_DATA, val);
8901 }
8902
8903 /* Descriptor ring init may make accesses to the
8904 * NIC SRAM area to setup the TX descriptors, so we
8905 * can only do this after the hardware has been
8906 * successfully reset.
8907 */
32d8c572
MC
8908 err = tg3_init_rings(tp);
8909 if (err)
8910 return err;
1da177e4 8911
63c3a66f 8912 if (tg3_flag(tp, 57765_PLUS)) {
cbf9ca6c
MC
8913 val = tr32(TG3PCI_DMA_RW_CTRL) &
8914 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
1a319025
MC
8915 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8916 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
55086ad9 8917 if (!tg3_flag(tp, 57765_CLASS) &&
0aebff48
MC
8918 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8919 val |= DMA_RWCTRL_TAGGED_STAT_WA;
cbf9ca6c
MC
8920 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8921 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8922 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
8923 /* This value is determined during the probe time DMA
8924 * engine test, tg3_test_dma.
8925 */
8926 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8927 }
1da177e4
LT
8928
8929 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8930 GRC_MODE_4X_NIC_SEND_RINGS |
8931 GRC_MODE_NO_TX_PHDR_CSUM |
8932 GRC_MODE_NO_RX_PHDR_CSUM);
8933 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
8934
8935 /* Pseudo-header checksum is done by hardware logic and not
8936 * the offload processers, so make the chip do the pseudo-
8937 * header checksums on receive. For transmit it is more
8938 * convenient to do the pseudo-header checksum in software
8939 * as Linux does that on transmit for us in all cases.
8940 */
8941 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
8942
8943 tw32(GRC_MODE,
8944 tp->grc_mode |
8945 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8946
8947 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8948 val = tr32(GRC_MISC_CFG);
8949 val &= ~0xff;
8950 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8951 tw32(GRC_MISC_CFG, val);
8952
8953 /* Initialize MBUF/DESC pool. */
63c3a66f 8954 if (tg3_flag(tp, 5750_PLUS)) {
1da177e4
LT
8955 /* Do nothing. */
8956 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8957 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8959 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8960 else
8961 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8962 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8963 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
63c3a66f 8964 } else if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
8965 int fw_len;
8966
077f849d 8967 fw_len = tp->fw_len;
1da177e4
LT
8968 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8969 tw32(BUFMGR_MB_POOL_ADDR,
8970 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8971 tw32(BUFMGR_MB_POOL_SIZE,
8972 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8973 }
1da177e4 8974
0f893dc6 8975 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
8976 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8977 tp->bufmgr_config.mbuf_read_dma_low_water);
8978 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8979 tp->bufmgr_config.mbuf_mac_rx_low_water);
8980 tw32(BUFMGR_MB_HIGH_WATER,
8981 tp->bufmgr_config.mbuf_high_water);
8982 } else {
8983 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8984 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8985 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8986 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8987 tw32(BUFMGR_MB_HIGH_WATER,
8988 tp->bufmgr_config.mbuf_high_water_jumbo);
8989 }
8990 tw32(BUFMGR_DMA_LOW_WATER,
8991 tp->bufmgr_config.dma_low_water);
8992 tw32(BUFMGR_DMA_HIGH_WATER,
8993 tp->bufmgr_config.dma_high_water);
8994
d309a46e
MC
8995 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8997 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
4d958473
MC
8998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8999 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9000 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9001 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
d309a46e 9002 tw32(BUFMGR_MODE, val);
1da177e4
LT
9003 for (i = 0; i < 2000; i++) {
9004 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9005 break;
9006 udelay(10);
9007 }
9008 if (i >= 2000) {
05dbe005 9009 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
1da177e4
LT
9010 return -ENODEV;
9011 }
9012
eb07a940
MC
9013 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9014 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
b5d3772c 9015
eb07a940 9016 tg3_setup_rxbd_thresholds(tp);
1da177e4
LT
9017
9018 /* Initialize TG3_BDINFO's at:
9019 * RCVDBDI_STD_BD: standard eth size rx ring
9020 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9021 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9022 *
9023 * like so:
9024 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9025 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9026 * ring attribute flags
9027 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9028 *
9029 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9030 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9031 *
9032 * The size of each ring is fixed in the firmware, but the location is
9033 * configurable.
9034 */
9035 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 9036 ((u64) tpr->rx_std_mapping >> 32));
1da177e4 9037 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 9038 ((u64) tpr->rx_std_mapping & 0xffffffff));
63c3a66f 9039 if (!tg3_flag(tp, 5717_PLUS))
87668d35
MC
9040 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9041 NIC_SRAM_RX_BUFFER_DESC);
1da177e4 9042
fdb72b38 9043 /* Disable the mini ring */
63c3a66f 9044 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
9045 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9046 BDINFO_FLAGS_DISABLED);
9047
fdb72b38
MC
9048 /* Program the jumbo buffer descriptor ring control
9049 * blocks on those devices that have them.
9050 */
a0512944 9051 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
63c3a66f 9052 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
1da177e4 9053
63c3a66f 9054 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
1da177e4 9055 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 9056 ((u64) tpr->rx_jmb_mapping >> 32));
1da177e4 9057 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 9058 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
de9f5230
MC
9059 val = TG3_RX_JMB_RING_SIZE(tp) <<
9060 BDINFO_FLAGS_MAXLEN_SHIFT;
1da177e4 9061 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
de9f5230 9062 val | BDINFO_FLAGS_USE_EXT_RECV);
63c3a66f 9063 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
55086ad9 9064 tg3_flag(tp, 57765_CLASS))
87668d35
MC
9065 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9066 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
1da177e4
LT
9067 } else {
9068 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9069 BDINFO_FLAGS_DISABLED);
9070 }
9071
63c3a66f 9072 if (tg3_flag(tp, 57765_PLUS)) {
fa6b2aae 9073 val = TG3_RX_STD_RING_SIZE(tp);
7cb32cf2
MC
9074 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9075 val |= (TG3_RX_STD_DMA_SZ << 2);
9076 } else
04380d40 9077 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38 9078 } else
de9f5230 9079 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38
MC
9080
9081 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
1da177e4 9082
411da640 9083 tpr->rx_std_prod_idx = tp->rx_pending;
66711e66 9084 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
1da177e4 9085
63c3a66f
JP
9086 tpr->rx_jmb_prod_idx =
9087 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
66711e66 9088 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
1da177e4 9089
2d31ecaf
MC
9090 tg3_rings_reset(tp);
9091
1da177e4 9092 /* Initialize MAC address and backoff seed. */
986e0aeb 9093 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
9094
9095 /* MTU + ethernet header + FCS + optional VLAN tag */
f7b493e0
MC
9096 tw32(MAC_RX_MTU_SIZE,
9097 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
1da177e4
LT
9098
9099 /* The slot time is changed by tg3_setup_phy if we
9100 * run at gigabit with half duplex.
9101 */
f2096f94
MC
9102 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9103 (6 << TX_LENGTHS_IPG_SHIFT) |
9104 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9105
9106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9107 val |= tr32(MAC_TX_LENGTHS) &
9108 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9109 TX_LENGTHS_CNT_DWN_VAL_MSK);
9110
9111 tw32(MAC_TX_LENGTHS, val);
1da177e4
LT
9112
9113 /* Receive rules. */
9114 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9115 tw32(RCVLPC_CONFIG, 0x0181);
9116
9117 /* Calculate RDMAC_MODE setting early, we need it to determine
9118 * the RCVLPC_STATE_ENABLE mask.
9119 */
9120 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9121 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9122 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9123 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9124 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 9125
deabaac8 9126 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
0339e4e3
MC
9127 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9128
57e6983c 9129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0
MC
9130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
d30cdd28
MC
9132 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9133 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9134 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9135
c5908939
MC
9136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9137 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 9138 if (tg3_flag(tp, TSO_CAPABLE) &&
c13e3713 9139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
9140 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9141 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 9142 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
9143 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9144 }
9145 }
9146
63c3a66f 9147 if (tg3_flag(tp, PCI_EXPRESS))
85e94ced
MC
9148 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9149
63c3a66f
JP
9150 if (tg3_flag(tp, HW_TSO_1) ||
9151 tg3_flag(tp, HW_TSO_2) ||
9152 tg3_flag(tp, HW_TSO_3))
027455ad
MC
9153 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9154
108a6c16 9155 if (tg3_flag(tp, 57765_PLUS) ||
e849cdc3 9156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
027455ad
MC
9157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9158 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
1da177e4 9159
f2096f94
MC
9160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9161 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9162
41a8a7ee
MC
9163 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9164 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9165 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f 9167 tg3_flag(tp, 57765_PLUS)) {
41a8a7ee 9168 val = tr32(TG3_RDMA_RSRVCTRL_REG);
10ce95d6 9169 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
b4495ed8
MC
9170 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9171 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9172 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9173 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9174 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9175 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
b75cc0e4 9176 }
41a8a7ee
MC
9177 tw32(TG3_RDMA_RSRVCTRL_REG,
9178 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9179 }
9180
d78b59f5
MC
9181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
d309a46e
MC
9183 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9184 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9185 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9186 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9187 }
9188
1da177e4 9189 /* Receive/send statistics. */
63c3a66f 9190 if (tg3_flag(tp, 5750_PLUS)) {
1661394e
MC
9191 val = tr32(RCVLPC_STATS_ENABLE);
9192 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9193 tw32(RCVLPC_STATS_ENABLE, val);
9194 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
63c3a66f 9195 tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
9196 val = tr32(RCVLPC_STATS_ENABLE);
9197 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9198 tw32(RCVLPC_STATS_ENABLE, val);
9199 } else {
9200 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9201 }
9202 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9203 tw32(SNDDATAI_STATSENAB, 0xffffff);
9204 tw32(SNDDATAI_STATSCTRL,
9205 (SNDDATAI_SCTRL_ENABLE |
9206 SNDDATAI_SCTRL_FASTUPD));
9207
9208 /* Setup host coalescing engine. */
9209 tw32(HOSTCC_MODE, 0);
9210 for (i = 0; i < 2000; i++) {
9211 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9212 break;
9213 udelay(10);
9214 }
9215
d244c892 9216 __tg3_set_coalesce(tp, &tp->coal);
1da177e4 9217
63c3a66f 9218 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
9219 /* Status/statistics block address. See tg3_timer,
9220 * the tg3_periodic_fetch_stats call there, and
9221 * tg3_get_stats to see how this works for 5705/5750 chips.
9222 */
1da177e4
LT
9223 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9224 ((u64) tp->stats_mapping >> 32));
9225 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9226 ((u64) tp->stats_mapping & 0xffffffff));
9227 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
2d31ecaf 9228
1da177e4 9229 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
2d31ecaf
MC
9230
9231 /* Clear statistics and status block memory areas */
9232 for (i = NIC_SRAM_STATS_BLK;
9233 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9234 i += sizeof(u32)) {
9235 tg3_write_mem(tp, i, 0);
9236 udelay(40);
9237 }
1da177e4
LT
9238 }
9239
9240 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9241
9242 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9243 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
63c3a66f 9244 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
9245 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9246
f07e9af3
MC
9247 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9248 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c94e3941
MC
9249 /* reset to prevent losing 1st rx packet intermittently */
9250 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9251 udelay(10);
9252 }
9253
3bda1258 9254 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9e975cc2
MC
9255 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9256 MAC_MODE_FHDE_ENABLE;
9257 if (tg3_flag(tp, ENABLE_APE))
9258 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
63c3a66f 9259 if (!tg3_flag(tp, 5705_PLUS) &&
f07e9af3 9260 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
e8f3f6ca
MC
9261 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9262 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
9263 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9264 udelay(40);
9265
314fba34 9266 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
63c3a66f 9267 * If TG3_FLAG_IS_NIC is zero, we should read the
314fba34
MC
9268 * register to preserve the GPIO settings for LOMs. The GPIOs,
9269 * whether used as inputs or outputs, are set by boot code after
9270 * reset.
9271 */
63c3a66f 9272 if (!tg3_flag(tp, IS_NIC)) {
314fba34
MC
9273 u32 gpio_mask;
9274
9d26e213
MC
9275 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9276 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9277 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
9278
9279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9280 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9281 GRC_LCLCTRL_GPIO_OUTPUT3;
9282
af36e6b6
MC
9283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9284 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9285
aaf84465 9286 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
9287 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9288
9289 /* GPIO1 must be driven high for eeprom write protect */
63c3a66f 9290 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9d26e213
MC
9291 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9292 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 9293 }
1da177e4
LT
9294 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9295 udelay(100);
9296
c3b5003b 9297 if (tg3_flag(tp, USING_MSIX)) {
baf8a94a 9298 val = tr32(MSGINT_MODE);
c3b5003b
MC
9299 val |= MSGINT_MODE_ENABLE;
9300 if (tp->irq_cnt > 1)
9301 val |= MSGINT_MODE_MULTIVEC_EN;
5b39de91
MC
9302 if (!tg3_flag(tp, 1SHOT_MSI))
9303 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
baf8a94a
MC
9304 tw32(MSGINT_MODE, val);
9305 }
9306
63c3a66f 9307 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
9308 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9309 udelay(40);
9310 }
9311
9312 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9313 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9314 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9315 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9316 WDMAC_MODE_LNGREAD_ENAB);
9317
c5908939
MC
9318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9319 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 9320 if (tg3_flag(tp, TSO_CAPABLE) &&
1da177e4
LT
9321 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9322 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9323 /* nothing */
9324 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 9325 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
9326 val |= WDMAC_MODE_RX_ACCEL;
9327 }
9328 }
9329
d9ab5ad1 9330 /* Enable host coalescing bug fix */
63c3a66f 9331 if (tg3_flag(tp, 5755_PLUS))
f51f3562 9332 val |= WDMAC_MODE_STATUS_TAG_FIX;
d9ab5ad1 9333
788a035e
MC
9334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9335 val |= WDMAC_MODE_BURST_ALL_DATA;
9336
1da177e4
LT
9337 tw32_f(WDMAC_MODE, val);
9338 udelay(40);
9339
63c3a66f 9340 if (tg3_flag(tp, PCIX_MODE)) {
9974a356
MC
9341 u16 pcix_cmd;
9342
9343 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9344 &pcix_cmd);
1da177e4 9345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
9346 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9347 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 9348 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
9349 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9350 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 9351 }
9974a356
MC
9352 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9353 pcix_cmd);
1da177e4
LT
9354 }
9355
9356 tw32_f(RDMAC_MODE, rdmac_mode);
9357 udelay(40);
9358
091f0ea3
MC
9359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9360 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9361 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9362 break;
9363 }
9364 if (i < TG3_NUM_RDMA_CHANNELS) {
9365 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9366 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9367 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9368 tg3_flag_set(tp, 5719_RDMA_BUG);
9369 }
9370 }
9371
1da177e4 9372 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
63c3a66f 9373 if (!tg3_flag(tp, 5705_PLUS))
1da177e4 9374 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
9375
9376 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9377 tw32(SNDDATAC_MODE,
9378 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9379 else
9380 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9381
1da177e4
LT
9382 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9383 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7cb32cf2 9384 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
63c3a66f 9385 if (tg3_flag(tp, LRG_PROD_RING_CAP))
7cb32cf2
MC
9386 val |= RCVDBDI_MODE_LRG_RING_SZ;
9387 tw32(RCVDBDI_MODE, val);
1da177e4 9388 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
63c3a66f
JP
9389 if (tg3_flag(tp, HW_TSO_1) ||
9390 tg3_flag(tp, HW_TSO_2) ||
9391 tg3_flag(tp, HW_TSO_3))
1da177e4 9392 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
baf8a94a 9393 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
63c3a66f 9394 if (tg3_flag(tp, ENABLE_TSS))
baf8a94a
MC
9395 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9396 tw32(SNDBDI_MODE, val);
1da177e4
LT
9397 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9398
9399 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9400 err = tg3_load_5701_a0_firmware_fix(tp);
9401 if (err)
9402 return err;
9403 }
9404
63c3a66f 9405 if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
9406 err = tg3_load_tso_firmware(tp);
9407 if (err)
9408 return err;
9409 }
1da177e4
LT
9410
9411 tp->tx_mode = TX_MODE_ENABLE;
f2096f94 9412
63c3a66f 9413 if (tg3_flag(tp, 5755_PLUS) ||
b1d05210
MC
9414 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9415 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
f2096f94
MC
9416
9417 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9418 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9419 tp->tx_mode &= ~val;
9420 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9421 }
9422
1da177e4
LT
9423 tw32_f(MAC_TX_MODE, tp->tx_mode);
9424 udelay(100);
9425
63c3a66f 9426 if (tg3_flag(tp, ENABLE_RSS)) {
bcebcc46 9427 tg3_rss_write_indir_tbl(tp);
baf8a94a
MC
9428
9429 /* Setup the "secret" hash key. */
9430 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9431 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9432 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9433 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9434 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9435 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9436 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9437 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9438 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9439 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9440 }
9441
1da177e4 9442 tp->rx_mode = RX_MODE_ENABLE;
63c3a66f 9443 if (tg3_flag(tp, 5755_PLUS))
af36e6b6
MC
9444 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9445
63c3a66f 9446 if (tg3_flag(tp, ENABLE_RSS))
baf8a94a
MC
9447 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9448 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9449 RX_MODE_RSS_IPV6_HASH_EN |
9450 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9451 RX_MODE_RSS_IPV4_HASH_EN |
9452 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9453
1da177e4
LT
9454 tw32_f(MAC_RX_MODE, tp->rx_mode);
9455 udelay(10);
9456
1da177e4
LT
9457 tw32(MAC_LED_CTRL, tp->led_ctrl);
9458
9459 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
f07e9af3 9460 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4
LT
9461 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9462 udelay(10);
9463 }
9464 tw32_f(MAC_RX_MODE, tp->rx_mode);
9465 udelay(10);
9466
f07e9af3 9467 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4 9468 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
f07e9af3 9469 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
1da177e4
LT
9470 /* Set drive transmission level to 1.2V */
9471 /* only if the signal pre-emphasis bit is not set */
9472 val = tr32(MAC_SERDES_CFG);
9473 val &= 0xfffff000;
9474 val |= 0x880;
9475 tw32(MAC_SERDES_CFG, val);
9476 }
9477 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9478 tw32(MAC_SERDES_CFG, 0x616000);
9479 }
9480
9481 /* Prevent chip from dropping frames when flow control
9482 * is enabled.
9483 */
55086ad9 9484 if (tg3_flag(tp, 57765_CLASS))
666bc831
MC
9485 val = 1;
9486 else
9487 val = 2;
9488 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
1da177e4
LT
9489
9490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
f07e9af3 9491 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
1da177e4 9492 /* Use hardware link auto-negotiation */
63c3a66f 9493 tg3_flag_set(tp, HW_AUTONEG);
1da177e4
LT
9494 }
9495
f07e9af3 9496 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6ff6f81d 9497 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
d4d2c558
MC
9498 u32 tmp;
9499
9500 tmp = tr32(SERDES_RX_CTRL);
9501 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9502 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9503 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9504 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9505 }
9506
63c3a66f 9507 if (!tg3_flag(tp, USE_PHYLIB)) {
c6700ce2 9508 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
80096068 9509 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1da177e4 9510
dd477003
MC
9511 err = tg3_setup_phy(tp, 0);
9512 if (err)
9513 return err;
1da177e4 9514
f07e9af3
MC
9515 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9516 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
dd477003
MC
9517 u32 tmp;
9518
9519 /* Clear CRC stats. */
9520 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9521 tg3_writephy(tp, MII_TG3_TEST1,
9522 tmp | MII_TG3_TEST1_CRC_EN);
f08aa1a8 9523 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
dd477003 9524 }
1da177e4
LT
9525 }
9526 }
9527
9528 __tg3_set_rx_mode(tp->dev);
9529
9530 /* Initialize receive rules. */
9531 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9532 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9533 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9534 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9535
63c3a66f 9536 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
1da177e4
LT
9537 limit = 8;
9538 else
9539 limit = 16;
63c3a66f 9540 if (tg3_flag(tp, ENABLE_ASF))
1da177e4
LT
9541 limit -= 4;
9542 switch (limit) {
9543 case 16:
9544 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9545 case 15:
9546 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9547 case 14:
9548 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9549 case 13:
9550 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9551 case 12:
9552 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9553 case 11:
9554 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9555 case 10:
9556 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9557 case 9:
9558 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9559 case 8:
9560 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9561 case 7:
9562 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9563 case 6:
9564 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9565 case 5:
9566 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9567 case 4:
9568 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9569 case 3:
9570 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9571 case 2:
9572 case 1:
9573
9574 default:
9575 break;
855e1111 9576 }
1da177e4 9577
63c3a66f 9578 if (tg3_flag(tp, ENABLE_APE))
9ce768ea
MC
9579 /* Write our heartbeat update interval to APE. */
9580 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9581 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 9582
1da177e4
LT
9583 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9584
1da177e4
LT
9585 return 0;
9586}
9587
9588/* Called at device open time to get the chip ready for
9589 * packet processing. Invoked with tp->lock held.
9590 */
8e7a22e3 9591static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4 9592{
1da177e4
LT
9593 tg3_switch_clocks(tp);
9594
9595 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9596
2f751b67 9597 return tg3_reset_hw(tp, reset_phy);
1da177e4
LT
9598}
9599
aed93e0b
MC
9600static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9601{
9602 int i;
9603
9604 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9605 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9606
9607 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9608 off += len;
9609
9610 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9611 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9612 memset(ocir, 0, TG3_OCIR_LEN);
9613 }
9614}
9615
9616/* sysfs attributes for hwmon */
9617static ssize_t tg3_show_temp(struct device *dev,
9618 struct device_attribute *devattr, char *buf)
9619{
9620 struct pci_dev *pdev = to_pci_dev(dev);
9621 struct net_device *netdev = pci_get_drvdata(pdev);
9622 struct tg3 *tp = netdev_priv(netdev);
9623 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9624 u32 temperature;
9625
9626 spin_lock_bh(&tp->lock);
9627 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9628 sizeof(temperature));
9629 spin_unlock_bh(&tp->lock);
9630 return sprintf(buf, "%u\n", temperature);
9631}
9632
9633
/* hwmon temperature attributes.  The SENSOR_DEVICE_ATTR index is the
 * APE scratchpad offset that tg3_show_temp() reads for each file:
 * current reading, caution (critical) threshold, and maximum.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* Attribute group created under the PCI device's sysfs directory by
 * tg3_hwmon_open() and removed by tg3_hwmon_close().
 */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
9651
aed93e0b
MC
9652static void tg3_hwmon_close(struct tg3 *tp)
9653{
aed93e0b
MC
9654 if (tp->hwmon_dev) {
9655 hwmon_device_unregister(tp->hwmon_dev);
9656 tp->hwmon_dev = NULL;
9657 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9658 }
aed93e0b
MC
9659}
9660
9661static void tg3_hwmon_open(struct tg3 *tp)
9662{
aed93e0b
MC
9663 int i, err;
9664 u32 size = 0;
9665 struct pci_dev *pdev = tp->pdev;
9666 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9667
9668 tg3_sd_scan_scratchpad(tp, ocirs);
9669
9670 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9671 if (!ocirs[i].src_data_length)
9672 continue;
9673
9674 size += ocirs[i].src_hdr_length;
9675 size += ocirs[i].src_data_length;
9676 }
9677
9678 if (!size)
9679 return;
9680
9681 /* Register hwmon sysfs hooks */
9682 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9683 if (err) {
9684 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9685 return;
9686 }
9687
9688 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9689 if (IS_ERR(tp->hwmon_dev)) {
9690 tp->hwmon_dev = NULL;
9691 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9692 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9693 }
aed93e0b
MC
9694}
9695
9696
1da177e4
LT
/* Add the current value of 32-bit statistics register REG into the
 * 64-bit software counter PSTAT (a high/low u32 pair), carrying into
 * the high word when the low-word addition wraps.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9703
/* Fold the MAC/RCVLPC hardware statistics registers into the software
 * tg3_hw_stats block.  Called from the driver timer (see tg3_timer) on
 * 5705+ chips; skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 RDMA workaround: once enough packets have been sent, the
	 * TX-length workaround bit set in tg3_reset_hw() can be cleared
	 * again (see the 5719_RDMA_BUG flag handling there).
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* On 5717 / 5719-A0 / 5720-A0 the discard counter is not usable;
	 * approximate rx_discards from the mbuf low-watermark attention
	 * bit instead (at most 1 per poll interval).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Ack the attention bit, then carry into the
			 * 64-bit counter by hand.
			 */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9768
0e6cf6a9
MC
9769static void tg3_chk_missed_msi(struct tg3 *tp)
9770{
9771 u32 i;
9772
9773 for (i = 0; i < tp->irq_cnt; i++) {
9774 struct tg3_napi *tnapi = &tp->napi[i];
9775
9776 if (tg3_has_work(tnapi)) {
9777 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9778 tnapi->last_tx_cons == tnapi->tx_cons) {
9779 if (tnapi->chk_msi_cnt < 1) {
9780 tnapi->chk_msi_cnt++;
9781 return;
9782 }
7f230735 9783 tg3_msi(0, tnapi);
0e6cf6a9
MC
9784 }
9785 }
9786 tnapi->chk_msi_cnt = 0;
9787 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9788 tnapi->last_tx_cons = tnapi->tx_cons;
9789 }
9790}
9791
1da177e4
LT
/* Periodic driver timer (re-armed at the end of every run).
 *
 * Per tick: checks for missed MSIs on affected chips and, for
 * non-tagged-status chips, pokes the interrupt/coalescing logic and
 * watches for a stopped write DMA engine (scheduling a reset if so).
 * Once per timer_multiplier ticks it fetches stats and polls link
 * state; once per asf_multiplier ticks it sends the ASF heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip the body entirely while an IRQ sync or reset task is in
	 * flight, but still re-arm ourselves.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write DMA engine stopped unexpectedly: schedule a full
		 * reset task (cannot reset under this spinlock).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Deferred EEE enable countdown (set by the PHY code). */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC_STATUS for a link-change indication. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode to re-sync the
				 * SERDES before renegotiating.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9918
/* One-time setup of the driver's periodic service timer.
 *
 * Chooses the tick period: 1 s when tagged status is usable (except on
 * 5717 and 57765-class parts), otherwise 100 ms, and derives the
 * timer/ASF heartbeat multipliers from it.  Called once at probe time;
 * the timer is armed later by tg3_timer_start().
 */
static void __devinit tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	/* Counters below are decremented once per tick in tg3_timer(). */
	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
9938
9939static void tg3_timer_start(struct tg3 *tp)
9940{
9941 tp->asf_counter = tp->asf_multiplier;
9942 tp->timer_counter = tp->timer_multiplier;
9943
9944 tp->timer.expires = jiffies + tp->timer_offset;
9945 add_timer(&tp->timer);
9946}
9947
/* Stop the periodic service timer, waiting for a running handler to
 * finish on other CPUs before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
9952
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the device is halted and closed; the lock is
 * temporarily dropped around dev_close() (see sparse annotations)
 * and re-acquired before returning the error.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() must run without tp->lock held. */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
9976
/* Deferred reset handler (scheduled work).  Quiesces the PHY and the
 * data path, reinitializes the hardware, and restarts traffic.  Clears
 * RESET_TASK_PENDING on every exit path so the task can be rescheduled.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* A TX hang was detected: fall back to flushed mailbox writes
	 * before bringing the chip back up.
	 */
	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10020
/* Request the IRQ for vector irq_num, picking the handler that matches
 * the interrupt mode (MSI one-shot, MSI, tagged INTx, or plain INTx).
 * With multiple vectors the IRQ is named "<dev>-<num>" in the per-napi
 * label buffer.  Returns the request_irq() result.
 */
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		/* MSI vectors are exclusive; no IRQF_SHARED needed. */
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
10050
/* Verify that the device can actually deliver an interrupt on vector 0.
 *
 * Temporarily swaps in tg3_test_isr, forces a coalescing-engine "now"
 * event, and polls for up to ~50 ms for either a non-zero interrupt
 * mailbox or the masked-PCI-interrupt bit.  The original handler is
 * restored before returning.  Returns 0 on success, -EIO if no
 * interrupt was seen, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so it raises an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Ack a status update the test ISR may have left pending. */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10124
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * SERR reporting is masked around the test because a failed MSI cycle
 * can terminate with Master Abort.  On MSI failure the driver falls
 * back to INTx and resets the chip, since the aborted MSI may have
 * left it in a bad state.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10185
/* Load the firmware blob named in tp->fw_needed and sanity-check its
 * embedded length field.  On success tp->fw holds the blob, tp->fw_len
 * the full image length (including BSS), and tp->fw_needed is cleared.
 * Returns 0, -ENOENT if the file is missing, or -EINVAL on a bogus
 * length header.
 */
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
10216
/* Number of interrupt vectors to request: max of RX/TX queue counts,
 * plus one extra vector for link/misc interrupts in multiqueue MSI-X
 * mode, capped at tp->irq_max.
 */
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
10232
/* Try to switch the device to MSI-X mode.
 *
 * Sizes the RX/TX queue counts from the user requests (or the RSS
 * default), negotiates vectors with pci_enable_msix() — retrying once
 * with the smaller count the PCI core offers — and publishes the
 * vectors into tp->napi[].  Sets ENABLE_RSS/ENABLE_TSS when more than
 * one vector was granted.  Returns true on success, false if the
 * caller should fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors available than requested: retry with
		 * the offered count and shrink the queue counts to fit
		 * (vector 0 is reserved for link interrupts).
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10293
/* Select and initialize the interrupt mode for the device: MSI-X if
 * supported and negotiable, else MSI, else legacy INTx.  Programs
 * MSGINT_MODE accordingly and collapses to a single queue pair when
 * only one vector is available.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10332
/* Undo tg3_ints_init(): release MSI/MSI-X resources and clear all
 * interrupt-mode and RSS/TSS flags.
 */
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
10344
/* Bring the device fully up: interrupts, DMA memory, NAPI, hardware
 * init, optional MSI self-test, timer, and TX queues.
 *
 * @reset_phy: passed through to tg3_init_hw().
 * @test_irq:  when true and MSI is in use, run tg3_test_msi() and fall
 *             back to INTx if delivery fails.
 *
 * Error paths unwind in reverse order of setup (irqs -> napi ->
 * consistent memory -> interrupt mode).  Returns 0 or a negative errno.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors acquired so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
10453
/* Tear the device down: cancel pending reset work, quiesce the data
 * path, timer, hwmon and PHY, halt the chip under the full lock, then
 * release irqs, interrupt mode, NAPI and DMA memory — the reverse of
 * tg3_start().
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
10488
d8f4cd38
MC
10489static int tg3_open(struct net_device *dev)
10490{
10491 struct tg3 *tp = netdev_priv(dev);
10492 int err;
10493
10494 if (tp->fw_needed) {
10495 err = tg3_request_firmware(tp);
10496 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10497 if (err)
10498 return err;
10499 } else if (err) {
10500 netdev_warn(tp->dev, "TSO capability disabled\n");
10501 tg3_flag_clear(tp, TSO_CAPABLE);
10502 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10503 netdev_notice(tp->dev, "TSO capability restored\n");
10504 tg3_flag_set(tp, TSO_CAPABLE);
10505 }
10506 }
10507
10508 netif_carrier_off(tp->dev);
10509
10510 err = tg3_power_up(tp);
10511 if (err)
10512 return err;
10513
10514 tg3_full_lock(tp, 0);
10515
10516 tg3_disable_ints(tp);
10517 tg3_flag_clear(tp, INIT_COMPLETE);
10518
10519 tg3_full_unlock(tp);
10520
10521 err = tg3_start(tp, true, true);
10522 if (err) {
10523 tg3_frob_aux_power(tp, false);
10524 pci_set_power_state(tp->pdev, PCI_D3hot);
10525 }
07b0173c 10526 return err;
1da177e4
LT
10527}
10528
1da177e4
LT
10529static int tg3_close(struct net_device *dev)
10530{
10531 struct tg3 *tp = netdev_priv(dev);
10532
65138594 10533 tg3_stop(tp);
1da177e4 10534
92feeabf
MC
10535 /* Clear stats across close / open calls */
10536 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10537 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
1da177e4 10538
c866b7ea 10539 tg3_power_down(tp);
bc1c7567
MC
10540
10541 netif_carrier_off(tp->dev);
10542
1da177e4
LT
10543 return 0;
10544}
10545
511d2224 10546static inline u64 get_stat64(tg3_stat64_t *val)
816f8b86
SB
10547{
10548 return ((u64)val->high << 32) | ((u64)val->low);
10549}
10550
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper parts the MAC counter is unusable, so the PHY's
 * own receive-error counter is read (and CRC counting re-enabled via
 * MII_TG3_TEST1) and accumulated in tp->phy_crc_errors.  All other
 * devices use the hardware rx_fcs_errors statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* Reading the counter also clears it in the PHY. */
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10574
/* Accumulate one hardware counter on top of the pre-reset baseline
 * kept in old_estats (captured at the last chip reset / close).
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill the ethtool statistics structure: each field is the saved
 * baseline plus the live hardware counter, so values survive chip
 * resets.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
10662
/* Build the rtnl_link_stats64 view from the hardware statistics block,
 * adding the pre-reset baselines in tp->net_stats_prev so counters are
 * monotonic across chip resets.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701; see helper. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software drop counters maintained by the driver itself. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
10718
1da177e4
LT
10719static int tg3_get_regs_len(struct net_device *dev)
10720{
97bd8e49 10721 return TG3_REG_BLK_SIZE;
1da177e4
LT
10722}
10723
/* ethtool .get_regs: dump the legacy register block into _p (zeroed
 * first).  Skipped entirely while the PHY is in low-power state, since
 * register reads would be invalid then.
 */
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
10742
/* ethtool .get_eeprom_len: size of the NVRAM as detected at probe. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
10749
1da177e4
LT
10750static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10751{
10752 struct tg3 *tp = netdev_priv(dev);
10753 int ret;
10754 u8 *pd;
b9fc7dc5 10755 u32 i, offset, len, b_offset, b_count;
a9dc529d 10756 __be32 val;
1da177e4 10757
63c3a66f 10758 if (tg3_flag(tp, NO_NVRAM))
df259d8c
MC
10759 return -EINVAL;
10760
80096068 10761 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
10762 return -EAGAIN;
10763
1da177e4
LT
10764 offset = eeprom->offset;
10765 len = eeprom->len;
10766 eeprom->len = 0;
10767
10768 eeprom->magic = TG3_EEPROM_MAGIC;
10769
10770 if (offset & 3) {
10771 /* adjustments to start on required 4 byte boundary */
10772 b_offset = offset & 3;
10773 b_count = 4 - b_offset;
10774 if (b_count > len) {
10775 /* i.e. offset=1 len=2 */
10776 b_count = len;
10777 }
a9dc529d 10778 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
1da177e4
LT
10779 if (ret)
10780 return ret;
be98da6a 10781 memcpy(data, ((char *)&val) + b_offset, b_count);
1da177e4
LT
10782 len -= b_count;
10783 offset += b_count;
c6cdf436 10784 eeprom->len += b_count;
1da177e4
LT
10785 }
10786
25985edc 10787 /* read bytes up to the last 4 byte boundary */
1da177e4
LT
10788 pd = &data[eeprom->len];
10789 for (i = 0; i < (len - (len & 3)); i += 4) {
a9dc529d 10790 ret = tg3_nvram_read_be32(tp, offset + i, &val);
1da177e4
LT
10791 if (ret) {
10792 eeprom->len += i;
10793 return ret;
10794 }
1da177e4
LT
10795 memcpy(pd + i, &val, 4);
10796 }
10797 eeprom->len += i;
10798
10799 if (len & 3) {
10800 /* read last bytes not ending on 4 byte boundary */
10801 pd = &data[eeprom->len];
10802 b_count = len & 3;
10803 b_offset = offset + len - b_count;
a9dc529d 10804 ret = tg3_nvram_read_be32(tp, b_offset, &val);
1da177e4
LT
10805 if (ret)
10806 return ret;
b9fc7dc5 10807 memcpy(pd, &val, b_count);
1da177e4
LT
10808 eeprom->len += b_count;
10809 }
10810 return 0;
10811}
10812
1da177e4
LT
10813static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10814{
10815 struct tg3 *tp = netdev_priv(dev);
10816 int ret;
b9fc7dc5 10817 u32 offset, len, b_offset, odd_len;
1da177e4 10818 u8 *buf;
a9dc529d 10819 __be32 start, end;
1da177e4 10820
80096068 10821 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
10822 return -EAGAIN;
10823
63c3a66f 10824 if (tg3_flag(tp, NO_NVRAM) ||
df259d8c 10825 eeprom->magic != TG3_EEPROM_MAGIC)
1da177e4
LT
10826 return -EINVAL;
10827
10828 offset = eeprom->offset;
10829 len = eeprom->len;
10830
10831 if ((b_offset = (offset & 3))) {
10832 /* adjustments to start on required 4 byte boundary */
a9dc529d 10833 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
1da177e4
LT
10834 if (ret)
10835 return ret;
1da177e4
LT
10836 len += b_offset;
10837 offset &= ~3;
1c8594b4
MC
10838 if (len < 4)
10839 len = 4;
1da177e4
LT
10840 }
10841
10842 odd_len = 0;
1c8594b4 10843 if (len & 3) {
1da177e4
LT
10844 /* adjustments to end on required 4 byte boundary */
10845 odd_len = 1;
10846 len = (len + 3) & ~3;
a9dc529d 10847 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
1da177e4
LT
10848 if (ret)
10849 return ret;
1da177e4
LT
10850 }
10851
10852 buf = data;
10853 if (b_offset || odd_len) {
10854 buf = kmalloc(len, GFP_KERNEL);
ab0049b4 10855 if (!buf)
1da177e4
LT
10856 return -ENOMEM;
10857 if (b_offset)
10858 memcpy(buf, &start, 4);
10859 if (odd_len)
10860 memcpy(buf+len-4, &end, 4);
10861 memcpy(buf + b_offset, data, eeprom->len);
10862 }
10863
10864 ret = tg3_nvram_write_block(tp, offset, len, buf);
10865
10866 if (buf != data)
10867 kfree(buf);
10868
10869 return ret;
10870}
10871
/* ethtool .get_settings: report link capabilities and current state.
 *
 * When phylib manages the PHY the query is delegated to
 * phy_ethtool_gset().  Otherwise the answer is assembled from the
 * driver's phy_flags and link_config: speed/duplex/MDI-X are only
 * reported while the link is up; UNKNOWN values are returned when it
 * is down.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs also do 10/100 over TP; serdes is fibre only. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
6aa20a22 10937
/* ethtool set_settings: validate and apply a new link configuration
 * (autoneg / forced speed / duplex / advertisement mask), then kick the
 * PHY setup if the interface is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	/* When phylib owns the PHY, defer entirely to it. */
	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of modes this hardware can advertise. */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		/* Gigabit only when the PHY is not 10/100-only. */
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* Reject any requested mode the hardware cannot do. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits of the request; the
		 * Autoneg bit is re-added below.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SerDes links only support forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Forced speed/duplex are meaningless while autonegotiating. */
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
6aa20a22 11024
/* ethtool get_drvinfo: report driver name/version, firmware version,
 * and PCI bus address of the adapter.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
6aa20a22 11034
1da177e4
LT
11035static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11036{
11037 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11038
63c3a66f 11039 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
a85feb8c
GZ
11040 wol->supported = WAKE_MAGIC;
11041 else
11042 wol->supported = 0;
1da177e4 11043 wol->wolopts = 0;
63c3a66f 11044 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
1da177e4
LT
11045 wol->wolopts = WAKE_MAGIC;
11046 memset(&wol->sopass, 0, sizeof(wol->sopass));
11047}
6aa20a22 11048
/* ethtool set_wol: enable or disable magic-packet Wake-on-LAN.  The
 * wakeup state is recorded both in the driver-model device (so the PM
 * core honors it) and in the driver's WOL_ENABLE flag.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	/* Only magic-packet wakeup is supported. */
	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	/* Mirror the device-model wakeup state into the driver flag
	 * under the lock so readers see a consistent value.
	 */
	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
6aa20a22 11071
1da177e4
LT
11072static u32 tg3_get_msglevel(struct net_device *dev)
11073{
11074 struct tg3 *tp = netdev_priv(dev);
11075 return tp->msg_enable;
11076}
6aa20a22 11077
1da177e4
LT
11078static void tg3_set_msglevel(struct net_device *dev, u32 value)
11079{
11080 struct tg3 *tp = netdev_priv(dev);
11081 tp->msg_enable = value;
11082}
6aa20a22 11083
/* ethtool nway_reset: restart link autonegotiation.  Uses phylib when
 * it owns the PHY, otherwise pokes MII_BMCR directly.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* A pure SerDes link has no MII autonegotiation to restart. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice; the first read looks
		 * like a deliberate discarded read (possibly flushing a
		 * stale/latched value) — confirm before simplifying.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		/* Only restart if autoneg is enabled or the link came up
		 * via parallel detection.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
6aa20a22 11117
1da177e4
LT
11118static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11119{
11120 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11121
2c49a44d 11122 ering->rx_max_pending = tp->rx_std_ring_mask;
63c3a66f 11123 if (tg3_flag(tp, JUMBO_RING_ENABLE))
2c49a44d 11124 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
4f81c32b
MC
11125 else
11126 ering->rx_jumbo_max_pending = 0;
11127
11128 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
11129
11130 ering->rx_pending = tp->rx_pending;
63c3a66f 11131 if (tg3_flag(tp, JUMBO_RING_ENABLE))
4f81c32b
MC
11132 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11133 else
11134 ering->rx_jumbo_pending = 0;
11135
f3f3f27e 11136 ering->tx_pending = tp->napi[0].tx_pending;
1da177e4 11137}
6aa20a22 11138
/* ethtool set_ringparam: resize the RX/TX rings.  If the interface is
 * up the chip is halted and restarted with the new sizes.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* The TX ring must hold a maximally fragmented skb; chips with
	 * the TSO bug may need up to three times that.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Apply the TX size to every NAPI/queue slot. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
6aa20a22 11184
1da177e4
LT
11185static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11186{
11187 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11188
63c3a66f 11189 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
8d018621 11190
4a2db503 11191 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
8d018621
MC
11192 epause->rx_pause = 1;
11193 else
11194 epause->rx_pause = 0;
11195
4a2db503 11196 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
8d018621
MC
11197 epause->tx_pause = 1;
11198 else
11199 epause->tx_pause = 0;
1da177e4 11200}
6aa20a22 11201
/* ethtool set_pauseparam: configure RX/TX flow control.  With phylib,
 * the new pause advertisement may trigger a renegotiation; otherwise
 * the chip is halted and restarted with the new settings.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric RX/TX pause requires Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the requested rx/tx pause combination to flowctrl
		 * bits and the matching advertisement encoding.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced. Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: just record the new
			 * advertisement for when it is.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Flow-control changes require a chip restart. */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
6aa20a22 11303
de6f31eb 11304static int tg3_get_sset_count(struct net_device *dev, int sset)
1da177e4 11305{
b9f2c044
JG
11306 switch (sset) {
11307 case ETH_SS_TEST:
11308 return TG3_NUM_TEST;
11309 case ETH_SS_STATS:
11310 return TG3_NUM_STATS;
11311 default:
11312 return -EOPNOTSUPP;
11313 }
4cafd3f5
MC
11314}
11315
90415477
MC
11316static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11317 u32 *rules __always_unused)
11318{
11319 struct tg3 *tp = netdev_priv(dev);
11320
11321 if (!tg3_flag(tp, SUPPORT_MSIX))
11322 return -EOPNOTSUPP;
11323
11324 switch (info->cmd) {
11325 case ETHTOOL_GRXRINGS:
11326 if (netif_running(tp->dev))
9102426a 11327 info->data = tp->rxq_cnt;
90415477
MC
11328 else {
11329 info->data = num_online_cpus();
9102426a
MC
11330 if (info->data > TG3_RSS_MAX_NUM_QS)
11331 info->data = TG3_RSS_MAX_NUM_QS;
90415477
MC
11332 }
11333
11334 /* The first interrupt vector only
11335 * handles link interrupts.
11336 */
11337 info->data -= 1;
11338 return 0;
11339
11340 default:
11341 return -EOPNOTSUPP;
11342 }
11343}
11344
11345static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11346{
11347 u32 size = 0;
11348 struct tg3 *tp = netdev_priv(dev);
11349
11350 if (tg3_flag(tp, SUPPORT_MSIX))
11351 size = TG3_RSS_INDIR_TBL_SIZE;
11352
11353 return size;
11354}
11355
11356static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11357{
11358 struct tg3 *tp = netdev_priv(dev);
11359 int i;
11360
11361 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11362 indir[i] = tp->rss_ind_tbl[i];
11363
11364 return 0;
11365}
11366
11367static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11368{
11369 struct tg3 *tp = netdev_priv(dev);
11370 size_t i;
11371
11372 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11373 tp->rss_ind_tbl[i] = indir[i];
11374
11375 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11376 return 0;
11377
11378 /* It is legal to write the indirection
11379 * table while the device is running.
11380 */
11381 tg3_full_lock(tp, 0);
11382 tg3_rss_write_indir_tbl(tp);
11383 tg3_full_unlock(tp);
11384
11385 return 0;
11386}
11387
0968169c
MC
11388static void tg3_get_channels(struct net_device *dev,
11389 struct ethtool_channels *channel)
11390{
11391 struct tg3 *tp = netdev_priv(dev);
11392 u32 deflt_qs = netif_get_num_default_rss_queues();
11393
11394 channel->max_rx = tp->rxq_max;
11395 channel->max_tx = tp->txq_max;
11396
11397 if (netif_running(dev)) {
11398 channel->rx_count = tp->rxq_cnt;
11399 channel->tx_count = tp->txq_cnt;
11400 } else {
11401 if (tp->rxq_req)
11402 channel->rx_count = tp->rxq_req;
11403 else
11404 channel->rx_count = min(deflt_qs, tp->rxq_max);
11405
11406 if (tp->txq_req)
11407 channel->tx_count = tp->txq_req;
11408 else
11409 channel->tx_count = min(deflt_qs, tp->txq_max);
11410 }
11411}
11412
11413static int tg3_set_channels(struct net_device *dev,
11414 struct ethtool_channels *channel)
11415{
11416 struct tg3 *tp = netdev_priv(dev);
11417
11418 if (!tg3_flag(tp, SUPPORT_MSIX))
11419 return -EOPNOTSUPP;
11420
11421 if (channel->rx_count > tp->rxq_max ||
11422 channel->tx_count > tp->txq_max)
11423 return -EINVAL;
11424
11425 tp->rxq_req = channel->rx_count;
11426 tp->txq_req = channel->tx_count;
11427
11428 if (!netif_running(dev))
11429 return 0;
11430
11431 tg3_stop(tp);
11432
11433 netif_carrier_off(dev);
11434
11435 tg3_start(tp, true, false);
11436
11437 return 0;
11438}
11439
de6f31eb 11440static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1da177e4
LT
11441{
11442 switch (stringset) {
11443 case ETH_SS_STATS:
11444 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11445 break;
4cafd3f5
MC
11446 case ETH_SS_TEST:
11447 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11448 break;
1da177e4
LT
11449 default:
11450 WARN_ON(1); /* we need a WARN() */
11451 break;
11452 }
11453}
11454
/* ethtool set_phys_id: blink the port LEDs so the adapter can be
 * physically identified.  The core toggles ON/OFF once per second
 * (the return value of ETHTOOL_ID_ACTIVE).
 */
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override normal operation and force every LED on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
				   LED_CTRL_1000MBPS_ON |
				   LED_CTRL_100MBPS_ON |
				   LED_CTRL_10MBPS_ON |
				   LED_CTRL_TRAFFIC_OVERRIDE |
				   LED_CTRL_TRAFFIC_BLINK |
				   LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override normal operation with all LEDs off. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
				   LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved LED control value. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
11489
de6f31eb 11490static void tg3_get_ethtool_stats(struct net_device *dev,
1da177e4
LT
11491 struct ethtool_stats *estats, u64 *tmp_stats)
11492{
11493 struct tg3 *tp = netdev_priv(dev);
0e6c9da3 11494
b546e46f
MC
11495 if (tp->hw_stats)
11496 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11497 else
11498 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
1da177e4
LT
11499}
11500
/* Read the adapter's VPD (Vital Product Data) region into a freshly
 * allocated buffer.  Prefers an extended-VPD entry found via the NVRAM
 * directory; falls back to the legacy fixed VPD offset, and to PCI
 * config-space VPD access for non-EEPROM parts.  On success returns
 * the buffer (caller must kfree) and stores its length in *vpdlen;
 * returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Directory entry found: length (in words) and
			 * the physical offset of the VPD block.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD: use the legacy fixed region. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Read via PCI config space, retrying up to three times
		 * on timeout/interrupt (treated as short reads).
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11576
/* NVRAM image sizes for the various selfboot formats/revisions. */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool self-test: validate the NVRAM contents.  Depending on the
 * magic number this checks a selfboot byte checksum, a hardware-format
 * parity encoding, or the legacy bootstrap/manufacturing CRCs plus the
 * VPD checksum keyword.  Returns 0 on success, -EIO/-ENOMEM on failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick the image size to read based on the magic/format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums (mod 256) to zero. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte must match its stored odd-parity bit. */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD read-only section's CHKSUM keyword. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Bytes 0..j inclusive must sum to zero. */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
11765
ca43007a
MC
11766#define TG3_SERDES_TIMEOUT_SEC 2
11767#define TG3_COPPER_TIMEOUT_SEC 6
11768
11769static int tg3_test_link(struct tg3 *tp)
11770{
11771 int i, max;
11772
11773 if (!netif_running(tp->dev))
11774 return -ENODEV;
11775
f07e9af3 11776 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
ca43007a
MC
11777 max = TG3_SERDES_TIMEOUT_SEC;
11778 else
11779 max = TG3_COPPER_TIMEOUT_SEC;
11780
11781 for (i = 0; i < max; i++) {
11782 if (netif_carrier_ok(tp->dev))
11783 return 0;
11784
11785 if (msleep_interruptible(1000))
11786 break;
11787 }
11788
11789 return -EIO;
11790}
11791
a71116d1 11792/* Only test the commonly used registers */
30ca3e37 11793static int tg3_test_registers(struct tg3 *tp)
a71116d1 11794{
b16250e3 11795 int i, is_5705, is_5750;
a71116d1
MC
11796 u32 offset, read_mask, write_mask, val, save_val, read_val;
11797 static struct {
11798 u16 offset;
11799 u16 flags;
11800#define TG3_FL_5705 0x1
11801#define TG3_FL_NOT_5705 0x2
11802#define TG3_FL_NOT_5788 0x4
b16250e3 11803#define TG3_FL_NOT_5750 0x8
a71116d1
MC
11804 u32 read_mask;
11805 u32 write_mask;
11806 } reg_tbl[] = {
11807 /* MAC Control Registers */
11808 { MAC_MODE, TG3_FL_NOT_5705,
11809 0x00000000, 0x00ef6f8c },
11810 { MAC_MODE, TG3_FL_5705,
11811 0x00000000, 0x01ef6b8c },
11812 { MAC_STATUS, TG3_FL_NOT_5705,
11813 0x03800107, 0x00000000 },
11814 { MAC_STATUS, TG3_FL_5705,
11815 0x03800100, 0x00000000 },
11816 { MAC_ADDR_0_HIGH, 0x0000,
11817 0x00000000, 0x0000ffff },
11818 { MAC_ADDR_0_LOW, 0x0000,
c6cdf436 11819 0x00000000, 0xffffffff },
a71116d1
MC
11820 { MAC_RX_MTU_SIZE, 0x0000,
11821 0x00000000, 0x0000ffff },
11822 { MAC_TX_MODE, 0x0000,
11823 0x00000000, 0x00000070 },
11824 { MAC_TX_LENGTHS, 0x0000,
11825 0x00000000, 0x00003fff },
11826 { MAC_RX_MODE, TG3_FL_NOT_5705,
11827 0x00000000, 0x000007fc },
11828 { MAC_RX_MODE, TG3_FL_5705,
11829 0x00000000, 0x000007dc },
11830 { MAC_HASH_REG_0, 0x0000,
11831 0x00000000, 0xffffffff },
11832 { MAC_HASH_REG_1, 0x0000,
11833 0x00000000, 0xffffffff },
11834 { MAC_HASH_REG_2, 0x0000,
11835 0x00000000, 0xffffffff },
11836 { MAC_HASH_REG_3, 0x0000,
11837 0x00000000, 0xffffffff },
11838
11839 /* Receive Data and Receive BD Initiator Control Registers. */
11840 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11841 0x00000000, 0xffffffff },
11842 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11843 0x00000000, 0xffffffff },
11844 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11845 0x00000000, 0x00000003 },
11846 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11847 0x00000000, 0xffffffff },
11848 { RCVDBDI_STD_BD+0, 0x0000,
11849 0x00000000, 0xffffffff },
11850 { RCVDBDI_STD_BD+4, 0x0000,
11851 0x00000000, 0xffffffff },
11852 { RCVDBDI_STD_BD+8, 0x0000,
11853 0x00000000, 0xffff0002 },
11854 { RCVDBDI_STD_BD+0xc, 0x0000,
11855 0x00000000, 0xffffffff },
6aa20a22 11856
a71116d1
MC
11857 /* Receive BD Initiator Control Registers. */
11858 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11859 0x00000000, 0xffffffff },
11860 { RCVBDI_STD_THRESH, TG3_FL_5705,
11861 0x00000000, 0x000003ff },
11862 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11863 0x00000000, 0xffffffff },
6aa20a22 11864
a71116d1
MC
11865 /* Host Coalescing Control Registers. */
11866 { HOSTCC_MODE, TG3_FL_NOT_5705,
11867 0x00000000, 0x00000004 },
11868 { HOSTCC_MODE, TG3_FL_5705,
11869 0x00000000, 0x000000f6 },
11870 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11871 0x00000000, 0xffffffff },
11872 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11873 0x00000000, 0x000003ff },
11874 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11875 0x00000000, 0xffffffff },
11876 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11877 0x00000000, 0x000003ff },
11878 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11879 0x00000000, 0xffffffff },
11880 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11881 0x00000000, 0x000000ff },
11882 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11883 0x00000000, 0xffffffff },
11884 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11885 0x00000000, 0x000000ff },
11886 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11887 0x00000000, 0xffffffff },
11888 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11889 0x00000000, 0xffffffff },
11890 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11891 0x00000000, 0xffffffff },
11892 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11893 0x00000000, 0x000000ff },
11894 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11895 0x00000000, 0xffffffff },
11896 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11897 0x00000000, 0x000000ff },
11898 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11899 0x00000000, 0xffffffff },
11900 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11901 0x00000000, 0xffffffff },
11902 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11903 0x00000000, 0xffffffff },
11904 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11905 0x00000000, 0xffffffff },
11906 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11907 0x00000000, 0xffffffff },
11908 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11909 0xffffffff, 0x00000000 },
11910 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11911 0xffffffff, 0x00000000 },
11912
11913 /* Buffer Manager Control Registers. */
b16250e3 11914 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
a71116d1 11915 0x00000000, 0x007fff80 },
b16250e3 11916 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
a71116d1
MC
11917 0x00000000, 0x007fffff },
11918 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11919 0x00000000, 0x0000003f },
11920 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11921 0x00000000, 0x000001ff },
11922 { BUFMGR_MB_HIGH_WATER, 0x0000,
11923 0x00000000, 0x000001ff },
11924 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11925 0xffffffff, 0x00000000 },
11926 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11927 0xffffffff, 0x00000000 },
6aa20a22 11928
a71116d1
MC
11929 /* Mailbox Registers */
11930 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11931 0x00000000, 0x000001ff },
11932 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11933 0x00000000, 0x000001ff },
11934 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11935 0x00000000, 0x000007ff },
11936 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11937 0x00000000, 0x000001ff },
11938
11939 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11940 };
11941
b16250e3 11942 is_5705 = is_5750 = 0;
63c3a66f 11943 if (tg3_flag(tp, 5705_PLUS)) {
a71116d1 11944 is_5705 = 1;
63c3a66f 11945 if (tg3_flag(tp, 5750_PLUS))
b16250e3
MC
11946 is_5750 = 1;
11947 }
a71116d1
MC
11948
11949 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11950 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11951 continue;
11952
11953 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11954 continue;
11955
63c3a66f 11956 if (tg3_flag(tp, IS_5788) &&
a71116d1
MC
11957 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11958 continue;
11959
b16250e3
MC
11960 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11961 continue;
11962
a71116d1
MC
11963 offset = (u32) reg_tbl[i].offset;
11964 read_mask = reg_tbl[i].read_mask;
11965 write_mask = reg_tbl[i].write_mask;
11966
11967 /* Save the original register content */
11968 save_val = tr32(offset);
11969
11970 /* Determine the read-only value. */
11971 read_val = save_val & read_mask;
11972
11973 /* Write zero to the register, then make sure the read-only bits
11974 * are not changed and the read/write bits are all zeros.
11975 */
11976 tw32(offset, 0);
11977
11978 val = tr32(offset);
11979
11980 /* Test the read-only and read/write bits. */
11981 if (((val & read_mask) != read_val) || (val & write_mask))
11982 goto out;
11983
11984 /* Write ones to all the bits defined by RdMask and WrMask, then
11985 * make sure the read-only bits are not changed and the
11986 * read/write bits are all ones.
11987 */
11988 tw32(offset, read_mask | write_mask);
11989
11990 val = tr32(offset);
11991
11992 /* Test the read-only bits. */
11993 if ((val & read_mask) != read_val)
11994 goto out;
11995
11996 /* Test the read/write bits. */
11997 if ((val & write_mask) != write_mask)
11998 goto out;
11999
12000 tw32(offset, save_val);
12001 }
12002
12003 return 0;
12004
12005out:
9f88f29f 12006 if (netif_msg_hw(tp))
2445e461
MC
12007 netdev_err(tp->dev,
12008 "Register test failed at offset %x\n", offset);
a71116d1
MC
12009 tw32(offset, save_val);
12010 return -EIO;
12011}
12012
/* Pattern-test a window of NIC-local memory.
 *
 * For each of three patterns (all zeros, all ones, alternating bits),
 * write the pattern to every 32-bit word in [offset, offset + len) and
 * immediately read it back through the memory window.
 *
 * Returns 0 if every readback matches, -EIO on the first mismatch.
 */
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}
12031
/* Self-test the chip's internal memory regions.
 *
 * Each ASIC family has its own table of { offset, length } regions that
 * are safe to pattern-test; a 0xffffffff offset terminates the table.
 * The table is selected from the device flags / ASIC revision, then each
 * region is run through tg3_do_mem_test().
 *
 * Returns 0 on success, or the first non-zero error from
 * tg3_do_mem_test() (-EIO on a readback mismatch).
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table for this ASIC family; most specific first. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
12100
/* Parameters of the canned TSO loopback frame below. */
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Pre-built ethertype + IPv4 + TCP header template used as the start of
 * the TSO loopback test packet (src/dst are 10.0.0.1 / 10.0.0.2; the
 * IP total-length and checksum fields are filled in at run time by
 * tg3_run_loopback()).
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
9f40dead 12123
28a45957 12124static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
c76949a6 12125{
5e5a7f37 12126 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
bb158d69 12127 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
84b67b27 12128 u32 budget;
9205fd9c
ED
12129 struct sk_buff *skb;
12130 u8 *tx_data, *rx_data;
c76949a6
MC
12131 dma_addr_t map;
12132 int num_pkts, tx_len, rx_len, i, err;
12133 struct tg3_rx_buffer_desc *desc;
898a56f8 12134 struct tg3_napi *tnapi, *rnapi;
8fea32b9 12135 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
c76949a6 12136
c8873405
MC
12137 tnapi = &tp->napi[0];
12138 rnapi = &tp->napi[0];
0c1d0e2b 12139 if (tp->irq_cnt > 1) {
63c3a66f 12140 if (tg3_flag(tp, ENABLE_RSS))
1da85aa3 12141 rnapi = &tp->napi[1];
63c3a66f 12142 if (tg3_flag(tp, ENABLE_TSS))
c8873405 12143 tnapi = &tp->napi[1];
0c1d0e2b 12144 }
fd2ce37f 12145 coal_now = tnapi->coal_now | rnapi->coal_now;
898a56f8 12146
c76949a6
MC
12147 err = -EIO;
12148
4852a861 12149 tx_len = pktsz;
a20e9c62 12150 skb = netdev_alloc_skb(tp->dev, tx_len);
a50bb7b9
JJ
12151 if (!skb)
12152 return -ENOMEM;
12153
c76949a6
MC
12154 tx_data = skb_put(skb, tx_len);
12155 memcpy(tx_data, tp->dev->dev_addr, 6);
12156 memset(tx_data + 6, 0x0, 8);
12157
4852a861 12158 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
c76949a6 12159
28a45957 12160 if (tso_loopback) {
bb158d69
MC
12161 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12162
12163 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12164 TG3_TSO_TCP_OPT_LEN;
12165
12166 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12167 sizeof(tg3_tso_header));
12168 mss = TG3_TSO_MSS;
12169
12170 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12171 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12172
12173 /* Set the total length field in the IP header */
12174 iph->tot_len = htons((u16)(mss + hdr_len));
12175
12176 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12177 TXD_FLAG_CPU_POST_DMA);
12178
63c3a66f
JP
12179 if (tg3_flag(tp, HW_TSO_1) ||
12180 tg3_flag(tp, HW_TSO_2) ||
12181 tg3_flag(tp, HW_TSO_3)) {
bb158d69
MC
12182 struct tcphdr *th;
12183 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12184 th = (struct tcphdr *)&tx_data[val];
12185 th->check = 0;
12186 } else
12187 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12188
63c3a66f 12189 if (tg3_flag(tp, HW_TSO_3)) {
bb158d69
MC
12190 mss |= (hdr_len & 0xc) << 12;
12191 if (hdr_len & 0x10)
12192 base_flags |= 0x00000010;
12193 base_flags |= (hdr_len & 0x3e0) << 5;
63c3a66f 12194 } else if (tg3_flag(tp, HW_TSO_2))
bb158d69 12195 mss |= hdr_len << 9;
63c3a66f 12196 else if (tg3_flag(tp, HW_TSO_1) ||
bb158d69
MC
12197 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12198 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12199 } else {
12200 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12201 }
12202
12203 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12204 } else {
12205 num_pkts = 1;
12206 data_off = ETH_HLEN;
c441b456
MC
12207
12208 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12209 tx_len > VLAN_ETH_FRAME_LEN)
12210 base_flags |= TXD_FLAG_JMB_PKT;
bb158d69
MC
12211 }
12212
12213 for (i = data_off; i < tx_len; i++)
c76949a6
MC
12214 tx_data[i] = (u8) (i & 0xff);
12215
f4188d8a
AD
12216 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12217 if (pci_dma_mapping_error(tp->pdev, map)) {
a21771dd
MC
12218 dev_kfree_skb(skb);
12219 return -EIO;
12220 }
c76949a6 12221
0d681b27
MC
12222 val = tnapi->tx_prod;
12223 tnapi->tx_buffers[val].skb = skb;
12224 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12225
c76949a6 12226 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 12227 rnapi->coal_now);
c76949a6
MC
12228
12229 udelay(10);
12230
898a56f8 12231 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
c76949a6 12232
84b67b27
MC
12233 budget = tg3_tx_avail(tnapi);
12234 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
d1a3b737
MC
12235 base_flags | TXD_FLAG_END, mss, 0)) {
12236 tnapi->tx_buffers[val].skb = NULL;
12237 dev_kfree_skb(skb);
12238 return -EIO;
12239 }
c76949a6 12240
f3f3f27e 12241 tnapi->tx_prod++;
c76949a6 12242
6541b806
MC
12243 /* Sync BD data before updating mailbox */
12244 wmb();
12245
f3f3f27e
MC
12246 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12247 tr32_mailbox(tnapi->prodmbox);
c76949a6
MC
12248
12249 udelay(10);
12250
303fc921
MC
12251 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12252 for (i = 0; i < 35; i++) {
c76949a6 12253 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 12254 coal_now);
c76949a6
MC
12255
12256 udelay(10);
12257
898a56f8
MC
12258 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12259 rx_idx = rnapi->hw_status->idx[0].rx_producer;
f3f3f27e 12260 if ((tx_idx == tnapi->tx_prod) &&
c76949a6
MC
12261 (rx_idx == (rx_start_idx + num_pkts)))
12262 break;
12263 }
12264
ba1142e4 12265 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
c76949a6
MC
12266 dev_kfree_skb(skb);
12267
f3f3f27e 12268 if (tx_idx != tnapi->tx_prod)
c76949a6
MC
12269 goto out;
12270
12271 if (rx_idx != rx_start_idx + num_pkts)
12272 goto out;
12273
bb158d69
MC
12274 val = data_off;
12275 while (rx_idx != rx_start_idx) {
12276 desc = &rnapi->rx_rcb[rx_start_idx++];
12277 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12278 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
c76949a6 12279
bb158d69
MC
12280 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12281 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12282 goto out;
c76949a6 12283
bb158d69
MC
12284 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12285 - ETH_FCS_LEN;
c76949a6 12286
28a45957 12287 if (!tso_loopback) {
bb158d69
MC
12288 if (rx_len != tx_len)
12289 goto out;
4852a861 12290
bb158d69
MC
12291 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12292 if (opaque_key != RXD_OPAQUE_RING_STD)
12293 goto out;
12294 } else {
12295 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12296 goto out;
12297 }
12298 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12299 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
54e0a67f 12300 >> RXD_TCPCSUM_SHIFT != 0xffff) {
4852a861 12301 goto out;
bb158d69 12302 }
4852a861 12303
bb158d69 12304 if (opaque_key == RXD_OPAQUE_RING_STD) {
9205fd9c 12305 rx_data = tpr->rx_std_buffers[desc_idx].data;
bb158d69
MC
12306 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12307 mapping);
12308 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
9205fd9c 12309 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
bb158d69
MC
12310 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12311 mapping);
12312 } else
12313 goto out;
c76949a6 12314
bb158d69
MC
12315 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12316 PCI_DMA_FROMDEVICE);
c76949a6 12317
9205fd9c 12318 rx_data += TG3_RX_OFFSET(tp);
bb158d69 12319 for (i = data_off; i < rx_len; i++, val++) {
9205fd9c 12320 if (*(rx_data + i) != (u8) (val & 0xff))
bb158d69
MC
12321 goto out;
12322 }
c76949a6 12323 }
bb158d69 12324
c76949a6 12325 err = 0;
6aa20a22 12326
9205fd9c 12327 /* tg3_free_rings will unmap and free the rx_data */
c76949a6
MC
12328out:
12329 return err;
12330}
12331
/* Per-test failure bits reported in the ethtool self-test data words. */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
/* All-bits value used when the loopback test could not run at all. */
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
00c266b7 12339
/* Run the MAC, internal-PHY, and (optionally) external loopback tests.
 *
 * Failure bits (TG3_*_LOOPBACK_FAILED) are accumulated into:
 *   data[0] - MAC loopback,
 *   data[1] - internal PHY loopback,
 *   data[2] - external loopback (only when @do_extlpbk).
 * EEE is temporarily masked off in tp->phy_flags for the duration of
 * the test and restored at 'done'.
 *
 * Returns 0 if every executed test passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	/* Jumbo test frame size, clamped to the device's DMA limit. */
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata. Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec. Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
12450
/* ethtool self_test hook.
 *
 * Result slots in @data: [0] nvram, [1] link, [2] registers, [3] memory,
 * [4..6] loopback (std/int/ext), [7] interrupt.  Online mode runs only
 * nvram and link; ETH_TEST_FL_OFFLINE additionally halts the chip, runs
 * the destructive register/memory/loopback/interrupt tests, and then
 * restarts the hardware.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Can't test at all if we fail to wake a low-power device. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and on-chip CPUs before poking registers. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test must run without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
12538
/* ndo_do_ioctl hook: MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG).  When phylib manages the PHY the request is forwarded
 * to phy_mii_ioctl(); otherwise the register is read/written directly
 * under tp->lock.  Returns -EAGAIN if the device is not up (or the PHY
 * not yet connected), -EOPNOTSUPP for unsupported commands or SERDES
 * devices with no MII PHY.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
12595
15f9850d
DM
12596static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12597{
12598 struct tg3 *tp = netdev_priv(dev);
12599
12600 memcpy(ec, &tp->coal, sizeof(*ec));
12601 return 0;
12602}
12603
d244c892
MC
12604static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12605{
12606 struct tg3 *tp = netdev_priv(dev);
12607 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12608 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12609
63c3a66f 12610 if (!tg3_flag(tp, 5705_PLUS)) {
d244c892
MC
12611 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12612 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12613 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12614 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12615 }
12616
12617 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12618 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12619 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12620 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12621 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12622 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12623 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12624 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12625 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12626 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12627 return -EINVAL;
12628
12629 /* No rx interrupts will be generated if both are zero */
12630 if ((ec->rx_coalesce_usecs == 0) &&
12631 (ec->rx_max_coalesced_frames == 0))
12632 return -EINVAL;
12633
12634 /* No tx interrupts will be generated if both are zero */
12635 if ((ec->tx_coalesce_usecs == 0) &&
12636 (ec->tx_max_coalesced_frames == 0))
12637 return -EINVAL;
12638
12639 /* Only copy relevant parameters, ignore all others. */
12640 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12641 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12642 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12643 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12644 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12645 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12646 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12647 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12648 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12649
12650 if (netif_running(dev)) {
12651 tg3_full_lock(tp, 0);
12652 __tg3_set_coalesce(tp, &tp->coal);
12653 tg3_full_unlock(tp);
12654 }
12655 return 0;
12656}
12657
/* ethtool operations table registered for every tg3 netdev. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= ethtool_op_get_ts_info,
};
12692
b4017c53
DM
12693static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12694 struct rtnl_link_stats64 *stats)
12695{
12696 struct tg3 *tp = netdev_priv(dev);
12697
0f566b20
MC
12698 spin_lock_bh(&tp->lock);
12699 if (!tp->hw_stats) {
12700 spin_unlock_bh(&tp->lock);
b4017c53 12701 return &tp->net_stats_prev;
0f566b20 12702 }
b4017c53 12703
b4017c53
DM
12704 tg3_get_nstats(tp, stats);
12705 spin_unlock_bh(&tp->lock);
12706
12707 return stats;
12708}
12709
/* ndo_set_rx_mode hook: apply the device's rx filter settings under the
 * full lock.  A no-op while the interface is down — the settings are
 * programmed during the next open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
12721
faf1627a
MC
12722static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12723 int new_mtu)
12724{
12725 dev->mtu = new_mtu;
12726
12727 if (new_mtu > ETH_DATA_LEN) {
12728 if (tg3_flag(tp, 5780_CLASS)) {
12729 netdev_update_features(dev);
12730 tg3_flag_clear(tp, TSO_CAPABLE);
12731 } else {
12732 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12733 }
12734 } else {
12735 if (tg3_flag(tp, 5780_CLASS)) {
12736 tg3_flag_set(tp, TSO_CAPABLE);
12737 netdev_update_features(dev);
12738 }
12739 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12740 }
12741}
12742
/* ndo_change_mtu hook.
 *
 * Validates the range, and if the interface is down simply records the
 * new MTU for the next open.  Otherwise the chip is stopped, the MTU and
 * jumbo/TSO flags updated, and the hardware restarted.  Returns 0 on
 * success, -EINVAL for an out-of-range MTU, or the tg3_restart_hw()
 * error.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
12787
/* net_device operations table registered for every tg3 netdev. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
12805
/* Probe the size of an EEPROM-style NVRAM part.
 *
 * Starts from the default EEPROM_CHIP_SIZE; if the part carries a known
 * magic signature at offset 0, reads back offsets at increasing powers
 * of two until the signature reappears (address wrap-around), which
 * reveals the true size.  Leaves tp->nvram_size at the default on any
 * read error or unrecognized magic.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
6aa20a22 12839
1da177e4
LT
/* Determine tp->nvram_size.
 *
 * Selfboot-format images (no TG3_EEPROM_MAGIC at offset 0) are sized by
 * tg3_get_eeprom_size().  Otherwise the size is taken from the directory
 * word at offset 0xf0 (in KB), falling back to 512KB when the field is
 * zero or unreadable.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
12872
/* Decode NVRAM_CFG1 into the driver's NVRAM description.
 *
 * Sets the FLASH / NVRAM_BUFFERED flags, tp->nvram_jedecnum, and
 * tp->nvram_pagesize.  Vendor decoding from CFG1 only applies to 5750
 * and 5780-class chips; everything else defaults to a buffered Atmel
 * AT45DB0x1B part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: force non-bypass access mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
12923
a1b950d5
MC
12924static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12925{
12926 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12927 case FLASH_5752PAGE_SIZE_256:
12928 tp->nvram_pagesize = 256;
12929 break;
12930 case FLASH_5752PAGE_SIZE_512:
12931 tp->nvram_pagesize = 512;
12932 break;
12933 case FLASH_5752PAGE_SIZE_1K:
12934 tp->nvram_pagesize = 1024;
12935 break;
12936 case FLASH_5752PAGE_SIZE_2K:
12937 tp->nvram_pagesize = 2048;
12938 break;
12939 case FLASH_5752PAGE_SIZE_4K:
12940 tp->nvram_pagesize = 4096;
12941 break;
12942 case FLASH_5752PAGE_SIZE_264:
12943 tp->nvram_pagesize = 264;
12944 break;
12945 case FLASH_5752PAGE_SIZE_528:
12946 tp->nvram_pagesize = 528;
12947 break;
12948 }
12949}
12950
361b4ac2
MC
/* Decode NVRAM_CFG1 for the 5752: set the JEDEC vendor id, the
 * buffered/flash flags, and the page size.  Also honors the TPM
 * protection bit.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM access goes through the normal state machine,
		 * so turn the compatibility bypass off.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
12991
d3c7b886
MC
/* Decode NVRAM_CFG1 for the 5755: vendor id, flags, page size, and the
 * usable NVRAM size.  When the TPM protection bit is set, part of the
 * device is reserved and the reported size is reduced accordingly.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Protected parts expose a truncated size (0x3e200 /
		 * 0x1f200 bytes) instead of the full device capacity.
		 */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		/* ST parts halve the visible size when protected. */
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
13047
1b27777a
MC
/* Decode NVRAM_CFG1 for the 5787 (also used for 5784/5785): record the
 * JEDEC vendor id, buffered/flash flags, and page size.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		/* For eeprom, report the maximum chip size as pagesize. */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM accesses go through the state machine, so make
		 * sure the compatibility bypass is off.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
13085
6b91fa02
MC
/* Decode NVRAM_CFG1 for the 5761: vendor id, flags, page size, and
 * NVRAM size.  When the TPM protection bit is set the usable size is
 * read from the NVRAM_ADDR_LOCKOUT register instead of being derived
 * from the part number.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* Atmel parts here use linear addressing. */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected: hardware reports the accessible size. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows from the part number. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
13160
b5d3772c
MC
13161static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13162{
13163 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13164 tg3_flag_set(tp, NVRAM_BUFFERED);
b5d3772c
MC
13165 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13166}
13167
321d32a0
MC
/* Decode NVRAM_CFG1 for the 57780 (also used for 57765-class): vendor
 * id, flags, size, and page size.  Unrecognized vendor codes mean the
 * device has no usable NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM path: disable compatibility bypass and skip the
		 * flash page-size probing below.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific ST part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* 264/528-byte pages use the native DataFlash addressing; other
	 * sizes need no address translation.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13239
13240
/* Decode NVRAM_CFG1 for the 5717/5719: vendor id, flags, size, and
 * page size.  Some part numbers leave nvram_size at zero so the caller
 * falls back to tg3_nvram_get_size().
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM path: disable compatibility bypass and return
		 * before the flash page-size probing below.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte DataFlash pages need address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13318
9b91b5f1
MC
/* Decode NVRAM_CFG1 for the 5720: vendor id, flags, size, and page
 * size, keyed off the pin-strap/vendor field.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		/* EEPROM path: disable compatibility bypass; the "HD"
		 * strap selects the larger AT24C512-sized part.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific Atmel part. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific ST part. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte DataFlash pages need address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13430
1da177e4
LT
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Reset the EEPROM state machine, enable serial-EEPROM access, and
 * dispatch to the ASIC-specific NVRAM-decode routine.  On 5700/5701
 * there is no NVRAM interface, so only the EEPROM size is probed.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* Give the FSM reset time to complete. */
	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM is shared with the bootcode/APE; take the
		 * hardware lock before touching it.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Zero means "not determined"; a per-ASIC routine below
		 * may fill it in, otherwise tg3_get_nvram_size() probes.
		 */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: plain EEPROM only, no NVRAM interface. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
13496
1da177e4
LT
/* Maps a PCI subsystem (vendor, device) pair to the PHY id used on
 * that board; phy_id is 0 when no PHY id is associated with the entry
 * (presumably serdes/fiber boards — the SX/serdes names suggest so,
 * verify against callers).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13565
24daf2b0 13566static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
1da177e4
LT
13567{
13568 int i;
13569
13570 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13571 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13572 tp->pdev->subsystem_vendor) &&
13573 (subsys_id_to_phy_id[i].subsys_devid ==
13574 tp->pdev->subsystem_device))
13575 return &subsys_id_to_phy_id[i];
13576 }
13577 return NULL;
13578}
13579
7d0c41ef 13580static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 13581{
1da177e4 13582 u32 val;
f49639e6 13583
79eb6904 13584 tp->phy_id = TG3_PHY_ID_INVALID;
7d0c41ef
MC
13585 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13586
a85feb8c 13587 /* Assume an onboard device and WOL capable by default. */
63c3a66f
JP
13588 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13589 tg3_flag_set(tp, WOL_CAP);
72b845e0 13590
b5d3772c 13591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9d26e213 13592 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
63c3a66f
JP
13593 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13594 tg3_flag_set(tp, IS_NIC);
9d26e213 13595 }
0527ba35
MC
13596 val = tr32(VCPU_CFGSHDW);
13597 if (val & VCPU_CFGSHDW_ASPM_DBNC)
63c3a66f 13598 tg3_flag_set(tp, ASPM_WORKAROUND);
0527ba35 13599 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
6fdbab9d 13600 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
63c3a66f 13601 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
13602 device_set_wakeup_enable(&tp->pdev->dev, true);
13603 }
05ac4cb7 13604 goto done;
b5d3772c
MC
13605 }
13606
1da177e4
LT
13607 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13608 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13609 u32 nic_cfg, led_cfg;
a9daf367 13610 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
7d0c41ef 13611 int eeprom_phy_serdes = 0;
1da177e4
LT
13612
13613 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13614 tp->nic_sram_data_cfg = nic_cfg;
13615
13616 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13617 ver >>= NIC_SRAM_DATA_VER_SHIFT;
6ff6f81d
MC
13618 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13619 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13620 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
1da177e4
LT
13621 (ver > 0) && (ver < 0x100))
13622 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13623
a9daf367
MC
13624 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13625 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13626
1da177e4
LT
13627 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13628 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13629 eeprom_phy_serdes = 1;
13630
13631 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13632 if (nic_phy_id != 0) {
13633 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13634 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13635
13636 eeprom_phy_id = (id1 >> 16) << 10;
13637 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13638 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13639 } else
13640 eeprom_phy_id = 0;
13641
7d0c41ef 13642 tp->phy_id = eeprom_phy_id;
747e8f8b 13643 if (eeprom_phy_serdes) {
63c3a66f 13644 if (!tg3_flag(tp, 5705_PLUS))
f07e9af3 13645 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
a50d0796 13646 else
f07e9af3 13647 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
747e8f8b 13648 }
7d0c41ef 13649
63c3a66f 13650 if (tg3_flag(tp, 5750_PLUS))
1da177e4
LT
13651 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13652 SHASTA_EXT_LED_MODE_MASK);
cbf46853 13653 else
1da177e4
LT
13654 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13655
13656 switch (led_cfg) {
13657 default:
13658 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13659 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13660 break;
13661
13662 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13663 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13664 break;
13665
13666 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13667 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
13668
13669 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13670 * read on some older 5700/5701 bootcode.
13671 */
13672 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13673 ASIC_REV_5700 ||
13674 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13675 ASIC_REV_5701)
13676 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13677
1da177e4
LT
13678 break;
13679
13680 case SHASTA_EXT_LED_SHARED:
13681 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13682 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13683 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13684 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13685 LED_CTRL_MODE_PHY_2);
13686 break;
13687
13688 case SHASTA_EXT_LED_MAC:
13689 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13690 break;
13691
13692 case SHASTA_EXT_LED_COMBO:
13693 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13694 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13695 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13696 LED_CTRL_MODE_PHY_2);
13697 break;
13698
855e1111 13699 }
1da177e4
LT
13700
13701 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13702 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13703 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13704 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13705
b2a5c19c
MC
13706 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13707 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
5f60891b 13708
9d26e213 13709 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
63c3a66f 13710 tg3_flag_set(tp, EEPROM_WRITE_PROT);
9d26e213
MC
13711 if ((tp->pdev->subsystem_vendor ==
13712 PCI_VENDOR_ID_ARIMA) &&
13713 (tp->pdev->subsystem_device == 0x205a ||
13714 tp->pdev->subsystem_device == 0x2063))
63c3a66f 13715 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
9d26e213 13716 } else {
63c3a66f
JP
13717 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13718 tg3_flag_set(tp, IS_NIC);
9d26e213 13719 }
1da177e4
LT
13720
13721 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
63c3a66f
JP
13722 tg3_flag_set(tp, ENABLE_ASF);
13723 if (tg3_flag(tp, 5750_PLUS))
13724 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
1da177e4 13725 }
b2b98d4a
MC
13726
13727 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
63c3a66f
JP
13728 tg3_flag(tp, 5750_PLUS))
13729 tg3_flag_set(tp, ENABLE_APE);
b2b98d4a 13730
f07e9af3 13731 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
a85feb8c 13732 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
63c3a66f 13733 tg3_flag_clear(tp, WOL_CAP);
1da177e4 13734
63c3a66f 13735 if (tg3_flag(tp, WOL_CAP) &&
6fdbab9d 13736 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
63c3a66f 13737 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
13738 device_set_wakeup_enable(&tp->pdev->dev, true);
13739 }
0527ba35 13740
1da177e4 13741 if (cfg2 & (1 << 17))
f07e9af3 13742 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
1da177e4
LT
13743
13744 /* serdes signal pre-emphasis in register 0x590 set by */
13745 /* bootcode if bit 18 is set */
13746 if (cfg2 & (1 << 18))
f07e9af3 13747 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
8ed5d97e 13748
63c3a66f
JP
13749 if ((tg3_flag(tp, 57765_PLUS) ||
13750 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13751 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
6833c043 13752 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
f07e9af3 13753 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
6833c043 13754
63c3a66f 13755 if (tg3_flag(tp, PCI_EXPRESS) &&
8c69b1e7 13756 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 13757 !tg3_flag(tp, 57765_PLUS)) {
8ed5d97e
MC
13758 u32 cfg3;
13759
13760 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13761 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
63c3a66f 13762 tg3_flag_set(tp, ASPM_WORKAROUND);
8ed5d97e 13763 }
a9daf367 13764
14417063 13765 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
63c3a66f 13766 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
a9daf367 13767 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
63c3a66f 13768 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
a9daf367 13769 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
63c3a66f 13770 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
1da177e4 13771 }
05ac4cb7 13772done:
63c3a66f 13773 if (tg3_flag(tp, WOL_CAP))
43067ed8 13774 device_set_wakeup_enable(&tp->pdev->dev,
63c3a66f 13775 tg3_flag(tp, WOL_ENABLE));
43067ed8
RW
13776 else
13777 device_set_wakeup_capable(&tp->pdev->dev, false);
7d0c41ef
MC
13778}
13779
b2a5c19c
MC
/* Issue one command to the chip's OTP (one-time-programmable) block and
 * poll for completion.
 *
 * The command is written twice: first with the START bit set to kick off
 * execution, then without it.
 *
 * Returns 0 on success, -EBUSY if the command did not complete within
 * the ~1 ms polling window.
 */
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	/* val holds the last OTP_STATUS read; loop above runs at least once. */
	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
13798
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 *
 * Returns the merged 32-bit gphy config, or 0 if any OTP command fails
 * (callers must treat 0 as "no OTP config available").
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low half of the first word + high half of the second word. */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
13828
e256f8a3
MC
13829static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13830{
202ff1c2 13831 u32 adv = ADVERTISED_Autoneg;
e256f8a3
MC
13832
13833 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13834 adv |= ADVERTISED_1000baseT_Half |
13835 ADVERTISED_1000baseT_Full;
13836
13837 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13838 adv |= ADVERTISED_100baseT_Half |
13839 ADVERTISED_100baseT_Full |
13840 ADVERTISED_10baseT_Half |
13841 ADVERTISED_10baseT_Full |
13842 ADVERTISED_TP;
13843 else
13844 adv |= ADVERTISED_FIBRE;
13845
13846 tp->link_config.advertising = adv;
e740522e
MC
13847 tp->link_config.speed = SPEED_UNKNOWN;
13848 tp->link_config.duplex = DUPLEX_UNKNOWN;
e256f8a3 13849 tp->link_config.autoneg = AUTONEG_ENABLE;
e740522e
MC
13850 tp->link_config.active_speed = SPEED_UNKNOWN;
13851 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
34655ad6
MC
13852
13853 tp->old_link = -1;
e256f8a3
MC
13854}
13855
7d0c41ef
MC
/* Probe and identify the PHY attached to this device.
 *
 * Sets up the APE PHY lock for the PCI function, reads the PHY ID
 * (unless ASF/APE firmware owns the PHY), records serdes vs copper in
 * tp->phy_flags, initializes default link_config, and — for bare copper
 * PHYs with no firmware present — performs an initial PHY reset and
 * autoneg restart.
 *
 * Returns 0 on success, -ENODEV if no PHY could be identified, or the
 * error from tg3_phy_reset()/tg3_init_5401phy_dsp().
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function arbitrates PHY access with APE firmware
	 * through its own hardware lock.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* When phylib manages the PHY, delegate the whole probe. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Fold the two MII ID registers into the driver's
		 * internal PHY ID encoding.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Chips/revisions on this list support Energy Efficient Ethernet
	 * on their copper interfaces.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice: the MII link-status bit is latched,
		 * so the second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* Restart autoneg only if the advertised configuration
		 * in the PHY does not already match link_config.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time and its status returned — presumably a
		 * 5401 quirk; confirm before "simplifying".
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13983
/* Populate tp->board_part_number (and, for Dell-branded boards,
 * prepend the bootcode version to tp->fw_ver) from the device's
 * PCI VPD block.
 *
 * If no VPD is available, or no part number is found in it, fall back
 * to a hardcoded part-number string derived from the ASIC rev / PCI
 * device ID, or "none" if nothing matches.
 */
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD section and bound all further
	 * parsing to it.
	 */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* Dell ("1028") boards store the bootcode version in the V0
	 * vendor-specific keyword; copy it into fw_ver if present.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		/* NOTE(review): the strncat bound is derived from vpdlen
		 * (the VPD buffer length) rather than the size of the
		 * fw_ver destination buffer — looks suspicious; verify
		 * against TG3_VER_SIZE.
		 */
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* Fall back to a name derived from the ASIC rev and device ID. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
14104
9c8a620e
MC
14105static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14106{
14107 u32 val;
14108
e4f34110 14109 if (tg3_nvram_read(tp, offset, &val) ||
9c8a620e 14110 (val & 0xfc000000) != 0x0c000000 ||
e4f34110 14111 tg3_nvram_read(tp, offset + 4, &val) ||
9c8a620e
MC
14112 val != 0)
14113 return 0;
14114
14115 return 1;
14116}
14117
acd9c119
MC
/* Append the bootcode version from NVRAM to tp->fw_ver.
 *
 * Newer images store a 16-byte version string whose pointer lives at
 * header offset +8; older images encode major/minor in the
 * TG3_NVM_PTREV_BCVER word.  All NVRAM read failures abort silently,
 * leaving fw_ver unchanged.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* New-format images carry the 0x0c000000 signature and a zero
	 * second word (same check as tg3_fw_img_is_valid()).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever is already in fw_ver (e.g. VPD data). */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the fixed 16-byte version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
14169
a6f6cb1c
MC
14170static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14171{
14172 u32 val, major, minor;
14173
14174 /* Use native endian representation */
14175 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14176 return;
14177
14178 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14179 TG3_NVM_HWSB_CFG1_MAJSFT;
14180 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14181 TG3_NVM_HWSB_CFG1_MINSFT;
14182
14183 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14184}
14185
dfe00d7d
MC
/* Append the selfboot firmware version to tp->fw_ver.
 *
 * @val is the NVRAM magic word already read by the caller; its format
 * and revision fields select where the encoded date/version word
 * (EDH) lives for this image revision.  Produces "sb vM.mm" plus an
 * optional trailing build letter ('a' + build - 1).
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each image revision stores the EDH word at a different offset. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just the "sb" marker. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity-check decoded fields (build maps to letters a-z). */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
14240
/* Append the management (ASF) firmware version string to tp->fw_ver.
 *
 * Walks the NVRAM directory for the ASFINI entry, validates the image
 * with tg3_fw_img_is_valid(), then copies up to 16 bytes of version
 * text (preceded by ", ") into fw_ver, truncating at TG3_VER_SIZE.
 * Any NVRAM read failure aborts silently.
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for the ASF init image entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 devices use a fixed image base address. */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to four 4-byte chunks of version text. */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Last chunk would overflow fw_ver: copy what fits, stop. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
14292
165f4d1c 14293static void __devinit tg3_probe_ncsi(struct tg3 *tp)
7fd76445 14294{
7fd76445 14295 u32 apedata;
7fd76445
MC
14296
14297 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14298 if (apedata != APE_SEG_SIG_MAGIC)
14299 return;
14300
14301 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14302 if (!(apedata & APE_FW_STATUS_READY))
14303 return;
14304
165f4d1c
MC
14305 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14306 tg3_flag_set(tp, APE_HAS_NCSI);
14307}
14308
14309static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14310{
14311 int vlen;
14312 u32 apedata;
14313 char *fwtype;
14314
7fd76445
MC
14315 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14316
165f4d1c 14317 if (tg3_flag(tp, APE_HAS_NCSI))
ecc79648 14318 fwtype = "NCSI";
165f4d1c 14319 else
ecc79648
MC
14320 fwtype = "DASH";
14321
7fd76445
MC
14322 vlen = strlen(tp->fw_ver);
14323
ecc79648
MC
14324 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14325 fwtype,
7fd76445
MC
14326 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14327 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14328 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14329 (apedata & APE_FW_VERSION_BLDMSK));
14330}
14331
acd9c119
MC
/* Build the complete firmware version string in tp->fw_ver.
 *
 * Dispatches to the bootcode / selfboot / hardware-selfboot readers
 * based on the NVRAM magic word, then optionally appends the
 * DASH/NCSI or management-firmware version.  Versions already placed
 * in fw_ver by VPD parsing (vpd_vers) suppress the firmware-derived
 * suffixes.  Always NUL-terminates fw_ver.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	/* Non-empty fw_ver means VPD parsing already supplied a version. */
	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	/* The NVRAM magic identifies which image format is present. */
	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	/* Guarantee NUL termination regardless of how much was appended. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
14367
7cb32cf2
MC
14368static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14369{
63c3a66f 14370 if (tg3_flag(tp, LRG_PROD_RING_CAP))
de9f5230 14371 return TG3_RX_RET_MAX_SIZE_5717;
63c3a66f 14372 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
de9f5230 14373 return TG3_RX_RET_MAX_SIZE_5700;
7cb32cf2 14374 else
de9f5230 14375 return TG3_RX_RET_MAX_SIZE_5705;
7cb32cf2
MC
14376}
14377
/* Host bridges matched against this table elsewhere in the driver —
 * name suggests these chipsets reorder posted writes; the consuming
 * code is outside this view (TODO confirm usage site).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
14384
16c7fa7d
MC
/* Find the peer PCI function of a dual-port device (e.g. 5704).
 *
 * Scans the other functions in the same PCI slot and returns the first
 * device that is not tp->pdev; returns tp->pdev itself when no peer
 * exists.  The returned pointer is deliberately NOT reference-counted
 * (see comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* pci_dev_put(NULL) is a no-op, so an empty slot is fine. */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
14412
42b123b1
MC
/* Determine tp->pci_chip_rev_id and derive the chip-family flags
 * (5705_PLUS, 5750_PLUS, 5755_PLUS, 5780_CLASS, 5717_PLUS,
 * 57765_CLASS, 57765_PLUS) from it.
 *
 * @misc_ctrl_reg: raw TG3PCI_MISC_HOST_CTRL value read by the caller.
 *
 * NOTE: the flag derivations below build on one another, so their
 * order matters (e.g. 5755_PLUS depends on 57765_PLUS being set first).
 */
static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	/* Newer chips report a sentinel here and expose the real ASIC
	 * rev in a product-ID config register instead.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 is treated as a 5720 A0 throughout the driver. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
14493
3d567e0e
NNS
14494static bool tg3_10_100_only_device(struct tg3 *tp,
14495 const struct pci_device_id *ent)
14496{
14497 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14498
14499 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14500 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14501 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14502 return true;
14503
14504 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14506 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14507 return true;
14508 } else {
14509 return true;
14510 }
14511 }
14512
14513 return false;
14514}
14515
14516static int __devinit tg3_get_invariants(struct tg3 *tp,
14517 const struct pci_device_id *ent)
1da177e4 14518{
1da177e4 14519 u32 misc_ctrl_reg;
1da177e4
LT
14520 u32 pci_state_reg, grc_misc_cfg;
14521 u32 val;
14522 u16 pci_cmd;
5e7dfd0f 14523 int err;
1da177e4 14524
1da177e4
LT
14525 /* Force memory write invalidate off. If we leave it on,
14526 * then on 5700_BX chips we have to enable a workaround.
14527 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14528 * to match the cacheline size. The Broadcom driver have this
14529 * workaround but turns MWI off all the times so never uses
14530 * it. This seems to suggest that the workaround is insufficient.
14531 */
14532 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14533 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14534 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14535
16821285
MC
14536 /* Important! -- Make sure register accesses are byteswapped
14537 * correctly. Also, for those chips that require it, make
14538 * sure that indirect register accesses are enabled before
14539 * the first operation.
1da177e4
LT
14540 */
14541 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14542 &misc_ctrl_reg);
16821285
MC
14543 tp->misc_host_ctrl |= (misc_ctrl_reg &
14544 MISC_HOST_CTRL_CHIPREV);
14545 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14546 tp->misc_host_ctrl);
1da177e4 14547
42b123b1 14548 tg3_detect_asic_rev(tp, misc_ctrl_reg);
ff645bec 14549
6892914f
MC
14550 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14551 * we need to disable memory and use config. cycles
14552 * only to access all registers. The 5702/03 chips
14553 * can mistakenly decode the special cycles from the
14554 * ICH chipsets as memory write cycles, causing corruption
14555 * of register and memory space. Only certain ICH bridges
14556 * will drive special cycles with non-zero data during the
14557 * address phase which can fall within the 5703's address
14558 * range. This is not an ICH bug as the PCI spec allows
14559 * non-zero address during special cycles. However, only
14560 * these ICH bridges are known to drive non-zero addresses
14561 * during special cycles.
14562 *
14563 * Since special cycles do not cross PCI bridges, we only
14564 * enable this workaround if the 5703 is on the secondary
14565 * bus of these ICH bridges.
14566 */
14567 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14568 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14569 static struct tg3_dev_id {
14570 u32 vendor;
14571 u32 device;
14572 u32 rev;
14573 } ich_chipsets[] = {
14574 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14575 PCI_ANY_ID },
14576 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14577 PCI_ANY_ID },
14578 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14579 0xa },
14580 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14581 PCI_ANY_ID },
14582 { },
14583 };
14584 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14585 struct pci_dev *bridge = NULL;
14586
14587 while (pci_id->vendor != 0) {
14588 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14589 bridge);
14590 if (!bridge) {
14591 pci_id++;
14592 continue;
14593 }
14594 if (pci_id->rev != PCI_ANY_ID) {
44c10138 14595 if (bridge->revision > pci_id->rev)
6892914f
MC
14596 continue;
14597 }
14598 if (bridge->subordinate &&
14599 (bridge->subordinate->number ==
14600 tp->pdev->bus->number)) {
63c3a66f 14601 tg3_flag_set(tp, ICH_WORKAROUND);
6892914f
MC
14602 pci_dev_put(bridge);
14603 break;
14604 }
14605 }
14606 }
14607
6ff6f81d 14608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
41588ba1
MC
14609 static struct tg3_dev_id {
14610 u32 vendor;
14611 u32 device;
14612 } bridge_chipsets[] = {
14613 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14614 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14615 { },
14616 };
14617 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14618 struct pci_dev *bridge = NULL;
14619
14620 while (pci_id->vendor != 0) {
14621 bridge = pci_get_device(pci_id->vendor,
14622 pci_id->device,
14623 bridge);
14624 if (!bridge) {
14625 pci_id++;
14626 continue;
14627 }
14628 if (bridge->subordinate &&
14629 (bridge->subordinate->number <=
14630 tp->pdev->bus->number) &&
b918c62e 14631 (bridge->subordinate->busn_res.end >=
41588ba1 14632 tp->pdev->bus->number)) {
63c3a66f 14633 tg3_flag_set(tp, 5701_DMA_BUG);
41588ba1
MC
14634 pci_dev_put(bridge);
14635 break;
14636 }
14637 }
14638 }
14639
4a29cc2e
MC
14640 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14641 * DMA addresses > 40-bit. This bridge may have other additional
14642 * 57xx devices behind it in some 4-port NIC designs for example.
14643 * Any tg3 device found behind the bridge will also need the 40-bit
14644 * DMA workaround.
14645 */
42b123b1 14646 if (tg3_flag(tp, 5780_CLASS)) {
63c3a66f 14647 tg3_flag_set(tp, 40BIT_DMA_BUG);
4cf78e4f 14648 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
859a5887 14649 } else {
4a29cc2e
MC
14650 struct pci_dev *bridge = NULL;
14651
14652 do {
14653 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14654 PCI_DEVICE_ID_SERVERWORKS_EPB,
14655 bridge);
14656 if (bridge && bridge->subordinate &&
14657 (bridge->subordinate->number <=
14658 tp->pdev->bus->number) &&
b918c62e 14659 (bridge->subordinate->busn_res.end >=
4a29cc2e 14660 tp->pdev->bus->number)) {
63c3a66f 14661 tg3_flag_set(tp, 40BIT_DMA_BUG);
4a29cc2e
MC
14662 pci_dev_put(bridge);
14663 break;
14664 }
14665 } while (bridge);
14666 }
4cf78e4f 14667
f6eb9b1f 14668 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3a1e19d3 14669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
7544b097
MC
14670 tp->pdev_peer = tg3_find_peer(tp);
14671
507399f1 14672 /* Determine TSO capabilities */
a0512944 14673 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
4d163b75 14674 ; /* Do nothing. HW bug. */
63c3a66f
JP
14675 else if (tg3_flag(tp, 57765_PLUS))
14676 tg3_flag_set(tp, HW_TSO_3);
14677 else if (tg3_flag(tp, 5755_PLUS) ||
e849cdc3 14678 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
63c3a66f
JP
14679 tg3_flag_set(tp, HW_TSO_2);
14680 else if (tg3_flag(tp, 5750_PLUS)) {
14681 tg3_flag_set(tp, HW_TSO_1);
14682 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
14683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14684 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
63c3a66f 14685 tg3_flag_clear(tp, TSO_BUG);
507399f1
MC
14686 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14687 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14688 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 14689 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
14690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14691 tp->fw_needed = FIRMWARE_TG3TSO5;
14692 else
14693 tp->fw_needed = FIRMWARE_TG3TSO;
14694 }
14695
dabc5c67 14696 /* Selectively allow TSO based on operating conditions */
6ff6f81d
MC
14697 if (tg3_flag(tp, HW_TSO_1) ||
14698 tg3_flag(tp, HW_TSO_2) ||
14699 tg3_flag(tp, HW_TSO_3) ||
cf9ecf4b
MC
14700 tp->fw_needed) {
14701 /* For firmware TSO, assume ASF is disabled.
14702 * We'll disable TSO later if we discover ASF
14703 * is enabled in tg3_get_eeprom_hw_cfg().
14704 */
dabc5c67 14705 tg3_flag_set(tp, TSO_CAPABLE);
cf9ecf4b 14706 } else {
dabc5c67
MC
14707 tg3_flag_clear(tp, TSO_CAPABLE);
14708 tg3_flag_clear(tp, TSO_BUG);
14709 tp->fw_needed = NULL;
14710 }
14711
14712 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14713 tp->fw_needed = FIRMWARE_TG3;
14714
507399f1
MC
14715 tp->irq_max = 1;
14716
63c3a66f
JP
14717 if (tg3_flag(tp, 5750_PLUS)) {
14718 tg3_flag_set(tp, SUPPORT_MSI);
7544b097
MC
14719 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14720 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14721 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14722 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14723 tp->pdev_peer == tp->pdev))
63c3a66f 14724 tg3_flag_clear(tp, SUPPORT_MSI);
7544b097 14725
63c3a66f 14726 if (tg3_flag(tp, 5755_PLUS) ||
b5d3772c 14727 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
63c3a66f 14728 tg3_flag_set(tp, 1SHOT_MSI);
52c0fd83 14729 }
4f125f42 14730
63c3a66f
JP
14731 if (tg3_flag(tp, 57765_PLUS)) {
14732 tg3_flag_set(tp, SUPPORT_MSIX);
507399f1
MC
14733 tp->irq_max = TG3_IRQ_MAX_VECS;
14734 }
f6eb9b1f 14735 }
0e1406dd 14736
9102426a
MC
14737 tp->txq_max = 1;
14738 tp->rxq_max = 1;
14739 if (tp->irq_max > 1) {
14740 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14741 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14742
14743 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14745 tp->txq_max = tp->irq_max - 1;
14746 }
14747
b7abee6e
MC
14748 if (tg3_flag(tp, 5755_PLUS) ||
14749 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
63c3a66f 14750 tg3_flag_set(tp, SHORT_DMA_BUG);
f6eb9b1f 14751
e31aa987 14752 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
a4cb428d 14753 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
e31aa987 14754
fa6b2aae
MC
14755 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14756 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14757 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
63c3a66f 14758 tg3_flag_set(tp, LRG_PROD_RING_CAP);
de9f5230 14759
63c3a66f 14760 if (tg3_flag(tp, 57765_PLUS) &&
a0512944 14761 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
63c3a66f 14762 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
b703df6f 14763
63c3a66f
JP
14764 if (!tg3_flag(tp, 5705_PLUS) ||
14765 tg3_flag(tp, 5780_CLASS) ||
14766 tg3_flag(tp, USE_JUMBO_BDFLAG))
14767 tg3_flag_set(tp, JUMBO_CAPABLE);
0f893dc6 14768
52f4490c
MC
14769 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14770 &pci_state_reg);
14771
708ebb3a 14772 if (pci_is_pcie(tp->pdev)) {
5e7dfd0f
MC
14773 u16 lnkctl;
14774
63c3a66f 14775 tg3_flag_set(tp, PCI_EXPRESS);
5f5c51e3 14776
0f49bfbd 14777 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
5e7dfd0f 14778 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
7196cd6c
MC
14779 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14780 ASIC_REV_5906) {
63c3a66f 14781 tg3_flag_clear(tp, HW_TSO_2);
dabc5c67 14782 tg3_flag_clear(tp, TSO_CAPABLE);
7196cd6c 14783 }
5e7dfd0f 14784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0 14785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9cf74ebb
MC
14786 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14787 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
63c3a66f 14788 tg3_flag_set(tp, CLKREQ_BUG);
614b0590 14789 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
63c3a66f 14790 tg3_flag_set(tp, L1PLLPD_EN);
c7835a77 14791 }
52f4490c 14792 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
708ebb3a
JM
14793 /* BCM5785 devices are effectively PCIe devices, and should
14794 * follow PCIe codepaths, but do not have a PCIe capabilities
14795 * section.
93a700a9 14796 */
63c3a66f
JP
14797 tg3_flag_set(tp, PCI_EXPRESS);
14798 } else if (!tg3_flag(tp, 5705_PLUS) ||
14799 tg3_flag(tp, 5780_CLASS)) {
52f4490c
MC
14800 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14801 if (!tp->pcix_cap) {
2445e461
MC
14802 dev_err(&tp->pdev->dev,
14803 "Cannot find PCI-X capability, aborting\n");
52f4490c
MC
14804 return -EIO;
14805 }
14806
14807 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
63c3a66f 14808 tg3_flag_set(tp, PCIX_MODE);
52f4490c 14809 }
1da177e4 14810
399de50b
MC
14811 /* If we have an AMD 762 or VIA K8T800 chipset, write
14812 * reordering to the mailbox registers done by the host
14813 * controller can cause major troubles. We read back from
14814 * every mailbox register write to force the writes to be
14815 * posted to the chip in order.
14816 */
4143470c 14817 if (pci_dev_present(tg3_write_reorder_chipsets) &&
63c3a66f
JP
14818 !tg3_flag(tp, PCI_EXPRESS))
14819 tg3_flag_set(tp, MBOX_WRITE_REORDER);
399de50b 14820
69fc4053
MC
14821 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14822 &tp->pci_cacheline_sz);
14823 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14824 &tp->pci_lat_timer);
1da177e4
LT
14825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14826 tp->pci_lat_timer < 64) {
14827 tp->pci_lat_timer = 64;
69fc4053
MC
14828 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14829 tp->pci_lat_timer);
1da177e4
LT
14830 }
14831
16821285
MC
14832 /* Important! -- It is critical that the PCI-X hw workaround
14833 * situation is decided before the first MMIO register access.
14834 */
52f4490c
MC
14835 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14836 /* 5700 BX chips need to have their TX producer index
14837 * mailboxes written twice to workaround a bug.
14838 */
63c3a66f 14839 tg3_flag_set(tp, TXD_MBOX_HWBUG);
1da177e4 14840
52f4490c 14841 /* If we are in PCI-X mode, enable register write workaround.
1da177e4
LT
14842 *
14843 * The workaround is to use indirect register accesses
14844 * for all chip writes not to mailbox registers.
14845 */
63c3a66f 14846 if (tg3_flag(tp, PCIX_MODE)) {
1da177e4 14847 u32 pm_reg;
1da177e4 14848
63c3a66f 14849 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
1da177e4
LT
14850
14851 /* The chip can have it's power management PCI config
14852 * space registers clobbered due to this bug.
14853 * So explicitly force the chip into D0 here.
14854 */
9974a356
MC
14855 pci_read_config_dword(tp->pdev,
14856 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
14857 &pm_reg);
14858 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14859 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
14860 pci_write_config_dword(tp->pdev,
14861 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
14862 pm_reg);
14863
14864 /* Also, force SERR#/PERR# in PCI command. */
14865 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14866 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14867 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14868 }
14869 }
14870
1da177e4 14871 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
63c3a66f 14872 tg3_flag_set(tp, PCI_HIGH_SPEED);
1da177e4 14873 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
63c3a66f 14874 tg3_flag_set(tp, PCI_32BIT);
1da177e4
LT
14875
14876 /* Chip-specific fixup from Broadcom driver */
14877 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14878 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14879 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14880 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14881 }
14882
1ee582d8 14883 /* Default fast path register access methods */
20094930 14884 tp->read32 = tg3_read32;
1ee582d8 14885 tp->write32 = tg3_write32;
09ee929c 14886 tp->read32_mbox = tg3_read32;
20094930 14887 tp->write32_mbox = tg3_write32;
1ee582d8
MC
14888 tp->write32_tx_mbox = tg3_write32;
14889 tp->write32_rx_mbox = tg3_write32;
14890
14891 /* Various workaround register access methods */
63c3a66f 14892 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
1ee582d8 14893 tp->write32 = tg3_write_indirect_reg32;
98efd8a6 14894 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
63c3a66f 14895 (tg3_flag(tp, PCI_EXPRESS) &&
98efd8a6
MC
14896 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14897 /*
14898 * Back to back register writes can cause problems on these
14899 * chips, the workaround is to read back all reg writes
14900 * except those to mailbox regs.
14901 *
14902 * See tg3_write_indirect_reg32().
14903 */
1ee582d8 14904 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
14905 }
14906
63c3a66f 14907 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
1ee582d8 14908 tp->write32_tx_mbox = tg3_write32_tx_mbox;
63c3a66f 14909 if (tg3_flag(tp, MBOX_WRITE_REORDER))
1ee582d8
MC
14910 tp->write32_rx_mbox = tg3_write_flush_reg32;
14911 }
20094930 14912
63c3a66f 14913 if (tg3_flag(tp, ICH_WORKAROUND)) {
6892914f
MC
14914 tp->read32 = tg3_read_indirect_reg32;
14915 tp->write32 = tg3_write_indirect_reg32;
14916 tp->read32_mbox = tg3_read_indirect_mbox;
14917 tp->write32_mbox = tg3_write_indirect_mbox;
14918 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14919 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14920
14921 iounmap(tp->regs);
22abe310 14922 tp->regs = NULL;
6892914f
MC
14923
14924 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14925 pci_cmd &= ~PCI_COMMAND_MEMORY;
14926 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14927 }
b5d3772c
MC
14928 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14929 tp->read32_mbox = tg3_read32_mbox_5906;
14930 tp->write32_mbox = tg3_write32_mbox_5906;
14931 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14932 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14933 }
6892914f 14934
bbadf503 14935 if (tp->write32 == tg3_write_indirect_reg32 ||
63c3a66f 14936 (tg3_flag(tp, PCIX_MODE) &&
bbadf503 14937 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 14938 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
63c3a66f 14939 tg3_flag_set(tp, SRAM_USE_CONFIG);
bbadf503 14940
16821285
MC
14941 /* The memory arbiter has to be enabled in order for SRAM accesses
14942 * to succeed. Normally on powerup the tg3 chip firmware will make
14943 * sure it is enabled, but other entities such as system netboot
14944 * code might disable it.
14945 */
14946 val = tr32(MEMARB_MODE);
14947 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14948
9dc5e342
MC
14949 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14951 tg3_flag(tp, 5780_CLASS)) {
14952 if (tg3_flag(tp, PCIX_MODE)) {
14953 pci_read_config_dword(tp->pdev,
14954 tp->pcix_cap + PCI_X_STATUS,
14955 &val);
14956 tp->pci_fn = val & 0x7;
14957 }
14958 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14959 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14960 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14961 NIC_SRAM_CPMUSTAT_SIG) {
14962 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14963 tp->pci_fn = tp->pci_fn ? 1 : 0;
14964 }
14965 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14967 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14968 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14969 NIC_SRAM_CPMUSTAT_SIG) {
14970 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14971 TG3_CPMU_STATUS_FSHFT_5719;
14972 }
69f11c99
MC
14973 }
14974
7d0c41ef 14975 /* Get eeprom hw config before calling tg3_set_power_state().
63c3a66f 14976 * In particular, the TG3_FLAG_IS_NIC flag must be
7d0c41ef
MC
14977 * determined before calling tg3_set_power_state() so that
14978 * we know whether or not to switch out of Vaux power.
14979 * When the flag is set, it means that GPIO1 is used for eeprom
14980 * write protect and also implies that it is a LOM where GPIOs
14981 * are not used to switch power.
6aa20a22 14982 */
7d0c41ef
MC
14983 tg3_get_eeprom_hw_cfg(tp);
14984
cf9ecf4b
MC
14985 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14986 tg3_flag_clear(tp, TSO_CAPABLE);
14987 tg3_flag_clear(tp, TSO_BUG);
14988 tp->fw_needed = NULL;
14989 }
14990
63c3a66f 14991 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
14992 /* Allow reads and writes to the
14993 * APE register and memory space.
14994 */
14995 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
14996 PCISTATE_ALLOW_APE_SHMEM_WR |
14997 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
14998 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14999 pci_state_reg);
c9cab24e
MC
15000
15001 tg3_ape_lock_init(tp);
0d3031d9
MC
15002 }
15003
16821285
MC
15004 /* Set up tp->grc_local_ctrl before calling
15005 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15006 * will bring 5700's external PHY out of reset.
314fba34
MC
15007 * It is also used as eeprom write protect on LOMs.
15008 */
15009 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
6ff6f81d 15010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
63c3a66f 15011 tg3_flag(tp, EEPROM_WRITE_PROT))
314fba34
MC
15012 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15013 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
15014 /* Unused GPIO3 must be driven as output on 5752 because there
15015 * are no pull-up resistors on unused GPIO pins.
15016 */
15017 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15018 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 15019
321d32a0 15020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
cb4ed1fd 15021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
55086ad9 15022 tg3_flag(tp, 57765_CLASS))
af36e6b6
MC
15023 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15024
8d519ab2
MC
15025 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15026 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
5f0c4a3c
MC
15027 /* Turn off the debug UART. */
15028 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
63c3a66f 15029 if (tg3_flag(tp, IS_NIC))
5f0c4a3c
MC
15030 /* Keep VMain power. */
15031 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15032 GRC_LCLCTRL_GPIO_OUTPUT0;
15033 }
15034
16821285
MC
15035 /* Switch out of Vaux if it is a NIC */
15036 tg3_pwrsrc_switch_to_vmain(tp);
1da177e4 15037
1da177e4
LT
15038 /* Derive initial jumbo mode from MTU assigned in
15039 * ether_setup() via the alloc_etherdev() call
15040 */
63c3a66f
JP
15041 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15042 tg3_flag_set(tp, JUMBO_RING_ENABLE);
1da177e4
LT
15043
15044 /* Determine WakeOnLan speed to use. */
15045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15046 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15047 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15048 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
63c3a66f 15049 tg3_flag_clear(tp, WOL_SPEED_100MB);
1da177e4 15050 } else {
63c3a66f 15051 tg3_flag_set(tp, WOL_SPEED_100MB);
1da177e4
LT
15052 }
15053
7f97a4bd 15054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
f07e9af3 15055 tp->phy_flags |= TG3_PHYFLG_IS_FET;
7f97a4bd 15056
1da177e4 15057 /* A few boards don't want Ethernet@WireSpeed phy feature */
6ff6f81d
MC
15058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15059 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
1da177e4 15060 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 15061 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
f07e9af3
MC
15062 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15063 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15064 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
1da177e4
LT
15065
15066 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15067 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
f07e9af3 15068 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
1da177e4 15069 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
f07e9af3 15070 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
1da177e4 15071
63c3a66f 15072 if (tg3_flag(tp, 5705_PLUS) &&
f07e9af3 15073 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
321d32a0 15074 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
f6eb9b1f 15075 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
63c3a66f 15076 !tg3_flag(tp, 57765_PLUS)) {
c424cb24 15077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 15078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
15079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
d4011ada
MC
15081 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15082 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
f07e9af3 15083 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
c1d2a196 15084 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
f07e9af3 15085 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
321d32a0 15086 } else
f07e9af3 15087 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
c424cb24 15088 }
1da177e4 15089
b2a5c19c
MC
15090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15091 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15092 tp->phy_otp = tg3_read_otp_phycfg(tp);
15093 if (tp->phy_otp == 0)
15094 tp->phy_otp = TG3_OTP_DEFAULT;
15095 }
15096
63c3a66f 15097 if (tg3_flag(tp, CPMU_PRESENT))
8ef21428
MC
15098 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15099 else
15100 tp->mi_mode = MAC_MI_MODE_BASE;
15101
1da177e4 15102 tp->coalesce_mode = 0;
1da177e4
LT
15103 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15104 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15105 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15106
4d958473
MC
15107 /* Set these bits to enable statistics workaround. */
15108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15109 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15110 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15111 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15112 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15113 }
15114
321d32a0
MC
15115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
63c3a66f 15117 tg3_flag_set(tp, USE_PHYLIB);
57e6983c 15118
158d7abd
MC
15119 err = tg3_mdio_init(tp);
15120 if (err)
15121 return err;
1da177e4
LT
15122
15123 /* Initialize data/descriptor byte/word swapping. */
15124 val = tr32(GRC_MODE);
f2096f94
MC
15125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15126 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15127 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15128 GRC_MODE_B2HRX_ENABLE |
15129 GRC_MODE_HTX2B_ENABLE |
15130 GRC_MODE_HOST_STACKUP);
15131 else
15132 val &= GRC_MODE_HOST_STACKUP;
15133
1da177e4
LT
15134 tw32(GRC_MODE, val | tp->grc_mode);
15135
15136 tg3_switch_clocks(tp);
15137
15138 /* Clear this out for sanity. */
15139 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15140
15141 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15142 &pci_state_reg);
15143 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
63c3a66f 15144 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
1da177e4
LT
15145 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15146
15147 if (chiprevid == CHIPREV_ID_5701_A0 ||
15148 chiprevid == CHIPREV_ID_5701_B0 ||
15149 chiprevid == CHIPREV_ID_5701_B2 ||
15150 chiprevid == CHIPREV_ID_5701_B5) {
15151 void __iomem *sram_base;
15152
15153 /* Write some dummy words into the SRAM status block
15154 * area, see if it reads back correctly. If the return
15155 * value is bad, force enable the PCIX workaround.
15156 */
15157 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15158
15159 writel(0x00000000, sram_base);
15160 writel(0x00000000, sram_base + 4);
15161 writel(0xffffffff, sram_base + 4);
15162 if (readl(sram_base) != 0x00000000)
63c3a66f 15163 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
1da177e4
LT
15164 }
15165 }
15166
15167 udelay(50);
15168 tg3_nvram_init(tp);
15169
15170 grc_misc_cfg = tr32(GRC_MISC_CFG);
15171 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15172
1da177e4
LT
15173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15174 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15175 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
63c3a66f 15176 tg3_flag_set(tp, IS_5788);
1da177e4 15177
63c3a66f 15178 if (!tg3_flag(tp, IS_5788) &&
6ff6f81d 15179 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
63c3a66f
JP
15180 tg3_flag_set(tp, TAGGED_STATUS);
15181 if (tg3_flag(tp, TAGGED_STATUS)) {
fac9b83e
DM
15182 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15183 HOSTCC_MODE_CLRTICK_TXBD);
15184
15185 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15186 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15187 tp->misc_host_ctrl);
15188 }
15189
3bda1258 15190 /* Preserve the APE MAC_MODE bits */
63c3a66f 15191 if (tg3_flag(tp, ENABLE_APE))
d2394e6b 15192 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
3bda1258 15193 else
6e01b20b 15194 tp->mac_mode = 0;
3bda1258 15195
3d567e0e 15196 if (tg3_10_100_only_device(tp, ent))
f07e9af3 15197 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
1da177e4
LT
15198
15199 err = tg3_phy_probe(tp);
15200 if (err) {
2445e461 15201 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
1da177e4 15202 /* ... but do not return immediately ... */
b02fd9e3 15203 tg3_mdio_fini(tp);
1da177e4
LT
15204 }
15205
184b8904 15206 tg3_read_vpd(tp);
c4e6575c 15207 tg3_read_fw_ver(tp);
1da177e4 15208
f07e9af3
MC
15209 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15210 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4
LT
15211 } else {
15212 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
f07e9af3 15213 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4 15214 else
f07e9af3 15215 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4
LT
15216 }
15217
15218 /* 5700 {AX,BX} chips have a broken status block link
15219 * change bit implementation, so we must use the
15220 * status register in those cases.
15221 */
15222 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
63c3a66f 15223 tg3_flag_set(tp, USE_LINKCHG_REG);
1da177e4 15224 else
63c3a66f 15225 tg3_flag_clear(tp, USE_LINKCHG_REG);
1da177e4
LT
15226
15227 /* The led_ctrl is set during tg3_phy_probe, here we might
15228 * have to force the link status polling mechanism based
15229 * upon subsystem IDs.
15230 */
15231 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 15232 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
f07e9af3
MC
15233 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15234 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
63c3a66f 15235 tg3_flag_set(tp, USE_LINKCHG_REG);
1da177e4
LT
15236 }
15237
15238 /* For all SERDES we poll the MAC status register. */
f07e9af3 15239 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
63c3a66f 15240 tg3_flag_set(tp, POLL_SERDES);
1da177e4 15241 else
63c3a66f 15242 tg3_flag_clear(tp, POLL_SERDES);
1da177e4 15243
9205fd9c 15244 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
d2757fc4 15245 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
1da177e4 15246 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
63c3a66f 15247 tg3_flag(tp, PCIX_MODE)) {
9205fd9c 15248 tp->rx_offset = NET_SKB_PAD;
d2757fc4 15249#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
9dc7a113 15250 tp->rx_copy_thresh = ~(u16)0;
d2757fc4
MC
15251#endif
15252 }
1da177e4 15253
2c49a44d
MC
15254 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15255 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
7cb32cf2
MC
15256 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15257
2c49a44d 15258 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
f92905de
MC
15259
15260 /* Increment the rx prod index on the rx std ring by at most
15261 * 8 for these chips to workaround hw errata.
15262 */
15263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15266 tp->rx_std_max_post = 8;
15267
63c3a66f 15268 if (tg3_flag(tp, ASPM_WORKAROUND))
8ed5d97e
MC
15269 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15270 PCIE_PWR_MGMT_L1_THRESH_MSK;
15271
1da177e4
LT
15272 return err;
15273}
15274
49b6e95f 15275#ifdef CONFIG_SPARC
1da177e4
LT
15276static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15277{
15278 struct net_device *dev = tp->dev;
15279 struct pci_dev *pdev = tp->pdev;
49b6e95f 15280 struct device_node *dp = pci_device_to_OF_node(pdev);
374d4cac 15281 const unsigned char *addr;
49b6e95f
DM
15282 int len;
15283
15284 addr = of_get_property(dp, "local-mac-address", &len);
15285 if (addr && len == 6) {
15286 memcpy(dev->dev_addr, addr, 6);
15287 memcpy(dev->perm_addr, dev->dev_addr, 6);
15288 return 0;
1da177e4
LT
15289 }
15290 return -ENODEV;
15291}
15292
15293static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15294{
15295 struct net_device *dev = tp->dev;
15296
15297 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 15298 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
15299 return 0;
15300}
15301#endif
15302
/* Determine the device MAC address, trying sources in decreasing order
 * of preference:
 *
 *   1. (SPARC only) the OpenFirmware "local-mac-address" property;
 *   2. the MAC address mailbox in NIC SRAM, written by bootcode;
 *   3. NVRAM, at a chip/function-dependent offset;
 *   4. the MAC_ADDR_0_HIGH/LOW hardware registers;
 *   5. (SPARC only) the system IDPROM as a last resort.
 *
 * Returns 0 on success with dev->dev_addr and dev->perm_addr set, or
 * -EINVAL if no source yielded a valid unicast address.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Select the NVRAM offset of the MAC address for this chip and
	 * PCI function.  0x7c is the default; dual-MAC and multi-function
	 * parts keep the second function's address at a different offset.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* Grab the NVRAM arbitration lock; if that fails, reset
		 * the NVRAM interface rather than leaving it wedged.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is the ASCII "HK" signature bootcode stores when it has
	 * written a MAC address into the mailbox.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi/lo are big-endian; the address occupies the
			 * low 2 bytes of hi followed by all 4 bytes of lo.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
15378
59e6b434
DM
15379#define BOUNDARY_SINGLE_CACHELINE 1
15380#define BOUNDARY_MULTI_CACHELINE 2
15381
/* Compute the DMA read/write boundary bits to fold into the DMA_RWCTRL
 * register value @val.
 *
 * Bursting across a cache-line boundary makes many host PCI controllers
 * disconnect, so depending on the architecture we either stop bursts at
 * a single cache line (BOUNDARY_SINGLE_CACHELINE), allow multiples
 * (BOUNDARY_MULTI_CACHELINE), or leave the boundary bits alone (goal 0).
 * The encoding of the boundary bits differs between conventional PCI,
 * PCI-X and PCI Express, hence the three switch blocks below.
 *
 * Returns @val with the appropriate boundary bits set/cleared.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI config reports cache line size in units of 4 bytes;
	 * 0 means "unspecified", treated as the 1024-byte maximum.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Per-architecture policy for how aggressively to bound bursts. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ chips only expose a single cache-alignment disable bit. */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			/* PCI-X only offers 128- and 384-byte boundaries,
			 * so small cache lines round up to 128.
			 */
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe only has write-side boundary control: 64 or 128. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: each case falls through to the next
		 * larger boundary when multi-cacheline bursts are allowed.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
15522
1da177e4
LT
/* Run one host-memory <-> NIC DMA transfer to validate the chip's DMA
 * engine with the current DMA_RWCTRL settings.
 *
 * @buf/@buf_dma: host test buffer of @size bytes (coherent mapping).
 * @to_device:    nonzero = read DMA (host -> NIC), zero = write DMA.
 *
 * A single internal buffer descriptor is built pointing at the host
 * buffer, written into the NIC's SRAM descriptor pool via the PCI
 * memory window, then enqueued on the appropriate DMA FIFO.  Completion
 * is detected by polling the matching completion FIFO for the
 * descriptor's SRAM address.  Returns 0 on completion, -ENODEV if the
 * transfer does not complete within the polling budget (~4ms).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA machinery: clear completion FIFOs, engine
	 * status, the buffer manager, and reset all flow-through queues.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *    ...the DMA engine is connected to the GRC block and a DMA
	 *    reset may affect the GRC block in some unpredictable way...
	 *    The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI memory window registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor's SRAM
	 * address on the read or write DMA FIFO.
	 */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO (up to 40 * 100us) for our
	 * descriptor address to appear.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
15602
ded7340d 15603#define TEST_BUFFER_SIZE 0x2000
1da177e4 15604
4143470c 15605static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
895950c2
JP
15606 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15607 { },
15608};
15609
1da177e4
LT
/* tg3_test_dma - compute DMA read/write control settings and verify DMA.
 * @tp: device state
 *
 * Builds tp->dma_rwctrl (command codes, watermarks, boundary and
 * workaround bits) according to bus type (PCIe / plain PCI / PCI-X) and
 * ASIC revision, writes it to TG3PCI_DMA_RW_CTRL, and on 5700/5701 parts
 * additionally runs a write/read loopback over a coherent test buffer to
 * detect the write-DMA reordering bug, tightening the write boundary to
 * 16 bytes when corruption is seen.  Returns 0 on success or a negative
 * errno (-ENOMEM on buffer allocation failure, -ENODEV on persistent
 * corruption or transfer failure).
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes, then boundary bits. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765+ devices need no further tuning and no loopback test. */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Plain PCI: watermark depends on ASIC revision. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode: per-ASIC watermarks and workarounds. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble of the watermark field. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the write-DMA loopback test below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (index values). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* First corruption: retry once with the safe
			 * 16-byte write boundary before giving up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15799
1da177e4
LT
15800static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15801{
63c3a66f 15802 if (tg3_flag(tp, 57765_PLUS)) {
666bc831
MC
15803 tp->bufmgr_config.mbuf_read_dma_low_water =
15804 DEFAULT_MB_RDMA_LOW_WATER_5705;
15805 tp->bufmgr_config.mbuf_mac_rx_low_water =
15806 DEFAULT_MB_MACRX_LOW_WATER_57765;
15807 tp->bufmgr_config.mbuf_high_water =
15808 DEFAULT_MB_HIGH_WATER_57765;
15809
15810 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15811 DEFAULT_MB_RDMA_LOW_WATER_5705;
15812 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15813 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15814 tp->bufmgr_config.mbuf_high_water_jumbo =
15815 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
63c3a66f 15816 } else if (tg3_flag(tp, 5705_PLUS)) {
fdfec172
MC
15817 tp->bufmgr_config.mbuf_read_dma_low_water =
15818 DEFAULT_MB_RDMA_LOW_WATER_5705;
15819 tp->bufmgr_config.mbuf_mac_rx_low_water =
15820 DEFAULT_MB_MACRX_LOW_WATER_5705;
15821 tp->bufmgr_config.mbuf_high_water =
15822 DEFAULT_MB_HIGH_WATER_5705;
b5d3772c
MC
15823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15824 tp->bufmgr_config.mbuf_mac_rx_low_water =
15825 DEFAULT_MB_MACRX_LOW_WATER_5906;
15826 tp->bufmgr_config.mbuf_high_water =
15827 DEFAULT_MB_HIGH_WATER_5906;
15828 }
fdfec172
MC
15829
15830 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15831 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15832 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15833 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15834 tp->bufmgr_config.mbuf_high_water_jumbo =
15835 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15836 } else {
15837 tp->bufmgr_config.mbuf_read_dma_low_water =
15838 DEFAULT_MB_RDMA_LOW_WATER;
15839 tp->bufmgr_config.mbuf_mac_rx_low_water =
15840 DEFAULT_MB_MACRX_LOW_WATER;
15841 tp->bufmgr_config.mbuf_high_water =
15842 DEFAULT_MB_HIGH_WATER;
15843
15844 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15845 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15846 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15847 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15848 tp->bufmgr_config.mbuf_high_water_jumbo =
15849 DEFAULT_MB_HIGH_WATER_JUMBO;
15850 }
1da177e4
LT
15851
15852 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15853 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15854}
15855
15856static char * __devinit tg3_phy_string(struct tg3 *tp)
15857{
79eb6904
MC
15858 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15859 case TG3_PHY_ID_BCM5400: return "5400";
15860 case TG3_PHY_ID_BCM5401: return "5401";
15861 case TG3_PHY_ID_BCM5411: return "5411";
15862 case TG3_PHY_ID_BCM5701: return "5701";
15863 case TG3_PHY_ID_BCM5703: return "5703";
15864 case TG3_PHY_ID_BCM5704: return "5704";
15865 case TG3_PHY_ID_BCM5705: return "5705";
15866 case TG3_PHY_ID_BCM5750: return "5750";
15867 case TG3_PHY_ID_BCM5752: return "5752";
15868 case TG3_PHY_ID_BCM5714: return "5714";
15869 case TG3_PHY_ID_BCM5780: return "5780";
15870 case TG3_PHY_ID_BCM5755: return "5755";
15871 case TG3_PHY_ID_BCM5787: return "5787";
15872 case TG3_PHY_ID_BCM5784: return "5784";
15873 case TG3_PHY_ID_BCM5756: return "5722/5756";
15874 case TG3_PHY_ID_BCM5906: return "5906";
15875 case TG3_PHY_ID_BCM5761: return "5761";
15876 case TG3_PHY_ID_BCM5718C: return "5718C";
15877 case TG3_PHY_ID_BCM5718S: return "5718S";
15878 case TG3_PHY_ID_BCM57765: return "57765";
302b500b 15879 case TG3_PHY_ID_BCM5719C: return "5719C";
6418f2c1 15880 case TG3_PHY_ID_BCM5720C: return "5720C";
79eb6904 15881 case TG3_PHY_ID_BCM8002: return "8002/serdes";
1da177e4
LT
15882 case 0: return "serdes";
15883 default: return "unknown";
855e1111 15884 }
1da177e4
LT
15885}
15886
f9804ddb
MC
15887static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15888{
63c3a66f 15889 if (tg3_flag(tp, PCI_EXPRESS)) {
f9804ddb
MC
15890 strcpy(str, "PCI Express");
15891 return str;
63c3a66f 15892 } else if (tg3_flag(tp, PCIX_MODE)) {
f9804ddb
MC
15893 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15894
15895 strcpy(str, "PCIX:");
15896
15897 if ((clock_ctrl == 7) ||
15898 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15899 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15900 strcat(str, "133MHz");
15901 else if (clock_ctrl == 0)
15902 strcat(str, "33MHz");
15903 else if (clock_ctrl == 2)
15904 strcat(str, "50MHz");
15905 else if (clock_ctrl == 4)
15906 strcat(str, "66MHz");
15907 else if (clock_ctrl == 6)
15908 strcat(str, "100MHz");
f9804ddb
MC
15909 } else {
15910 strcpy(str, "PCI:");
63c3a66f 15911 if (tg3_flag(tp, PCI_HIGH_SPEED))
f9804ddb
MC
15912 strcat(str, "66MHz");
15913 else
15914 strcat(str, "33MHz");
15915 }
63c3a66f 15916 if (tg3_flag(tp, PCI_32BIT))
f9804ddb
MC
15917 strcat(str, ":32-bit");
15918 else
15919 strcat(str, ":64-bit");
15920 return str;
15921}
15922
15f9850d
DM
15923static void __devinit tg3_init_coal(struct tg3 *tp)
15924{
15925 struct ethtool_coalesce *ec = &tp->coal;
15926
15927 memset(ec, 0, sizeof(*ec));
15928 ec->cmd = ETHTOOL_GCOALESCE;
15929 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15930 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15931 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15932 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15933 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15934 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15935 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15936 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15937 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15938
15939 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15940 HOSTCC_MODE_CLRTICK_TXBD)) {
15941 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15942 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15943 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15944 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15945 }
d244c892 15946
63c3a66f 15947 if (tg3_flag(tp, 5705_PLUS)) {
d244c892
MC
15948 ec->rx_coalesce_usecs_irq = 0;
15949 ec->tx_coalesce_usecs_irq = 0;
15950 ec->stats_block_coalesce_usecs = 0;
15951 }
15f9850d
DM
15952}
15953
1da177e4
LT
/* tg3_init_one - PCI probe routine.
 * @pdev: the PCI device being probed
 * @ent: matching entry from the driver's PCI ID table
 *
 * Enables and maps the device, allocates the net_device, determines chip
 * invariants, configures the DMA masks and offload feature set, tests the
 * DMA engine, sets up the per-vector NAPI mailboxes, and registers the
 * netdev.  Returns 0 on success or a negative errno, unwinding all
 * partially-acquired resources via the err_out_* labels.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These devices have an APE (Application Processing Engine) whose
	 * registers live behind BAR 2.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	/* 5705 A1 without TSO on a slow bus needs a smaller RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign interrupt/consumer/producer mailboxes to each NAPI
	 * vector.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

	/* Error unwinding: each label releases resources acquired before
	 * the corresponding failure point, in reverse order.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
16333
16334static void __devexit tg3_remove_one(struct pci_dev *pdev)
16335{
16336 struct net_device *dev = pci_get_drvdata(pdev);
16337
16338 if (dev) {
16339 struct tg3 *tp = netdev_priv(dev);
16340
e3c5530b 16341 release_firmware(tp->fw);
077f849d 16342
db219973 16343 tg3_reset_task_cancel(tp);
158d7abd 16344
e730c823 16345 if (tg3_flag(tp, USE_PHYLIB)) {
b02fd9e3 16346 tg3_phy_fini(tp);
158d7abd 16347 tg3_mdio_fini(tp);
b02fd9e3 16348 }
158d7abd 16349
1da177e4 16350 unregister_netdev(dev);
0d3031d9
MC
16351 if (tp->aperegs) {
16352 iounmap(tp->aperegs);
16353 tp->aperegs = NULL;
16354 }
6892914f
MC
16355 if (tp->regs) {
16356 iounmap(tp->regs);
22abe310 16357 tp->regs = NULL;
6892914f 16358 }
1da177e4
LT
16359 free_netdev(dev);
16360 pci_release_regions(pdev);
16361 pci_disable_device(pdev);
16362 pci_set_drvdata(pdev, NULL);
16363 }
16364}
16365
aa6027ca 16366#ifdef CONFIG_PM_SLEEP
c866b7ea 16367static int tg3_suspend(struct device *device)
1da177e4 16368{
c866b7ea 16369 struct pci_dev *pdev = to_pci_dev(device);
1da177e4
LT
16370 struct net_device *dev = pci_get_drvdata(pdev);
16371 struct tg3 *tp = netdev_priv(dev);
16372 int err;
16373
16374 if (!netif_running(dev))
16375 return 0;
16376
db219973 16377 tg3_reset_task_cancel(tp);
b02fd9e3 16378 tg3_phy_stop(tp);
1da177e4
LT
16379 tg3_netif_stop(tp);
16380
21f7638e 16381 tg3_timer_stop(tp);
1da177e4 16382
f47c11ee 16383 tg3_full_lock(tp, 1);
1da177e4 16384 tg3_disable_ints(tp);
f47c11ee 16385 tg3_full_unlock(tp);
1da177e4
LT
16386
16387 netif_device_detach(dev);
16388
f47c11ee 16389 tg3_full_lock(tp, 0);
944d980e 16390 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
63c3a66f 16391 tg3_flag_clear(tp, INIT_COMPLETE);
f47c11ee 16392 tg3_full_unlock(tp);
1da177e4 16393
c866b7ea 16394 err = tg3_power_down_prepare(tp);
1da177e4 16395 if (err) {
b02fd9e3
MC
16396 int err2;
16397
f47c11ee 16398 tg3_full_lock(tp, 0);
1da177e4 16399
63c3a66f 16400 tg3_flag_set(tp, INIT_COMPLETE);
b02fd9e3
MC
16401 err2 = tg3_restart_hw(tp, 1);
16402 if (err2)
b9ec6c1b 16403 goto out;
1da177e4 16404
21f7638e 16405 tg3_timer_start(tp);
1da177e4
LT
16406
16407 netif_device_attach(dev);
16408 tg3_netif_start(tp);
16409
b9ec6c1b 16410out:
f47c11ee 16411 tg3_full_unlock(tp);
b02fd9e3
MC
16412
16413 if (!err2)
16414 tg3_phy_start(tp);
1da177e4
LT
16415 }
16416
16417 return err;
16418}
16419
c866b7ea 16420static int tg3_resume(struct device *device)
1da177e4 16421{
c866b7ea 16422 struct pci_dev *pdev = to_pci_dev(device);
1da177e4
LT
16423 struct net_device *dev = pci_get_drvdata(pdev);
16424 struct tg3 *tp = netdev_priv(dev);
16425 int err;
16426
16427 if (!netif_running(dev))
16428 return 0;
16429
1da177e4
LT
16430 netif_device_attach(dev);
16431
f47c11ee 16432 tg3_full_lock(tp, 0);
1da177e4 16433
63c3a66f 16434 tg3_flag_set(tp, INIT_COMPLETE);
b9ec6c1b
MC
16435 err = tg3_restart_hw(tp, 1);
16436 if (err)
16437 goto out;
1da177e4 16438
21f7638e 16439 tg3_timer_start(tp);
1da177e4 16440
1da177e4
LT
16441 tg3_netif_start(tp);
16442
b9ec6c1b 16443out:
f47c11ee 16444 tg3_full_unlock(tp);
1da177e4 16445
b02fd9e3
MC
16446 if (!err)
16447 tg3_phy_start(tp);
16448
b9ec6c1b 16449 return err;
1da177e4
LT
16450}
16451
c866b7ea 16452static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
aa6027ca
ED
16453#define TG3_PM_OPS (&tg3_pm_ops)
16454
16455#else
16456
16457#define TG3_PM_OPS NULL
16458
16459#endif /* CONFIG_PM_SLEEP */
c866b7ea 16460
b45aa2f6
MC
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  Quiesces the driver (PHY, data path,
 * timer, reset task), detaches the netdev and halts the chip, then
 * reports whether a slot reset is needed or the device must be
 * disconnected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* If the interface is down there is nothing to quiesce. */
	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	/* Permanent failure: tell the core to disconnect; otherwise
	 * disable the device and request a slot reset.
	 */
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
16509
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	/* Re-enable the device that error_detected() disabled. */
	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	/* Restore config space saved at probe/suspend time, then re-save
	 * it so a subsequent error recovery starts from a clean copy.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Interface down: nothing further to bring up. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
16553
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.  Mirrors tg3_resume():
 * reprograms the hardware, reattaches the net device and restarts
 * the timer, TX path and PHY.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);	/* second arg presumably reset_phy — confirm */
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	/* PHY start may sleep; done outside the full lock. */
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
16592
/* PCI error-recovery (AER) callbacks: quiesce on error, re-init after
 * the slot reset, then resume traffic.
 */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
16598
1da177e4
LT
16599static struct pci_driver tg3_driver = {
16600 .name = DRV_MODULE_NAME,
16601 .id_table = tg3_pci_tbl,
16602 .probe = tg3_init_one,
16603 .remove = __devexit_p(tg3_remove_one),
b45aa2f6 16604 .err_handler = &tg3_err_handler,
aa6027ca 16605 .driver.pm = TG3_PM_OPS,
1da177e4
LT
16606};
16607
16608static int __init tg3_init(void)
16609{
29917620 16610 return pci_register_driver(&tg3_driver);
1da177e4
LT
16611}
16612
/* Module exit point: unregister the driver; the PCI core then invokes
 * the remove hook for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
This page took 3.25508 seconds and 5 git commands to generate.