tg3: Cleanup firmware parsing code
drivers/net/ethernet/broadcom/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
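
/* Illustrative note (not in the original source): tg3_flag(tp, ENABLE_APE)
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, tp->tg3_flags), i.e. a
 * test_bit() on the device's flag bitmap; the set/clear variants use the
 * atomic set_bit()/clear_bit() bitops, so the inline helpers also give the
 * compiler a type check on the enum TG3_FLAGS argument.
 */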

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
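
/* Illustrative example (not in the original source): because
 * TG3_TX_RING_SIZE is a power of two, NEXT_TX(511) == (512 & 511) == 0,
 * so the producer index wraps around with a single AND instead of a
 * hardware modulo, which is exactly what the comment above is after.
 */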

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
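
/* Illustrative note (not in the original source): on architectures that
 * define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS (e.g. x86),
 * TG3_RX_COPY_THRESH(tp) is the compile-time constant 256, so short
 * frames are copied into a fresh skb while larger frames reuse the
 * DMA buffer, without dereferencing tp->rx_copy_thresh per packet.
 */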

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN	2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
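
/* Illustrative usage (not in the original source): the debug bitmap can be
 * set at load time, e.g. "modprobe tg3 tg3_debug=0x3" to enable the
 * NETIF_MSG_DRV and NETIF_MSG_PROBE message classes; the default of -1
 * selects TG3_DEF_MSG_ENABLE above.
 */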

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl | GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
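
/* Illustrative note (not in the original source): the rest of the driver
 * uses tw32()/tr32() so that the access method (direct MMIO, indirect
 * config-space access, or the mailbox variants above) is chosen once,
 * through the tp->write32/tp->read32 function pointers, rather than being
 * decided at every register access.
 */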

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
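
/* Illustrative usage (not in the original source): a single word of NIC
 * SRAM is read through the memory window, e.g.
 *	u32 val;
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 * as tg3_poll_fw() below does; the window base is restored to zero after
 * each access, per the comments above.
 */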

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
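
/* Illustrative usage (not in the original source): callers bracket APE
 * accesses with a lock/unlock pair, e.g.
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 * which is exactly the pattern tg3_ape_event_lock() below follows.
 */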

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
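
/* Illustrative arithmetic (not in the original source): PHY_BUSY_LOOPS
 * iterations of udelay(10) give the MI_COM state machine up to roughly
 * 5000 * 10 usec = 50 ms to clear MI_COM_BUSY before an access fails
 * with -EBUSY in the helpers below.
 */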

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
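
/* Illustrative note (not in the original source): the two helpers above
 * perform indirect clause-45 MMD access over clause-22 MDIO: write the
 * device address to MII_TG3_MMD_CTRL, the register address to
 * MII_TG3_MMD_ADDRESS, switch MII_TG3_MMD_CTRL to no-post-increment data
 * mode, then move the data word through MII_TG3_MMD_ADDRESS.
 */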

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC	2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
			     usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
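
/* Illustrative arithmetic (not in the original source): with the full
 * 2500 usec window, delay_cnt = (2500 >> 3) + 1 = 313 polls of udelay(8),
 * so GRC_RX_CPU_DRIVER_EVENT is re-checked roughly every 8 usec instead
 * of the driver sleeping for the entire timeout.
 */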

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1845
1846 static void tg3_link_report(struct tg3 *tp)
1847 {
1848 if (!netif_carrier_ok(tp->dev)) {
1849 netif_info(tp, link, tp->dev, "Link is down\n");
1850 tg3_ump_link_report(tp);
1851 } else if (netif_msg_link(tp)) {
1852 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1853 (tp->link_config.active_speed == SPEED_1000 ?
1854 1000 :
1855 (tp->link_config.active_speed == SPEED_100 ?
1856 100 : 10)),
1857 (tp->link_config.active_duplex == DUPLEX_FULL ?
1858 "full" : "half"));
1859
1860 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1861 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1862 "on" : "off",
1863 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1864 "on" : "off");
1865
1866 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1867 netdev_info(tp->dev, "EEE is %s\n",
1868 tp->setlpicnt ? "enabled" : "disabled");
1869
1870 tg3_ump_link_report(tp);
1871 }
1872 }
1873
1874 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1875 {
1876 u16 miireg;
1877
1878 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1879 miireg = ADVERTISE_1000XPAUSE;
1880 else if (flow_ctrl & FLOW_CTRL_TX)
1881 miireg = ADVERTISE_1000XPSE_ASYM;
1882 else if (flow_ctrl & FLOW_CTRL_RX)
1883 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1884 else
1885 miireg = 0;
1886
1887 return miireg;
1888 }
1889
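/* Resolve the pause directions negotiated on a 1000Base-X link from
 * the local and remote advertisement words.
 */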
1890 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1891 {
1892 u8 cap = 0;
1893
1894 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1895 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1896 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1897 if (lcladv & ADVERTISE_1000XPAUSE)
1898 cap = FLOW_CTRL_RX;
1899 if (rmtadv & ADVERTISE_1000XPAUSE)
1900 cap = FLOW_CTRL_TX;
1901 }
1902
1903 return cap;
1904 }
1905
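/* Apply the resolved (or forced) pause configuration to the MAC,
 * touching the RX/TX mode registers only when their values change.
 */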
1906 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1907 {
1908 u8 autoneg;
1909 u8 flowctrl = 0;
1910 u32 old_rx_mode = tp->rx_mode;
1911 u32 old_tx_mode = tp->tx_mode;
1912
1913 if (tg3_flag(tp, USE_PHYLIB))
1914 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1915 else
1916 autoneg = tp->link_config.autoneg;
1917
1918 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1919 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1920 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1921 else
1922 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1923 } else
1924 flowctrl = tp->link_config.flowctrl;
1925
1926 tp->link_config.active_flowctrl = flowctrl;
1927
1928 if (flowctrl & FLOW_CTRL_RX)
1929 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1930 else
1931 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1932
1933 if (old_rx_mode != tp->rx_mode)
1934 tw32_f(MAC_RX_MODE, tp->rx_mode);
1935
1936 if (flowctrl & FLOW_CTRL_TX)
1937 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1938 else
1939 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1940
1941 if (old_tx_mode != tp->tx_mode)
1942 tw32_f(MAC_TX_MODE, tp->tx_mode);
1943 }
1944
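/* phylib link change callback. Mirrors the PHY's current speed,
 * duplex, and pause state into the MAC registers and reports link
 * transitions.
 */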
1945 static void tg3_adjust_link(struct net_device *dev)
1946 {
1947 u8 oldflowctrl, linkmesg = 0;
1948 u32 mac_mode, lcl_adv, rmt_adv;
1949 struct tg3 *tp = netdev_priv(dev);
1950 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1951
1952 spin_lock_bh(&tp->lock);
1953
1954 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1955 MAC_MODE_HALF_DUPLEX);
1956
1957 oldflowctrl = tp->link_config.active_flowctrl;
1958
1959 if (phydev->link) {
1960 lcl_adv = 0;
1961 rmt_adv = 0;
1962
1963 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1964 mac_mode |= MAC_MODE_PORT_MODE_MII;
1965 else if (phydev->speed == SPEED_1000 ||
1966 tg3_asic_rev(tp) != ASIC_REV_5785)
1967 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1968 else
1969 mac_mode |= MAC_MODE_PORT_MODE_MII;
1970
1971 if (phydev->duplex == DUPLEX_HALF)
1972 mac_mode |= MAC_MODE_HALF_DUPLEX;
1973 else {
1974 lcl_adv = mii_advertise_flowctrl(
1975 tp->link_config.flowctrl);
1976
1977 if (phydev->pause)
1978 rmt_adv = LPA_PAUSE_CAP;
1979 if (phydev->asym_pause)
1980 rmt_adv |= LPA_PAUSE_ASYM;
1981 }
1982
1983 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1984 } else
1985 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1986
1987 if (mac_mode != tp->mac_mode) {
1988 tp->mac_mode = mac_mode;
1989 tw32_f(MAC_MODE, tp->mac_mode);
1990 udelay(40);
1991 }
1992
1993 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1994 if (phydev->speed == SPEED_10)
1995 tw32(MAC_MI_STAT,
1996 MAC_MI_STAT_10MBPS_MODE |
1997 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1998 else
1999 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2000 }
2001
2002 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2003 tw32(MAC_TX_LENGTHS,
2004 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2005 (6 << TX_LENGTHS_IPG_SHIFT) |
2006 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2007 else
2008 tw32(MAC_TX_LENGTHS,
2009 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2010 (6 << TX_LENGTHS_IPG_SHIFT) |
2011 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2012
2013 if (phydev->link != tp->old_link ||
2014 phydev->speed != tp->link_config.active_speed ||
2015 phydev->duplex != tp->link_config.active_duplex ||
2016 oldflowctrl != tp->link_config.active_flowctrl)
2017 linkmesg = 1;
2018
2019 tp->old_link = phydev->link;
2020 tp->link_config.active_speed = phydev->speed;
2021 tp->link_config.active_duplex = phydev->duplex;
2022
2023 spin_unlock_bh(&tp->lock);
2024
2025 if (linkmesg)
2026 tg3_link_report(tp);
2027 }
2028
2029 static int tg3_phy_init(struct tg3 *tp)
2030 {
2031 struct phy_device *phydev;
2032
2033 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2034 return 0;
2035
2036 /* Bring the PHY back to a known state. */
2037 tg3_bmcr_reset(tp);
2038
2039 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2040
2041 /* Attach the MAC to the PHY. */
2042 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2043 tg3_adjust_link, phydev->interface);
2044 if (IS_ERR(phydev)) {
2045 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2046 return PTR_ERR(phydev);
2047 }
2048
2049 /* Mask with MAC supported features. */
2050 switch (phydev->interface) {
2051 case PHY_INTERFACE_MODE_GMII:
2052 case PHY_INTERFACE_MODE_RGMII:
2053 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2054 phydev->supported &= (PHY_GBIT_FEATURES |
2055 SUPPORTED_Pause |
2056 SUPPORTED_Asym_Pause);
2057 break;
2058 }
2059 /* fallthru */
2060 case PHY_INTERFACE_MODE_MII:
2061 phydev->supported &= (PHY_BASIC_FEATURES |
2062 SUPPORTED_Pause |
2063 SUPPORTED_Asym_Pause);
2064 break;
2065 default:
2066 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2067 return -EINVAL;
2068 }
2069
2070 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2071
2072 phydev->advertising = phydev->supported;
2073
2074 return 0;
2075 }
2076
2077 static void tg3_phy_start(struct tg3 *tp)
2078 {
2079 struct phy_device *phydev;
2080
2081 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2082 return;
2083
2084 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2085
2086 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2087 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2088 phydev->speed = tp->link_config.speed;
2089 phydev->duplex = tp->link_config.duplex;
2090 phydev->autoneg = tp->link_config.autoneg;
2091 phydev->advertising = tp->link_config.advertising;
2092 }
2093
2094 phy_start(phydev);
2095
2096 phy_start_aneg(phydev);
2097 }
2098
2099 static void tg3_phy_stop(struct tg3 *tp)
2100 {
2101 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2102 return;
2103
2104 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2105 }
2106
2107 static void tg3_phy_fini(struct tg3 *tp)
2108 {
2109 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2110 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2111 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2112 }
2113 }
2114
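/* Put the PHY into external loopback via the auxiliary control shadow
 * register. The 5401 does not support read-modify-write here, so a
 * fixed value is written instead.
 */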
2115 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2116 {
2117 int err;
2118 u32 val;
2119
2120 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2121 return 0;
2122
2123 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2124 /* Cannot do read-modify-write on 5401 */
2125 err = tg3_phy_auxctl_write(tp,
2126 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2127 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2128 0x4c20);
2129 goto done;
2130 }
2131
2132 err = tg3_phy_auxctl_read(tp,
2133 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2134 if (err)
2135 return err;
2136
2137 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2138 err = tg3_phy_auxctl_write(tp,
2139 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2140
2141 done:
2142 return err;
2143 }
2144
2145 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2146 {
2147 u32 phytest;
2148
2149 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2150 u32 phy;
2151
2152 tg3_writephy(tp, MII_TG3_FET_TEST,
2153 phytest | MII_TG3_FET_SHADOW_EN);
2154 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2155 if (enable)
2156 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2157 else
2158 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2159 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2160 }
2161 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2162 }
2163 }
2164
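/* Enable or disable the PHY's auto power-down (APD) feature. FET-style
 * PHYs are handled through their shadow register bank; others use the
 * misc shadow registers, including the 84ms wake timer.
 */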
2165 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2166 {
2167 u32 reg;
2168
2169 if (!tg3_flag(tp, 5705_PLUS) ||
2170 (tg3_flag(tp, 5717_PLUS) &&
2171 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2172 return;
2173
2174 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2175 tg3_phy_fet_toggle_apd(tp, enable);
2176 return;
2177 }
2178
2179 reg = MII_TG3_MISC_SHDW_WREN |
2180 MII_TG3_MISC_SHDW_SCR5_SEL |
2181 MII_TG3_MISC_SHDW_SCR5_LPED |
2182 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2183 MII_TG3_MISC_SHDW_SCR5_SDTL |
2184 MII_TG3_MISC_SHDW_SCR5_C125OE;
2185 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2186 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2187
2188 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2189
2191 reg = MII_TG3_MISC_SHDW_WREN |
2192 MII_TG3_MISC_SHDW_APD_SEL |
2193 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2194 if (enable)
2195 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2196
2197 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2198 }
2199
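/* Enable or disable automatic MDI/MDI-X crossover, using whichever
 * shadow register bank the PHY type provides.
 */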
2200 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2201 {
2202 u32 phy;
2203
2204 if (!tg3_flag(tp, 5705_PLUS) ||
2205 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2206 return;
2207
2208 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2209 u32 ephy;
2210
2211 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2212 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2213
2214 tg3_writephy(tp, MII_TG3_FET_TEST,
2215 ephy | MII_TG3_FET_SHADOW_EN);
2216 if (!tg3_readphy(tp, reg, &phy)) {
2217 if (enable)
2218 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2219 else
2220 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2221 tg3_writephy(tp, reg, phy);
2222 }
2223 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2224 }
2225 } else {
2226 int ret;
2227
2228 ret = tg3_phy_auxctl_read(tp,
2229 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2230 if (!ret) {
2231 if (enable)
2232 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2233 else
2234 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2235 tg3_phy_auxctl_write(tp,
2236 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2237 }
2238 }
2239 }
2240
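/* Enable Broadcom's ethernet@wirespeed feature, which lets the PHY
 * fall back to a lower speed on cabling that cannot sustain the
 * advertised rate.
 */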
2241 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2242 {
2243 int ret;
2244 u32 val;
2245
2246 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2247 return;
2248
2249 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2250 if (!ret)
2251 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2252 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2253 }
2254
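/* Program PHY DSP registers with the calibration values packed into
 * tp->phy_otp. A no-op when no OTP value was recorded.
 */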
2255 static void tg3_phy_apply_otp(struct tg3 *tp)
2256 {
2257 u32 otp, phy;
2258
2259 if (!tp->phy_otp)
2260 return;
2261
2262 otp = tp->phy_otp;
2263
2264 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2265 return;
2266
2267 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2268 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2269 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2270
2271 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2272 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2273 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2274
2275 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2276 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2277 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2278
2279 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2280 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2281
2282 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2283 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2284
2285 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2286 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2287 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2288
2289 tg3_phy_toggle_auxctl_smdsp(tp, false);
2290 }
2291
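/* Re-evaluate EEE low power idle after a link change. LPI stays armed
 * only for an autonegotiated full-duplex 100/1000 link whose partner
 * also advertised EEE; otherwise it is torn back down.
 */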
2292 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2293 {
2294 u32 val;
2295
2296 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2297 return;
2298
2299 tp->setlpicnt = 0;
2300
2301 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2302 current_link_up == 1 &&
2303 tp->link_config.active_duplex == DUPLEX_FULL &&
2304 (tp->link_config.active_speed == SPEED_100 ||
2305 tp->link_config.active_speed == SPEED_1000)) {
2306 u32 eeectl;
2307
2308 if (tp->link_config.active_speed == SPEED_1000)
2309 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2310 else
2311 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2312
2313 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2314
2315 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2316 TG3_CL45_D7_EEERES_STAT, &val);
2317
2318 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2319 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2320 tp->setlpicnt = 2;
2321 }
2322
2323 if (!tp->setlpicnt) {
2324 if (current_link_up == 1 &&
2325 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2326 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2327 tg3_phy_toggle_auxctl_smdsp(tp, false);
2328 }
2329
2330 val = tr32(TG3_CPMU_EEE_MODE);
2331 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2332 }
2333 }
2334
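/* Turn EEE low power idle on in the CPMU. Chips in the 5717/5719/57765
 * families running at gigabit speed need DSP TAP26 programmed first.
 */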
2335 static void tg3_phy_eee_enable(struct tg3 *tp)
2336 {
2337 u32 val;
2338
2339 if (tp->link_config.active_speed == SPEED_1000 &&
2340 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2341 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2342 tg3_flag(tp, 57765_CLASS)) &&
2343 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2344 val = MII_TG3_DSP_TAP26_ALNOKO |
2345 MII_TG3_DSP_TAP26_RMRXSTO;
2346 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2347 tg3_phy_toggle_auxctl_smdsp(tp, false);
2348 }
2349
2350 val = tr32(TG3_CPMU_EEE_MODE);
2351 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2352 }
2353
2354 static int tg3_wait_macro_done(struct tg3 *tp)
2355 {
2356 int limit = 100;
2357
2358 while (limit--) {
2359 u32 tmp32;
2360
2361 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2362 if ((tmp32 & 0x1000) == 0)
2363 break;
2364 }
2365 }
2366 if (limit < 0)
2367 return -EBUSY;
2368
2369 return 0;
2370 }
2371
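/* Write a test pattern to each of the four DSP channels and read it
 * back. Macro timeouts set *resetp so the caller resets the PHY before
 * retrying; a data mismatch fails with -EBUSY.
 */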
2372 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2373 {
2374 static const u32 test_pat[4][6] = {
2375 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2376 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2377 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2378 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2379 };
2380 int chan;
2381
2382 for (chan = 0; chan < 4; chan++) {
2383 int i;
2384
2385 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2386 (chan * 0x2000) | 0x0200);
2387 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2388
2389 for (i = 0; i < 6; i++)
2390 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2391 test_pat[chan][i]);
2392
2393 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2394 if (tg3_wait_macro_done(tp)) {
2395 *resetp = 1;
2396 return -EBUSY;
2397 }
2398
2399 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2400 (chan * 0x2000) | 0x0200);
2401 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2402 if (tg3_wait_macro_done(tp)) {
2403 *resetp = 1;
2404 return -EBUSY;
2405 }
2406
2407 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2408 if (tg3_wait_macro_done(tp)) {
2409 *resetp = 1;
2410 return -EBUSY;
2411 }
2412
2413 for (i = 0; i < 6; i += 2) {
2414 u32 low, high;
2415
2416 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2417 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2418 tg3_wait_macro_done(tp)) {
2419 *resetp = 1;
2420 return -EBUSY;
2421 }
2422 low &= 0x7fff;
2423 high &= 0x000f;
2424 if (low != test_pat[chan][i] ||
2425 high != test_pat[chan][i+1]) {
2426 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2427 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2428 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2429
2430 return -EBUSY;
2431 }
2432 }
2433 }
2434
2435 return 0;
2436 }
2437
2438 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2439 {
2440 int chan;
2441
2442 for (chan = 0; chan < 4; chan++) {
2443 int i;
2444
2445 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2446 (chan * 0x2000) | 0x0200);
2447 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2448 for (i = 0; i < 6; i++)
2449 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2450 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2451 if (tg3_wait_macro_done(tp))
2452 return -EBUSY;
2453 }
2454
2455 return 0;
2456 }
2457
2458 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2459 {
2460 u32 reg32, phy9_orig;
2461 int retries, do_phy_reset, err;
2462
2463 retries = 10;
2464 do_phy_reset = 1;
2465 do {
2466 if (do_phy_reset) {
2467 err = tg3_bmcr_reset(tp);
2468 if (err)
2469 return err;
2470 do_phy_reset = 0;
2471 }
2472
2473 /* Disable transmitter and interrupt. */
2474 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2475 continue;
2476
2477 reg32 |= 0x3000;
2478 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2479
2480 /* Set full-duplex, 1000 Mbps. */
2481 tg3_writephy(tp, MII_BMCR,
2482 BMCR_FULLDPLX | BMCR_SPEED1000);
2483
2484 /* Set to master mode. */
2485 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2486 continue;
2487
2488 tg3_writephy(tp, MII_CTRL1000,
2489 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2490
2491 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2492 if (err)
2493 return err;
2494
2495 /* Block the PHY control access. */
2496 tg3_phydsp_write(tp, 0x8005, 0x0800);
2497
2498 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2499 if (!err)
2500 break;
2501 } while (--retries);
2502
2503 err = tg3_phy_reset_chanpat(tp);
2504 if (err)
2505 return err;
2506
2507 tg3_phydsp_write(tp, 0x8005, 0x0000);
2508
2509 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2510 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2511
2512 tg3_phy_toggle_auxctl_smdsp(tp, false);
2513
2514 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2515
2516 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2517 reg32 &= ~0x3000;
2518 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2519 } else if (!err)
2520 err = -EBUSY;
2521
2522 return err;
2523 }
2524
2525 static void tg3_carrier_on(struct tg3 *tp)
2526 {
2527 netif_carrier_on(tp->dev);
2528 tp->link_up = true;
2529 }
2530
2531 static void tg3_carrier_off(struct tg3 *tp)
2532 {
2533 netif_carrier_off(tp->dev);
2534 tp->link_up = false;
2535 }
2536
2537 /* This will reset the tigon3 PHY and apply any workarounds
2538 * needed to bring it back to a fully operational state.
2539 */
2540 static int tg3_phy_reset(struct tg3 *tp)
2541 {
2542 u32 val, cpmuctrl;
2543 int err;
2544
2545 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2546 val = tr32(GRC_MISC_CFG);
2547 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2548 udelay(40);
2549 }
2550 err = tg3_readphy(tp, MII_BMSR, &val);
2551 err |= tg3_readphy(tp, MII_BMSR, &val);
2552 if (err != 0)
2553 return -EBUSY;
2554
2555 if (netif_running(tp->dev) && tp->link_up) {
2556 tg3_carrier_off(tp);
2557 tg3_link_report(tp);
2558 }
2559
2560 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2561 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2562 tg3_asic_rev(tp) == ASIC_REV_5705) {
2563 err = tg3_phy_reset_5703_4_5(tp);
2564 if (err)
2565 return err;
2566 goto out;
2567 }
2568
2569 cpmuctrl = 0;
2570 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2571 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2572 cpmuctrl = tr32(TG3_CPMU_CTRL);
2573 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2574 tw32(TG3_CPMU_CTRL,
2575 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2576 }
2577
2578 err = tg3_bmcr_reset(tp);
2579 if (err)
2580 return err;
2581
2582 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2583 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2584 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2585
2586 tw32(TG3_CPMU_CTRL, cpmuctrl);
2587 }
2588
2589 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2590 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2591 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2592 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2593 CPMU_LSPD_1000MB_MACCLK_12_5) {
2594 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2595 udelay(40);
2596 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2597 }
2598 }
2599
2600 if (tg3_flag(tp, 5717_PLUS) &&
2601 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2602 return 0;
2603
2604 tg3_phy_apply_otp(tp);
2605
2606 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2607 tg3_phy_toggle_apd(tp, true);
2608 else
2609 tg3_phy_toggle_apd(tp, false);
2610
2611 out:
2612 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2613 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2614 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2615 tg3_phydsp_write(tp, 0x000a, 0x0323);
2616 tg3_phy_toggle_auxctl_smdsp(tp, false);
2617 }
2618
2619 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2620 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2621 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2622 }
2623
2624 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2625 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2626 tg3_phydsp_write(tp, 0x000a, 0x310b);
2627 tg3_phydsp_write(tp, 0x201f, 0x9506);
2628 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2629 tg3_phy_toggle_auxctl_smdsp(tp, false);
2630 }
2631 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2632 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2633 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2634 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2635 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2636 tg3_writephy(tp, MII_TG3_TEST1,
2637 MII_TG3_TEST1_TRIM_EN | 0x4);
2638 } else
2639 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2640
2641 tg3_phy_toggle_auxctl_smdsp(tp, false);
2642 }
2643 }
2644
2645 /* Set Extended packet length bit (bit 14) on all chips
2646 * that support jumbo frames. */
2647 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2648 /* Cannot do read-modify-write on 5401 */
2649 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2650 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2651 /* Set bit 14 with read-modify-write to preserve other bits */
2652 err = tg3_phy_auxctl_read(tp,
2653 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2654 if (!err)
2655 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2656 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2657 }
2658
2659 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2660 * jumbo frame transmission.
2661 */
2662 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2663 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2664 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2665 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2666 }
2667
2668 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2669 /* adjust output voltage */
2670 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2671 }
2672
2673 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2674 tg3_phydsp_write(tp, 0xffb, 0x4000);
2675
2676 tg3_phy_toggle_automdix(tp, 1);
2677 tg3_phy_set_wirespeed(tp);
2678 return 0;
2679 }
2680
2681 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2682 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2683 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2684 TG3_GPIO_MSG_NEED_VAUX)
2685 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2686 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2687 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2688 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2689 (TG3_GPIO_MSG_DRVR_PRES << 12))
2690
2691 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2692 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2693 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2694 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2695 (TG3_GPIO_MSG_NEED_VAUX << 12))
2696
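/* Publish this PCI function's driver-present / need-Vaux bits in the
 * GPIO message word shared by all functions, and return the combined
 * status of every function.
 */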
2697 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2698 {
2699 u32 status, shift;
2700
2701 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2702 tg3_asic_rev(tp) == ASIC_REV_5719)
2703 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2704 else
2705 status = tr32(TG3_CPMU_DRV_STATUS);
2706
2707 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2708 status &= ~(TG3_GPIO_MSG_MASK << shift);
2709 status |= (newstat << shift);
2710
2711 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2712 tg3_asic_rev(tp) == ASIC_REV_5719)
2713 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2714 else
2715 tw32(TG3_CPMU_DRV_STATUS, status);
2716
2717 return status >> TG3_APE_GPIO_MSG_SHIFT;
2718 }
2719
2720 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2721 {
2722 if (!tg3_flag(tp, IS_NIC))
2723 return 0;
2724
2725 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2726 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2727 tg3_asic_rev(tp) == ASIC_REV_5720) {
2728 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2729 return -EIO;
2730
2731 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2732
2733 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2734 TG3_GRC_LCLCTL_PWRSW_DELAY);
2735
2736 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2737 } else {
2738 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2739 TG3_GRC_LCLCTL_PWRSW_DELAY);
2740 }
2741
2742 return 0;
2743 }
2744
2745 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2746 {
2747 u32 grc_local_ctrl;
2748
2749 if (!tg3_flag(tp, IS_NIC) ||
2750 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2751 tg3_asic_rev(tp) == ASIC_REV_5701)
2752 return;
2753
2754 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2755
2756 tw32_wait_f(GRC_LOCAL_CTRL,
2757 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2758 TG3_GRC_LCLCTL_PWRSW_DELAY);
2759
2760 tw32_wait_f(GRC_LOCAL_CTRL,
2761 grc_local_ctrl,
2762 TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764 tw32_wait_f(GRC_LOCAL_CTRL,
2765 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2767 }
2768
2769 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2770 {
2771 if (!tg3_flag(tp, IS_NIC))
2772 return;
2773
2774 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2775 tg3_asic_rev(tp) == ASIC_REV_5701) {
2776 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2777 (GRC_LCLCTRL_GPIO_OE0 |
2778 GRC_LCLCTRL_GPIO_OE1 |
2779 GRC_LCLCTRL_GPIO_OE2 |
2780 GRC_LCLCTRL_GPIO_OUTPUT0 |
2781 GRC_LCLCTRL_GPIO_OUTPUT1),
2782 TG3_GRC_LCLCTL_PWRSW_DELAY);
2783 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2784 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2785 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2786 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2787 GRC_LCLCTRL_GPIO_OE1 |
2788 GRC_LCLCTRL_GPIO_OE2 |
2789 GRC_LCLCTRL_GPIO_OUTPUT0 |
2790 GRC_LCLCTRL_GPIO_OUTPUT1 |
2791 tp->grc_local_ctrl;
2792 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2793 TG3_GRC_LCLCTL_PWRSW_DELAY);
2794
2795 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2796 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY);
2798
2799 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2800 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801 TG3_GRC_LCLCTL_PWRSW_DELAY);
2802 } else {
2803 u32 no_gpio2;
2804 u32 grc_local_ctrl = 0;
2805
2806 /* Workaround to prevent drawing too much current. */
2807 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2808 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2809 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2810 grc_local_ctrl,
2811 TG3_GRC_LCLCTL_PWRSW_DELAY);
2812 }
2813
2814 /* On 5753 and variants, GPIO2 cannot be used. */
2815 no_gpio2 = tp->nic_sram_data_cfg &
2816 NIC_SRAM_DATA_CFG_NO_GPIO2;
2817
2818 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT1 |
2822 GRC_LCLCTRL_GPIO_OUTPUT2;
2823 if (no_gpio2) {
2824 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2825 GRC_LCLCTRL_GPIO_OUTPUT2);
2826 }
2827 tw32_wait_f(GRC_LOCAL_CTRL,
2828 tp->grc_local_ctrl | grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830
2831 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2832
2833 tw32_wait_f(GRC_LOCAL_CTRL,
2834 tp->grc_local_ctrl | grc_local_ctrl,
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836
2837 if (!no_gpio2) {
2838 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2839 tw32_wait_f(GRC_LOCAL_CTRL,
2840 tp->grc_local_ctrl | grc_local_ctrl,
2841 TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 }
2843 }
2844 }
2845
2846 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2847 {
2848 u32 msg = 0;
2849
2850 /* Serialize power state transitions */
2851 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2852 return;
2853
2854 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2855 msg = TG3_GPIO_MSG_NEED_VAUX;
2856
2857 msg = tg3_set_function_status(tp, msg);
2858
2859 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2860 goto done;
2861
2862 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2863 tg3_pwrsrc_switch_to_vaux(tp);
2864 else
2865 tg3_pwrsrc_die_with_vmain(tp);
2866
2867 done:
2868 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2869 }
2870
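/* Decide whether the device (and, on two-port boards, its peer) still
 * needs the auxiliary power source, and switch between Vaux and Vmain
 * accordingly.
 */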
2871 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2872 {
2873 bool need_vaux = false;
2874
2875 /* The GPIOs do something completely different on 57765. */
2876 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2877 return;
2878
2879 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2880 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2881 tg3_asic_rev(tp) == ASIC_REV_5720) {
2882 tg3_frob_aux_power_5717(tp, include_wol ?
2883 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2884 return;
2885 }
2886
2887 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2888 struct net_device *dev_peer;
2889
2890 dev_peer = pci_get_drvdata(tp->pdev_peer);
2891
2892 /* remove_one() may have been run on the peer. */
2893 if (dev_peer) {
2894 struct tg3 *tp_peer = netdev_priv(dev_peer);
2895
2896 if (tg3_flag(tp_peer, INIT_COMPLETE))
2897 return;
2898
2899 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2900 tg3_flag(tp_peer, ENABLE_ASF))
2901 need_vaux = true;
2902 }
2903 }
2904
2905 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2906 tg3_flag(tp, ENABLE_ASF))
2907 need_vaux = true;
2908
2909 if (need_vaux)
2910 tg3_pwrsrc_switch_to_vaux(tp);
2911 else
2912 tg3_pwrsrc_die_with_vmain(tp);
2913 }
2914
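/* Decide whether the 5700's MAC_MODE_LINK_POLARITY bit should be set
 * for the given link speed; the answer depends on the LED mode and on
 * whether a 5411 PHY is fitted.
 */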
2915 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2916 {
2917 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2918 return 1;
2919 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2920 if (speed != SPEED_10)
2921 return 1;
2922 } else if (speed == SPEED_10)
2923 return 1;
2924
2925 return 0;
2926 }
2927
2928 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2929 {
2930 u32 val;
2931
2932 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2933 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2934 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2935 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2936
2937 sg_dig_ctrl |=
2938 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2939 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2940 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2941 }
2942 return;
2943 }
2944
2945 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2946 tg3_bmcr_reset(tp);
2947 val = tr32(GRC_MISC_CFG);
2948 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2949 udelay(40);
2950 return;
2951 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2952 u32 phytest;
2953 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2954 u32 phy;
2955
2956 tg3_writephy(tp, MII_ADVERTISE, 0);
2957 tg3_writephy(tp, MII_BMCR,
2958 BMCR_ANENABLE | BMCR_ANRESTART);
2959
2960 tg3_writephy(tp, MII_TG3_FET_TEST,
2961 phytest | MII_TG3_FET_SHADOW_EN);
2962 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2963 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2964 tg3_writephy(tp,
2965 MII_TG3_FET_SHDW_AUXMODE4,
2966 phy);
2967 }
2968 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2969 }
2970 return;
2971 } else if (do_low_power) {
2972 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2973 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2974
2975 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2976 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2977 MII_TG3_AUXCTL_PCTL_VREG_11V;
2978 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2979 }
2980
2981 /* On the chips checked below, powering down the PHY is known
2982 * to be broken, so leave it powered.
2983 */
2984 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2985 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2986 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2987 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2988 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2989 !tp->pci_fn))
2990 return;
2991
2992 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2993 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2994 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2995 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2996 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2997 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2998 }
2999
3000 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3001 }
3002
3003 /* tp->lock is held. */
3004 static int tg3_nvram_lock(struct tg3 *tp)
3005 {
3006 if (tg3_flag(tp, NVRAM)) {
3007 int i;
3008
3009 if (tp->nvram_lock_cnt == 0) {
3010 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3011 for (i = 0; i < 8000; i++) {
3012 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3013 break;
3014 udelay(20);
3015 }
3016 if (i == 8000) {
3017 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3018 return -ENODEV;
3019 }
3020 }
3021 tp->nvram_lock_cnt++;
3022 }
3023 return 0;
3024 }
3025
3026 /* tp->lock is held. */
3027 static void tg3_nvram_unlock(struct tg3 *tp)
3028 {
3029 if (tg3_flag(tp, NVRAM)) {
3030 if (tp->nvram_lock_cnt > 0)
3031 tp->nvram_lock_cnt--;
3032 if (tp->nvram_lock_cnt == 0)
3033 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3034 }
3035 }
3036
3037 /* tp->lock is held. */
3038 static void tg3_enable_nvram_access(struct tg3 *tp)
3039 {
3040 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3041 u32 nvaccess = tr32(NVRAM_ACCESS);
3042
3043 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3044 }
3045 }
3046
3047 /* tp->lock is held. */
3048 static void tg3_disable_nvram_access(struct tg3 *tp)
3049 {
3050 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3051 u32 nvaccess = tr32(NVRAM_ACCESS);
3052
3053 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3054 }
3055 }
3056
3057 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3058 u32 offset, u32 *val)
3059 {
3060 u32 tmp;
3061 int i;
3062
3063 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3064 return -EINVAL;
3065
3066 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3067 EEPROM_ADDR_DEVID_MASK |
3068 EEPROM_ADDR_READ);
3069 tw32(GRC_EEPROM_ADDR,
3070 tmp |
3071 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3072 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3073 EEPROM_ADDR_ADDR_MASK) |
3074 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3075
3076 for (i = 0; i < 1000; i++) {
3077 tmp = tr32(GRC_EEPROM_ADDR);
3078
3079 if (tmp & EEPROM_ADDR_COMPLETE)
3080 break;
3081 msleep(1);
3082 }
3083 if (!(tmp & EEPROM_ADDR_COMPLETE))
3084 return -EBUSY;
3085
3086 tmp = tr32(GRC_EEPROM_DATA);
3087
3088 /*
3089 * The data will always be opposite the native endian
3090 * format. Perform a blind byteswap to compensate.
3091 */
3092 *val = swab32(tmp);
3093
3094 return 0;
3095 }
3096
3097 #define NVRAM_CMD_TIMEOUT 10000
3098
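/* Start an NVRAM command and poll for completion. Each iteration waits
 * 10 us, so NVRAM_CMD_TIMEOUT bounds a command at roughly 100 ms.
 */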
3099 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3100 {
3101 int i;
3102
3103 tw32(NVRAM_CMD, nvram_cmd);
3104 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3105 udelay(10);
3106 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3107 udelay(10);
3108 break;
3109 }
3110 }
3111
3112 if (i == NVRAM_CMD_TIMEOUT)
3113 return -EBUSY;
3114
3115 return 0;
3116 }
3117
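/* Buffered Atmel flashes are addressed by page rather than linearly.
 * Convert a linear NVRAM offset into the page-based physical address
 * (for example, with 264-byte pages an offset of 528 maps to page 2,
 * byte 0).
 */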
3118 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3119 {
3120 if (tg3_flag(tp, NVRAM) &&
3121 tg3_flag(tp, NVRAM_BUFFERED) &&
3122 tg3_flag(tp, FLASH) &&
3123 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3124 (tp->nvram_jedecnum == JEDEC_ATMEL))
3125
3126 addr = ((addr / tp->nvram_pagesize) <<
3127 ATMEL_AT45DB0X1B_PAGE_POS) +
3128 (addr % tp->nvram_pagesize);
3129
3130 return addr;
3131 }
3132
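/* Inverse of tg3_nvram_phys_addr(): convert a page-based address back
 * into a linear NVRAM offset.
 */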
3133 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3134 {
3135 if (tg3_flag(tp, NVRAM) &&
3136 tg3_flag(tp, NVRAM_BUFFERED) &&
3137 tg3_flag(tp, FLASH) &&
3138 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3139 (tp->nvram_jedecnum == JEDEC_ATMEL))
3140
3141 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3142 tp->nvram_pagesize) +
3143 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3144
3145 return addr;
3146 }
3147
3148 /* NOTE: Data read in from NVRAM is byteswapped according to
3149 * the byteswapping settings for all other register accesses.
3150 * tg3 devices are BE devices, so on a BE machine, the data
3151 * returned will be exactly as it is seen in NVRAM. On a LE
3152 * machine, the 32-bit value will be byteswapped.
3153 */
3154 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3155 {
3156 int ret;
3157
3158 if (!tg3_flag(tp, NVRAM))
3159 return tg3_nvram_read_using_eeprom(tp, offset, val);
3160
3161 offset = tg3_nvram_phys_addr(tp, offset);
3162
3163 if (offset > NVRAM_ADDR_MSK)
3164 return -EINVAL;
3165
3166 ret = tg3_nvram_lock(tp);
3167 if (ret)
3168 return ret;
3169
3170 tg3_enable_nvram_access(tp);
3171
3172 tw32(NVRAM_ADDR, offset);
3173 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3174 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3175
3176 if (ret == 0)
3177 *val = tr32(NVRAM_RDDATA);
3178
3179 tg3_disable_nvram_access(tp);
3180
3181 tg3_nvram_unlock(tp);
3182
3183 return ret;
3184 }
3185
3186 /* Ensures NVRAM data is in bytestream format. */
3187 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3188 {
3189 u32 v;
3190 int res = tg3_nvram_read(tp, offset, &v);
3191 if (!res)
3192 *val = cpu_to_be32(v);
3193 return res;
3194 }
3195
3196 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3197 u32 offset, u32 len, u8 *buf)
3198 {
3199 int i, j, rc = 0;
3200 u32 val;
3201
3202 for (i = 0; i < len; i += 4) {
3203 u32 addr;
3204 __be32 data;
3205
3206 addr = offset + i;
3207
3208 memcpy(&data, buf + i, 4);
3209
3210 /*
3211 * The SEEPROM interface expects the data to always be opposite
3212 * the native endian format. We accomplish this by reversing
3213 * all the operations that would have been performed on the
3214 * data from a call to tg3_nvram_read_be32().
3215 */
3216 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3217
3218 val = tr32(GRC_EEPROM_ADDR);
3219 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3220
3221 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3222 EEPROM_ADDR_READ);
3223 tw32(GRC_EEPROM_ADDR, val |
3224 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3225 (addr & EEPROM_ADDR_ADDR_MASK) |
3226 EEPROM_ADDR_START |
3227 EEPROM_ADDR_WRITE);
3228
3229 for (j = 0; j < 1000; j++) {
3230 val = tr32(GRC_EEPROM_ADDR);
3231
3232 if (val & EEPROM_ADDR_COMPLETE)
3233 break;
3234 msleep(1);
3235 }
3236 if (!(val & EEPROM_ADDR_COMPLETE)) {
3237 rc = -EBUSY;
3238 break;
3239 }
3240 }
3241
3242 return rc;
3243 }
3244
3245 /* offset and length are dword aligned */
3246 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3247 u8 *buf)
3248 {
3249 int ret = 0;
3250 u32 pagesize = tp->nvram_pagesize;
3251 u32 pagemask = pagesize - 1;
3252 u32 nvram_cmd;
3253 u8 *tmp;
3254
3255 tmp = kmalloc(pagesize, GFP_KERNEL);
3256 if (tmp == NULL)
3257 return -ENOMEM;
3258
3259 while (len) {
3260 int j;
3261 u32 phy_addr, page_off, size;
3262
3263 phy_addr = offset & ~pagemask;
3264
3265 for (j = 0; j < pagesize; j += 4) {
3266 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3267 (__be32 *) (tmp + j));
3268 if (ret)
3269 break;
3270 }
3271 if (ret)
3272 break;
3273
3274 page_off = offset & pagemask;
3275 size = pagesize;
3276 if (len < size)
3277 size = len;
3278
3279 len -= size;
3280
3281 memcpy(tmp + page_off, buf, size);
3282
3283 offset = offset + (pagesize - page_off);
3284
3285 tg3_enable_nvram_access(tp);
3286
3287 /*
3288 * Before we can erase the flash page, we need
3289 * to issue a special "write enable" command.
3290 */
3291 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3292
3293 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3294 break;
3295
3296 /* Erase the target page */
3297 tw32(NVRAM_ADDR, phy_addr);
3298
3299 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3301
3302 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3303 break;
3304
3305 /* Issue another write enable to start the write. */
3306 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3307
3308 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3309 break;
3310
3311 for (j = 0; j < pagesize; j += 4) {
3312 __be32 data;
3313
3314 data = *((__be32 *) (tmp + j));
3315
3316 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3317
3318 tw32(NVRAM_ADDR, phy_addr + j);
3319
3320 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3321 NVRAM_CMD_WR;
3322
3323 if (j == 0)
3324 nvram_cmd |= NVRAM_CMD_FIRST;
3325 else if (j == (pagesize - 4))
3326 nvram_cmd |= NVRAM_CMD_LAST;
3327
3328 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3329 if (ret)
3330 break;
3331 }
3332 if (ret)
3333 break;
3334 }
3335
3336 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3337 tg3_nvram_exec_cmd(tp, nvram_cmd);
3338
3339 kfree(tmp);
3340
3341 return ret;
3342 }
3343
3344 /* offset and length are dword aligned */
3345 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3346 u8 *buf)
3347 {
3348 int i, ret = 0;
3349
3350 for (i = 0; i < len; i += 4, offset += 4) {
3351 u32 page_off, phy_addr, nvram_cmd;
3352 __be32 data;
3353
3354 memcpy(&data, buf + i, 4);
3355 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3356
3357 page_off = offset % tp->nvram_pagesize;
3358
3359 phy_addr = tg3_nvram_phys_addr(tp, offset);
3360
3361 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3362
3363 if (page_off == 0 || i == 0)
3364 nvram_cmd |= NVRAM_CMD_FIRST;
3365 if (page_off == (tp->nvram_pagesize - 4))
3366 nvram_cmd |= NVRAM_CMD_LAST;
3367
3368 if (i == (len - 4))
3369 nvram_cmd |= NVRAM_CMD_LAST;
3370
3371 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3372 !tg3_flag(tp, FLASH) ||
3373 !tg3_flag(tp, 57765_PLUS))
3374 tw32(NVRAM_ADDR, phy_addr);
3375
3376 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3377 !tg3_flag(tp, 5755_PLUS) &&
3378 (tp->nvram_jedecnum == JEDEC_ST) &&
3379 (nvram_cmd & NVRAM_CMD_FIRST)) {
3380 u32 cmd;
3381
3382 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3383 ret = tg3_nvram_exec_cmd(tp, cmd);
3384 if (ret)
3385 break;
3386 }
3387 if (!tg3_flag(tp, FLASH)) {
3388 /* We always do complete word writes to eeprom. */
3389 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3390 }
3391
3392 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3393 if (ret)
3394 break;
3395 }
3396 return ret;
3397 }
3398
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3401 {
3402 int ret;
3403
3404 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3405 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3406 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3407 udelay(40);
3408 }
3409
3410 if (!tg3_flag(tp, NVRAM)) {
3411 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3412 } else {
3413 u32 grc_mode;
3414
3415 ret = tg3_nvram_lock(tp);
3416 if (ret)
3417 return ret;
3418
3419 tg3_enable_nvram_access(tp);
3420 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3421 tw32(NVRAM_WRITE1, 0x406);
3422
3423 grc_mode = tr32(GRC_MODE);
3424 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3425
3426 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3427 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3428 buf);
3429 } else {
3430 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3431 buf);
3432 }
3433
3434 grc_mode = tr32(GRC_MODE);
3435 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3436
3437 tg3_disable_nvram_access(tp);
3438 tg3_nvram_unlock(tp);
3439 }
3440
3441 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3442 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3443 udelay(40);
3444 }
3445
3446 return ret;
3447 }
3448
3449 #define RX_CPU_SCRATCH_BASE 0x30000
3450 #define RX_CPU_SCRATCH_SIZE 0x04000
3451 #define TX_CPU_SCRATCH_BASE 0x34000
3452 #define TX_CPU_SCRATCH_SIZE 0x04000
3453
3454 /* tp->lock is held. */
3455 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3456 {
3457 int i;
3458 const int iters = 10000;
3459
3460 for (i = 0; i < iters; i++) {
3461 tw32(cpu_base + CPU_STATE, 0xffffffff);
3462 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3463 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3464 break;
3465 }
3466
3467 return (i == iters) ? -EBUSY : 0;
3468 }
3469
3470 /* tp->lock is held. */
3471 static int tg3_rxcpu_pause(struct tg3 *tp)
3472 {
3473 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3474
3475 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3476 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3477 udelay(10);
3478
3479 return rc;
3480 }
3481
3482 /* tp->lock is held. */
3483 static int tg3_txcpu_pause(struct tg3 *tp)
3484 {
3485 return tg3_pause_cpu(tp, TX_CPU_BASE);
3486 }
3487
3488 /* tp->lock is held. */
3489 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3490 {
3491 tw32(cpu_base + CPU_STATE, 0xffffffff);
3492 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3493 }
3494
3495 /* tp->lock is held. */
3496 static void tg3_rxcpu_resume(struct tg3 *tp)
3497 {
3498 tg3_resume_cpu(tp, RX_CPU_BASE);
3499 }
3500
3501 /* tp->lock is held. */
3502 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3503 {
3504 int rc;
3505
3506 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3507
3508 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3509 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3510
3511 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3512 return 0;
3513 }
3514 if (cpu_base == RX_CPU_BASE) {
3515 rc = tg3_rxcpu_pause(tp);
3516 } else {
3517 /*
3518 * There is only an Rx CPU for the 5750 derivative in the
3519 * BCM4785.
3520 */
3521 if (tg3_flag(tp, IS_SSB_CORE))
3522 return 0;
3523
3524 rc = tg3_txcpu_pause(tp);
3525 }
3526
3527 if (rc) {
3528 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3529 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3530 return -ENODEV;
3531 }
3532
3533 /* Clear firmware's nvram arbitration. */
3534 if (tg3_flag(tp, NVRAM))
3535 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3536 return 0;
3537 }
3538
3539 /* tp->lock is held. */
3540 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3541 u32 cpu_scratch_base, int cpu_scratch_size,
3542 const struct tg3_firmware_hdr *fw_hdr)
3543 {
3544 int err, lock_err, i;
3545 void (*write_op)(struct tg3 *, u32, u32);
3546 u32 *fw_data = (u32 *)(fw_hdr + 1);
3547
3548 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3549 netdev_err(tp->dev,
3550 "%s: Trying to load TX cpu firmware which is 5705\n",
3551 __func__);
3552 return -EINVAL;
3553 }
3554
3555 if (tg3_flag(tp, 5705_PLUS))
3556 write_op = tg3_write_mem;
3557 else
3558 write_op = tg3_write_indirect_reg32;
3559
3560 /* It is possible that bootcode is still loading at this point.
3561 * Get the nvram lock first before halting the cpu.
3562 */
3563 lock_err = tg3_nvram_lock(tp);
3564 err = tg3_halt_cpu(tp, cpu_base);
3565 if (!lock_err)
3566 tg3_nvram_unlock(tp);
3567 if (err)
3568 goto out;
3569
3570 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3571 write_op(tp, cpu_scratch_base + i, 0);
3572 tw32(cpu_base + CPU_STATE, 0xffffffff);
3573 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3574 for (i = 0; i < (tp->fw->size - TG3_FW_HDR_LEN) / sizeof(u32); i++)
3575 write_op(tp, cpu_scratch_base +
3576 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3577 (i * sizeof(u32)),
3578 be32_to_cpu(fw_data[i]));
3579
3581 err = 0;
3582
3583 out:
3584 return err;
3585 }
3586
3587 /* tp->lock is held. */
3588 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3589 {
3590 int i;
3591 const int iters = 5;
3592
3593 tw32(cpu_base + CPU_STATE, 0xffffffff);
3594 tw32_f(cpu_base + CPU_PC, pc);
3595
3596 for (i = 0; i < iters; i++) {
3597 if (tr32(cpu_base + CPU_PC) == pc)
3598 break;
3599 tw32(cpu_base + CPU_STATE, 0xffffffff);
3600 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3601 tw32_f(cpu_base + CPU_PC, pc);
3602 udelay(1000);
3603 }
3604
3605 return (i == iters) ? -EBUSY : 0;
3606 }
3607
3608 /* tp->lock is held. */
3609 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3610 {
3611 const struct tg3_firmware_hdr *fw_hdr;
3612 int err;
3613
3614 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3615
3616 /* Firmware blob starts with version numbers, followed by
3617 * start address and length. We are setting complete length.
3618 * length = end_address_of_bss - start_address_of_text.
3619 * Remainder is the blob to be loaded contiguously
3620 * from start address. */
3621
3622 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3623 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3624 fw_hdr);
3625 if (err)
3626 return err;
3627
3628 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3629 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3630 fw_hdr);
3631 if (err)
3632 return err;
3633
3634 /* Now start up only the RX cpu. */
3635 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3636 be32_to_cpu(fw_hdr->base_addr));
3637 if (err) {
3638 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3639 "should be %08x\n", __func__,
3640 tr32(RX_CPU_BASE + CPU_PC),
3641 be32_to_cpu(fw_hdr->base_addr));
3642 return -ENODEV;
3643 }
3644
3645 tg3_rxcpu_resume(tp);
3646
3647 return 0;
3648 }
3649
3650 /* tp->lock is held. */
3651 static int tg3_load_tso_firmware(struct tg3 *tp)
3652 {
3653 const struct tg3_firmware_hdr *fw_hdr;
3654 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3655 int err;
3656
3657 if (!tg3_flag(tp, FW_TSO))
3658 return 0;
3659
3660 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3661
3662 /* Firmware blob starts with version numbers, followed by
3663 * start address and length. We are setting complete length.
3664 * length = end_address_of_bss - start_address_of_text.
3665 * Remainder is the blob to be loaded contiguously
3666 * from start address. */
3667
3668 cpu_scratch_size = tp->fw_len;
3669
3670 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3671 cpu_base = RX_CPU_BASE;
3672 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3673 } else {
3674 cpu_base = TX_CPU_BASE;
3675 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3676 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3677 }
3678
3679 err = tg3_load_firmware_cpu(tp, cpu_base,
3680 cpu_scratch_base, cpu_scratch_size,
3681 fw_hdr);
3682 if (err)
3683 return err;
3684
3685 /* Now start up the cpu. */
3686 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3687 be32_to_cpu(fw_hdr->base_addr));
3688 if (err) {
3689 netdev_err(tp->dev,
3690 "%s fails to set CPU PC, is %08x should be %08x\n",
3691 __func__, tr32(cpu_base + CPU_PC),
3692 be32_to_cpu(fw_hdr->base_addr));
3693 return -ENODEV;
3694 }
3695
3696 tg3_resume_cpu(tp, cpu_base);
3697 return 0;
3698 }
3699
3701 /* tp->lock is held. */
3702 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3703 {
3704 u32 addr_high, addr_low;
3705 int i;
3706
3707 addr_high = ((tp->dev->dev_addr[0] << 8) |
3708 tp->dev->dev_addr[1]);
3709 addr_low = ((tp->dev->dev_addr[2] << 24) |
3710 (tp->dev->dev_addr[3] << 16) |
3711 (tp->dev->dev_addr[4] << 8) |
3712 (tp->dev->dev_addr[5] << 0));
3713 for (i = 0; i < 4; i++) {
3714 if (i == 1 && skip_mac_1)
3715 continue;
3716 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3717 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3718 }
3719
3720 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3721 tg3_asic_rev(tp) == ASIC_REV_5704) {
3722 for (i = 0; i < 12; i++) {
3723 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3724 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3725 }
3726 }
3727
3728 addr_high = (tp->dev->dev_addr[0] +
3729 tp->dev->dev_addr[1] +
3730 tp->dev->dev_addr[2] +
3731 tp->dev->dev_addr[3] +
3732 tp->dev->dev_addr[4] +
3733 tp->dev->dev_addr[5]) &
3734 TX_BACKOFF_SEED_MASK;
3735 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3736 }
3737
3738 static void tg3_enable_register_access(struct tg3 *tp)
3739 {
3740 /*
3741 * Make sure register accesses (indirect or otherwise) will function
3742 * correctly.
3743 */
3744 pci_write_config_dword(tp->pdev,
3745 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3746 }
3747
3748 static int tg3_power_up(struct tg3 *tp)
3749 {
3750 int err;
3751
3752 tg3_enable_register_access(tp);
3753
3754 err = pci_set_power_state(tp->pdev, PCI_D0);
3755 if (!err) {
3756 /* Switch out of Vaux if it is a NIC */
3757 tg3_pwrsrc_switch_to_vmain(tp);
3758 } else {
3759 netdev_err(tp->dev, "Transition to D0 failed\n");
3760 }
3761
3762 return err;
3763 }
3764
3765 static int tg3_setup_phy(struct tg3 *, int);
3766
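/* Quiesce the chip ahead of a low-power transition: record the current
 * link settings, arm wake-on-LAN if requested, gate the clocks the
 * hardware allows, and finally power down the PHY.
 */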
3767 static int tg3_power_down_prepare(struct tg3 *tp)
3768 {
3769 u32 misc_host_ctrl;
3770 bool device_should_wake, do_low_power;
3771
3772 tg3_enable_register_access(tp);
3773
3774 /* Restore the CLKREQ setting. */
3775 if (tg3_flag(tp, CLKREQ_BUG))
3776 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3777 PCI_EXP_LNKCTL_CLKREQ_EN);
3778
3779 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3780 tw32(TG3PCI_MISC_HOST_CTRL,
3781 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3782
3783 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3784 tg3_flag(tp, WOL_ENABLE);
3785
3786 if (tg3_flag(tp, USE_PHYLIB)) {
3787 do_low_power = false;
3788 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3789 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3790 struct phy_device *phydev;
3791 u32 phyid, advertising;
3792
3793 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3794
3795 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3796
3797 tp->link_config.speed = phydev->speed;
3798 tp->link_config.duplex = phydev->duplex;
3799 tp->link_config.autoneg = phydev->autoneg;
3800 tp->link_config.advertising = phydev->advertising;
3801
3802 advertising = ADVERTISED_TP |
3803 ADVERTISED_Pause |
3804 ADVERTISED_Autoneg |
3805 ADVERTISED_10baseT_Half;
3806
3807 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3808 if (tg3_flag(tp, WOL_SPEED_100MB))
3809 advertising |=
3810 ADVERTISED_100baseT_Half |
3811 ADVERTISED_100baseT_Full |
3812 ADVERTISED_10baseT_Full;
3813 else
3814 advertising |= ADVERTISED_10baseT_Full;
3815 }
3816
3817 phydev->advertising = advertising;
3818
3819 phy_start_aneg(phydev);
3820
3821 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3822 if (phyid != PHY_ID_BCMAC131) {
3823 phyid &= PHY_BCM_OUI_MASK;
3824 if (phyid == PHY_BCM_OUI_1 ||
3825 phyid == PHY_BCM_OUI_2 ||
3826 phyid == PHY_BCM_OUI_3)
3827 do_low_power = true;
3828 }
3829 }
3830 } else {
3831 do_low_power = true;
3832
3833 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3834 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3835
3836 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3837 tg3_setup_phy(tp, 0);
3838 }
3839
3840 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3841 u32 val;
3842
3843 val = tr32(GRC_VCPU_EXT_CTRL);
3844 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3845 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3846 int i;
3847 u32 val;
3848
3849 for (i = 0; i < 200; i++) {
3850 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3851 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3852 break;
3853 msleep(1);
3854 }
3855 }
3856 if (tg3_flag(tp, WOL_CAP))
3857 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3858 WOL_DRV_STATE_SHUTDOWN |
3859 WOL_DRV_WOL |
3860 WOL_SET_MAGIC_PKT);
3861
3862 if (device_should_wake) {
3863 u32 mac_mode;
3864
3865 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3866 if (do_low_power &&
3867 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3868 tg3_phy_auxctl_write(tp,
3869 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3870 MII_TG3_AUXCTL_PCTL_WOL_EN |
3871 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3872 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3873 udelay(40);
3874 }
3875
3876 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3877 mac_mode = MAC_MODE_PORT_MODE_GMII;
3878 else
3879 mac_mode = MAC_MODE_PORT_MODE_MII;
3880
3881 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3882 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3883 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3884 SPEED_100 : SPEED_10;
3885 if (tg3_5700_link_polarity(tp, speed))
3886 mac_mode |= MAC_MODE_LINK_POLARITY;
3887 else
3888 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3889 }
3890 } else {
3891 mac_mode = MAC_MODE_PORT_MODE_TBI;
3892 }
3893
3894 if (!tg3_flag(tp, 5750_PLUS))
3895 tw32(MAC_LED_CTRL, tp->led_ctrl);
3896
3897 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3898 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3899 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3900 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3901
3902 if (tg3_flag(tp, ENABLE_APE))
3903 mac_mode |= MAC_MODE_APE_TX_EN |
3904 MAC_MODE_APE_RX_EN |
3905 MAC_MODE_TDE_ENABLE;
3906
3907 tw32_f(MAC_MODE, mac_mode);
3908 udelay(100);
3909
3910 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3911 udelay(10);
3912 }
3913
3914 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3915 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3916 tg3_asic_rev(tp) == ASIC_REV_5701)) {
3917 u32 base_val;
3918
3919 base_val = tp->pci_clock_ctrl;
3920 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3921 CLOCK_CTRL_TXCLK_DISABLE);
3922
3923 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3924 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3925 } else if (tg3_flag(tp, 5780_CLASS) ||
3926 tg3_flag(tp, CPMU_PRESENT) ||
3927 tg3_asic_rev(tp) == ASIC_REV_5906) {
3928 /* do nothing */
3929 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3930 u32 newbits1, newbits2;
3931
3932 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3933 tg3_asic_rev(tp) == ASIC_REV_5701) {
3934 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3935 CLOCK_CTRL_TXCLK_DISABLE |
3936 CLOCK_CTRL_ALTCLK);
3937 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3938 } else if (tg3_flag(tp, 5705_PLUS)) {
3939 newbits1 = CLOCK_CTRL_625_CORE;
3940 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3941 } else {
3942 newbits1 = CLOCK_CTRL_ALTCLK;
3943 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3944 }
3945
3946 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3947 40);
3948
3949 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3950 40);
3951
3952 if (!tg3_flag(tp, 5705_PLUS)) {
3953 u32 newbits3;
3954
3955 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3956 tg3_asic_rev(tp) == ASIC_REV_5701) {
3957 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3958 CLOCK_CTRL_TXCLK_DISABLE |
3959 CLOCK_CTRL_44MHZ_CORE);
3960 } else {
3961 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3962 }
3963
3964 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3965 tp->pci_clock_ctrl | newbits3, 40);
3966 }
3967 }
3968
3969 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3970 tg3_power_down_phy(tp, do_low_power);
3971
3972 tg3_frob_aux_power(tp, true);
3973
3974 /* Workaround for unstable PLL clock */
3975 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
3976 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
3977 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
3978 u32 val = tr32(0x7d00);
3979
3980 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3981 tw32(0x7d00, val);
3982 if (!tg3_flag(tp, ENABLE_ASF)) {
3983 int err;
3984
3985 err = tg3_nvram_lock(tp);
3986 tg3_halt_cpu(tp, RX_CPU_BASE);
3987 if (!err)
3988 tg3_nvram_unlock(tp);
3989 }
3990 }
3991
3992 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3993
3994 return 0;
3995 }
3996
3997 static void tg3_power_down(struct tg3 *tp)
3998 {
3999 tg3_power_down_prepare(tp);
4000
4001 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4002 pci_set_power_state(tp->pdev, PCI_D3hot);
4003 }
4004
4005 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4006 {
4007 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4008 case MII_TG3_AUX_STAT_10HALF:
4009 *speed = SPEED_10;
4010 *duplex = DUPLEX_HALF;
4011 break;
4012
4013 case MII_TG3_AUX_STAT_10FULL:
4014 *speed = SPEED_10;
4015 *duplex = DUPLEX_FULL;
4016 break;
4017
4018 case MII_TG3_AUX_STAT_100HALF:
4019 *speed = SPEED_100;
4020 *duplex = DUPLEX_HALF;
4021 break;
4022
4023 case MII_TG3_AUX_STAT_100FULL:
4024 *speed = SPEED_100;
4025 *duplex = DUPLEX_FULL;
4026 break;
4027
4028 case MII_TG3_AUX_STAT_1000HALF:
4029 *speed = SPEED_1000;
4030 *duplex = DUPLEX_HALF;
4031 break;
4032
4033 case MII_TG3_AUX_STAT_1000FULL:
4034 *speed = SPEED_1000;
4035 *duplex = DUPLEX_FULL;
4036 break;
4037
4038 default:
4039 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4040 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4041 SPEED_10;
4042 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4043 DUPLEX_HALF;
4044 break;
4045 }
4046 *speed = SPEED_UNKNOWN;
4047 *duplex = DUPLEX_UNKNOWN;
4048 break;
4049 }
4050 }
4051
4052 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4053 {
4054 int err = 0;
4055 u32 val, new_adv;
4056
4057 new_adv = ADVERTISE_CSMA;
4058 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4059 new_adv |= mii_advertise_flowctrl(flowctrl);
4060
4061 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4062 if (err)
4063 goto done;
4064
4065 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4066 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4067
4068 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4069 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4070 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4071
4072 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4073 if (err)
4074 goto done;
4075 }
4076
4077 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4078 goto done;
4079
4080 tw32(TG3_CPMU_EEE_MODE,
4081 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4082
4083 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4084 if (!err) {
4085 u32 err2;
4086
4087 val = 0;
4088 		/* Advertise 100BASE-TX EEE ability */
4089 if (advertise & ADVERTISED_100baseT_Full)
4090 val |= MDIO_AN_EEE_ADV_100TX;
4091 		/* Advertise 1000BASE-T EEE ability */
4092 if (advertise & ADVERTISED_1000baseT_Full)
4093 val |= MDIO_AN_EEE_ADV_1000T;
4094 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4095 if (err)
4096 val = 0;
4097
4098 switch (tg3_asic_rev(tp)) {
4099 case ASIC_REV_5717:
4100 case ASIC_REV_57765:
4101 case ASIC_REV_57766:
4102 case ASIC_REV_5719:
4103 			/* If we advertised any EEE abilities above... */
4104 if (val)
4105 val = MII_TG3_DSP_TAP26_ALNOKO |
4106 MII_TG3_DSP_TAP26_RMRXSTO |
4107 MII_TG3_DSP_TAP26_OPCSINPT;
4108 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4109 /* Fall through */
4110 case ASIC_REV_5720:
4111 case ASIC_REV_5762:
4112 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4113 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4114 MII_TG3_DSP_CH34TP2_HIBW01);
4115 }
4116
4117 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4118 if (!err)
4119 err = err2;
4120 }
4121
4122 done:
4123 return err;
4124 }
4125
4126 static void tg3_phy_copper_begin(struct tg3 *tp)
4127 {
4128 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4129 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4130 u32 adv, fc;
4131
4132 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4133 adv = ADVERTISED_10baseT_Half |
4134 ADVERTISED_10baseT_Full;
4135 if (tg3_flag(tp, WOL_SPEED_100MB))
4136 adv |= ADVERTISED_100baseT_Half |
4137 ADVERTISED_100baseT_Full;
4138
4139 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4140 } else {
4141 adv = tp->link_config.advertising;
4142 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4143 adv &= ~(ADVERTISED_1000baseT_Half |
4144 ADVERTISED_1000baseT_Full);
4145
4146 fc = tp->link_config.flowctrl;
4147 }
4148
4149 tg3_phy_autoneg_cfg(tp, adv, fc);
4150
4151 tg3_writephy(tp, MII_BMCR,
4152 BMCR_ANENABLE | BMCR_ANRESTART);
4153 } else {
4154 int i;
4155 u32 bmcr, orig_bmcr;
4156
4157 tp->link_config.active_speed = tp->link_config.speed;
4158 tp->link_config.active_duplex = tp->link_config.duplex;
4159
4160 bmcr = 0;
4161 switch (tp->link_config.speed) {
4162 default:
4163 case SPEED_10:
4164 break;
4165
4166 case SPEED_100:
4167 bmcr |= BMCR_SPEED100;
4168 break;
4169
4170 case SPEED_1000:
4171 bmcr |= BMCR_SPEED1000;
4172 break;
4173 }
4174
4175 if (tp->link_config.duplex == DUPLEX_FULL)
4176 bmcr |= BMCR_FULLDPLX;
4177
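		/* Note: when the forced mode differs from the current BMCR,
		 * the code below first drops the link by entering loopback,
		 * polls up to ~15 ms (1500 x udelay(10)) for link-down, and
		 * only then writes the new BMCR value.
		 */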
4178 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4179 (bmcr != orig_bmcr)) {
4180 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4181 for (i = 0; i < 1500; i++) {
4182 u32 tmp;
4183
4184 udelay(10);
4185 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4186 tg3_readphy(tp, MII_BMSR, &tmp))
4187 continue;
4188 if (!(tmp & BMSR_LSTATUS)) {
4189 udelay(40);
4190 break;
4191 }
4192 }
4193 tg3_writephy(tp, MII_BMCR, bmcr);
4194 udelay(40);
4195 }
4196 }
4197 }
4198
4199 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4200 {
4201 int err;
4202
4203 /* Turn off tap power management. */
4204 /* Set Extended packet length bit */
4205 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4206
4207 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4208 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4209 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4210 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4211 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4212
4213 udelay(40);
4214
4215 return err;
4216 }
4217
4218 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4219 {
4220 u32 advmsk, tgtadv, advertising;
4221
4222 advertising = tp->link_config.advertising;
4223 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4224
4225 advmsk = ADVERTISE_ALL;
4226 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4227 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4228 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4229 }
4230
4231 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4232 return false;
4233
4234 if ((*lcladv & advmsk) != tgtadv)
4235 return false;
4236
4237 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4238 u32 tg3_ctrl;
4239
4240 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4241
4242 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4243 return false;
4244
4245 if (tgtadv &&
4246 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4247 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4248 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4249 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4250 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4251 } else {
4252 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4253 }
4254
4255 if (tg3_ctrl != tgtadv)
4256 return false;
4257 }
4258
4259 return true;
4260 }
4261
4262 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4263 {
4264 u32 lpeth = 0;
4265
4266 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4267 u32 val;
4268
4269 if (tg3_readphy(tp, MII_STAT1000, &val))
4270 return false;
4271
4272 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4273 }
4274
4275 if (tg3_readphy(tp, MII_LPA, rmtadv))
4276 return false;
4277
4278 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4279 tp->link_config.rmt_adv = lpeth;
4280
4281 return true;
4282 }
4283
4284 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4285 {
4286 if (curr_link_up != tp->link_up) {
4287 if (curr_link_up) {
4288 tg3_carrier_on(tp);
4289 } else {
4290 tg3_carrier_off(tp);
4291 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4292 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4293 }
4294
4295 tg3_link_report(tp);
4296 return true;
4297 }
4298
4299 return false;
4300 }
4301
4302 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4303 {
4304 int current_link_up;
4305 u32 bmsr, val;
4306 u32 lcl_adv, rmt_adv;
4307 u16 current_speed;
4308 u8 current_duplex;
4309 int i, err;
4310
4311 tw32(MAC_EVENT, 0);
4312
4313 tw32_f(MAC_STATUS,
4314 (MAC_STATUS_SYNC_CHANGED |
4315 MAC_STATUS_CFG_CHANGED |
4316 MAC_STATUS_MI_COMPLETION |
4317 MAC_STATUS_LNKSTATE_CHANGED));
4318 udelay(40);
4319
4320 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4321 tw32_f(MAC_MI_MODE,
4322 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4323 udelay(80);
4324 }
4325
4326 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4327
4328 /* Some third-party PHYs need to be reset on link going
4329 * down.
4330 */
4331 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4332 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4333 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4334 tp->link_up) {
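		/* BMSR latches link-down events, so it is read twice here
		 * (and elsewhere in this function): the first read returns
		 * and clears the latched state, the second the current one.
		 */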
4335 tg3_readphy(tp, MII_BMSR, &bmsr);
4336 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4337 !(bmsr & BMSR_LSTATUS))
4338 force_reset = 1;
4339 }
4340 if (force_reset)
4341 tg3_phy_reset(tp);
4342
4343 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4344 tg3_readphy(tp, MII_BMSR, &bmsr);
4345 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4346 !tg3_flag(tp, INIT_COMPLETE))
4347 bmsr = 0;
4348
4349 if (!(bmsr & BMSR_LSTATUS)) {
4350 err = tg3_init_5401phy_dsp(tp);
4351 if (err)
4352 return err;
4353
4354 tg3_readphy(tp, MII_BMSR, &bmsr);
4355 for (i = 0; i < 1000; i++) {
4356 udelay(10);
4357 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4358 (bmsr & BMSR_LSTATUS)) {
4359 udelay(40);
4360 break;
4361 }
4362 }
4363
4364 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4365 TG3_PHY_REV_BCM5401_B0 &&
4366 !(bmsr & BMSR_LSTATUS) &&
4367 tp->link_config.active_speed == SPEED_1000) {
4368 err = tg3_phy_reset(tp);
4369 if (!err)
4370 err = tg3_init_5401phy_dsp(tp);
4371 if (err)
4372 return err;
4373 }
4374 }
4375 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4376 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4377 /* 5701 {A0,B0} CRC bug workaround */
4378 tg3_writephy(tp, 0x15, 0x0a75);
4379 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4380 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4381 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4382 }
4383
4384 /* Clear pending interrupts... */
4385 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4386 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4387
4388 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4389 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4390 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4391 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4392
4393 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4394 tg3_asic_rev(tp) == ASIC_REV_5701) {
4395 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4396 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4397 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4398 else
4399 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4400 }
4401
4402 current_link_up = 0;
4403 current_speed = SPEED_UNKNOWN;
4404 current_duplex = DUPLEX_UNKNOWN;
4405 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4406 tp->link_config.rmt_adv = 0;
4407
4408 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4409 err = tg3_phy_auxctl_read(tp,
4410 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4411 &val);
4412 if (!err && !(val & (1 << 10))) {
4413 tg3_phy_auxctl_write(tp,
4414 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4415 val | (1 << 10));
4416 goto relink;
4417 }
4418 }
4419
4420 bmsr = 0;
4421 for (i = 0; i < 100; i++) {
4422 tg3_readphy(tp, MII_BMSR, &bmsr);
4423 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4424 (bmsr & BMSR_LSTATUS))
4425 break;
4426 udelay(40);
4427 }
4428
4429 if (bmsr & BMSR_LSTATUS) {
4430 u32 aux_stat, bmcr;
4431
4432 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4433 for (i = 0; i < 2000; i++) {
4434 udelay(10);
4435 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4436 aux_stat)
4437 break;
4438 }
4439
4440 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4441 &current_speed,
4442 &current_duplex);
4443
4444 bmcr = 0;
4445 for (i = 0; i < 200; i++) {
4446 tg3_readphy(tp, MII_BMCR, &bmcr);
4447 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4448 continue;
4449 if (bmcr && bmcr != 0x7fff)
4450 break;
4451 udelay(10);
4452 }
4453
4454 lcl_adv = 0;
4455 rmt_adv = 0;
4456
4457 tp->link_config.active_speed = current_speed;
4458 tp->link_config.active_duplex = current_duplex;
4459
4460 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4461 if ((bmcr & BMCR_ANENABLE) &&
4462 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4463 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4464 current_link_up = 1;
4465 } else {
4466 if (!(bmcr & BMCR_ANENABLE) &&
4467 tp->link_config.speed == current_speed &&
4468 tp->link_config.duplex == current_duplex &&
4469 tp->link_config.flowctrl ==
4470 tp->link_config.active_flowctrl) {
4471 current_link_up = 1;
4472 }
4473 }
4474
4475 if (current_link_up == 1 &&
4476 tp->link_config.active_duplex == DUPLEX_FULL) {
4477 u32 reg, bit;
4478
4479 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4480 reg = MII_TG3_FET_GEN_STAT;
4481 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4482 } else {
4483 reg = MII_TG3_EXT_STAT;
4484 bit = MII_TG3_EXT_STAT_MDIX;
4485 }
4486
4487 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4488 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4489
4490 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4491 }
4492 }
4493
4494 relink:
4495 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4496 tg3_phy_copper_begin(tp);
4497
4498 if (tg3_flag(tp, ROBOSWITCH)) {
4499 current_link_up = 1;
4500 			/* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4501 current_speed = SPEED_1000;
4502 current_duplex = DUPLEX_FULL;
4503 tp->link_config.active_speed = current_speed;
4504 tp->link_config.active_duplex = current_duplex;
4505 }
4506
4507 tg3_readphy(tp, MII_BMSR, &bmsr);
4508 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4509 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4510 current_link_up = 1;
4511 }
4512
4513 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4514 if (current_link_up == 1) {
4515 if (tp->link_config.active_speed == SPEED_100 ||
4516 tp->link_config.active_speed == SPEED_10)
4517 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4518 else
4519 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4520 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4521 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4522 else
4523 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4524
4525 	/* In order for the 5750 core in the BCM4785 chip to work properly
4526 	 * in RGMII mode, the LED Control Register must be set up.
4527 */
4528 if (tg3_flag(tp, RGMII_MODE)) {
4529 u32 led_ctrl = tr32(MAC_LED_CTRL);
4530 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4531
4532 if (tp->link_config.active_speed == SPEED_10)
4533 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4534 else if (tp->link_config.active_speed == SPEED_100)
4535 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4536 LED_CTRL_100MBPS_ON);
4537 else if (tp->link_config.active_speed == SPEED_1000)
4538 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4539 LED_CTRL_1000MBPS_ON);
4540
4541 tw32(MAC_LED_CTRL, led_ctrl);
4542 udelay(40);
4543 }
4544
4545 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4546 if (tp->link_config.active_duplex == DUPLEX_HALF)
4547 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4548
4549 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4550 if (current_link_up == 1 &&
4551 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4552 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4553 else
4554 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4555 }
4556
4557 /* ??? Without this setting Netgear GA302T PHY does not
4558 * ??? send/receive packets...
4559 */
4560 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4561 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4562 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4563 tw32_f(MAC_MI_MODE, tp->mi_mode);
4564 udelay(80);
4565 }
4566
4567 tw32_f(MAC_MODE, tp->mac_mode);
4568 udelay(40);
4569
4570 tg3_phy_eee_adjust(tp, current_link_up);
4571
4572 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4573 /* Polled via timer. */
4574 tw32_f(MAC_EVENT, 0);
4575 } else {
4576 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4577 }
4578 udelay(40);
4579
4580 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4581 current_link_up == 1 &&
4582 tp->link_config.active_speed == SPEED_1000 &&
4583 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4584 udelay(120);
4585 tw32_f(MAC_STATUS,
4586 (MAC_STATUS_SYNC_CHANGED |
4587 MAC_STATUS_CFG_CHANGED));
4588 udelay(40);
4589 tg3_write_mem(tp,
4590 NIC_SRAM_FIRMWARE_MBOX,
4591 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4592 }
4593
4594 /* Prevent send BD corruption. */
4595 if (tg3_flag(tp, CLKREQ_BUG)) {
4596 if (tp->link_config.active_speed == SPEED_100 ||
4597 tp->link_config.active_speed == SPEED_10)
4598 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4599 PCI_EXP_LNKCTL_CLKREQ_EN);
4600 else
4601 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4602 PCI_EXP_LNKCTL_CLKREQ_EN);
4603 }
4604
4605 tg3_test_and_report_link_chg(tp, current_link_up);
4606
4607 return 0;
4608 }
4609
4610 struct tg3_fiber_aneginfo {
4611 int state;
4612 #define ANEG_STATE_UNKNOWN 0
4613 #define ANEG_STATE_AN_ENABLE 1
4614 #define ANEG_STATE_RESTART_INIT 2
4615 #define ANEG_STATE_RESTART 3
4616 #define ANEG_STATE_DISABLE_LINK_OK 4
4617 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4618 #define ANEG_STATE_ABILITY_DETECT 6
4619 #define ANEG_STATE_ACK_DETECT_INIT 7
4620 #define ANEG_STATE_ACK_DETECT 8
4621 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4622 #define ANEG_STATE_COMPLETE_ACK 10
4623 #define ANEG_STATE_IDLE_DETECT_INIT 11
4624 #define ANEG_STATE_IDLE_DETECT 12
4625 #define ANEG_STATE_LINK_OK 13
4626 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4627 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4628
4629 u32 flags;
4630 #define MR_AN_ENABLE 0x00000001
4631 #define MR_RESTART_AN 0x00000002
4632 #define MR_AN_COMPLETE 0x00000004
4633 #define MR_PAGE_RX 0x00000008
4634 #define MR_NP_LOADED 0x00000010
4635 #define MR_TOGGLE_TX 0x00000020
4636 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4637 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4638 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4639 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4640 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4641 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4642 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4643 #define MR_TOGGLE_RX 0x00002000
4644 #define MR_NP_RX 0x00004000
4645
4646 #define MR_LINK_OK 0x80000000
4647
4648 unsigned long link_time, cur_time;
4649
4650 u32 ability_match_cfg;
4651 int ability_match_count;
4652
4653 char ability_match, idle_match, ack_match;
4654
4655 u32 txconfig, rxconfig;
4656 #define ANEG_CFG_NP 0x00000080
4657 #define ANEG_CFG_ACK 0x00000040
4658 #define ANEG_CFG_RF2 0x00000020
4659 #define ANEG_CFG_RF1 0x00000010
4660 #define ANEG_CFG_PS2 0x00000001
4661 #define ANEG_CFG_PS1 0x00008000
4662 #define ANEG_CFG_HD 0x00004000
4663 #define ANEG_CFG_FD 0x00002000
4664 #define ANEG_CFG_INVAL 0x00001f06
4665
4666 };
4667 #define ANEG_OK 0
4668 #define ANEG_DONE 1
4669 #define ANEG_TIMER_ENAB 2
4670 #define ANEG_FAILED -1
4671
4672 #define ANEG_STATE_SETTLE_TIME 10000
4673
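/* Note: tg3_fiber_aneg_smachine() is clocked from fiber_autoneg() at
 * roughly one tick per microsecond (udelay(1) per loop pass), so
 * ANEG_STATE_SETTLE_TIME corresponds to about 10 ms of settle time.
 */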
4674 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4675 struct tg3_fiber_aneginfo *ap)
4676 {
4677 u16 flowctrl;
4678 unsigned long delta;
4679 u32 rx_cfg_reg;
4680 int ret;
4681
4682 if (ap->state == ANEG_STATE_UNKNOWN) {
4683 ap->rxconfig = 0;
4684 ap->link_time = 0;
4685 ap->cur_time = 0;
4686 ap->ability_match_cfg = 0;
4687 ap->ability_match_count = 0;
4688 ap->ability_match = 0;
4689 ap->idle_match = 0;
4690 ap->ack_match = 0;
4691 }
4692 ap->cur_time++;
4693
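	/* A received config word only counts as an ability match once the
	 * same value has been seen on more than one consecutive poll
	 * (ability_match_count > 1), which filters out transient values.
	 */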
4694 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4695 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4696
4697 if (rx_cfg_reg != ap->ability_match_cfg) {
4698 ap->ability_match_cfg = rx_cfg_reg;
4699 ap->ability_match = 0;
4700 ap->ability_match_count = 0;
4701 } else {
4702 if (++ap->ability_match_count > 1) {
4703 ap->ability_match = 1;
4704 ap->ability_match_cfg = rx_cfg_reg;
4705 }
4706 }
4707 if (rx_cfg_reg & ANEG_CFG_ACK)
4708 ap->ack_match = 1;
4709 else
4710 ap->ack_match = 0;
4711
4712 ap->idle_match = 0;
4713 } else {
4714 ap->idle_match = 1;
4715 ap->ability_match_cfg = 0;
4716 ap->ability_match_count = 0;
4717 ap->ability_match = 0;
4718 ap->ack_match = 0;
4719
4720 rx_cfg_reg = 0;
4721 }
4722
4723 ap->rxconfig = rx_cfg_reg;
4724 ret = ANEG_OK;
4725
4726 switch (ap->state) {
4727 case ANEG_STATE_UNKNOWN:
4728 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4729 ap->state = ANEG_STATE_AN_ENABLE;
4730
4731 /* fallthru */
4732 case ANEG_STATE_AN_ENABLE:
4733 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4734 if (ap->flags & MR_AN_ENABLE) {
4735 ap->link_time = 0;
4736 ap->cur_time = 0;
4737 ap->ability_match_cfg = 0;
4738 ap->ability_match_count = 0;
4739 ap->ability_match = 0;
4740 ap->idle_match = 0;
4741 ap->ack_match = 0;
4742
4743 ap->state = ANEG_STATE_RESTART_INIT;
4744 } else {
4745 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4746 }
4747 break;
4748
4749 case ANEG_STATE_RESTART_INIT:
4750 ap->link_time = ap->cur_time;
4751 ap->flags &= ~(MR_NP_LOADED);
4752 ap->txconfig = 0;
4753 tw32(MAC_TX_AUTO_NEG, 0);
4754 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4755 tw32_f(MAC_MODE, tp->mac_mode);
4756 udelay(40);
4757
4758 ret = ANEG_TIMER_ENAB;
4759 ap->state = ANEG_STATE_RESTART;
4760
4761 /* fallthru */
4762 case ANEG_STATE_RESTART:
4763 delta = ap->cur_time - ap->link_time;
4764 if (delta > ANEG_STATE_SETTLE_TIME)
4765 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4766 else
4767 ret = ANEG_TIMER_ENAB;
4768 break;
4769
4770 case ANEG_STATE_DISABLE_LINK_OK:
4771 ret = ANEG_DONE;
4772 break;
4773
4774 case ANEG_STATE_ABILITY_DETECT_INIT:
4775 ap->flags &= ~(MR_TOGGLE_TX);
4776 ap->txconfig = ANEG_CFG_FD;
4777 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4778 if (flowctrl & ADVERTISE_1000XPAUSE)
4779 ap->txconfig |= ANEG_CFG_PS1;
4780 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4781 ap->txconfig |= ANEG_CFG_PS2;
4782 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4783 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4784 tw32_f(MAC_MODE, tp->mac_mode);
4785 udelay(40);
4786
4787 ap->state = ANEG_STATE_ABILITY_DETECT;
4788 break;
4789
4790 case ANEG_STATE_ABILITY_DETECT:
4791 if (ap->ability_match != 0 && ap->rxconfig != 0)
4792 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4793 break;
4794
4795 case ANEG_STATE_ACK_DETECT_INIT:
4796 ap->txconfig |= ANEG_CFG_ACK;
4797 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4798 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4799 tw32_f(MAC_MODE, tp->mac_mode);
4800 udelay(40);
4801
4802 ap->state = ANEG_STATE_ACK_DETECT;
4803
4804 /* fallthru */
4805 case ANEG_STATE_ACK_DETECT:
4806 if (ap->ack_match != 0) {
4807 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4808 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4809 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4810 } else {
4811 ap->state = ANEG_STATE_AN_ENABLE;
4812 }
4813 } else if (ap->ability_match != 0 &&
4814 ap->rxconfig == 0) {
4815 ap->state = ANEG_STATE_AN_ENABLE;
4816 }
4817 break;
4818
4819 case ANEG_STATE_COMPLETE_ACK_INIT:
4820 if (ap->rxconfig & ANEG_CFG_INVAL) {
4821 ret = ANEG_FAILED;
4822 break;
4823 }
4824 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4825 MR_LP_ADV_HALF_DUPLEX |
4826 MR_LP_ADV_SYM_PAUSE |
4827 MR_LP_ADV_ASYM_PAUSE |
4828 MR_LP_ADV_REMOTE_FAULT1 |
4829 MR_LP_ADV_REMOTE_FAULT2 |
4830 MR_LP_ADV_NEXT_PAGE |
4831 MR_TOGGLE_RX |
4832 MR_NP_RX);
4833 if (ap->rxconfig & ANEG_CFG_FD)
4834 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4835 if (ap->rxconfig & ANEG_CFG_HD)
4836 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4837 if (ap->rxconfig & ANEG_CFG_PS1)
4838 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4839 if (ap->rxconfig & ANEG_CFG_PS2)
4840 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4841 if (ap->rxconfig & ANEG_CFG_RF1)
4842 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4843 if (ap->rxconfig & ANEG_CFG_RF2)
4844 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4845 if (ap->rxconfig & ANEG_CFG_NP)
4846 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4847
4848 ap->link_time = ap->cur_time;
4849
4850 ap->flags ^= (MR_TOGGLE_TX);
4851 if (ap->rxconfig & 0x0008)
4852 ap->flags |= MR_TOGGLE_RX;
4853 if (ap->rxconfig & ANEG_CFG_NP)
4854 ap->flags |= MR_NP_RX;
4855 ap->flags |= MR_PAGE_RX;
4856
4857 ap->state = ANEG_STATE_COMPLETE_ACK;
4858 ret = ANEG_TIMER_ENAB;
4859 break;
4860
4861 case ANEG_STATE_COMPLETE_ACK:
4862 if (ap->ability_match != 0 &&
4863 ap->rxconfig == 0) {
4864 ap->state = ANEG_STATE_AN_ENABLE;
4865 break;
4866 }
4867 delta = ap->cur_time - ap->link_time;
4868 if (delta > ANEG_STATE_SETTLE_TIME) {
4869 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4870 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4871 } else {
4872 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4873 !(ap->flags & MR_NP_RX)) {
4874 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4875 } else {
4876 ret = ANEG_FAILED;
4877 }
4878 }
4879 }
4880 break;
4881
4882 case ANEG_STATE_IDLE_DETECT_INIT:
4883 ap->link_time = ap->cur_time;
4884 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4885 tw32_f(MAC_MODE, tp->mac_mode);
4886 udelay(40);
4887
4888 ap->state = ANEG_STATE_IDLE_DETECT;
4889 ret = ANEG_TIMER_ENAB;
4890 break;
4891
4892 case ANEG_STATE_IDLE_DETECT:
4893 if (ap->ability_match != 0 &&
4894 ap->rxconfig == 0) {
4895 ap->state = ANEG_STATE_AN_ENABLE;
4896 break;
4897 }
4898 delta = ap->cur_time - ap->link_time;
4899 if (delta > ANEG_STATE_SETTLE_TIME) {
4900 /* XXX another gem from the Broadcom driver :( */
4901 ap->state = ANEG_STATE_LINK_OK;
4902 }
4903 break;
4904
4905 case ANEG_STATE_LINK_OK:
4906 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4907 ret = ANEG_DONE;
4908 break;
4909
4910 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4911 /* ??? unimplemented */
4912 break;
4913
4914 case ANEG_STATE_NEXT_PAGE_WAIT:
4915 /* ??? unimplemented */
4916 break;
4917
4918 default:
4919 ret = ANEG_FAILED;
4920 break;
4921 }
4922
4923 return ret;
4924 }
4925
4926 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4927 {
4928 int res = 0;
4929 struct tg3_fiber_aneginfo aninfo;
4930 int status = ANEG_FAILED;
4931 unsigned int tick;
4932 u32 tmp;
4933
4934 tw32_f(MAC_TX_AUTO_NEG, 0);
4935
4936 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4937 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4938 udelay(40);
4939
4940 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4941 udelay(40);
4942
4943 memset(&aninfo, 0, sizeof(aninfo));
4944 aninfo.flags |= MR_AN_ENABLE;
4945 aninfo.state = ANEG_STATE_UNKNOWN;
4946 aninfo.cur_time = 0;
4947 tick = 0;
4948 while (++tick < 195000) {
4949 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4950 if (status == ANEG_DONE || status == ANEG_FAILED)
4951 break;
4952
4953 udelay(1);
4954 }
4955
4956 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4957 tw32_f(MAC_MODE, tp->mac_mode);
4958 udelay(40);
4959
4960 *txflags = aninfo.txconfig;
4961 *rxflags = aninfo.flags;
4962
4963 if (status == ANEG_DONE &&
4964 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4965 MR_LP_ADV_FULL_DUPLEX)))
4966 res = 1;
4967
4968 return res;
4969 }
4970
4971 static void tg3_init_bcm8002(struct tg3 *tp)
4972 {
4973 u32 mac_status = tr32(MAC_STATUS);
4974 int i;
4975
4976 	/* Reset when initializing for the first time or when we have a link. */
4977 if (tg3_flag(tp, INIT_COMPLETE) &&
4978 !(mac_status & MAC_STATUS_PCS_SYNCED))
4979 return;
4980
4981 /* Set PLL lock range. */
4982 tg3_writephy(tp, 0x16, 0x8007);
4983
4984 /* SW reset */
4985 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4986
4987 /* Wait for reset to complete. */
4988 /* XXX schedule_timeout() ... */
4989 for (i = 0; i < 500; i++)
4990 udelay(10);
4991
4992 /* Config mode; select PMA/Ch 1 regs. */
4993 tg3_writephy(tp, 0x10, 0x8411);
4994
4995 /* Enable auto-lock and comdet, select txclk for tx. */
4996 tg3_writephy(tp, 0x11, 0x0a10);
4997
4998 tg3_writephy(tp, 0x18, 0x00a0);
4999 tg3_writephy(tp, 0x16, 0x41ff);
5000
5001 /* Assert and deassert POR. */
5002 tg3_writephy(tp, 0x13, 0x0400);
5003 udelay(40);
5004 tg3_writephy(tp, 0x13, 0x0000);
5005
5006 tg3_writephy(tp, 0x11, 0x0a50);
5007 udelay(40);
5008 tg3_writephy(tp, 0x11, 0x0a10);
5009
5010 /* Wait for signal to stabilize */
5011 /* XXX schedule_timeout() ... */
5012 for (i = 0; i < 15000; i++)
5013 udelay(10);
5014
5015 /* Deselect the channel register so we can read the PHYID
5016 * later.
5017 */
5018 tg3_writephy(tp, 0x10, 0x8011);
5019 }
5020
5021 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5022 {
5023 u16 flowctrl;
5024 u32 sg_dig_ctrl, sg_dig_status;
5025 u32 serdes_cfg, expected_sg_dig_ctrl;
5026 int workaround, port_a;
5027 int current_link_up;
5028
5029 serdes_cfg = 0;
5030 expected_sg_dig_ctrl = 0;
5031 workaround = 0;
5032 port_a = 1;
5033 current_link_up = 0;
5034
5035 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5036 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5037 workaround = 1;
5038 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5039 port_a = 0;
5040
5041 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5042 /* preserve bits 20-23 for voltage regulator */
5043 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
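		/* (0x00f06fff selects bits 23:20, 14:13 and 11:0, matching
		 * the two comments above.)
		 */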
5044 }
5045
5046 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5047
5048 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5049 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5050 if (workaround) {
5051 u32 val = serdes_cfg;
5052
5053 if (port_a)
5054 val |= 0xc010000;
5055 else
5056 val |= 0x4010000;
5057 tw32_f(MAC_SERDES_CFG, val);
5058 }
5059
5060 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5061 }
5062 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5063 tg3_setup_flow_control(tp, 0, 0);
5064 current_link_up = 1;
5065 }
5066 goto out;
5067 }
5068
5069 /* Want auto-negotiation. */
5070 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5071
5072 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5073 if (flowctrl & ADVERTISE_1000XPAUSE)
5074 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5075 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5076 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5077
5078 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5079 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5080 tp->serdes_counter &&
5081 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5082 MAC_STATUS_RCVD_CFG)) ==
5083 MAC_STATUS_PCS_SYNCED)) {
5084 tp->serdes_counter--;
5085 current_link_up = 1;
5086 goto out;
5087 }
5088 restart_autoneg:
5089 if (workaround)
5090 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5091 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5092 udelay(5);
5093 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5094
5095 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5096 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5097 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5098 MAC_STATUS_SIGNAL_DET)) {
5099 sg_dig_status = tr32(SG_DIG_STATUS);
5100 mac_status = tr32(MAC_STATUS);
5101
5102 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5103 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5104 u32 local_adv = 0, remote_adv = 0;
5105
5106 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5107 local_adv |= ADVERTISE_1000XPAUSE;
5108 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5109 local_adv |= ADVERTISE_1000XPSE_ASYM;
5110
5111 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5112 remote_adv |= LPA_1000XPAUSE;
5113 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5114 remote_adv |= LPA_1000XPAUSE_ASYM;
5115
5116 tp->link_config.rmt_adv =
5117 mii_adv_to_ethtool_adv_x(remote_adv);
5118
5119 tg3_setup_flow_control(tp, local_adv, remote_adv);
5120 current_link_up = 1;
5121 tp->serdes_counter = 0;
5122 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5123 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5124 if (tp->serdes_counter)
5125 tp->serdes_counter--;
5126 else {
5127 if (workaround) {
5128 u32 val = serdes_cfg;
5129
5130 if (port_a)
5131 val |= 0xc010000;
5132 else
5133 val |= 0x4010000;
5134
5135 tw32_f(MAC_SERDES_CFG, val);
5136 }
5137
5138 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5139 udelay(40);
5140
5141 				/* Link parallel detection: link is up only
5142 				 * if we have PCS_SYNC and are not receiving
5143 				 * config code words. */
5144 mac_status = tr32(MAC_STATUS);
5145 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5146 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5147 tg3_setup_flow_control(tp, 0, 0);
5148 current_link_up = 1;
5149 tp->phy_flags |=
5150 TG3_PHYFLG_PARALLEL_DETECT;
5151 tp->serdes_counter =
5152 SERDES_PARALLEL_DET_TIMEOUT;
5153 } else
5154 goto restart_autoneg;
5155 }
5156 }
5157 } else {
5158 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5159 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5160 }
5161
5162 out:
5163 return current_link_up;
5164 }
5165
5166 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5167 {
5168 int current_link_up = 0;
5169
5170 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5171 goto out;
5172
5173 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5174 u32 txflags, rxflags;
5175 int i;
5176
5177 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5178 u32 local_adv = 0, remote_adv = 0;
5179
5180 if (txflags & ANEG_CFG_PS1)
5181 local_adv |= ADVERTISE_1000XPAUSE;
5182 if (txflags & ANEG_CFG_PS2)
5183 local_adv |= ADVERTISE_1000XPSE_ASYM;
5184
5185 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5186 remote_adv |= LPA_1000XPAUSE;
5187 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5188 remote_adv |= LPA_1000XPAUSE_ASYM;
5189
5190 tp->link_config.rmt_adv =
5191 mii_adv_to_ethtool_adv_x(remote_adv);
5192
5193 tg3_setup_flow_control(tp, local_adv, remote_adv);
5194
5195 current_link_up = 1;
5196 }
5197 for (i = 0; i < 30; i++) {
5198 udelay(20);
5199 tw32_f(MAC_STATUS,
5200 (MAC_STATUS_SYNC_CHANGED |
5201 MAC_STATUS_CFG_CHANGED));
5202 udelay(40);
5203 if ((tr32(MAC_STATUS) &
5204 (MAC_STATUS_SYNC_CHANGED |
5205 MAC_STATUS_CFG_CHANGED)) == 0)
5206 break;
5207 }
5208
5209 mac_status = tr32(MAC_STATUS);
5210 if (current_link_up == 0 &&
5211 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5212 !(mac_status & MAC_STATUS_RCVD_CFG))
5213 current_link_up = 1;
5214 } else {
5215 tg3_setup_flow_control(tp, 0, 0);
5216
5217 /* Forcing 1000FD link up. */
5218 current_link_up = 1;
5219
5220 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5221 udelay(40);
5222
5223 tw32_f(MAC_MODE, tp->mac_mode);
5224 udelay(40);
5225 }
5226
5227 out:
5228 return current_link_up;
5229 }
5230
5231 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5232 {
5233 u32 orig_pause_cfg;
5234 u16 orig_active_speed;
5235 u8 orig_active_duplex;
5236 u32 mac_status;
5237 int current_link_up;
5238 int i;
5239
5240 orig_pause_cfg = tp->link_config.active_flowctrl;
5241 orig_active_speed = tp->link_config.active_speed;
5242 orig_active_duplex = tp->link_config.active_duplex;
5243
5244 if (!tg3_flag(tp, HW_AUTONEG) &&
5245 tp->link_up &&
5246 tg3_flag(tp, INIT_COMPLETE)) {
5247 mac_status = tr32(MAC_STATUS);
5248 mac_status &= (MAC_STATUS_PCS_SYNCED |
5249 MAC_STATUS_SIGNAL_DET |
5250 MAC_STATUS_CFG_CHANGED |
5251 MAC_STATUS_RCVD_CFG);
5252 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5253 MAC_STATUS_SIGNAL_DET)) {
5254 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5255 MAC_STATUS_CFG_CHANGED));
5256 return 0;
5257 }
5258 }
5259
5260 tw32_f(MAC_TX_AUTO_NEG, 0);
5261
5262 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5263 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5264 tw32_f(MAC_MODE, tp->mac_mode);
5265 udelay(40);
5266
5267 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5268 tg3_init_bcm8002(tp);
5269
5270 /* Enable link change event even when serdes polling. */
5271 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5272 udelay(40);
5273
5274 current_link_up = 0;
5275 tp->link_config.rmt_adv = 0;
5276 mac_status = tr32(MAC_STATUS);
5277
5278 if (tg3_flag(tp, HW_AUTONEG))
5279 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5280 else
5281 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5282
5283 tp->napi[0].hw_status->status =
5284 (SD_STATUS_UPDATED |
5285 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5286
5287 for (i = 0; i < 100; i++) {
5288 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5289 MAC_STATUS_CFG_CHANGED));
5290 udelay(5);
5291 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5292 MAC_STATUS_CFG_CHANGED |
5293 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5294 break;
5295 }
5296
5297 mac_status = tr32(MAC_STATUS);
5298 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5299 current_link_up = 0;
5300 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5301 tp->serdes_counter == 0) {
5302 tw32_f(MAC_MODE, (tp->mac_mode |
5303 MAC_MODE_SEND_CONFIGS));
5304 udelay(1);
5305 tw32_f(MAC_MODE, tp->mac_mode);
5306 }
5307 }
5308
5309 if (current_link_up == 1) {
5310 tp->link_config.active_speed = SPEED_1000;
5311 tp->link_config.active_duplex = DUPLEX_FULL;
5312 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5313 LED_CTRL_LNKLED_OVERRIDE |
5314 LED_CTRL_1000MBPS_ON));
5315 } else {
5316 tp->link_config.active_speed = SPEED_UNKNOWN;
5317 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5318 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5319 LED_CTRL_LNKLED_OVERRIDE |
5320 LED_CTRL_TRAFFIC_OVERRIDE));
5321 }
5322
5323 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5324 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5325 if (orig_pause_cfg != now_pause_cfg ||
5326 orig_active_speed != tp->link_config.active_speed ||
5327 orig_active_duplex != tp->link_config.active_duplex)
5328 tg3_link_report(tp);
5329 }
5330
5331 return 0;
5332 }
5333
5334 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5335 {
5336 int current_link_up, err = 0;
5337 u32 bmsr, bmcr;
5338 u16 current_speed;
5339 u8 current_duplex;
5340 u32 local_adv, remote_adv;
5341
5342 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5343 tw32_f(MAC_MODE, tp->mac_mode);
5344 udelay(40);
5345
5346 tw32(MAC_EVENT, 0);
5347
5348 tw32_f(MAC_STATUS,
5349 (MAC_STATUS_SYNC_CHANGED |
5350 MAC_STATUS_CFG_CHANGED |
5351 MAC_STATUS_MI_COMPLETION |
5352 MAC_STATUS_LNKSTATE_CHANGED));
5353 udelay(40);
5354
5355 if (force_reset)
5356 tg3_phy_reset(tp);
5357
5358 current_link_up = 0;
5359 current_speed = SPEED_UNKNOWN;
5360 current_duplex = DUPLEX_UNKNOWN;
5361 tp->link_config.rmt_adv = 0;
5362
5363 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5364 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5365 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5366 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5367 bmsr |= BMSR_LSTATUS;
5368 else
5369 bmsr &= ~BMSR_LSTATUS;
5370 }
5371
5372 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5373
5374 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5375 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5376 /* do nothing, just check for link up at the end */
5377 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5378 u32 adv, newadv;
5379
5380 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5381 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5382 ADVERTISE_1000XPAUSE |
5383 ADVERTISE_1000XPSE_ASYM |
5384 ADVERTISE_SLCT);
5385
5386 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5387 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5388
5389 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5390 tg3_writephy(tp, MII_ADVERTISE, newadv);
5391 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5392 tg3_writephy(tp, MII_BMCR, bmcr);
5393
5394 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5395 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5396 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5397
5398 return err;
5399 }
5400 } else {
5401 u32 new_bmcr;
5402
5403 bmcr &= ~BMCR_SPEED1000;
5404 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5405
5406 if (tp->link_config.duplex == DUPLEX_FULL)
5407 new_bmcr |= BMCR_FULLDPLX;
5408
5409 if (new_bmcr != bmcr) {
5410 /* BMCR_SPEED1000 is a reserved bit that needs
5411 * to be set on write.
5412 */
5413 new_bmcr |= BMCR_SPEED1000;
5414
5415 /* Force a linkdown */
5416 if (tp->link_up) {
5417 u32 adv;
5418
5419 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5420 adv &= ~(ADVERTISE_1000XFULL |
5421 ADVERTISE_1000XHALF |
5422 ADVERTISE_SLCT);
5423 tg3_writephy(tp, MII_ADVERTISE, adv);
5424 tg3_writephy(tp, MII_BMCR, bmcr |
5425 BMCR_ANRESTART |
5426 BMCR_ANENABLE);
5427 udelay(10);
5428 tg3_carrier_off(tp);
5429 }
5430 tg3_writephy(tp, MII_BMCR, new_bmcr);
5431 bmcr = new_bmcr;
5432 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5433 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5434 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5435 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5436 bmsr |= BMSR_LSTATUS;
5437 else
5438 bmsr &= ~BMSR_LSTATUS;
5439 }
5440 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5441 }
5442 }
5443
5444 if (bmsr & BMSR_LSTATUS) {
5445 current_speed = SPEED_1000;
5446 current_link_up = 1;
5447 if (bmcr & BMCR_FULLDPLX)
5448 current_duplex = DUPLEX_FULL;
5449 else
5450 current_duplex = DUPLEX_HALF;
5451
5452 local_adv = 0;
5453 remote_adv = 0;
5454
5455 if (bmcr & BMCR_ANENABLE) {
5456 u32 common;
5457
5458 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5459 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5460 common = local_adv & remote_adv;
5461 if (common & (ADVERTISE_1000XHALF |
5462 ADVERTISE_1000XFULL)) {
5463 if (common & ADVERTISE_1000XFULL)
5464 current_duplex = DUPLEX_FULL;
5465 else
5466 current_duplex = DUPLEX_HALF;
5467
5468 tp->link_config.rmt_adv =
5469 mii_adv_to_ethtool_adv_x(remote_adv);
5470 } else if (!tg3_flag(tp, 5780_CLASS)) {
5471 /* Link is up via parallel detect */
5472 } else {
5473 current_link_up = 0;
5474 }
5475 }
5476 }
5477
5478 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5479 tg3_setup_flow_control(tp, local_adv, remote_adv);
5480
5481 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5482 if (tp->link_config.active_duplex == DUPLEX_HALF)
5483 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5484
5485 tw32_f(MAC_MODE, tp->mac_mode);
5486 udelay(40);
5487
5488 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5489
5490 tp->link_config.active_speed = current_speed;
5491 tp->link_config.active_duplex = current_duplex;
5492
5493 tg3_test_and_report_link_chg(tp, current_link_up);
5494 return err;
5495 }
5496
5497 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5498 {
5499 if (tp->serdes_counter) {
5500 /* Give autoneg time to complete. */
5501 tp->serdes_counter--;
5502 return;
5503 }
5504
5505 if (!tp->link_up &&
5506 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5507 u32 bmcr;
5508
5509 tg3_readphy(tp, MII_BMCR, &bmcr);
5510 if (bmcr & BMCR_ANENABLE) {
5511 u32 phy1, phy2;
5512
5513 /* Select shadow register 0x1f */
5514 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5515 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5516
5517 /* Select expansion interrupt status register */
5518 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5519 MII_TG3_DSP_EXP1_INT_STAT);
5520 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5521 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5522
5523 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5524 				/* We have signal detect and are not receiving
5525 				 * config code words; link is up by parallel
5526 * detection.
5527 */
5528
5529 bmcr &= ~BMCR_ANENABLE;
5530 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5531 tg3_writephy(tp, MII_BMCR, bmcr);
5532 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5533 }
5534 }
5535 } else if (tp->link_up &&
5536 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5537 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5538 u32 phy2;
5539
5540 /* Select expansion interrupt status register */
5541 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5542 MII_TG3_DSP_EXP1_INT_STAT);
5543 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5544 if (phy2 & 0x20) {
5545 u32 bmcr;
5546
5547 /* Config code words received, turn on autoneg. */
5548 tg3_readphy(tp, MII_BMCR, &bmcr);
5549 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5550
5551 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5552
5553 }
5554 }
5555 }
5556
5557 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5558 {
5559 u32 val;
5560 int err;
5561
5562 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5563 err = tg3_setup_fiber_phy(tp, force_reset);
5564 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5565 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5566 else
5567 err = tg3_setup_copper_phy(tp, force_reset);
5568
5569 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5570 u32 scale;
5571
5572 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5573 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5574 scale = 65;
5575 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5576 scale = 6;
5577 else
5578 scale = 12;
5579
5580 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5581 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5582 tw32(GRC_MISC_CFG, val);
5583 }
5584
5585 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5586 (6 << TX_LENGTHS_IPG_SHIFT);
5587 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5588 tg3_asic_rev(tp) == ASIC_REV_5762)
5589 val |= tr32(MAC_TX_LENGTHS) &
5590 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5591 TX_LENGTHS_CNT_DWN_VAL_MSK);
5592
5593 if (tp->link_config.active_speed == SPEED_1000 &&
5594 tp->link_config.active_duplex == DUPLEX_HALF)
5595 tw32(MAC_TX_LENGTHS, val |
5596 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5597 else
5598 tw32(MAC_TX_LENGTHS, val |
5599 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5600
5601 if (!tg3_flag(tp, 5705_PLUS)) {
5602 if (tp->link_up) {
5603 tw32(HOSTCC_STAT_COAL_TICKS,
5604 tp->coal.stats_block_coalesce_usecs);
5605 } else {
5606 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5607 }
5608 }
5609
5610 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5611 val = tr32(PCIE_PWR_MGMT_THRESH);
5612 if (!tp->link_up)
5613 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5614 tp->pwrmgmt_thresh;
5615 else
5616 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5617 tw32(PCIE_PWR_MGMT_THRESH, val);
5618 }
5619
5620 return err;
5621 }
5622
5623 /* tp->lock must be held */
5624 static u64 tg3_refclk_read(struct tg3 *tp)
5625 {
5626 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5627 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5628 }
5629
5630 /* tp->lock must be held */
5631 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5632 {
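	/* Stop the clock so the two 32-bit halves below update atomically,
	 * then resume counting from the new value.
	 */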
5633 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5634 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5635 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5636 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5637 }
5638
5639 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5640 static inline void tg3_full_unlock(struct tg3 *tp);
5641 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5642 {
5643 struct tg3 *tp = netdev_priv(dev);
5644
5645 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5646 SOF_TIMESTAMPING_RX_SOFTWARE |
5647 SOF_TIMESTAMPING_SOFTWARE |
5648 SOF_TIMESTAMPING_TX_HARDWARE |
5649 SOF_TIMESTAMPING_RX_HARDWARE |
5650 SOF_TIMESTAMPING_RAW_HARDWARE;
5651
5652 if (tp->ptp_clock)
5653 info->phc_index = ptp_clock_index(tp->ptp_clock);
5654 else
5655 info->phc_index = -1;
5656
5657 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5658
5659 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5660 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5661 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5662 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5663 return 0;
5664 }
5665
5666 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5667 {
5668 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5669 bool neg_adj = false;
5670 u32 correction = 0;
5671
5672 if (ppb < 0) {
5673 neg_adj = true;
5674 ppb = -ppb;
5675 }
5676
5677 	/* Frequency adjustment is performed in hardware with a 24-bit
5678 	 * accumulator and a programmable correction value. On each clock, the
5679 * correction value gets added to the accumulator and when it
5680 * overflows, the time counter is incremented/decremented.
5681 *
5682 * So conversion from ppb to correction value is
5683 * ppb * (1 << 24) / 1000000000
5684 */
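	/* Worked example (illustrative): ppb = 1000 (1 ppm) yields
	 * correction = 1000 * 16777216 / 1000000000 = 16 after truncation,
	 * i.e. the 24-bit accumulator overflows about once every 2^20 clocks.
	 */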
5685 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5686 TG3_EAV_REF_CLK_CORRECT_MASK;
5687
5688 tg3_full_lock(tp, 0);
5689
5690 if (correction)
5691 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5692 TG3_EAV_REF_CLK_CORRECT_EN |
5693 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5694 else
5695 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5696
5697 tg3_full_unlock(tp);
5698
5699 return 0;
5700 }
5701
5702 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5703 {
5704 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5705
5706 tg3_full_lock(tp, 0);
5707 tp->ptp_adjust += delta;
5708 tg3_full_unlock(tp);
5709
5710 return 0;
5711 }
5712
5713 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5714 {
5715 u64 ns;
5716 u32 remainder;
5717 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5718
5719 tg3_full_lock(tp, 0);
5720 ns = tg3_refclk_read(tp);
5721 ns += tp->ptp_adjust;
5722 tg3_full_unlock(tp);
5723
5724 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5725 ts->tv_nsec = remainder;
5726
5727 return 0;
5728 }
5729
5730 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5731 const struct timespec *ts)
5732 {
5733 u64 ns;
5734 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5735
5736 ns = timespec_to_ns(ts);
5737
5738 tg3_full_lock(tp, 0);
5739 tg3_refclk_write(tp, ns);
5740 tp->ptp_adjust = 0;
5741 tg3_full_unlock(tp);
5742
5743 return 0;
5744 }
5745
5746 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5747 struct ptp_clock_request *rq, int on)
5748 {
5749 return -EOPNOTSUPP;
5750 }
5751
5752 static const struct ptp_clock_info tg3_ptp_caps = {
5753 .owner = THIS_MODULE,
5754 .name = "tg3 clock",
5755 .max_adj = 250000000,
5756 .n_alarm = 0,
5757 .n_ext_ts = 0,
5758 .n_per_out = 0,
5759 .pps = 0,
5760 .adjfreq = tg3_ptp_adjfreq,
5761 .adjtime = tg3_ptp_adjtime,
5762 .gettime = tg3_ptp_gettime,
5763 .settime = tg3_ptp_settime,
5764 .enable = tg3_ptp_enable,
5765 };
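/* Note: the max_adj of 250000000 ppb above corresponds, via the formula
 * in tg3_ptp_adjfreq(), to a correction value of 2^22, one quarter of
 * the 24-bit accumulator range.
 */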
5766
5767 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5768 struct skb_shared_hwtstamps *timestamp)
5769 {
5770 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5771 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5772 tp->ptp_adjust);
5773 }
5774
5775 /* tp->lock must be held */
5776 static void tg3_ptp_init(struct tg3 *tp)
5777 {
5778 if (!tg3_flag(tp, PTP_CAPABLE))
5779 return;
5780
5781 /* Initialize the hardware clock to the system time. */
5782 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5783 tp->ptp_adjust = 0;
5784 tp->ptp_info = tg3_ptp_caps;
5785 }
5786
5787 /* tp->lock must be held */
5788 static void tg3_ptp_resume(struct tg3 *tp)
5789 {
5790 if (!tg3_flag(tp, PTP_CAPABLE))
5791 return;
5792
5793 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5794 tp->ptp_adjust = 0;
5795 }
5796
5797 static void tg3_ptp_fini(struct tg3 *tp)
5798 {
5799 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5800 return;
5801
5802 ptp_clock_unregister(tp->ptp_clock);
5803 tp->ptp_clock = NULL;
5804 tp->ptp_adjust = 0;
5805 }
5806
5807 static inline int tg3_irq_sync(struct tg3 *tp)
5808 {
5809 return tp->irq_sync;
5810 }
5811
5812 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5813 {
5814 int i;
5815
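	/* Advance dst by the register offset so each value lands at the
	 * buffer index matching its MMIO offset; the dump loop in
	 * tg3_dump_state() can then print "i * 4" as the register address.
	 */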
5816 dst = (u32 *)((u8 *)dst + off);
5817 for (i = 0; i < len; i += sizeof(u32))
5818 *dst++ = tr32(off + i);
5819 }
5820
5821 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5822 {
5823 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5824 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5825 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5826 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5827 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5828 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5829 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5830 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5831 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5832 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5833 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5834 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5835 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5836 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5837 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5838 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5839 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5840 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5841 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5842
5843 if (tg3_flag(tp, SUPPORT_MSIX))
5844 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5845
5846 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5847 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5848 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5849 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5850 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5851 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5852 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5853 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5854
5855 if (!tg3_flag(tp, 5705_PLUS)) {
5856 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5857 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5858 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5859 }
5860
5861 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5862 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5863 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5864 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5865 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5866
5867 if (tg3_flag(tp, NVRAM))
5868 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5869 }
5870
5871 static void tg3_dump_state(struct tg3 *tp)
5872 {
5873 int i;
5874 u32 *regs;
5875
5876 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5877 if (!regs)
5878 return;
5879
5880 if (tg3_flag(tp, PCI_EXPRESS)) {
5881 /* Read up to but not including private PCI registers */
5882 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5883 regs[i / sizeof(u32)] = tr32(i);
5884 } else
5885 tg3_dump_legacy_regs(tp, regs);
5886
5887 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5888 if (!regs[i + 0] && !regs[i + 1] &&
5889 !regs[i + 2] && !regs[i + 3])
5890 continue;
5891
5892 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5893 i * 4,
5894 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5895 }
5896
5897 kfree(regs);
5898
5899 for (i = 0; i < tp->irq_cnt; i++) {
5900 struct tg3_napi *tnapi = &tp->napi[i];
5901
5902 /* SW status block */
5903 netdev_err(tp->dev,
5904 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5905 i,
5906 tnapi->hw_status->status,
5907 tnapi->hw_status->status_tag,
5908 tnapi->hw_status->rx_jumbo_consumer,
5909 tnapi->hw_status->rx_consumer,
5910 tnapi->hw_status->rx_mini_consumer,
5911 tnapi->hw_status->idx[0].rx_producer,
5912 tnapi->hw_status->idx[0].tx_consumer);
5913
5914 netdev_err(tp->dev,
5915 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5916 i,
5917 tnapi->last_tag, tnapi->last_irq_tag,
5918 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5919 tnapi->rx_rcb_ptr,
5920 tnapi->prodring.rx_std_prod_idx,
5921 tnapi->prodring.rx_std_cons_idx,
5922 tnapi->prodring.rx_jmb_prod_idx,
5923 tnapi->prodring.rx_jmb_cons_idx);
5924 }
5925 }
5926
5927 /* This is called whenever we suspect that the system chipset is re-
5928 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5929 * is bogus tx completions. We try to recover by setting the
5930 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5931 * in the workqueue.
5932 */
5933 static void tg3_tx_recover(struct tg3 *tp)
5934 {
5935 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5936 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5937
5938 netdev_warn(tp->dev,
5939 "The system may be re-ordering memory-mapped I/O "
5940 "cycles to the network device, attempting to recover. "
5941 "Please report the problem to the driver maintainer "
5942 "and include system chipset information.\n");
5943
5944 spin_lock(&tp->lock);
5945 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5946 spin_unlock(&tp->lock);
5947 }
5948
5949 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5950 {
5951 /* Tell compiler to fetch tx indices from memory. */
5952 barrier();
5953 return tnapi->tx_pending -
5954 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5955 }
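/* Illustrative example of the ring arithmetic above (assuming
 * TG3_TX_RING_SIZE is 512): tx_prod = 5 and tx_cons = 510 give
 * (5 - 510) & 511 = 7 descriptors in flight, so the masked subtraction
 * handles producer wraparound without a branch.
 */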
5956
5957 /* Tigon3 never reports partial packet sends. So we do not
5958 * need special logic to handle SKBs that have not had all
5959 * of their frags sent yet, like SunGEM does.
5960 */
5961 static void tg3_tx(struct tg3_napi *tnapi)
5962 {
5963 struct tg3 *tp = tnapi->tp;
5964 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5965 u32 sw_idx = tnapi->tx_cons;
5966 struct netdev_queue *txq;
5967 int index = tnapi - tp->napi;
5968 unsigned int pkts_compl = 0, bytes_compl = 0;
5969
5970 if (tg3_flag(tp, ENABLE_TSS))
5971 index--;
5972
5973 txq = netdev_get_tx_queue(tp->dev, index);
5974
5975 while (sw_idx != hw_idx) {
5976 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5977 struct sk_buff *skb = ri->skb;
5978 int i, tx_bug = 0;
5979
5980 if (unlikely(skb == NULL)) {
5981 tg3_tx_recover(tp);
5982 return;
5983 }
5984
5985 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5986 struct skb_shared_hwtstamps timestamp;
5987 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5988 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5989
5990 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5991
5992 skb_tstamp_tx(skb, &timestamp);
5993 }
5994
5995 pci_unmap_single(tp->pdev,
5996 dma_unmap_addr(ri, mapping),
5997 skb_headlen(skb),
5998 PCI_DMA_TODEVICE);
5999
6000 ri->skb = NULL;
6001
6002 while (ri->fragmented) {
6003 ri->fragmented = false;
6004 sw_idx = NEXT_TX(sw_idx);
6005 ri = &tnapi->tx_buffers[sw_idx];
6006 }
6007
6008 sw_idx = NEXT_TX(sw_idx);
6009
6010 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6011 ri = &tnapi->tx_buffers[sw_idx];
6012 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6013 tx_bug = 1;
6014
6015 pci_unmap_page(tp->pdev,
6016 dma_unmap_addr(ri, mapping),
6017 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6018 PCI_DMA_TODEVICE);
6019
6020 while (ri->fragmented) {
6021 ri->fragmented = false;
6022 sw_idx = NEXT_TX(sw_idx);
6023 ri = &tnapi->tx_buffers[sw_idx];
6024 }
6025
6026 sw_idx = NEXT_TX(sw_idx);
6027 }
6028
6029 pkts_compl++;
6030 bytes_compl += skb->len;
6031
6032 dev_kfree_skb(skb);
6033
6034 if (unlikely(tx_bug)) {
6035 tg3_tx_recover(tp);
6036 return;
6037 }
6038 }
6039
6040 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6041
6042 tnapi->tx_cons = sw_idx;
6043
6044 /* Need to make the tx_cons update visible to tg3_start_xmit()
6045 * before checking for netif_queue_stopped(). Without the
6046 * memory barrier, there is a small possibility that tg3_start_xmit()
6047 * will miss it and cause the queue to be stopped forever.
6048 */
6049 smp_mb();
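	/* This smp_mb() pairs with the one tg3_start_xmit() issues after
	 * netif_tx_stop_queue(); whichever side runs second is then
	 * guaranteed to see the other's update, so the queue cannot stay
	 * stopped with completed descriptors already reclaimed.
	 */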
6050
6051 if (unlikely(netif_tx_queue_stopped(txq) &&
6052 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6053 __netif_tx_lock(txq, smp_processor_id());
6054 if (netif_tx_queue_stopped(txq) &&
6055 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6056 netif_tx_wake_queue(txq);
6057 __netif_tx_unlock(txq);
6058 }
6059 }
6060
6061 static void tg3_frag_free(bool is_frag, void *data)
6062 {
6063 if (is_frag)
6064 put_page(virt_to_head_page(data));
6065 else
6066 kfree(data);
6067 }
6068
6069 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6070 {
6071 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6072 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6073
6074 if (!ri->data)
6075 return;
6076
6077 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6078 map_sz, PCI_DMA_FROMDEVICE);
6079 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6080 ri->data = NULL;
6081 }
6082
6083
6084 /* Returns size of the rx data buffer allocated or < 0 on error.
6085 *
6086 * We only need to fill in the address because the other members
6087  * of the RX descriptor are invariant; see tg3_init_rings.
6088 *
6089  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6090 * posting buffers we only dirty the first cache line of the RX
6091 * descriptor (containing the address). Whereas for the RX status
6092 * buffers the cpu only reads the last cacheline of the RX descriptor
6093 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6094 */
6095 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6096 u32 opaque_key, u32 dest_idx_unmasked,
6097 unsigned int *frag_size)
6098 {
6099 struct tg3_rx_buffer_desc *desc;
6100 struct ring_info *map;
6101 u8 *data;
6102 dma_addr_t mapping;
6103 int skb_size, data_size, dest_idx;
6104
6105 switch (opaque_key) {
6106 case RXD_OPAQUE_RING_STD:
6107 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6108 desc = &tpr->rx_std[dest_idx];
6109 map = &tpr->rx_std_buffers[dest_idx];
6110 data_size = tp->rx_pkt_map_sz;
6111 break;
6112
6113 case RXD_OPAQUE_RING_JUMBO:
6114 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6115 desc = &tpr->rx_jmb[dest_idx].std;
6116 map = &tpr->rx_jmb_buffers[dest_idx];
6117 data_size = TG3_RX_JMB_MAP_SZ;
6118 break;
6119
6120 default:
6121 return -EINVAL;
6122 }
6123
6124 /* Do not overwrite any of the map or rp information
6125 * until we are sure we can commit to a new buffer.
6126 *
6127 * Callers depend upon this behavior and assume that
6128 * we leave everything unchanged if we fail.
6129 */
6130 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6131 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6132 if (skb_size <= PAGE_SIZE) {
6133 data = netdev_alloc_frag(skb_size);
6134 *frag_size = skb_size;
6135 } else {
6136 data = kmalloc(skb_size, GFP_ATOMIC);
6137 *frag_size = 0;
6138 }
6139 if (!data)
6140 return -ENOMEM;
6141
6142 mapping = pci_map_single(tp->pdev,
6143 data + TG3_RX_OFFSET(tp),
6144 data_size,
6145 PCI_DMA_FROMDEVICE);
6146 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6147 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6148 return -EIO;
6149 }
6150
6151 map->data = data;
6152 dma_unmap_addr_set(map, mapping, mapping);
6153
6154 desc->addr_hi = ((u64)mapping >> 32);
6155 desc->addr_lo = ((u64)mapping & 0xffffffff);
6156
6157 return data_size;
6158 }
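
/* Sizing sketch for the allocation above (illustrative, assuming a
 * 4 KiB PAGE_SIZE and a standard 1500-byte MTU): here
 *
 *	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
 *		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * fits within one page, so the buffer comes from the page-fragment
 * allocator via netdev_alloc_frag().  A jumbo buffer (TG3_RX_JMB_MAP_SZ,
 * roughly 9 KiB) exceeds PAGE_SIZE and falls back to kmalloc().
 */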
6159
6160 /* We only need to move the address over because the other
6161 * members of the RX descriptor are invariant. See notes above
6162 * tg3_alloc_rx_data for full details.
6163 */
6164 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6165 struct tg3_rx_prodring_set *dpr,
6166 u32 opaque_key, int src_idx,
6167 u32 dest_idx_unmasked)
6168 {
6169 struct tg3 *tp = tnapi->tp;
6170 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6171 struct ring_info *src_map, *dest_map;
6172 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6173 int dest_idx;
6174
6175 switch (opaque_key) {
6176 case RXD_OPAQUE_RING_STD:
6177 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6178 dest_desc = &dpr->rx_std[dest_idx];
6179 dest_map = &dpr->rx_std_buffers[dest_idx];
6180 src_desc = &spr->rx_std[src_idx];
6181 src_map = &spr->rx_std_buffers[src_idx];
6182 break;
6183
6184 case RXD_OPAQUE_RING_JUMBO:
6185 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6186 dest_desc = &dpr->rx_jmb[dest_idx].std;
6187 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6188 src_desc = &spr->rx_jmb[src_idx].std;
6189 src_map = &spr->rx_jmb_buffers[src_idx];
6190 break;
6191
6192 default:
6193 return;
6194 }
6195
6196 dest_map->data = src_map->data;
6197 dma_unmap_addr_set(dest_map, mapping,
6198 dma_unmap_addr(src_map, mapping));
6199 dest_desc->addr_hi = src_desc->addr_hi;
6200 dest_desc->addr_lo = src_desc->addr_lo;
6201
6202 	/* Ensure that the update to the data pointer happens after the
6203 	 * physical addresses have been transferred to the new BD location.
6204 	 * This pairs with the smp_rmb() calls in tg3_rx_prodring_xfer(). */
6205 smp_wmb();
6206
6207 src_map->data = NULL;
6208 }
6209
6210 /* The RX ring scheme is composed of multiple rings which post fresh
6211 * buffers to the chip, and one special ring the chip uses to report
6212 * status back to the host.
6213 *
6214 * The special ring reports the status of received packets to the
6215 * host. The chip does not write into the original descriptor the
6216 * RX buffer was obtained from. The chip simply takes the original
6217 * descriptor as provided by the host, updates the status and length
6218 * field, then writes this into the next status ring entry.
6219 *
6220 * Each ring the host uses to post buffers to the chip is described
6221  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6222 * it is first placed into the on-chip ram. When the packet's length
6223 * is known, it walks down the TG3_BDINFO entries to select the ring.
6224 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6225 * which is within the range of the new packet's length is chosen.
6226 *
6227 * The "separate ring for rx status" scheme may sound queer, but it makes
6228 * sense from a cache coherency perspective. If only the host writes
6229 * to the buffer post rings, and only the chip writes to the rx status
6230 * rings, then cache lines never move beyond shared-modified state.
6231 * If both the host and chip were to write into the same ring, cache line
6232 * eviction could occur since both entities want it in an exclusive state.
6233 */
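/* A minimal sketch of that flow, from the host's point of view:
 *
 *	host:  rx_std[prod].addr = buffer; prod++; ring the doorbell
 *	chip:  picks a ring by MAXLEN, DMAs the packet into the posted
 *	       buffer, then writes status plus the original opaque cookie
 *	       into rx_rcb[hw_idx] and advances hw_idx
 *	host:  tg3_rx() below walks rx_rcb from sw_idx to hw_idx, using
 *	       the opaque cookie to locate the buffer it posted
 */
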
6234 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6235 {
6236 struct tg3 *tp = tnapi->tp;
6237 u32 work_mask, rx_std_posted = 0;
6238 u32 std_prod_idx, jmb_prod_idx;
6239 u32 sw_idx = tnapi->rx_rcb_ptr;
6240 u16 hw_idx;
6241 int received;
6242 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6243
6244 hw_idx = *(tnapi->rx_rcb_prod_idx);
6245 /*
6246 * We need to order the read of hw_idx and the read of
6247 * the opaque cookie.
6248 */
6249 rmb();
6250 work_mask = 0;
6251 received = 0;
6252 std_prod_idx = tpr->rx_std_prod_idx;
6253 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6254 while (sw_idx != hw_idx && budget > 0) {
6255 struct ring_info *ri;
6256 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6257 unsigned int len;
6258 struct sk_buff *skb;
6259 dma_addr_t dma_addr;
6260 u32 opaque_key, desc_idx, *post_ptr;
6261 u8 *data;
6262 u64 tstamp = 0;
6263
6264 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6265 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6266 if (opaque_key == RXD_OPAQUE_RING_STD) {
6267 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6268 dma_addr = dma_unmap_addr(ri, mapping);
6269 data = ri->data;
6270 post_ptr = &std_prod_idx;
6271 rx_std_posted++;
6272 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6273 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6274 dma_addr = dma_unmap_addr(ri, mapping);
6275 data = ri->data;
6276 post_ptr = &jmb_prod_idx;
6277 } else
6278 goto next_pkt_nopost;
6279
6280 work_mask |= opaque_key;
6281
6282 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6283 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6284 drop_it:
6285 tg3_recycle_rx(tnapi, tpr, opaque_key,
6286 desc_idx, *post_ptr);
6287 drop_it_no_recycle:
6288 			/* Other statistics are kept track of by the card. */
6289 tp->rx_dropped++;
6290 goto next_pkt;
6291 }
6292
6293 prefetch(data + TG3_RX_OFFSET(tp));
6294 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6295 ETH_FCS_LEN;
6296
6297 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6298 RXD_FLAG_PTPSTAT_PTPV1 ||
6299 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6300 RXD_FLAG_PTPSTAT_PTPV2) {
6301 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6302 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6303 }
6304
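		/* Copy-break strategy (see the two branches below): a large
		 * packet hands its buffer to the stack via build_skb() and a
		 * fresh buffer is posted in its place, while a small packet
		 * is copied into a new skb so the original buffer can be
		 * recycled without a reallocation.
		 */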
6305 if (len > TG3_RX_COPY_THRESH(tp)) {
6306 int skb_size;
6307 unsigned int frag_size;
6308
6309 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6310 *post_ptr, &frag_size);
6311 if (skb_size < 0)
6312 goto drop_it;
6313
6314 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6315 PCI_DMA_FROMDEVICE);
6316
6317 skb = build_skb(data, frag_size);
6318 if (!skb) {
6319 tg3_frag_free(frag_size != 0, data);
6320 goto drop_it_no_recycle;
6321 }
6322 skb_reserve(skb, TG3_RX_OFFSET(tp));
6323 /* Ensure that the update to the data happens
6324 * after the usage of the old DMA mapping.
6325 */
6326 smp_wmb();
6327
6328 ri->data = NULL;
6329
6330 } else {
6331 tg3_recycle_rx(tnapi, tpr, opaque_key,
6332 desc_idx, *post_ptr);
6333
6334 skb = netdev_alloc_skb(tp->dev,
6335 len + TG3_RAW_IP_ALIGN);
6336 if (skb == NULL)
6337 goto drop_it_no_recycle;
6338
6339 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6340 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6341 memcpy(skb->data,
6342 data + TG3_RX_OFFSET(tp),
6343 len);
6344 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6345 }
6346
6347 skb_put(skb, len);
6348 if (tstamp)
6349 tg3_hwclock_to_timestamp(tp, tstamp,
6350 skb_hwtstamps(skb));
6351
6352 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6353 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6354 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6355 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6356 skb->ip_summed = CHECKSUM_UNNECESSARY;
6357 else
6358 skb_checksum_none_assert(skb);
6359
6360 skb->protocol = eth_type_trans(skb, tp->dev);
6361
6362 if (len > (tp->dev->mtu + ETH_HLEN) &&
6363 skb->protocol != htons(ETH_P_8021Q)) {
6364 dev_kfree_skb(skb);
6365 goto drop_it_no_recycle;
6366 }
6367
6368 if (desc->type_flags & RXD_FLAG_VLAN &&
6369 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6370 __vlan_hwaccel_put_tag(skb,
6371 desc->err_vlan & RXD_VLAN_MASK);
6372
6373 napi_gro_receive(&tnapi->napi, skb);
6374
6375 received++;
6376 budget--;
6377
6378 next_pkt:
6379 (*post_ptr)++;
6380
6381 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6382 tpr->rx_std_prod_idx = std_prod_idx &
6383 tp->rx_std_ring_mask;
6384 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6385 tpr->rx_std_prod_idx);
6386 work_mask &= ~RXD_OPAQUE_RING_STD;
6387 rx_std_posted = 0;
6388 }
6389 next_pkt_nopost:
6390 sw_idx++;
6391 sw_idx &= tp->rx_ret_ring_mask;
6392
6393 /* Refresh hw_idx to see if there is new work */
6394 if (sw_idx == hw_idx) {
6395 hw_idx = *(tnapi->rx_rcb_prod_idx);
6396 rmb();
6397 }
6398 }
6399
6400 /* ACK the status ring. */
6401 tnapi->rx_rcb_ptr = sw_idx;
6402 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6403
6404 /* Refill RX ring(s). */
6405 if (!tg3_flag(tp, ENABLE_RSS)) {
6406 /* Sync BD data before updating mailbox */
6407 wmb();
6408
6409 if (work_mask & RXD_OPAQUE_RING_STD) {
6410 tpr->rx_std_prod_idx = std_prod_idx &
6411 tp->rx_std_ring_mask;
6412 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6413 tpr->rx_std_prod_idx);
6414 }
6415 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6416 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6417 tp->rx_jmb_ring_mask;
6418 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6419 tpr->rx_jmb_prod_idx);
6420 }
6421 mmiowb();
6422 } else if (work_mask) {
6423 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6424 * updated before the producer indices can be updated.
6425 */
6426 smp_wmb();
6427
6428 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6429 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6430
6431 if (tnapi != &tp->napi[1]) {
6432 tp->rx_refill = true;
6433 napi_schedule(&tp->napi[1].napi);
6434 }
6435 }
6436
6437 return received;
6438 }
6439
6440 static void tg3_poll_link(struct tg3 *tp)
6441 {
6442 /* handle link change and other phy events */
6443 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6444 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6445
6446 if (sblk->status & SD_STATUS_LINK_CHG) {
6447 sblk->status = SD_STATUS_UPDATED |
6448 (sblk->status & ~SD_STATUS_LINK_CHG);
6449 spin_lock(&tp->lock);
6450 if (tg3_flag(tp, USE_PHYLIB)) {
6451 tw32_f(MAC_STATUS,
6452 (MAC_STATUS_SYNC_CHANGED |
6453 MAC_STATUS_CFG_CHANGED |
6454 MAC_STATUS_MI_COMPLETION |
6455 MAC_STATUS_LNKSTATE_CHANGED));
6456 udelay(40);
6457 } else
6458 tg3_setup_phy(tp, 0);
6459 spin_unlock(&tp->lock);
6460 }
6461 }
6462 }
6463
6464 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6465 struct tg3_rx_prodring_set *dpr,
6466 struct tg3_rx_prodring_set *spr)
6467 {
6468 u32 si, di, cpycnt, src_prod_idx;
6469 int i, err = 0;
6470
6471 while (1) {
6472 src_prod_idx = spr->rx_std_prod_idx;
6473
6474 /* Make sure updates to the rx_std_buffers[] entries and the
6475 * standard producer index are seen in the correct order.
6476 */
6477 smp_rmb();
6478
6479 if (spr->rx_std_cons_idx == src_prod_idx)
6480 break;
6481
6482 if (spr->rx_std_cons_idx < src_prod_idx)
6483 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6484 else
6485 cpycnt = tp->rx_std_ring_mask + 1 -
6486 spr->rx_std_cons_idx;
6487
6488 cpycnt = min(cpycnt,
6489 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6490
6491 si = spr->rx_std_cons_idx;
6492 di = dpr->rx_std_prod_idx;
6493
6494 for (i = di; i < di + cpycnt; i++) {
6495 if (dpr->rx_std_buffers[i].data) {
6496 cpycnt = i - di;
6497 err = -ENOSPC;
6498 break;
6499 }
6500 }
6501
6502 if (!cpycnt)
6503 break;
6504
6505 /* Ensure that updates to the rx_std_buffers ring and the
6506 		 * shadowed hardware producer ring from tg3_recycle_rx() are
6507 		 * ordered correctly WRT the buffer check above.
6508 */
6509 smp_rmb();
6510
6511 memcpy(&dpr->rx_std_buffers[di],
6512 &spr->rx_std_buffers[si],
6513 cpycnt * sizeof(struct ring_info));
6514
6515 for (i = 0; i < cpycnt; i++, di++, si++) {
6516 struct tg3_rx_buffer_desc *sbd, *dbd;
6517 sbd = &spr->rx_std[si];
6518 dbd = &dpr->rx_std[di];
6519 dbd->addr_hi = sbd->addr_hi;
6520 dbd->addr_lo = sbd->addr_lo;
6521 }
6522
6523 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6524 tp->rx_std_ring_mask;
6525 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6526 tp->rx_std_ring_mask;
6527 }
6528
6529 while (1) {
6530 src_prod_idx = spr->rx_jmb_prod_idx;
6531
6532 /* Make sure updates to the rx_jmb_buffers[] entries and
6533 * the jumbo producer index are seen in the correct order.
6534 */
6535 smp_rmb();
6536
6537 if (spr->rx_jmb_cons_idx == src_prod_idx)
6538 break;
6539
6540 if (spr->rx_jmb_cons_idx < src_prod_idx)
6541 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6542 else
6543 cpycnt = tp->rx_jmb_ring_mask + 1 -
6544 spr->rx_jmb_cons_idx;
6545
6546 cpycnt = min(cpycnt,
6547 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6548
6549 si = spr->rx_jmb_cons_idx;
6550 di = dpr->rx_jmb_prod_idx;
6551
6552 for (i = di; i < di + cpycnt; i++) {
6553 if (dpr->rx_jmb_buffers[i].data) {
6554 cpycnt = i - di;
6555 err = -ENOSPC;
6556 break;
6557 }
6558 }
6559
6560 if (!cpycnt)
6561 break;
6562
6563 /* Ensure that updates to the rx_jmb_buffers ring and the
6564 		 * shadowed hardware producer ring from tg3_recycle_rx() are
6565 		 * ordered correctly WRT the buffer check above.
6566 */
6567 smp_rmb();
6568
6569 memcpy(&dpr->rx_jmb_buffers[di],
6570 &spr->rx_jmb_buffers[si],
6571 cpycnt * sizeof(struct ring_info));
6572
6573 for (i = 0; i < cpycnt; i++, di++, si++) {
6574 struct tg3_rx_buffer_desc *sbd, *dbd;
6575 sbd = &spr->rx_jmb[si].std;
6576 dbd = &dpr->rx_jmb[di].std;
6577 dbd->addr_hi = sbd->addr_hi;
6578 dbd->addr_lo = sbd->addr_lo;
6579 }
6580
6581 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6582 tp->rx_jmb_ring_mask;
6583 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6584 tp->rx_jmb_ring_mask;
6585 }
6586
6587 return err;
6588 }
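
/* Worked example for the cpycnt math above (illustrative): with a
 * 512-entry standard ring (mask 511), cons_idx == 500 and prod_idx == 10
 * means the producer has wrapped, so the first pass copies
 * 512 - 500 == 12 entries, cons_idx becomes 0, and the next iteration
 * copies the remaining 10.
 */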
6589
6590 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6591 {
6592 struct tg3 *tp = tnapi->tp;
6593
6594 /* run TX completion thread */
6595 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6596 tg3_tx(tnapi);
6597 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6598 return work_done;
6599 }
6600
6601 if (!tnapi->rx_rcb_prod_idx)
6602 return work_done;
6603
6604 /* run RX thread, within the bounds set by NAPI.
6605 * All RX "locking" is done by ensuring outside
6606 * code synchronizes with tg3->napi.poll()
6607 */
6608 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6609 work_done += tg3_rx(tnapi, budget - work_done);
6610
6611 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6612 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6613 int i, err = 0;
6614 u32 std_prod_idx = dpr->rx_std_prod_idx;
6615 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6616
6617 tp->rx_refill = false;
6618 for (i = 1; i <= tp->rxq_cnt; i++)
6619 err |= tg3_rx_prodring_xfer(tp, dpr,
6620 &tp->napi[i].prodring);
6621
6622 wmb();
6623
6624 if (std_prod_idx != dpr->rx_std_prod_idx)
6625 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6626 dpr->rx_std_prod_idx);
6627
6628 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6629 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6630 dpr->rx_jmb_prod_idx);
6631
6632 mmiowb();
6633
6634 if (err)
6635 tw32_f(HOSTCC_MODE, tp->coal_now);
6636 }
6637
6638 return work_done;
6639 }
6640
6641 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6642 {
6643 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6644 schedule_work(&tp->reset_task);
6645 }
6646
6647 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6648 {
6649 cancel_work_sync(&tp->reset_task);
6650 tg3_flag_clear(tp, RESET_TASK_PENDING);
6651 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6652 }
6653
6654 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6655 {
6656 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6657 struct tg3 *tp = tnapi->tp;
6658 int work_done = 0;
6659 struct tg3_hw_status *sblk = tnapi->hw_status;
6660
6661 while (1) {
6662 work_done = tg3_poll_work(tnapi, work_done, budget);
6663
6664 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6665 goto tx_recovery;
6666
6667 if (unlikely(work_done >= budget))
6668 break;
6669
6670 /* tp->last_tag is used in tg3_int_reenable() below
6671 * to tell the hw how much work has been processed,
6672 * so we must read it before checking for more work.
6673 */
6674 tnapi->last_tag = sblk->status_tag;
6675 tnapi->last_irq_tag = tnapi->last_tag;
6676 rmb();
6677
6678 /* check for RX/TX work to do */
6679 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6680 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6681
6682 			/* This test here is not race-free, but will reduce
6683 * the number of interrupts by looping again.
6684 */
6685 if (tnapi == &tp->napi[1] && tp->rx_refill)
6686 continue;
6687
6688 napi_complete(napi);
6689 /* Reenable interrupts. */
6690 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6691
6692 /* This test here is synchronized by napi_schedule()
6693 * and napi_complete() to close the race condition.
6694 */
6695 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6696 tw32(HOSTCC_MODE, tp->coalesce_mode |
6697 HOSTCC_MODE_ENABLE |
6698 tnapi->coal_now);
6699 }
6700 mmiowb();
6701 break;
6702 }
6703 }
6704
6705 return work_done;
6706
6707 tx_recovery:
6708 /* work_done is guaranteed to be less than budget. */
6709 napi_complete(napi);
6710 tg3_reset_task_schedule(tp);
6711 return work_done;
6712 }
6713
6714 static void tg3_process_error(struct tg3 *tp)
6715 {
6716 u32 val;
6717 bool real_error = false;
6718
6719 if (tg3_flag(tp, ERROR_PROCESSED))
6720 return;
6721
6722 /* Check Flow Attention register */
6723 val = tr32(HOSTCC_FLOW_ATTN);
6724 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6725 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6726 real_error = true;
6727 }
6728
6729 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6730 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6731 real_error = true;
6732 }
6733
6734 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6735 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6736 real_error = true;
6737 }
6738
6739 if (!real_error)
6740 return;
6741
6742 tg3_dump_state(tp);
6743
6744 tg3_flag_set(tp, ERROR_PROCESSED);
6745 tg3_reset_task_schedule(tp);
6746 }
6747
6748 static int tg3_poll(struct napi_struct *napi, int budget)
6749 {
6750 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6751 struct tg3 *tp = tnapi->tp;
6752 int work_done = 0;
6753 struct tg3_hw_status *sblk = tnapi->hw_status;
6754
6755 while (1) {
6756 if (sblk->status & SD_STATUS_ERROR)
6757 tg3_process_error(tp);
6758
6759 tg3_poll_link(tp);
6760
6761 work_done = tg3_poll_work(tnapi, work_done, budget);
6762
6763 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6764 goto tx_recovery;
6765
6766 if (unlikely(work_done >= budget))
6767 break;
6768
6769 if (tg3_flag(tp, TAGGED_STATUS)) {
6770 /* tp->last_tag is used in tg3_int_reenable() below
6771 * to tell the hw how much work has been processed,
6772 * so we must read it before checking for more work.
6773 */
6774 tnapi->last_tag = sblk->status_tag;
6775 tnapi->last_irq_tag = tnapi->last_tag;
6776 rmb();
6777 } else
6778 sblk->status &= ~SD_STATUS_UPDATED;
6779
6780 if (likely(!tg3_has_work(tnapi))) {
6781 napi_complete(napi);
6782 tg3_int_reenable(tnapi);
6783 break;
6784 }
6785 }
6786
6787 return work_done;
6788
6789 tx_recovery:
6790 /* work_done is guaranteed to be less than budget. */
6791 napi_complete(napi);
6792 tg3_reset_task_schedule(tp);
6793 return work_done;
6794 }
6795
6796 static void tg3_napi_disable(struct tg3 *tp)
6797 {
6798 int i;
6799
6800 for (i = tp->irq_cnt - 1; i >= 0; i--)
6801 napi_disable(&tp->napi[i].napi);
6802 }
6803
6804 static void tg3_napi_enable(struct tg3 *tp)
6805 {
6806 int i;
6807
6808 for (i = 0; i < tp->irq_cnt; i++)
6809 napi_enable(&tp->napi[i].napi);
6810 }
6811
6812 static void tg3_napi_init(struct tg3 *tp)
6813 {
6814 int i;
6815
6816 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6817 for (i = 1; i < tp->irq_cnt; i++)
6818 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6819 }
6820
6821 static void tg3_napi_fini(struct tg3 *tp)
6822 {
6823 int i;
6824
6825 for (i = 0; i < tp->irq_cnt; i++)
6826 netif_napi_del(&tp->napi[i].napi);
6827 }
6828
6829 static inline void tg3_netif_stop(struct tg3 *tp)
6830 {
6831 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6832 tg3_napi_disable(tp);
6833 netif_carrier_off(tp->dev);
6834 netif_tx_disable(tp->dev);
6835 }
6836
6837 /* tp->lock must be held */
6838 static inline void tg3_netif_start(struct tg3 *tp)
6839 {
6840 tg3_ptp_resume(tp);
6841
6842 /* NOTE: unconditional netif_tx_wake_all_queues is only
6843 * appropriate so long as all callers are assured to
6844 * have free tx slots (such as after tg3_init_hw)
6845 */
6846 netif_tx_wake_all_queues(tp->dev);
6847
6848 if (tp->link_up)
6849 netif_carrier_on(tp->dev);
6850
6851 tg3_napi_enable(tp);
6852 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6853 tg3_enable_ints(tp);
6854 }
6855
6856 static void tg3_irq_quiesce(struct tg3 *tp)
6857 {
6858 int i;
6859
6860 BUG_ON(tp->irq_sync);
6861
6862 tp->irq_sync = 1;
6863 smp_mb();
6864
6865 for (i = 0; i < tp->irq_cnt; i++)
6866 synchronize_irq(tp->napi[i].irq_vec);
6867 }
6868
6869 /* Fully shut down all tg3 driver activity elsewhere in the system.
6870  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
6871  * as well.  Most of the time, this is not necessary except when
6872  * shutting down the device.
6873 */
6874 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6875 {
6876 spin_lock_bh(&tp->lock);
6877 if (irq_sync)
6878 tg3_irq_quiesce(tp);
6879 }
6880
6881 static inline void tg3_full_unlock(struct tg3 *tp)
6882 {
6883 spin_unlock_bh(&tp->lock);
6884 }
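
/* Typical usage of the pair above (illustrative sketch):
 *
 *	tg3_full_lock(tp, 1);	irq_sync waits out in-flight handlers
 *	...reconfigure or reset the hardware...
 *	tg3_full_unlock(tp);
 */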
6885
6886 /* One-shot MSI handler - Chip automatically disables interrupt
6887 * after sending MSI so driver doesn't have to do it.
6888 */
6889 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6890 {
6891 struct tg3_napi *tnapi = dev_id;
6892 struct tg3 *tp = tnapi->tp;
6893
6894 prefetch(tnapi->hw_status);
6895 if (tnapi->rx_rcb)
6896 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6897
6898 if (likely(!tg3_irq_sync(tp)))
6899 napi_schedule(&tnapi->napi);
6900
6901 return IRQ_HANDLED;
6902 }
6903
6904 /* MSI ISR - No need to check for interrupt sharing and no need to
6905 * flush status block and interrupt mailbox. PCI ordering rules
6906 * guarantee that MSI will arrive after the status block.
6907 */
6908 static irqreturn_t tg3_msi(int irq, void *dev_id)
6909 {
6910 struct tg3_napi *tnapi = dev_id;
6911 struct tg3 *tp = tnapi->tp;
6912
6913 prefetch(tnapi->hw_status);
6914 if (tnapi->rx_rcb)
6915 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6916 /*
6917 * Writing any value to intr-mbox-0 clears PCI INTA# and
6918 * chip-internal interrupt pending events.
6919 	 * Writing non-zero to intr-mbox-0 additionally tells the
6920 * NIC to stop sending us irqs, engaging "in-intr-handler"
6921 * event coalescing.
6922 */
6923 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6924 if (likely(!tg3_irq_sync(tp)))
6925 napi_schedule(&tnapi->napi);
6926
6927 return IRQ_RETVAL(1);
6928 }
6929
6930 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6931 {
6932 struct tg3_napi *tnapi = dev_id;
6933 struct tg3 *tp = tnapi->tp;
6934 struct tg3_hw_status *sblk = tnapi->hw_status;
6935 unsigned int handled = 1;
6936
6937 /* In INTx mode, it is possible for the interrupt to arrive at
6938 	 * the CPU before the status block the chip posted just prior to it.
6939 * Reading the PCI State register will confirm whether the
6940 * interrupt is ours and will flush the status block.
6941 */
6942 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6943 if (tg3_flag(tp, CHIP_RESETTING) ||
6944 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6945 handled = 0;
6946 goto out;
6947 }
6948 }
6949
6950 /*
6951 * Writing any value to intr-mbox-0 clears PCI INTA# and
6952 * chip-internal interrupt pending events.
6953 	 * Writing non-zero to intr-mbox-0 additionally tells the
6954 * NIC to stop sending us irqs, engaging "in-intr-handler"
6955 * event coalescing.
6956 *
6957 * Flush the mailbox to de-assert the IRQ immediately to prevent
6958 * spurious interrupts. The flush impacts performance but
6959 * excessive spurious interrupts can be worse in some cases.
6960 */
6961 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6962 if (tg3_irq_sync(tp))
6963 goto out;
6964 sblk->status &= ~SD_STATUS_UPDATED;
6965 if (likely(tg3_has_work(tnapi))) {
6966 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6967 napi_schedule(&tnapi->napi);
6968 } else {
6969 /* No work, shared interrupt perhaps? re-enable
6970 * interrupts, and flush that PCI write
6971 */
6972 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6973 0x00000000);
6974 }
6975 out:
6976 return IRQ_RETVAL(handled);
6977 }
6978
6979 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6980 {
6981 struct tg3_napi *tnapi = dev_id;
6982 struct tg3 *tp = tnapi->tp;
6983 struct tg3_hw_status *sblk = tnapi->hw_status;
6984 unsigned int handled = 1;
6985
6986 /* In INTx mode, it is possible for the interrupt to arrive at
6987 	 * the CPU before the status block the chip posted just prior to it.
6988 * Reading the PCI State register will confirm whether the
6989 * interrupt is ours and will flush the status block.
6990 */
6991 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6992 if (tg3_flag(tp, CHIP_RESETTING) ||
6993 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6994 handled = 0;
6995 goto out;
6996 }
6997 }
6998
6999 /*
7000 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7001 	 * chip-internal interrupt pending events.
7002 	 * Writing non-zero to intr-mbox-0 additionally tells the
7003 * NIC to stop sending us irqs, engaging "in-intr-handler"
7004 * event coalescing.
7005 *
7006 * Flush the mailbox to de-assert the IRQ immediately to prevent
7007 * spurious interrupts. The flush impacts performance but
7008 * excessive spurious interrupts can be worse in some cases.
7009 */
7010 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7011
7012 /*
7013 * In a shared interrupt configuration, sometimes other devices'
7014 * interrupts will scream. We record the current status tag here
7015 * so that the above check can report that the screaming interrupts
7016 * are unhandled. Eventually they will be silenced.
7017 */
7018 tnapi->last_irq_tag = sblk->status_tag;
7019
7020 if (tg3_irq_sync(tp))
7021 goto out;
7022
7023 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7024
7025 napi_schedule(&tnapi->napi);
7026
7027 out:
7028 return IRQ_RETVAL(handled);
7029 }
7030
7031 /* ISR for interrupt test */
7032 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7033 {
7034 struct tg3_napi *tnapi = dev_id;
7035 struct tg3 *tp = tnapi->tp;
7036 struct tg3_hw_status *sblk = tnapi->hw_status;
7037
7038 if ((sblk->status & SD_STATUS_UPDATED) ||
7039 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7040 tg3_disable_ints(tp);
7041 return IRQ_RETVAL(1);
7042 }
7043 return IRQ_RETVAL(0);
7044 }
7045
7046 #ifdef CONFIG_NET_POLL_CONTROLLER
7047 static void tg3_poll_controller(struct net_device *dev)
7048 {
7049 int i;
7050 struct tg3 *tp = netdev_priv(dev);
7051
7052 if (tg3_irq_sync(tp))
7053 return;
7054
7055 for (i = 0; i < tp->irq_cnt; i++)
7056 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7057 }
7058 #endif
7059
7060 static void tg3_tx_timeout(struct net_device *dev)
7061 {
7062 struct tg3 *tp = netdev_priv(dev);
7063
7064 if (netif_msg_tx_err(tp)) {
7065 netdev_err(dev, "transmit timed out, resetting\n");
7066 tg3_dump_state(tp);
7067 }
7068
7069 tg3_reset_task_schedule(tp);
7070 }
7071
7072 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7073 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7074 {
7075 u32 base = (u32) mapping & 0xffffffff;
7076
7077 return (base > 0xffffdcc0) && (base + len + 8 < base);
7078 }
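
/* Worked example (illustrative): base == 0xffffff00 with len == 0x200
 * gives base + len + 8 == 0x108 after 32-bit truncation, which is less
 * than base, so the buffer straddles a 4GB boundary.  The
 * base > 0xffffdcc0 pre-check skips the wrap test for any buffer that
 * starts more than a maximum-sized frame below the boundary.
 */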
7079
7080 /* Test for DMA addresses > 40-bit */
7081 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7082 int len)
7083 {
7084 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7085 if (tg3_flag(tp, 40BIT_DMA_BUG))
7086 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7087 return 0;
7088 #else
7089 return 0;
7090 #endif
7091 }
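
/* Worked example (illustrative): mapping == DMA_BIT_MASK(40) - 0xfff
 * with len == 0x2000 sums past the 40-bit limit, so on chips with the
 * 40-bit DMA bug such a buffer must be bounced below 2^40.
 */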
7092
7093 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7094 dma_addr_t mapping, u32 len, u32 flags,
7095 u32 mss, u32 vlan)
7096 {
7097 txbd->addr_hi = ((u64) mapping >> 32);
7098 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7099 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7100 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7101 }
7102
7103 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7104 dma_addr_t map, u32 len, u32 flags,
7105 u32 mss, u32 vlan)
7106 {
7107 struct tg3 *tp = tnapi->tp;
7108 bool hwbug = false;
7109
7110 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7111 hwbug = true;
7112
7113 if (tg3_4g_overflow_test(map, len))
7114 hwbug = true;
7115
7116 if (tg3_40bit_overflow_test(tp, map, len))
7117 hwbug = true;
7118
7119 if (tp->dma_limit) {
7120 u32 prvidx = *entry;
7121 u32 tmp_flag = flags & ~TXD_FLAG_END;
7122 while (len > tp->dma_limit && *budget) {
7123 u32 frag_len = tp->dma_limit;
7124 len -= tp->dma_limit;
7125
7126 			/* Avoid the 8-byte DMA problem */
7127 if (len <= 8) {
7128 len += tp->dma_limit / 2;
7129 frag_len = tp->dma_limit / 2;
7130 }
7131
7132 tnapi->tx_buffers[*entry].fragmented = true;
7133
7134 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7135 frag_len, tmp_flag, mss, vlan);
7136 *budget -= 1;
7137 prvidx = *entry;
7138 *entry = NEXT_TX(*entry);
7139
7140 map += frag_len;
7141 }
7142
7143 if (len) {
7144 if (*budget) {
7145 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7146 len, flags, mss, vlan);
7147 *budget -= 1;
7148 *entry = NEXT_TX(*entry);
7149 } else {
7150 hwbug = true;
7151 tnapi->tx_buffers[prvidx].fragmented = false;
7152 }
7153 }
7154 } else {
7155 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7156 len, flags, mss, vlan);
7157 *entry = NEXT_TX(*entry);
7158 }
7159
7160 return hwbug;
7161 }
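
/* Fragmentation example for the dma_limit path above (illustrative):
 * with dma_limit == 4096 and len == 4100, a straight 4096-byte split
 * would leave a 4-byte tail and trip the short-DMA bug, so the loop
 * instead emits a 2048-byte BD (halving the fragment and folding the
 * tail into the remainder) and then a final 2052-byte BD.
 */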
7162
7163 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7164 {
7165 int i;
7166 struct sk_buff *skb;
7167 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7168
7169 skb = txb->skb;
7170 txb->skb = NULL;
7171
7172 pci_unmap_single(tnapi->tp->pdev,
7173 dma_unmap_addr(txb, mapping),
7174 skb_headlen(skb),
7175 PCI_DMA_TODEVICE);
7176
7177 while (txb->fragmented) {
7178 txb->fragmented = false;
7179 entry = NEXT_TX(entry);
7180 txb = &tnapi->tx_buffers[entry];
7181 }
7182
7183 for (i = 0; i <= last; i++) {
7184 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7185
7186 entry = NEXT_TX(entry);
7187 txb = &tnapi->tx_buffers[entry];
7188
7189 pci_unmap_page(tnapi->tp->pdev,
7190 dma_unmap_addr(txb, mapping),
7191 skb_frag_size(frag), PCI_DMA_TODEVICE);
7192
7193 while (txb->fragmented) {
7194 txb->fragmented = false;
7195 entry = NEXT_TX(entry);
7196 txb = &tnapi->tx_buffers[entry];
7197 }
7198 }
7199 }
7200
7201 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7202 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7203 struct sk_buff **pskb,
7204 u32 *entry, u32 *budget,
7205 u32 base_flags, u32 mss, u32 vlan)
7206 {
7207 struct tg3 *tp = tnapi->tp;
7208 struct sk_buff *new_skb, *skb = *pskb;
7209 dma_addr_t new_addr = 0;
7210 int ret = 0;
7211
7212 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7213 new_skb = skb_copy(skb, GFP_ATOMIC);
7214 else {
7215 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7216
7217 new_skb = skb_copy_expand(skb,
7218 skb_headroom(skb) + more_headroom,
7219 skb_tailroom(skb), GFP_ATOMIC);
7220 }
7221
7222 if (!new_skb) {
7223 ret = -1;
7224 } else {
7225 /* New SKB is guaranteed to be linear. */
7226 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7227 PCI_DMA_TODEVICE);
7228 /* Make sure the mapping succeeded */
7229 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7230 dev_kfree_skb(new_skb);
7231 ret = -1;
7232 } else {
7233 u32 save_entry = *entry;
7234
7235 base_flags |= TXD_FLAG_END;
7236
7237 tnapi->tx_buffers[*entry].skb = new_skb;
7238 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7239 mapping, new_addr);
7240
7241 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7242 new_skb->len, base_flags,
7243 mss, vlan)) {
7244 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7245 dev_kfree_skb(new_skb);
7246 ret = -1;
7247 }
7248 }
7249 }
7250
7251 dev_kfree_skb(skb);
7252 *pskb = new_skb;
7253 return ret;
7254 }
7255
7256 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7257
7258 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7259 * TSO header is greater than 80 bytes.
7260 */
7261 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7262 {
7263 struct sk_buff *segs, *nskb;
7264 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7265
7266 	/* The estimate above allows, worst case, three descriptors per GSO segment */
7267 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7268 netif_stop_queue(tp->dev);
7269
7270 		/* netif_tx_stop_queue() must be done before checking
7271 		 * tx index in tg3_tx_avail() below, because in
7272 * tg3_tx(), we update tx index before checking for
7273 * netif_tx_queue_stopped().
7274 */
7275 smp_mb();
7276 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7277 return NETDEV_TX_BUSY;
7278
7279 netif_wake_queue(tp->dev);
7280 }
7281
7282 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7283 if (IS_ERR(segs))
7284 goto tg3_tso_bug_end;
7285
7286 do {
7287 nskb = segs;
7288 segs = segs->next;
7289 nskb->next = NULL;
7290 tg3_start_xmit(nskb, tp->dev);
7291 } while (segs);
7292
7293 tg3_tso_bug_end:
7294 dev_kfree_skb(skb);
7295
7296 return NETDEV_TX_OK;
7297 }
7298
7299 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7300 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7301 */
7302 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7303 {
7304 struct tg3 *tp = netdev_priv(dev);
7305 u32 len, entry, base_flags, mss, vlan = 0;
7306 u32 budget;
7307 int i = -1, would_hit_hwbug;
7308 dma_addr_t mapping;
7309 struct tg3_napi *tnapi;
7310 struct netdev_queue *txq;
7311 unsigned int last;
7312
7313 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7314 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7315 if (tg3_flag(tp, ENABLE_TSS))
7316 tnapi++;
7317
7318 budget = tg3_tx_avail(tnapi);
7319
7320 /* We are running in BH disabled context with netif_tx_lock
7321 * and TX reclaim runs via tp->napi.poll inside of a software
7322 * interrupt. Furthermore, IRQ processing runs lockless so we have
7323 * no IRQ context deadlocks to worry about either. Rejoice!
7324 */
7325 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7326 if (!netif_tx_queue_stopped(txq)) {
7327 netif_tx_stop_queue(txq);
7328
7329 /* This is a hard error, log it. */
7330 netdev_err(dev,
7331 "BUG! Tx Ring full when queue awake!\n");
7332 }
7333 return NETDEV_TX_BUSY;
7334 }
7335
7336 entry = tnapi->tx_prod;
7337 base_flags = 0;
7338 if (skb->ip_summed == CHECKSUM_PARTIAL)
7339 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7340
7341 mss = skb_shinfo(skb)->gso_size;
7342 if (mss) {
7343 struct iphdr *iph;
7344 u32 tcp_opt_len, hdr_len;
7345
7346 if (skb_header_cloned(skb) &&
7347 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7348 goto drop;
7349
7350 iph = ip_hdr(skb);
7351 tcp_opt_len = tcp_optlen(skb);
7352
7353 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7354
7355 if (!skb_is_gso_v6(skb)) {
7356 iph->check = 0;
7357 iph->tot_len = htons(mss + hdr_len);
7358 }
7359
7360 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7361 tg3_flag(tp, TSO_BUG))
7362 return tg3_tso_bug(tp, skb);
7363
7364 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7365 TXD_FLAG_CPU_POST_DMA);
7366
7367 if (tg3_flag(tp, HW_TSO_1) ||
7368 tg3_flag(tp, HW_TSO_2) ||
7369 tg3_flag(tp, HW_TSO_3)) {
7370 tcp_hdr(skb)->check = 0;
7371 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7372 } else
7373 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7374 iph->daddr, 0,
7375 IPPROTO_TCP,
7376 0);
7377
7378 if (tg3_flag(tp, HW_TSO_3)) {
7379 mss |= (hdr_len & 0xc) << 12;
7380 if (hdr_len & 0x10)
7381 base_flags |= 0x00000010;
7382 base_flags |= (hdr_len & 0x3e0) << 5;
7383 } else if (tg3_flag(tp, HW_TSO_2))
7384 mss |= hdr_len << 9;
7385 else if (tg3_flag(tp, HW_TSO_1) ||
7386 tg3_asic_rev(tp) == ASIC_REV_5705) {
7387 if (tcp_opt_len || iph->ihl > 5) {
7388 int tsflags;
7389
7390 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7391 mss |= (tsflags << 11);
7392 }
7393 } else {
7394 if (tcp_opt_len || iph->ihl > 5) {
7395 int tsflags;
7396
7397 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7398 base_flags |= tsflags << 12;
7399 }
7400 }
7401 }
7402
7403 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7404 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7405 base_flags |= TXD_FLAG_JMB_PKT;
7406
7407 if (vlan_tx_tag_present(skb)) {
7408 base_flags |= TXD_FLAG_VLAN;
7409 vlan = vlan_tx_tag_get(skb);
7410 }
7411
7412 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7413 tg3_flag(tp, TX_TSTAMP_EN)) {
7414 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7415 base_flags |= TXD_FLAG_HWTSTAMP;
7416 }
7417
7418 len = skb_headlen(skb);
7419
7420 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7421 if (pci_dma_mapping_error(tp->pdev, mapping))
7422 goto drop;
7423
7424
7425 tnapi->tx_buffers[entry].skb = skb;
7426 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7427
7428 would_hit_hwbug = 0;
7429
7430 if (tg3_flag(tp, 5701_DMA_BUG))
7431 would_hit_hwbug = 1;
7432
7433 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7434 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7435 mss, vlan)) {
7436 would_hit_hwbug = 1;
7437 } else if (skb_shinfo(skb)->nr_frags > 0) {
7438 u32 tmp_mss = mss;
7439
7440 if (!tg3_flag(tp, HW_TSO_1) &&
7441 !tg3_flag(tp, HW_TSO_2) &&
7442 !tg3_flag(tp, HW_TSO_3))
7443 tmp_mss = 0;
7444
7445 /* Now loop through additional data
7446 * fragments, and queue them.
7447 */
7448 last = skb_shinfo(skb)->nr_frags - 1;
7449 for (i = 0; i <= last; i++) {
7450 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7451
7452 len = skb_frag_size(frag);
7453 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7454 len, DMA_TO_DEVICE);
7455
7456 tnapi->tx_buffers[entry].skb = NULL;
7457 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7458 mapping);
7459 if (dma_mapping_error(&tp->pdev->dev, mapping))
7460 goto dma_error;
7461
7462 if (!budget ||
7463 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7464 len, base_flags |
7465 ((i == last) ? TXD_FLAG_END : 0),
7466 tmp_mss, vlan)) {
7467 would_hit_hwbug = 1;
7468 break;
7469 }
7470 }
7471 }
7472
7473 if (would_hit_hwbug) {
7474 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7475
7476 /* If the workaround fails due to memory/mapping
7477 * failure, silently drop this packet.
7478 */
7479 entry = tnapi->tx_prod;
7480 budget = tg3_tx_avail(tnapi);
7481 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7482 base_flags, mss, vlan))
7483 goto drop_nofree;
7484 }
7485
7486 skb_tx_timestamp(skb);
7487 netdev_tx_sent_queue(txq, skb->len);
7488
7489 /* Sync BD data before updating mailbox */
7490 wmb();
7491
7492 /* Packets are ready, update Tx producer idx local and on card. */
7493 tw32_tx_mbox(tnapi->prodmbox, entry);
7494
7495 tnapi->tx_prod = entry;
7496 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7497 netif_tx_stop_queue(txq);
7498
7499 		/* netif_tx_stop_queue() must be done before checking
7500 		 * tx index in tg3_tx_avail() below, because in
7501 * tg3_tx(), we update tx index before checking for
7502 * netif_tx_queue_stopped().
7503 */
7504 smp_mb();
7505 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7506 netif_tx_wake_queue(txq);
7507 }
7508
7509 mmiowb();
7510 return NETDEV_TX_OK;
7511
7512 dma_error:
7513 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7514 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7515 drop:
7516 dev_kfree_skb(skb);
7517 drop_nofree:
7518 tp->tx_dropped++;
7519 return NETDEV_TX_OK;
7520 }
7521
7522 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7523 {
7524 if (enable) {
7525 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7526 MAC_MODE_PORT_MODE_MASK);
7527
7528 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7529
7530 if (!tg3_flag(tp, 5705_PLUS))
7531 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7532
7533 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7534 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7535 else
7536 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7537 } else {
7538 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7539
7540 if (tg3_flag(tp, 5705_PLUS) ||
7541 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7542 tg3_asic_rev(tp) == ASIC_REV_5700)
7543 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7544 }
7545
7546 tw32(MAC_MODE, tp->mac_mode);
7547 udelay(40);
7548 }
7549
7550 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7551 {
7552 u32 val, bmcr, mac_mode, ptest = 0;
7553
7554 tg3_phy_toggle_apd(tp, false);
7555 tg3_phy_toggle_automdix(tp, 0);
7556
7557 if (extlpbk && tg3_phy_set_extloopbk(tp))
7558 return -EIO;
7559
7560 bmcr = BMCR_FULLDPLX;
7561 switch (speed) {
7562 case SPEED_10:
7563 break;
7564 case SPEED_100:
7565 bmcr |= BMCR_SPEED100;
7566 break;
7567 case SPEED_1000:
7568 default:
7569 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7570 speed = SPEED_100;
7571 bmcr |= BMCR_SPEED100;
7572 } else {
7573 speed = SPEED_1000;
7574 bmcr |= BMCR_SPEED1000;
7575 }
7576 }
7577
7578 if (extlpbk) {
7579 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7580 tg3_readphy(tp, MII_CTRL1000, &val);
7581 val |= CTL1000_AS_MASTER |
7582 CTL1000_ENABLE_MASTER;
7583 tg3_writephy(tp, MII_CTRL1000, val);
7584 } else {
7585 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7586 MII_TG3_FET_PTEST_TRIM_2;
7587 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7588 }
7589 } else
7590 bmcr |= BMCR_LOOPBACK;
7591
7592 tg3_writephy(tp, MII_BMCR, bmcr);
7593
7594 /* The write needs to be flushed for the FETs */
7595 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7596 tg3_readphy(tp, MII_BMCR, &bmcr);
7597
7598 udelay(40);
7599
7600 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7601 tg3_asic_rev(tp) == ASIC_REV_5785) {
7602 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7603 MII_TG3_FET_PTEST_FRC_TX_LINK |
7604 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7605
7606 /* The write needs to be flushed for the AC131 */
7607 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7608 }
7609
7610 /* Reset to prevent losing 1st rx packet intermittently */
7611 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7612 tg3_flag(tp, 5780_CLASS)) {
7613 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7614 udelay(10);
7615 tw32_f(MAC_RX_MODE, tp->rx_mode);
7616 }
7617
7618 mac_mode = tp->mac_mode &
7619 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7620 if (speed == SPEED_1000)
7621 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7622 else
7623 mac_mode |= MAC_MODE_PORT_MODE_MII;
7624
7625 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7626 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7627
7628 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7629 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7630 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7631 mac_mode |= MAC_MODE_LINK_POLARITY;
7632
7633 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7634 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7635 }
7636
7637 tw32(MAC_MODE, mac_mode);
7638 udelay(40);
7639
7640 return 0;
7641 }
7642
7643 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7644 {
7645 struct tg3 *tp = netdev_priv(dev);
7646
7647 if (features & NETIF_F_LOOPBACK) {
7648 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7649 return;
7650
7651 spin_lock_bh(&tp->lock);
7652 tg3_mac_loopback(tp, true);
7653 netif_carrier_on(tp->dev);
7654 spin_unlock_bh(&tp->lock);
7655 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7656 } else {
7657 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7658 return;
7659
7660 spin_lock_bh(&tp->lock);
7661 tg3_mac_loopback(tp, false);
7662 /* Force link status check */
7663 tg3_setup_phy(tp, 1);
7664 spin_unlock_bh(&tp->lock);
7665 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7666 }
7667 }
7668
7669 static netdev_features_t tg3_fix_features(struct net_device *dev,
7670 netdev_features_t features)
7671 {
7672 struct tg3 *tp = netdev_priv(dev);
7673
7674 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7675 features &= ~NETIF_F_ALL_TSO;
7676
7677 return features;
7678 }
7679
7680 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7681 {
7682 netdev_features_t changed = dev->features ^ features;
7683
7684 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7685 tg3_set_loopback(dev, features);
7686
7687 return 0;
7688 }
7689
7690 static void tg3_rx_prodring_free(struct tg3 *tp,
7691 struct tg3_rx_prodring_set *tpr)
7692 {
7693 int i;
7694
7695 if (tpr != &tp->napi[0].prodring) {
7696 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7697 i = (i + 1) & tp->rx_std_ring_mask)
7698 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7699 tp->rx_pkt_map_sz);
7700
7701 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7702 for (i = tpr->rx_jmb_cons_idx;
7703 i != tpr->rx_jmb_prod_idx;
7704 i = (i + 1) & tp->rx_jmb_ring_mask) {
7705 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7706 TG3_RX_JMB_MAP_SZ);
7707 }
7708 }
7709
7710 return;
7711 }
7712
7713 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7714 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7715 tp->rx_pkt_map_sz);
7716
7717 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7718 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7719 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7720 TG3_RX_JMB_MAP_SZ);
7721 }
7722 }
7723
7724 /* Initialize rx rings for packet processing.
7725 *
7726 * The chip has been shut down and the driver detached from
7727  * the networking stack, so no interrupts or new tx packets will
7728 * end up in the driver. tp->{tx,}lock are held and thus
7729 * we may not sleep.
7730 */
7731 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7732 struct tg3_rx_prodring_set *tpr)
7733 {
7734 u32 i, rx_pkt_dma_sz;
7735
7736 tpr->rx_std_cons_idx = 0;
7737 tpr->rx_std_prod_idx = 0;
7738 tpr->rx_jmb_cons_idx = 0;
7739 tpr->rx_jmb_prod_idx = 0;
7740
7741 if (tpr != &tp->napi[0].prodring) {
7742 memset(&tpr->rx_std_buffers[0], 0,
7743 TG3_RX_STD_BUFF_RING_SIZE(tp));
7744 if (tpr->rx_jmb_buffers)
7745 memset(&tpr->rx_jmb_buffers[0], 0,
7746 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7747 goto done;
7748 }
7749
7750 /* Zero out all descriptors. */
7751 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7752
7753 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7754 if (tg3_flag(tp, 5780_CLASS) &&
7755 tp->dev->mtu > ETH_DATA_LEN)
7756 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7757 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7758
7759 	/* Initialize invariants of the rings; we only set this
7760 * stuff once. This works because the card does not
7761 * write into the rx buffer posting rings.
7762 */
7763 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7764 struct tg3_rx_buffer_desc *rxd;
7765
7766 rxd = &tpr->rx_std[i];
7767 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7768 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7769 rxd->opaque = (RXD_OPAQUE_RING_STD |
7770 (i << RXD_OPAQUE_INDEX_SHIFT));
7771 }
7772
7773 /* Now allocate fresh SKBs for each rx ring. */
7774 for (i = 0; i < tp->rx_pending; i++) {
7775 unsigned int frag_size;
7776
7777 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7778 &frag_size) < 0) {
7779 netdev_warn(tp->dev,
7780 "Using a smaller RX standard ring. Only "
7781 "%d out of %d buffers were allocated "
7782 "successfully\n", i, tp->rx_pending);
7783 if (i == 0)
7784 goto initfail;
7785 tp->rx_pending = i;
7786 break;
7787 }
7788 }
7789
7790 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7791 goto done;
7792
7793 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7794
7795 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7796 goto done;
7797
7798 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7799 struct tg3_rx_buffer_desc *rxd;
7800
7801 rxd = &tpr->rx_jmb[i].std;
7802 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7803 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7804 RXD_FLAG_JUMBO;
7805 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7806 (i << RXD_OPAQUE_INDEX_SHIFT));
7807 }
7808
7809 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7810 unsigned int frag_size;
7811
7812 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7813 &frag_size) < 0) {
7814 netdev_warn(tp->dev,
7815 "Using a smaller RX jumbo ring. Only %d "
7816 "out of %d buffers were allocated "
7817 "successfully\n", i, tp->rx_jumbo_pending);
7818 if (i == 0)
7819 goto initfail;
7820 tp->rx_jumbo_pending = i;
7821 break;
7822 }
7823 }
7824
7825 done:
7826 return 0;
7827
7828 initfail:
7829 tg3_rx_prodring_free(tp, tpr);
7830 return -ENOMEM;
7831 }
7832
7833 static void tg3_rx_prodring_fini(struct tg3 *tp,
7834 struct tg3_rx_prodring_set *tpr)
7835 {
7836 kfree(tpr->rx_std_buffers);
7837 tpr->rx_std_buffers = NULL;
7838 kfree(tpr->rx_jmb_buffers);
7839 tpr->rx_jmb_buffers = NULL;
7840 if (tpr->rx_std) {
7841 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7842 tpr->rx_std, tpr->rx_std_mapping);
7843 tpr->rx_std = NULL;
7844 }
7845 if (tpr->rx_jmb) {
7846 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7847 tpr->rx_jmb, tpr->rx_jmb_mapping);
7848 tpr->rx_jmb = NULL;
7849 }
7850 }
7851
7852 static int tg3_rx_prodring_init(struct tg3 *tp,
7853 struct tg3_rx_prodring_set *tpr)
7854 {
7855 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7856 GFP_KERNEL);
7857 if (!tpr->rx_std_buffers)
7858 return -ENOMEM;
7859
7860 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7861 TG3_RX_STD_RING_BYTES(tp),
7862 &tpr->rx_std_mapping,
7863 GFP_KERNEL);
7864 if (!tpr->rx_std)
7865 goto err_out;
7866
7867 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7868 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7869 GFP_KERNEL);
7870 if (!tpr->rx_jmb_buffers)
7871 goto err_out;
7872
7873 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7874 TG3_RX_JMB_RING_BYTES(tp),
7875 &tpr->rx_jmb_mapping,
7876 GFP_KERNEL);
7877 if (!tpr->rx_jmb)
7878 goto err_out;
7879 }
7880
7881 return 0;
7882
7883 err_out:
7884 tg3_rx_prodring_fini(tp, tpr);
7885 return -ENOMEM;
7886 }
7887
7888 /* Free up pending packets in all rx/tx rings.
7889 *
7890 * The chip has been shut down and the driver detached from
7891  * the networking stack, so no interrupts or new tx packets will
7892 * end up in the driver. tp->{tx,}lock is not held and we are not
7893 * in an interrupt context and thus may sleep.
7894 */
7895 static void tg3_free_rings(struct tg3 *tp)
7896 {
7897 int i, j;
7898
7899 for (j = 0; j < tp->irq_cnt; j++) {
7900 struct tg3_napi *tnapi = &tp->napi[j];
7901
7902 tg3_rx_prodring_free(tp, &tnapi->prodring);
7903
7904 if (!tnapi->tx_buffers)
7905 continue;
7906
7907 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7908 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7909
7910 if (!skb)
7911 continue;
7912
7913 tg3_tx_skb_unmap(tnapi, i,
7914 skb_shinfo(skb)->nr_frags - 1);
7915
7916 dev_kfree_skb_any(skb);
7917 }
7918 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7919 }
7920 }
7921
7922 /* Initialize tx/rx rings for packet processing.
7923 *
7924 * The chip has been shut down and the driver detached from
7925  * the networking stack, so no interrupts or new tx packets will
7926 * end up in the driver. tp->{tx,}lock are held and thus
7927 * we may not sleep.
7928 */
7929 static int tg3_init_rings(struct tg3 *tp)
7930 {
7931 int i;
7932
7933 /* Free up all the SKBs. */
7934 tg3_free_rings(tp);
7935
7936 for (i = 0; i < tp->irq_cnt; i++) {
7937 struct tg3_napi *tnapi = &tp->napi[i];
7938
7939 tnapi->last_tag = 0;
7940 tnapi->last_irq_tag = 0;
7941 tnapi->hw_status->status = 0;
7942 tnapi->hw_status->status_tag = 0;
7943 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7944
7945 tnapi->tx_prod = 0;
7946 tnapi->tx_cons = 0;
7947 if (tnapi->tx_ring)
7948 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7949
7950 tnapi->rx_rcb_ptr = 0;
7951 if (tnapi->rx_rcb)
7952 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7953
7954 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7955 tg3_free_rings(tp);
7956 return -ENOMEM;
7957 }
7958 }
7959
7960 return 0;
7961 }
7962
7963 static void tg3_mem_tx_release(struct tg3 *tp)
7964 {
7965 int i;
7966
7967 for (i = 0; i < tp->irq_max; i++) {
7968 struct tg3_napi *tnapi = &tp->napi[i];
7969
7970 if (tnapi->tx_ring) {
7971 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7972 tnapi->tx_ring, tnapi->tx_desc_mapping);
7973 tnapi->tx_ring = NULL;
7974 }
7975
7976 kfree(tnapi->tx_buffers);
7977 tnapi->tx_buffers = NULL;
7978 }
7979 }
7980
7981 static int tg3_mem_tx_acquire(struct tg3 *tp)
7982 {
7983 int i;
7984 struct tg3_napi *tnapi = &tp->napi[0];
7985
7986 /* If multivector TSS is enabled, vector 0 does not handle
7987 * tx interrupts. Don't allocate any resources for it.
7988 */
7989 if (tg3_flag(tp, ENABLE_TSS))
7990 tnapi++;
7991
7992 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7993 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7994 TG3_TX_RING_SIZE, GFP_KERNEL);
7995 if (!tnapi->tx_buffers)
7996 goto err_out;
7997
7998 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7999 TG3_TX_RING_BYTES,
8000 &tnapi->tx_desc_mapping,
8001 GFP_KERNEL);
8002 if (!tnapi->tx_ring)
8003 goto err_out;
8004 }
8005
8006 return 0;
8007
8008 err_out:
8009 tg3_mem_tx_release(tp);
8010 return -ENOMEM;
8011 }
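/*
 * Vector layout example (hypothetical counts): with TSS enabled and
 * txq_cnt = 4, the tnapi++ above skips vector 0, so tx queue q is
 * serviced by tp->napi[q + 1] and the loop allocates rings for
 * napi[1]..napi[4]. Without TSS, queue q maps straight to tp->napi[q].
 */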
8012
8013 static void tg3_mem_rx_release(struct tg3 *tp)
8014 {
8015 int i;
8016
8017 for (i = 0; i < tp->irq_max; i++) {
8018 struct tg3_napi *tnapi = &tp->napi[i];
8019
8020 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8021
8022 if (!tnapi->rx_rcb)
8023 continue;
8024
8025 dma_free_coherent(&tp->pdev->dev,
8026 TG3_RX_RCB_RING_BYTES(tp),
8027 tnapi->rx_rcb,
8028 tnapi->rx_rcb_mapping);
8029 tnapi->rx_rcb = NULL;
8030 }
8031 }
8032
8033 static int tg3_mem_rx_acquire(struct tg3 *tp)
8034 {
8035 unsigned int i, limit;
8036
8037 limit = tp->rxq_cnt;
8038
8039 /* If RSS is enabled, we need a (dummy) producer ring
8040 * set on vector zero. This is the true hw prodring.
8041 */
8042 if (tg3_flag(tp, ENABLE_RSS))
8043 limit++;
8044
8045 for (i = 0; i < limit; i++) {
8046 struct tg3_napi *tnapi = &tp->napi[i];
8047
8048 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8049 goto err_out;
8050
8051 /* If multivector RSS is enabled, vector 0
8052 * does not handle rx or tx interrupts.
8053 * Don't allocate any resources for it.
8054 */
8055 if (!i && tg3_flag(tp, ENABLE_RSS))
8056 continue;
8057
8058 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8059 TG3_RX_RCB_RING_BYTES(tp),
8060 &tnapi->rx_rcb_mapping,
8061 GFP_KERNEL);
8062 if (!tnapi->rx_rcb)
8063 goto err_out;
8064
8065 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8066 }
8067
8068 return 0;
8069
8070 err_out:
8071 tg3_mem_rx_release(tp);
8072 return -ENOMEM;
8073 }
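/*
 * RSS layout example (hypothetical count): with rxq_cnt = 4 and RSS
 * enabled, limit becomes 5, so prodring sets are initialized for
 * napi[0]..napi[4] while rx_rcb is allocated only for napi[1]..napi[4];
 * napi[0] keeps the true hardware producer ring but no rx return ring.
 */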
8074
8075 /*
8076 * Must only be invoked with interrupt sources disabled and
8077 * the hardware shut down.
8078 */
8079 static void tg3_free_consistent(struct tg3 *tp)
8080 {
8081 int i;
8082
8083 for (i = 0; i < tp->irq_cnt; i++) {
8084 struct tg3_napi *tnapi = &tp->napi[i];
8085
8086 if (tnapi->hw_status) {
8087 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8088 tnapi->hw_status,
8089 tnapi->status_mapping);
8090 tnapi->hw_status = NULL;
8091 }
8092 }
8093
8094 tg3_mem_rx_release(tp);
8095 tg3_mem_tx_release(tp);
8096
8097 if (tp->hw_stats) {
8098 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8099 tp->hw_stats, tp->stats_mapping);
8100 tp->hw_stats = NULL;
8101 }
8102 }
8103
8104 /*
8105 * Must only be invoked with interrupt sources disabled and
8106 * the hardware shut down. Can sleep.
8107 */
8108 static int tg3_alloc_consistent(struct tg3 *tp)
8109 {
8110 int i;
8111
8112 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8113 sizeof(struct tg3_hw_stats),
8114 &tp->stats_mapping,
8115 GFP_KERNEL);
8116 if (!tp->hw_stats)
8117 goto err_out;
8118
8119 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8120
8121 for (i = 0; i < tp->irq_cnt; i++) {
8122 struct tg3_napi *tnapi = &tp->napi[i];
8123 struct tg3_hw_status *sblk;
8124
8125 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8126 TG3_HW_STATUS_SIZE,
8127 &tnapi->status_mapping,
8128 GFP_KERNEL);
8129 if (!tnapi->hw_status)
8130 goto err_out;
8131
8132 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8133 sblk = tnapi->hw_status;
8134
8135 if (tg3_flag(tp, ENABLE_RSS)) {
8136 u16 *prodptr = NULL;
8137
8138 /*
8139 * When RSS is enabled, the status block format changes
8140 * slightly. The "rx_jumbo_consumer", "reserved",
8141 * and "rx_mini_consumer" members get mapped to the
8142 * other three rx return ring producer indexes.
8143 */
8144 switch (i) {
8145 case 1:
8146 prodptr = &sblk->idx[0].rx_producer;
8147 break;
8148 case 2:
8149 prodptr = &sblk->rx_jumbo_consumer;
8150 break;
8151 case 3:
8152 prodptr = &sblk->reserved;
8153 break;
8154 case 4:
8155 prodptr = &sblk->rx_mini_consumer;
8156 break;
8157 }
8158 tnapi->rx_rcb_prod_idx = prodptr;
8159 } else {
8160 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8161 }
8162 }
8163
8164 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8165 goto err_out;
8166
8167 return 0;
8168
8169 err_out:
8170 tg3_free_consistent(tp);
8171 return -ENOMEM;
8172 }
8173
8174 #define MAX_WAIT_CNT 1000
8175
8176 /* To stop a block, clear the enable bit and poll till it
8177 * clears. tp->lock is held.
8178 */
8179 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8180 {
8181 unsigned int i;
8182 u32 val;
8183
8184 if (tg3_flag(tp, 5705_PLUS)) {
8185 switch (ofs) {
8186 case RCVLSC_MODE:
8187 case DMAC_MODE:
8188 case MBFREE_MODE:
8189 case BUFMGR_MODE:
8190 case MEMARB_MODE:
8191 /* We can't enable/disable these bits of the
8192 * 5705/5750, just say success.
8193 */
8194 return 0;
8195
8196 default:
8197 break;
8198 }
8199 }
8200
8201 val = tr32(ofs);
8202 val &= ~enable_bit;
8203 tw32_f(ofs, val);
8204
8205 for (i = 0; i < MAX_WAIT_CNT; i++) {
8206 udelay(100);
8207 val = tr32(ofs);
8208 if ((val & enable_bit) == 0)
8209 break;
8210 }
8211
8212 if (i == MAX_WAIT_CNT && !silent) {
8213 dev_err(&tp->pdev->dev,
8214 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8215 ofs, enable_bit);
8216 return -ENODEV;
8217 }
8218
8219 return 0;
8220 }
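/*
 * Timing note: the loop above polls every 100us for up to
 * MAX_WAIT_CNT iterations, i.e. at most 1000 * 100us = 100ms per
 * block. Callers such as tg3_abort_hw() below OR the return values
 * together, so any single block timing out makes the aggregate
 * result -ENODEV.
 */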
8221
8222 /* tp->lock is held. */
8223 static int tg3_abort_hw(struct tg3 *tp, int silent)
8224 {
8225 int i, err;
8226
8227 tg3_disable_ints(tp);
8228
8229 tp->rx_mode &= ~RX_MODE_ENABLE;
8230 tw32_f(MAC_RX_MODE, tp->rx_mode);
8231 udelay(10);
8232
8233 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8234 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8235 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8236 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8237 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8238 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8239
8240 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8241 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8242 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8243 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8244 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8245 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8246 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8247
8248 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8249 tw32_f(MAC_MODE, tp->mac_mode);
8250 udelay(40);
8251
8252 tp->tx_mode &= ~TX_MODE_ENABLE;
8253 tw32_f(MAC_TX_MODE, tp->tx_mode);
8254
8255 for (i = 0; i < MAX_WAIT_CNT; i++) {
8256 udelay(100);
8257 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8258 break;
8259 }
8260 if (i >= MAX_WAIT_CNT) {
8261 dev_err(&tp->pdev->dev,
8262 "%s timed out, TX_MODE_ENABLE will not clear "
8263 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8264 err |= -ENODEV;
8265 }
8266
8267 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8268 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8269 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8270
8271 tw32(FTQ_RESET, 0xffffffff);
8272 tw32(FTQ_RESET, 0x00000000);
8273
8274 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8275 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8276
8277 for (i = 0; i < tp->irq_cnt; i++) {
8278 struct tg3_napi *tnapi = &tp->napi[i];
8279 if (tnapi->hw_status)
8280 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8281 }
8282
8283 return err;
8284 }
8285
8286 /* Save PCI command register before chip reset */
8287 static void tg3_save_pci_state(struct tg3 *tp)
8288 {
8289 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8290 }
8291
8292 /* Restore PCI state after chip reset */
8293 static void tg3_restore_pci_state(struct tg3 *tp)
8294 {
8295 u32 val;
8296
8297 /* Re-enable indirect register accesses. */
8298 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8299 tp->misc_host_ctrl);
8300
8301 /* Set MAX PCI retry to zero. */
8302 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8303 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8304 tg3_flag(tp, PCIX_MODE))
8305 val |= PCISTATE_RETRY_SAME_DMA;
8306 /* Allow reads and writes to the APE register and memory space. */
8307 if (tg3_flag(tp, ENABLE_APE))
8308 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8309 PCISTATE_ALLOW_APE_SHMEM_WR |
8310 PCISTATE_ALLOW_APE_PSPACE_WR;
8311 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8312
8313 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8314
8315 if (!tg3_flag(tp, PCI_EXPRESS)) {
8316 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8317 tp->pci_cacheline_sz);
8318 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8319 tp->pci_lat_timer);
8320 }
8321
8322 /* Make sure PCI-X relaxed ordering bit is clear. */
8323 if (tg3_flag(tp, PCIX_MODE)) {
8324 u16 pcix_cmd;
8325
8326 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8327 &pcix_cmd);
8328 pcix_cmd &= ~PCI_X_CMD_ERO;
8329 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8330 pcix_cmd);
8331 }
8332
8333 if (tg3_flag(tp, 5780_CLASS)) {
8334
8335 /* Chip reset on 5780 will reset MSI enable bit,
8336 * so need to restore it.
8337 */
8338 if (tg3_flag(tp, USING_MSI)) {
8339 u16 ctrl;
8340
8341 pci_read_config_word(tp->pdev,
8342 tp->msi_cap + PCI_MSI_FLAGS,
8343 &ctrl);
8344 pci_write_config_word(tp->pdev,
8345 tp->msi_cap + PCI_MSI_FLAGS,
8346 ctrl | PCI_MSI_FLAGS_ENABLE);
8347 val = tr32(MSGINT_MODE);
8348 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8349 }
8350 }
8351 }
8352
8353 /* tp->lock is held. */
8354 static int tg3_chip_reset(struct tg3 *tp)
8355 {
8356 u32 val;
8357 void (*write_op)(struct tg3 *, u32, u32);
8358 int i, err;
8359
8360 tg3_nvram_lock(tp);
8361
8362 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8363
8364 /* No matching tg3_nvram_unlock() after this because
8365 * chip reset below will undo the nvram lock.
8366 */
8367 tp->nvram_lock_cnt = 0;
8368
8369 /* GRC_MISC_CFG core clock reset will clear the memory
8370 * enable bit in PCI register 4 and the MSI enable bit
8371 * on some chips, so we save relevant registers here.
8372 */
8373 tg3_save_pci_state(tp);
8374
8375 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8376 tg3_flag(tp, 5755_PLUS))
8377 tw32(GRC_FASTBOOT_PC, 0);
8378
8379 /*
8380 * We must avoid the readl() that normally takes place.
8381 * It locks machines, causes machine checks, and other
8382 * fun things. So, temporarily disable the 5701
8383 * hardware workaround, while we do the reset.
8384 */
8385 write_op = tp->write32;
8386 if (write_op == tg3_write_flush_reg32)
8387 tp->write32 = tg3_write32;
8388
8389 /* Prevent the irq handler from reading or writing PCI registers
8390 * during chip reset when the memory enable bit in the PCI command
8391 * register may be cleared. The chip does not generate interrupt
8392 * at this time, but the irq handler may still be called due to irq
8393 * sharing or irqpoll.
8394 */
8395 tg3_flag_set(tp, CHIP_RESETTING);
8396 for (i = 0; i < tp->irq_cnt; i++) {
8397 struct tg3_napi *tnapi = &tp->napi[i];
8398 if (tnapi->hw_status) {
8399 tnapi->hw_status->status = 0;
8400 tnapi->hw_status->status_tag = 0;
8401 }
8402 tnapi->last_tag = 0;
8403 tnapi->last_irq_tag = 0;
8404 }
8405 smp_mb();
8406
8407 for (i = 0; i < tp->irq_cnt; i++)
8408 synchronize_irq(tp->napi[i].irq_vec);
8409
8410 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8411 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8412 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8413 }
8414
8415 /* do the reset */
8416 val = GRC_MISC_CFG_CORECLK_RESET;
8417
8418 if (tg3_flag(tp, PCI_EXPRESS)) {
8419 /* Force PCIe 1.0a mode */
8420 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8421 !tg3_flag(tp, 57765_PLUS) &&
8422 tr32(TG3_PCIE_PHY_TSTCTL) ==
8423 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8424 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8425
8426 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8427 tw32(GRC_MISC_CFG, (1 << 29));
8428 val |= (1 << 29);
8429 }
8430 }
8431
8432 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8433 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8434 tw32(GRC_VCPU_EXT_CTRL,
8435 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8436 }
8437
8438 /* Manage gphy power for all CPMU-absent PCIe devices. */
8439 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8440 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8441
8442 tw32(GRC_MISC_CFG, val);
8443
8444 /* restore 5701 hardware bug workaround write method */
8445 tp->write32 = write_op;
8446
8447 /* Unfortunately, we have to delay before the PCI read back.
8448 * Some 575X chips even will not respond to a PCI cfg access
8449 * when the reset command is given to the chip.
8450 *
8451 * How do these hardware designers expect things to work
8452 * properly if the PCI write is posted for a long period
8453 * of time? It is always necessary to have some method by
8454 * which a register read back can occur to push the write
8455 * out which does the reset.
8456 *
8457 * For most tg3 variants the trick below was working.
8458 * Ho hum...
8459 */
8460 udelay(120);
8461
8462 /* Flush PCI posted writes. The normal MMIO registers
8463 * are inaccessible at this time so this is the only
8464 * way to do this reliably (actually, this is no longer
8465 * the case, see above). I tried to use indirect
8466 * register read/write but this upset some 5701 variants.
8467 */
8468 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8469
8470 udelay(120);
8471
8472 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8473 u16 val16;
8474
8475 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8476 int j;
8477 u32 cfg_val;
8478
8479 /* Wait for link training to complete. */
8480 for (j = 0; j < 5000; j++)
8481 udelay(100);
8482
8483 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8484 pci_write_config_dword(tp->pdev, 0xc4,
8485 cfg_val | (1 << 15));
8486 }
8487
8488 /* Clear the "no snoop" and "relaxed ordering" bits. */
8489 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8490 /*
8491 * Older PCIe devices only support the 128 byte
8492 * MPS setting. Enforce the restriction.
8493 */
8494 if (!tg3_flag(tp, CPMU_PRESENT))
8495 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8496 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8497
8498 /* Clear error status */
8499 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8500 PCI_EXP_DEVSTA_CED |
8501 PCI_EXP_DEVSTA_NFED |
8502 PCI_EXP_DEVSTA_FED |
8503 PCI_EXP_DEVSTA_URD);
8504 }
8505
8506 tg3_restore_pci_state(tp);
8507
8508 tg3_flag_clear(tp, CHIP_RESETTING);
8509 tg3_flag_clear(tp, ERROR_PROCESSED);
8510
8511 val = 0;
8512 if (tg3_flag(tp, 5780_CLASS))
8513 val = tr32(MEMARB_MODE);
8514 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8515
8516 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8517 tg3_stop_fw(tp);
8518 tw32(0x5000, 0x400);
8519 }
8520
8521 if (tg3_flag(tp, IS_SSB_CORE)) {
8522 /*
8523 * BCM4785: To avoid repercussions from using a potentially
8524 * defective internal ROM, stop the Rx RISC CPU, which is not
8525 * required for normal operation.
8526 */
8527 tg3_stop_fw(tp);
8528 tg3_halt_cpu(tp, RX_CPU_BASE);
8529 }
8530
8531 tw32(GRC_MODE, tp->grc_mode);
8532
8533 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8534 val = tr32(0xc4);
8535
8536 tw32(0xc4, val | (1 << 15));
8537 }
8538
8539 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8540 tg3_asic_rev(tp) == ASIC_REV_5705) {
8541 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8542 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8543 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8544 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8545 }
8546
8547 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8548 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8549 val = tp->mac_mode;
8550 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8551 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8552 val = tp->mac_mode;
8553 } else
8554 val = 0;
8555
8556 tw32_f(MAC_MODE, val);
8557 udelay(40);
8558
8559 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8560
8561 err = tg3_poll_fw(tp);
8562 if (err)
8563 return err;
8564
8565 tg3_mdio_start(tp);
8566
8567 if (tg3_flag(tp, PCI_EXPRESS) &&
8568 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8569 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8570 !tg3_flag(tp, 57765_PLUS)) {
8571 val = tr32(0x7c00);
8572
8573 tw32(0x7c00, val | (1 << 25));
8574 }
8575
8576 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8577 val = tr32(TG3_CPMU_CLCK_ORIDE);
8578 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8579 }
8580
8581 /* Reprobe ASF enable state. */
8582 tg3_flag_clear(tp, ENABLE_ASF);
8583 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8584 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8585 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8586 u32 nic_cfg;
8587
8588 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8589 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8590 tg3_flag_set(tp, ENABLE_ASF);
8591 tp->last_event_jiffies = jiffies;
8592 if (tg3_flag(tp, 5750_PLUS))
8593 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8594 }
8595 }
8596
8597 return 0;
8598 }
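/*
 * Reset sequence summary: quiesce per-vector irq state, issue the
 * core-clock reset through GRC_MISC_CFG, wait out the window in which
 * even PCI config space may not respond, restore the saved PCI state,
 * then re-derive the ASF configuration from NIC SRAM.
 */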
8599
8600 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8601 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8602
8603 /* tp->lock is held. */
8604 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8605 {
8606 int err;
8607
8608 tg3_stop_fw(tp);
8609
8610 tg3_write_sig_pre_reset(tp, kind);
8611
8612 tg3_abort_hw(tp, silent);
8613 err = tg3_chip_reset(tp);
8614
8615 __tg3_set_mac_addr(tp, 0);
8616
8617 tg3_write_sig_legacy(tp, kind);
8618 tg3_write_sig_post_reset(tp, kind);
8619
8620 if (tp->hw_stats) {
8621 /* Save the stats across chip resets... */
8622 tg3_get_nstats(tp, &tp->net_stats_prev);
8623 tg3_get_estats(tp, &tp->estats_prev);
8624
8625 /* And make sure the next sample is new data */
8626 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8627 }
8628
8629 if (err)
8630 return err;
8631
8632 return 0;
8633 }
8634
8635 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8636 {
8637 struct tg3 *tp = netdev_priv(dev);
8638 struct sockaddr *addr = p;
8639 int err = 0, skip_mac_1 = 0;
8640
8641 if (!is_valid_ether_addr(addr->sa_data))
8642 return -EADDRNOTAVAIL;
8643
8644 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8645
8646 if (!netif_running(dev))
8647 return 0;
8648
8649 if (tg3_flag(tp, ENABLE_ASF)) {
8650 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8651
8652 addr0_high = tr32(MAC_ADDR_0_HIGH);
8653 addr0_low = tr32(MAC_ADDR_0_LOW);
8654 addr1_high = tr32(MAC_ADDR_1_HIGH);
8655 addr1_low = tr32(MAC_ADDR_1_LOW);
8656
8657 /* Skip MAC addr 1 if ASF is using it. */
8658 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8659 !(addr1_high == 0 && addr1_low == 0))
8660 skip_mac_1 = 1;
8661 }
8662 spin_lock_bh(&tp->lock);
8663 __tg3_set_mac_addr(tp, skip_mac_1);
8664 spin_unlock_bh(&tp->lock);
8665
8666 return err;
8667 }
8668
8669 /* tp->lock is held. */
8670 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8671 dma_addr_t mapping, u32 maxlen_flags,
8672 u32 nic_addr)
8673 {
8674 tg3_write_mem(tp,
8675 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8676 ((u64) mapping >> 32));
8677 tg3_write_mem(tp,
8678 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8679 ((u64) mapping & 0xffffffff));
8680 tg3_write_mem(tp,
8681 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8682 maxlen_flags);
8683
8684 if (!tg3_flag(tp, 5705_PLUS))
8685 tg3_write_mem(tp,
8686 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8687 nic_addr);
8688 }
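/*
 * BDINFO layout example (hypothetical address): for a 64-bit DMA
 * mapping of 0x0000000123456000 and a 512-entry ring, the writes
 * above decompose as:
 *
 *	TG3_BDINFO_HOST_ADDR hi = 0x00000001	(mapping >> 32)
 *	TG3_BDINFO_HOST_ADDR lo = 0x23456000	(mapping & 0xffffffff)
 *	TG3_BDINFO_MAXLEN_FLAGS = (512 << BDINFO_FLAGS_MAXLEN_SHIFT) | flags
 *	TG3_BDINFO_NIC_ADDR     = SRAM descriptor offset (pre-5705 only)
 */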
8689
8691 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8692 {
8693 int i = 0;
8694
8695 if (!tg3_flag(tp, ENABLE_TSS)) {
8696 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8697 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8698 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8699 } else {
8700 tw32(HOSTCC_TXCOL_TICKS, 0);
8701 tw32(HOSTCC_TXMAX_FRAMES, 0);
8702 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8703
8704 for (; i < tp->txq_cnt; i++) {
8705 u32 reg;
8706
8707 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8708 tw32(reg, ec->tx_coalesce_usecs);
8709 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8710 tw32(reg, ec->tx_max_coalesced_frames);
8711 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8712 tw32(reg, ec->tx_max_coalesced_frames_irq);
8713 }
8714 }
8715
8716 for (; i < tp->irq_max - 1; i++) {
8717 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8718 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8719 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8720 }
8721 }
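/*
 * Register layout note: the per-vector coalescing registers sit at a
 * 0x18-byte stride, so vector i + 1 uses
 * HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18 and friends. The trailing loop
 * zeroes the registers of vectors beyond txq_cnt so values from a
 * previous configuration cannot linger.
 */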
8722
8723 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8724 {
8725 int i = 0;
8726 u32 limit = tp->rxq_cnt;
8727
8728 if (!tg3_flag(tp, ENABLE_RSS)) {
8729 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8730 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8731 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8732 limit--;
8733 } else {
8734 tw32(HOSTCC_RXCOL_TICKS, 0);
8735 tw32(HOSTCC_RXMAX_FRAMES, 0);
8736 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8737 }
8738
8739 for (; i < limit; i++) {
8740 u32 reg;
8741
8742 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8743 tw32(reg, ec->rx_coalesce_usecs);
8744 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8745 tw32(reg, ec->rx_max_coalesced_frames);
8746 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8747 tw32(reg, ec->rx_max_coalesced_frames_irq);
8748 }
8749
8750 for (; i < tp->irq_max - 1; i++) {
8751 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8752 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8753 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8754 }
8755 }
8756
8757 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8758 {
8759 tg3_coal_tx_init(tp, ec);
8760 tg3_coal_rx_init(tp, ec);
8761
8762 if (!tg3_flag(tp, 5705_PLUS)) {
8763 u32 val = ec->stats_block_coalesce_usecs;
8764
8765 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8766 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8767
8768 if (!tp->link_up)
8769 val = 0;
8770
8771 tw32(HOSTCC_STAT_COAL_TICKS, val);
8772 }
8773 }
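/*
 * These parameters normally arrive via ethtool; for instance
 * (standard ethtool usage, nothing tg3-specific):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * fills in ec->rx_coalesce_usecs and ec->rx_max_coalesced_frames
 * before the values are programmed into the HOSTCC registers above.
 */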
8774
8775 /* tp->lock is held. */
8776 static void tg3_rings_reset(struct tg3 *tp)
8777 {
8778 int i;
8779 u32 stblk, txrcb, rxrcb, limit;
8780 struct tg3_napi *tnapi = &tp->napi[0];
8781
8782 /* Disable all transmit rings but the first. */
8783 if (!tg3_flag(tp, 5705_PLUS))
8784 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8785 else if (tg3_flag(tp, 5717_PLUS))
8786 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8787 else if (tg3_flag(tp, 57765_CLASS) ||
8788 tg3_asic_rev(tp) == ASIC_REV_5762)
8789 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8790 else
8791 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8792
8793 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8794 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8795 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8796 BDINFO_FLAGS_DISABLED);
8797
8799 /* Disable all receive return rings but the first. */
8800 if (tg3_flag(tp, 5717_PLUS))
8801 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8802 else if (!tg3_flag(tp, 5705_PLUS))
8803 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8804 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8805 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8806 tg3_flag(tp, 57765_CLASS))
8807 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8808 else
8809 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8810
8811 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8812 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8813 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8814 BDINFO_FLAGS_DISABLED);
8815
8816 /* Disable interrupts */
8817 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8818 tp->napi[0].chk_msi_cnt = 0;
8819 tp->napi[0].last_rx_cons = 0;
8820 tp->napi[0].last_tx_cons = 0;
8821
8822 /* Zero mailbox registers. */
8823 if (tg3_flag(tp, SUPPORT_MSIX)) {
8824 for (i = 1; i < tp->irq_max; i++) {
8825 tp->napi[i].tx_prod = 0;
8826 tp->napi[i].tx_cons = 0;
8827 if (tg3_flag(tp, ENABLE_TSS))
8828 tw32_mailbox(tp->napi[i].prodmbox, 0);
8829 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8830 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8831 tp->napi[i].chk_msi_cnt = 0;
8832 tp->napi[i].last_rx_cons = 0;
8833 tp->napi[i].last_tx_cons = 0;
8834 }
8835 if (!tg3_flag(tp, ENABLE_TSS))
8836 tw32_mailbox(tp->napi[0].prodmbox, 0);
8837 } else {
8838 tp->napi[0].tx_prod = 0;
8839 tp->napi[0].tx_cons = 0;
8840 tw32_mailbox(tp->napi[0].prodmbox, 0);
8841 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8842 }
8843
8844 /* Make sure the NIC-based send BD rings are disabled. */
8845 if (!tg3_flag(tp, 5705_PLUS)) {
8846 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8847 for (i = 0; i < 16; i++)
8848 tw32_tx_mbox(mbox + i * 8, 0);
8849 }
8850
8851 txrcb = NIC_SRAM_SEND_RCB;
8852 rxrcb = NIC_SRAM_RCV_RET_RCB;
8853
8854 /* Clear status block in ram. */
8855 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8856
8857 /* Set status block DMA address */
8858 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8859 ((u64) tnapi->status_mapping >> 32));
8860 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8861 ((u64) tnapi->status_mapping & 0xffffffff));
8862
8863 if (tnapi->tx_ring) {
8864 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8865 (TG3_TX_RING_SIZE <<
8866 BDINFO_FLAGS_MAXLEN_SHIFT),
8867 NIC_SRAM_TX_BUFFER_DESC);
8868 txrcb += TG3_BDINFO_SIZE;
8869 }
8870
8871 if (tnapi->rx_rcb) {
8872 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8873 (tp->rx_ret_ring_mask + 1) <<
8874 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8875 rxrcb += TG3_BDINFO_SIZE;
8876 }
8877
8878 stblk = HOSTCC_STATBLCK_RING1;
8879
8880 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8881 u64 mapping = (u64)tnapi->status_mapping;
8882 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8883 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8884
8885 /* Clear status block in ram. */
8886 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8887
8888 if (tnapi->tx_ring) {
8889 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8890 (TG3_TX_RING_SIZE <<
8891 BDINFO_FLAGS_MAXLEN_SHIFT),
8892 NIC_SRAM_TX_BUFFER_DESC);
8893 txrcb += TG3_BDINFO_SIZE;
8894 }
8895
8896 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8897 ((tp->rx_ret_ring_mask + 1) <<
8898 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8899
8900 stblk += 8;
8901 rxrcb += TG3_BDINFO_SIZE;
8902 }
8903 }
8904
8905 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8906 {
8907 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8908
8909 if (!tg3_flag(tp, 5750_PLUS) ||
8910 tg3_flag(tp, 5780_CLASS) ||
8911 tg3_asic_rev(tp) == ASIC_REV_5750 ||
8912 tg3_asic_rev(tp) == ASIC_REV_5752 ||
8913 tg3_flag(tp, 57765_PLUS))
8914 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8915 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8916 tg3_asic_rev(tp) == ASIC_REV_5787)
8917 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8918 else
8919 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8920
8921 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8922 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8923
8924 val = min(nic_rep_thresh, host_rep_thresh);
8925 tw32(RCVBDI_STD_THRESH, val);
8926
8927 if (tg3_flag(tp, 57765_PLUS))
8928 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8929
8930 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8931 return;
8932
8933 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8934
8935 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8936
8937 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8938 tw32(RCVBDI_JUMBO_THRESH, val);
8939
8940 if (tg3_flag(tp, 57765_PLUS))
8941 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8942 }
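/*
 * Worked example (illustrative numbers): with rx_pending = 200, the
 * host replenish threshold is max(200 / 8, 1) = 25 descriptors; on a
 * 5755-class chip the NIC-side bound is
 * min(TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 / 2, rx_std_max_post). The
 * smaller of the two lands in RCVBDI_STD_THRESH, so refills are
 * requested before the BD cache can run dry.
 */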
8943
8944 static inline u32 calc_crc(unsigned char *buf, int len)
8945 {
8946 u32 reg;
8947 u32 tmp;
8948 int j, k;
8949
8950 reg = 0xffffffff;
8951
8952 for (j = 0; j < len; j++) {
8953 reg ^= buf[j];
8954
8955 for (k = 0; k < 8; k++) {
8956 tmp = reg & 0x01;
8957
8958 reg >>= 1;
8959
8960 if (tmp)
8961 reg ^= 0xedb88320;
8962 }
8963 }
8964
8965 return ~reg;
8966 }
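/*
 * calc_crc() is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320), processed LSB-first one byte at a time and inverted on
 * return. Feeding it the 6 bytes of an Ethernet address gives the
 * value whose complemented low 7 bits select a multicast hash filter
 * bit in __tg3_set_rx_mode() below.
 */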
8967
8968 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8969 {
8970 /* accept or reject all multicast frames */
8971 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8972 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8973 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8974 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8975 }
8976
8977 static void __tg3_set_rx_mode(struct net_device *dev)
8978 {
8979 struct tg3 *tp = netdev_priv(dev);
8980 u32 rx_mode;
8981
8982 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8983 RX_MODE_KEEP_VLAN_TAG);
8984
8985 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8986 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8987 * flag clear.
8988 */
8989 if (!tg3_flag(tp, ENABLE_ASF))
8990 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8991 #endif
8992
8993 if (dev->flags & IFF_PROMISC) {
8994 /* Promiscuous mode. */
8995 rx_mode |= RX_MODE_PROMISC;
8996 } else if (dev->flags & IFF_ALLMULTI) {
8997 /* Accept all multicast. */
8998 tg3_set_multi(tp, 1);
8999 } else if (netdev_mc_empty(dev)) {
9000 /* Reject all multicast. */
9001 tg3_set_multi(tp, 0);
9002 } else {
9003 /* Accept one or more multicast(s). */
9004 struct netdev_hw_addr *ha;
9005 u32 mc_filter[4] = { 0, };
9006 u32 regidx;
9007 u32 bit;
9008 u32 crc;
9009
9010 netdev_for_each_mc_addr(ha, dev) {
9011 crc = calc_crc(ha->addr, ETH_ALEN);
9012 bit = ~crc & 0x7f;
9013 regidx = (bit & 0x60) >> 5;
9014 bit &= 0x1f;
9015 mc_filter[regidx] |= (1 << bit);
9016 }
9017
9018 tw32(MAC_HASH_REG_0, mc_filter[0]);
9019 tw32(MAC_HASH_REG_1, mc_filter[1]);
9020 tw32(MAC_HASH_REG_2, mc_filter[2]);
9021 tw32(MAC_HASH_REG_3, mc_filter[3]);
9022 }
9023
9024 if (rx_mode != tp->rx_mode) {
9025 tp->rx_mode = rx_mode;
9026 tw32_f(MAC_RX_MODE, rx_mode);
9027 udelay(10);
9028 }
9029 }
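/*
 * Hash walk-through (hypothetical CRC value): if calc_crc() returns
 * 0xffffff5a for some address, then ~crc & 0x7f = 0x25, so
 * regidx = (0x25 & 0x60) >> 5 = 1 and bit = 0x25 & 0x1f = 5,
 * setting bit 5 of MAC_HASH_REG_1. The 128 filter bits thus map
 * onto the four 32-bit hash registers.
 */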
9030
9031 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9032 {
9033 int i;
9034
9035 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9036 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9037 }
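/*
 * ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so the
 * default table round-robins the rx queues: with qcnt = 4 the
 * entries run 0, 1, 2, 3, 0, 1, 2, 3, ...
 */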
9038
9039 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9040 {
9041 int i;
9042
9043 if (!tg3_flag(tp, SUPPORT_MSIX))
9044 return;
9045
9046 if (tp->rxq_cnt == 1) {
9047 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9048 return;
9049 }
9050
9051 /* Validate table against current IRQ count */
9052 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9053 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9054 break;
9055 }
9056
9057 if (i != TG3_RSS_INDIR_TBL_SIZE)
9058 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9059 }
9060
9061 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9062 {
9063 int i = 0;
9064 u32 reg = MAC_RSS_INDIR_TBL_0;
9065
9066 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9067 u32 val = tp->rss_ind_tbl[i];
9068 i++;
9069 for (; i % 8; i++) {
9070 val <<= 4;
9071 val |= tp->rss_ind_tbl[i];
9072 }
9073 tw32(reg, val);
9074 reg += 4;
9075 }
9076 }
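/*
 * Packing example: each MAC_RSS_INDIR_TBL register holds eight 4-bit
 * table entries, first entry in the most significant nibble. Entries
 * 1, 2, 3, 0, 1, 2, 3, 0 are written as the single word 0x12301230,
 * after which reg advances by 4 to the next group of eight.
 */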
9077
9078 /* tp->lock is held. */
9079 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9080 {
9081 u32 val, rdmac_mode;
9082 int i, err, limit;
9083 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9084
9085 tg3_disable_ints(tp);
9086
9087 tg3_stop_fw(tp);
9088
9089 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9090
9091 if (tg3_flag(tp, INIT_COMPLETE))
9092 tg3_abort_hw(tp, 1);
9093
9094 /* Enable MAC control of LPI */
9095 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9096 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9097 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9098 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9099 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9100
9101 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9102
9103 tw32_f(TG3_CPMU_EEE_CTRL,
9104 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9105
9106 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9107 TG3_CPMU_EEEMD_LPI_IN_TX |
9108 TG3_CPMU_EEEMD_LPI_IN_RX |
9109 TG3_CPMU_EEEMD_EEE_ENABLE;
9110
9111 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9112 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9113
9114 if (tg3_flag(tp, ENABLE_APE))
9115 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9116
9117 tw32_f(TG3_CPMU_EEE_MODE, val);
9118
9119 tw32_f(TG3_CPMU_EEE_DBTMR1,
9120 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9121 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9122
9123 tw32_f(TG3_CPMU_EEE_DBTMR2,
9124 TG3_CPMU_DBTMR2_APE_TX_2047US |
9125 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9126 }
9127
9128 if (reset_phy)
9129 tg3_phy_reset(tp);
9130
9131 err = tg3_chip_reset(tp);
9132 if (err)
9133 return err;
9134
9135 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9136
9137 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9138 val = tr32(TG3_CPMU_CTRL);
9139 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9140 tw32(TG3_CPMU_CTRL, val);
9141
9142 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9143 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9144 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9145 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9146
9147 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9148 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9149 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9150 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9151
9152 val = tr32(TG3_CPMU_HST_ACC);
9153 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9154 val |= CPMU_HST_ACC_MACCLK_6_25;
9155 tw32(TG3_CPMU_HST_ACC, val);
9156 }
9157
9158 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9159 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9160 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9161 PCIE_PWR_MGMT_L1_THRESH_4MS;
9162 tw32(PCIE_PWR_MGMT_THRESH, val);
9163
9164 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9165 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9166
9167 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9168
9169 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9170 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9171 }
9172
9173 if (tg3_flag(tp, L1PLLPD_EN)) {
9174 u32 grc_mode = tr32(GRC_MODE);
9175
9176 /* Access the lower 1K of PL PCIE block registers. */
9177 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9178 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9179
9180 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9181 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9182 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9183
9184 tw32(GRC_MODE, grc_mode);
9185 }
9186
9187 if (tg3_flag(tp, 57765_CLASS)) {
9188 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9189 u32 grc_mode = tr32(GRC_MODE);
9190
9191 /* Access the lower 1K of PL PCIE block registers. */
9192 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9193 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9194
9195 val = tr32(TG3_PCIE_TLDLPL_PORT +
9196 TG3_PCIE_PL_LO_PHYCTL5);
9197 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9198 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9199
9200 tw32(GRC_MODE, grc_mode);
9201 }
9202
9203 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9204 u32 grc_mode;
9205
9206 /* Fix transmit hangs */
9207 val = tr32(TG3_CPMU_PADRNG_CTL);
9208 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9209 tw32(TG3_CPMU_PADRNG_CTL, val);
9210
9211 grc_mode = tr32(GRC_MODE);
9212
9213 /* Access the lower 1K of DL PCIE block registers. */
9214 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9215 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9216
9217 val = tr32(TG3_PCIE_TLDLPL_PORT +
9218 TG3_PCIE_DL_LO_FTSMAX);
9219 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9220 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9221 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9222
9223 tw32(GRC_MODE, grc_mode);
9224 }
9225
9226 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9227 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9228 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9229 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9230 }
9231
9232 /* This works around an issue with Athlon chipsets on
9233 * B3 tigon3 silicon. This bit has no effect on any
9234 * other revision. But do not set this on PCI Express
9235 * chips and don't even touch the clocks if the CPMU is present.
9236 */
9237 if (!tg3_flag(tp, CPMU_PRESENT)) {
9238 if (!tg3_flag(tp, PCI_EXPRESS))
9239 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9240 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9241 }
9242
9243 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9244 tg3_flag(tp, PCIX_MODE)) {
9245 val = tr32(TG3PCI_PCISTATE);
9246 val |= PCISTATE_RETRY_SAME_DMA;
9247 tw32(TG3PCI_PCISTATE, val);
9248 }
9249
9250 if (tg3_flag(tp, ENABLE_APE)) {
9251 /* Allow reads and writes to the
9252 * APE register and memory space.
9253 */
9254 val = tr32(TG3PCI_PCISTATE);
9255 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9256 PCISTATE_ALLOW_APE_SHMEM_WR |
9257 PCISTATE_ALLOW_APE_PSPACE_WR;
9258 tw32(TG3PCI_PCISTATE, val);
9259 }
9260
9261 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9262 /* Enable some hw fixes. */
9263 val = tr32(TG3PCI_MSI_DATA);
9264 val |= (1 << 26) | (1 << 28) | (1 << 29);
9265 tw32(TG3PCI_MSI_DATA, val);
9266 }
9267
9268 /* Descriptor ring init may make accesses to the
9269 * NIC SRAM area to setup the TX descriptors, so we
9270 * can only do this after the hardware has been
9271 * successfully reset.
9272 */
9273 err = tg3_init_rings(tp);
9274 if (err)
9275 return err;
9276
9277 if (tg3_flag(tp, 57765_PLUS)) {
9278 val = tr32(TG3PCI_DMA_RW_CTRL) &
9279 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9280 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9281 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9282 if (!tg3_flag(tp, 57765_CLASS) &&
9283 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9284 tg3_asic_rev(tp) != ASIC_REV_5762)
9285 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9286 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9287 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9288 tg3_asic_rev(tp) != ASIC_REV_5761) {
9289 /* This value is determined during the probe time DMA
9290 * engine test, tg3_test_dma.
9291 */
9292 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9293 }
9294
9295 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9296 GRC_MODE_4X_NIC_SEND_RINGS |
9297 GRC_MODE_NO_TX_PHDR_CSUM |
9298 GRC_MODE_NO_RX_PHDR_CSUM);
9299 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9300
9301 /* Pseudo-header checksum is done by hardware logic and not
9302 * the offload processors, so make the chip do the pseudo-
9303 * header checksums on receive. For transmit it is more
9304 * convenient to do the pseudo-header checksum in software,
9305 * as Linux already does that for us in all cases.
9306 */
9307 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9308
9309 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9310 if (tp->rxptpctl)
9311 tw32(TG3_RX_PTP_CTL,
9312 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9313
9314 if (tg3_flag(tp, PTP_CAPABLE))
9315 val |= GRC_MODE_TIME_SYNC_ENABLE;
9316
9317 tw32(GRC_MODE, tp->grc_mode | val);
9318
9319 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9320 val = tr32(GRC_MISC_CFG);
9321 val &= ~0xff;
9322 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9323 tw32(GRC_MISC_CFG, val);
9324
9325 /* Initialize MBUF/DESC pool. */
9326 if (tg3_flag(tp, 5750_PLUS)) {
9327 /* Do nothing. */
9328 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9329 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9330 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9331 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9332 else
9333 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9334 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9335 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9336 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9337 int fw_len;
9338
9339 fw_len = tp->fw_len;
9340 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9341 tw32(BUFMGR_MB_POOL_ADDR,
9342 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9343 tw32(BUFMGR_MB_POOL_SIZE,
9344 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9345 }
9346
9347 if (tp->dev->mtu <= ETH_DATA_LEN) {
9348 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9349 tp->bufmgr_config.mbuf_read_dma_low_water);
9350 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9351 tp->bufmgr_config.mbuf_mac_rx_low_water);
9352 tw32(BUFMGR_MB_HIGH_WATER,
9353 tp->bufmgr_config.mbuf_high_water);
9354 } else {
9355 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9356 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9357 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9358 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9359 tw32(BUFMGR_MB_HIGH_WATER,
9360 tp->bufmgr_config.mbuf_high_water_jumbo);
9361 }
9362 tw32(BUFMGR_DMA_LOW_WATER,
9363 tp->bufmgr_config.dma_low_water);
9364 tw32(BUFMGR_DMA_HIGH_WATER,
9365 tp->bufmgr_config.dma_high_water);
9366
9367 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9368 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9369 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9370 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9371 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9372 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9373 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9374 tw32(BUFMGR_MODE, val);
9375 for (i = 0; i < 2000; i++) {
9376 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9377 break;
9378 udelay(10);
9379 }
9380 if (i >= 2000) {
9381 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9382 return -ENODEV;
9383 }
9384
9385 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9386 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9387
9388 tg3_setup_rxbd_thresholds(tp);
9389
9390 /* Initialize TG3_BDINFO's at:
9391 * RCVDBDI_STD_BD: standard eth size rx ring
9392 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9393 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9394 *
9395 * like so:
9396 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9397 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9398 * ring attribute flags
9399 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9400 *
9401 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9402 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9403 *
9404 * The size of each ring is fixed in the firmware, but the location is
9405 * configurable.
9406 */
9407 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9408 ((u64) tpr->rx_std_mapping >> 32));
9409 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9410 ((u64) tpr->rx_std_mapping & 0xffffffff));
9411 if (!tg3_flag(tp, 5717_PLUS))
9412 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9413 NIC_SRAM_RX_BUFFER_DESC);
9414
9415 /* Disable the mini ring */
9416 if (!tg3_flag(tp, 5705_PLUS))
9417 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9418 BDINFO_FLAGS_DISABLED);
9419
9420 /* Program the jumbo buffer descriptor ring control
9421 * blocks on those devices that have them.
9422 */
9423 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9424 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9425
9426 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9427 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9428 ((u64) tpr->rx_jmb_mapping >> 32));
9429 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9430 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9431 val = TG3_RX_JMB_RING_SIZE(tp) <<
9432 BDINFO_FLAGS_MAXLEN_SHIFT;
9433 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9434 val | BDINFO_FLAGS_USE_EXT_RECV);
9435 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9436 tg3_flag(tp, 57765_CLASS) ||
9437 tg3_asic_rev(tp) == ASIC_REV_5762)
9438 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9439 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9440 } else {
9441 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9442 BDINFO_FLAGS_DISABLED);
9443 }
9444
9445 if (tg3_flag(tp, 57765_PLUS)) {
9446 val = TG3_RX_STD_RING_SIZE(tp);
9447 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9448 val |= (TG3_RX_STD_DMA_SZ << 2);
9449 } else
9450 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9451 } else
9452 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9453
9454 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9455
9456 tpr->rx_std_prod_idx = tp->rx_pending;
9457 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9458
9459 tpr->rx_jmb_prod_idx =
9460 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9461 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9462
9463 tg3_rings_reset(tp);
9464
9465 /* Initialize MAC address and backoff seed. */
9466 __tg3_set_mac_addr(tp, 0);
9467
9468 /* MTU + ethernet header + FCS + optional VLAN tag */
9469 tw32(MAC_RX_MTU_SIZE,
9470 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9471
9472 /* The slot time is changed by tg3_setup_phy if we
9473 * run at gigabit with half duplex.
9474 */
9475 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9476 (6 << TX_LENGTHS_IPG_SHIFT) |
9477 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9478
9479 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9480 tg3_asic_rev(tp) == ASIC_REV_5762)
9481 val |= tr32(MAC_TX_LENGTHS) &
9482 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9483 TX_LENGTHS_CNT_DWN_VAL_MSK);
9484
9485 tw32(MAC_TX_LENGTHS, val);
9486
9487 /* Receive rules. */
9488 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9489 tw32(RCVLPC_CONFIG, 0x0181);
9490
9491 /* Calculate RDMAC_MODE setting early, we need it to determine
9492 * the RCVLPC_STATE_ENABLE mask.
9493 */
9494 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9495 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9496 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9497 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9498 RDMAC_MODE_LNGREAD_ENAB);
9499
9500 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9501 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9502
9503 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9504 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9505 tg3_asic_rev(tp) == ASIC_REV_57780)
9506 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9507 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9508 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9509
9510 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9511 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9512 if (tg3_flag(tp, TSO_CAPABLE) &&
9513 tg3_asic_rev(tp) == ASIC_REV_5705) {
9514 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9515 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9516 !tg3_flag(tp, IS_5788)) {
9517 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9518 }
9519 }
9520
9521 if (tg3_flag(tp, PCI_EXPRESS))
9522 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9523
9524 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9525 tp->dma_limit = 0;
9526 if (tp->dev->mtu <= ETH_DATA_LEN) {
9527 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9528 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9529 }
9530 }
9531
9532 if (tg3_flag(tp, HW_TSO_1) ||
9533 tg3_flag(tp, HW_TSO_2) ||
9534 tg3_flag(tp, HW_TSO_3))
9535 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9536
9537 if (tg3_flag(tp, 57765_PLUS) ||
9538 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9539 tg3_asic_rev(tp) == ASIC_REV_57780)
9540 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9541
9542 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9543 tg3_asic_rev(tp) == ASIC_REV_5762)
9544 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9545
9546 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9547 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9548 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9549 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9550 tg3_flag(tp, 57765_PLUS)) {
9551 u32 tgtreg;
9552
9553 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9554 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9555 else
9556 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9557
9558 val = tr32(tgtreg);
9559 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9560 tg3_asic_rev(tp) == ASIC_REV_5762) {
9561 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9562 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9563 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9564 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9565 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9566 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9567 }
9568 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9569 }
9570
9571 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9572 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9573 tg3_asic_rev(tp) == ASIC_REV_5762) {
9574 u32 tgtreg;
9575
9576 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9577 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9578 else
9579 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9580
9581 val = tr32(tgtreg);
9582 tw32(tgtreg, val |
9583 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9584 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9585 }
9586
9587 /* Receive/send statistics. */
9588 if (tg3_flag(tp, 5750_PLUS)) {
9589 val = tr32(RCVLPC_STATS_ENABLE);
9590 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9591 tw32(RCVLPC_STATS_ENABLE, val);
9592 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9593 tg3_flag(tp, TSO_CAPABLE)) {
9594 val = tr32(RCVLPC_STATS_ENABLE);
9595 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9596 tw32(RCVLPC_STATS_ENABLE, val);
9597 } else {
9598 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9599 }
9600 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9601 tw32(SNDDATAI_STATSENAB, 0xffffff);
9602 tw32(SNDDATAI_STATSCTRL,
9603 (SNDDATAI_SCTRL_ENABLE |
9604 SNDDATAI_SCTRL_FASTUPD));
9605
9606 /* Setup host coalescing engine. */
9607 tw32(HOSTCC_MODE, 0);
9608 for (i = 0; i < 2000; i++) {
9609 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9610 break;
9611 udelay(10);
9612 }
9613
9614 __tg3_set_coalesce(tp, &tp->coal);
9615
9616 if (!tg3_flag(tp, 5705_PLUS)) {
9617 /* Status/statistics block address. See tg3_timer,
9618 * the tg3_periodic_fetch_stats call there, and
9619 * tg3_get_stats to see how this works for 5705/5750 chips.
9620 */
9621 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9622 ((u64) tp->stats_mapping >> 32));
9623 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9624 ((u64) tp->stats_mapping & 0xffffffff));
9625 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9626
9627 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9628
9629 /* Clear statistics and status block memory areas */
9630 for (i = NIC_SRAM_STATS_BLK;
9631 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9632 i += sizeof(u32)) {
9633 tg3_write_mem(tp, i, 0);
9634 udelay(40);
9635 }
9636 }
9637
9638 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9639
9640 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9641 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9642 if (!tg3_flag(tp, 5705_PLUS))
9643 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9644
9645 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9646 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9647 /* reset to prevent intermittently losing the first rx packet */
9648 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9649 udelay(10);
9650 }
9651
9652 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9653 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9654 MAC_MODE_FHDE_ENABLE;
9655 if (tg3_flag(tp, ENABLE_APE))
9656 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9657 if (!tg3_flag(tp, 5705_PLUS) &&
9658 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9659 tg3_asic_rev(tp) != ASIC_REV_5700)
9660 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9661 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9662 udelay(40);
9663
9664 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9665 * If TG3_FLAG_IS_NIC is zero, we should read the
9666 * register to preserve the GPIO settings for LOMs. The GPIOs,
9667 * whether used as inputs or outputs, are set by boot code after
9668 * reset.
9669 */
9670 if (!tg3_flag(tp, IS_NIC)) {
9671 u32 gpio_mask;
9672
9673 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9674 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9675 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9676
9677 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9678 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9679 GRC_LCLCTRL_GPIO_OUTPUT3;
9680
9681 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9682 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9683
9684 tp->grc_local_ctrl &= ~gpio_mask;
9685 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9686
9687 /* GPIO1 must be driven high for eeprom write protect */
9688 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9689 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9690 GRC_LCLCTRL_GPIO_OUTPUT1);
9691 }
9692 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9693 udelay(100);
9694
9695 if (tg3_flag(tp, USING_MSIX)) {
9696 val = tr32(MSGINT_MODE);
9697 val |= MSGINT_MODE_ENABLE;
9698 if (tp->irq_cnt > 1)
9699 val |= MSGINT_MODE_MULTIVEC_EN;
9700 if (!tg3_flag(tp, 1SHOT_MSI))
9701 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9702 tw32(MSGINT_MODE, val);
9703 }
9704
9705 if (!tg3_flag(tp, 5705_PLUS)) {
9706 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9707 udelay(40);
9708 }
9709
9710 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9711 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9712 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9713 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9714 WDMAC_MODE_LNGREAD_ENAB);
9715
9716 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9717 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9718 if (tg3_flag(tp, TSO_CAPABLE) &&
9719 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9720 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9721 /* nothing */
9722 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9723 !tg3_flag(tp, IS_5788)) {
9724 val |= WDMAC_MODE_RX_ACCEL;
9725 }
9726 }
9727
9728 /* Enable host coalescing bug fix */
9729 if (tg3_flag(tp, 5755_PLUS))
9730 val |= WDMAC_MODE_STATUS_TAG_FIX;
9731
9732 if (tg3_asic_rev(tp) == ASIC_REV_5785)
9733 val |= WDMAC_MODE_BURST_ALL_DATA;
9734
9735 tw32_f(WDMAC_MODE, val);
9736 udelay(40);
9737
9738 if (tg3_flag(tp, PCIX_MODE)) {
9739 u16 pcix_cmd;
9740
9741 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9742 &pcix_cmd);
9743 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9744 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9745 pcix_cmd |= PCI_X_CMD_READ_2K;
9746 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9747 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9748 pcix_cmd |= PCI_X_CMD_READ_2K;
9749 }
9750 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9751 pcix_cmd);
9752 }
9753
9754 tw32_f(RDMAC_MODE, rdmac_mode);
9755 udelay(40);
9756
9757 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9758 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9759 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9760 break;
9761 }
9762 if (i < TG3_NUM_RDMA_CHANNELS) {
9763 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9764 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9765 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9766 tg3_flag_set(tp, 5719_RDMA_BUG);
9767 }
9768 }
9769
9770 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9771 if (!tg3_flag(tp, 5705_PLUS))
9772 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9773
9774 if (tg3_asic_rev(tp) == ASIC_REV_5761)
9775 tw32(SNDDATAC_MODE,
9776 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9777 else
9778 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9779
9780 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9781 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9782 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9783 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9784 val |= RCVDBDI_MODE_LRG_RING_SZ;
9785 tw32(RCVDBDI_MODE, val);
9786 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9787 if (tg3_flag(tp, HW_TSO_1) ||
9788 tg3_flag(tp, HW_TSO_2) ||
9789 tg3_flag(tp, HW_TSO_3))
9790 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9791 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9792 if (tg3_flag(tp, ENABLE_TSS))
9793 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9794 tw32(SNDBDI_MODE, val);
9795 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9796
9797 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9798 err = tg3_load_5701_a0_firmware_fix(tp);
9799 if (err)
9800 return err;
9801 }
9802
9803 if (tg3_flag(tp, TSO_CAPABLE)) {
9804 err = tg3_load_tso_firmware(tp);
9805 if (err)
9806 return err;
9807 }
9808
9809 tp->tx_mode = TX_MODE_ENABLE;
9810
9811 if (tg3_flag(tp, 5755_PLUS) ||
9812 tg3_asic_rev(tp) == ASIC_REV_5906)
9813 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9814
9815 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9816 tg3_asic_rev(tp) == ASIC_REV_5762) {
9817 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9818 tp->tx_mode &= ~val;
9819 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9820 }
9821
9822 tw32_f(MAC_TX_MODE, tp->tx_mode);
9823 udelay(100);
9824
9825 if (tg3_flag(tp, ENABLE_RSS)) {
9826 tg3_rss_write_indir_tbl(tp);
9827
9828 /* Set up the "secret" hash key. */
9829 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9830 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9831 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9832 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9833 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9834 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9835 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9836 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9837 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9838 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9839 }
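/* For illustration: the ten writes above load a fixed 40-byte key, the
 * length used by Toeplitz-style RSS hashing.  Assuming the key registers
 * are contiguous (their consecutive use above suggests they are), an
 * equivalent sketch using a hypothetical array would be:
 *
 *	static const u32 rss_key[10] = {
 *		0x5f865437, 0xe4ac62cc, 0x50103a45, 0x36621985,
 *		0xbf14c0e8, 0x1bc27a1e, 0x84f4b556, 0x094ea6fe,
 *		0x7dda01e7, 0xc04d7481,
 *	};
 *	for (i = 0; i < 10; i++)
 *		tw32(MAC_RSS_HASH_KEY_0 + 4 * i, rss_key[i]);
 */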
9840
9841 tp->rx_mode = RX_MODE_ENABLE;
9842 if (tg3_flag(tp, 5755_PLUS))
9843 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9844
9845 if (tg3_flag(tp, ENABLE_RSS))
9846 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9847 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9848 RX_MODE_RSS_IPV6_HASH_EN |
9849 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9850 RX_MODE_RSS_IPV4_HASH_EN |
9851 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9852
9853 tw32_f(MAC_RX_MODE, tp->rx_mode);
9854 udelay(10);
9855
9856 tw32(MAC_LED_CTRL, tp->led_ctrl);
9857
9858 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9859 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9860 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9861 udelay(10);
9862 }
9863 tw32_f(MAC_RX_MODE, tp->rx_mode);
9864 udelay(10);
9865
9866 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9867 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9868 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9869 /* Set drive transmission level to 1.2V only if
9870 * the signal pre-emphasis bit is not set. */
9871 val = tr32(MAC_SERDES_CFG);
9872 val &= 0xfffff000;
9873 val |= 0x880;
9874 tw32(MAC_SERDES_CFG, val);
9875 }
9876 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
9877 tw32(MAC_SERDES_CFG, 0x616000);
9878 }
9879
9880 /* Prevent chip from dropping frames when flow control
9881 * is enabled.
9882 */
9883 if (tg3_flag(tp, 57765_CLASS))
9884 val = 1;
9885 else
9886 val = 2;
9887 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9888
9889 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
9890 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9891 /* Use hardware link auto-negotiation */
9892 tg3_flag_set(tp, HW_AUTONEG);
9893 }
9894
9895 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9896 tg3_asic_rev(tp) == ASIC_REV_5714) {
9897 u32 tmp;
9898
9899 tmp = tr32(SERDES_RX_CTRL);
9900 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9901 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9902 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9903 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9904 }
9905
9906 if (!tg3_flag(tp, USE_PHYLIB)) {
9907 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9908 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9909
9910 err = tg3_setup_phy(tp, 0);
9911 if (err)
9912 return err;
9913
9914 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9915 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9916 u32 tmp;
9917
9918 /* Clear CRC stats. */
9919 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9920 tg3_writephy(tp, MII_TG3_TEST1,
9921 tmp | MII_TG3_TEST1_CRC_EN);
9922 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9923 }
9924 }
9925 }
9926
9927 __tg3_set_rx_mode(tp->dev);
9928
9929 /* Initialize receive rules. */
9930 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9931 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9932 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9933 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9934
9935 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9936 limit = 8;
9937 else
9938 limit = 16;
9939 if (tg3_flag(tp, ENABLE_ASF))
9940 limit -= 4;
9941 switch (limit) {
9942 case 16:
9943 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9944 case 15:
9945 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9946 case 14:
9947 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9948 case 13:
9949 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9950 case 12:
9951 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9952 case 11:
9953 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9954 case 10:
9955 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9956 case 9:
9957 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9958 case 8:
9959 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9960 case 7:
9961 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9962 case 6:
9963 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9964 case 5:
9965 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9966 case 4:
9967 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9968 case 3:
9969 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9970 case 2:
9971 case 1:
9972
9973 default:
9974 break;
9975 }
9976
9977 if (tg3_flag(tp, ENABLE_APE))
9978 /* Write our heartbeat update interval to APE. */
9979 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9980 APE_HOST_HEARTBEAT_INT_DISABLE);
9981
9982 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9983
9984 return 0;
9985 }
9986
9987 /* Called at device open time to get the chip ready for
9988 * packet processing. Invoked with tp->lock held.
9989 */
9990 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9991 {
9992 tg3_switch_clocks(tp);
9993
9994 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9995
9996 return tg3_reset_hw(tp, reset_phy);
9997 }
9998
9999 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10000 {
10001 int i;
10002
10003 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10004 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10005
10006 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10007 off += len;
10008
10009 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10010 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10011 memset(ocir, 0, TG3_OCIR_LEN);
10012 }
10013 }
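/* Records failing the signature/active check are zeroed in place rather
 * than skipped, so a caller such as tg3_hwmon_open() can simply test
 * src_data_length to see whether a slot holds a usable record.
 */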
10014
10015 /* sysfs attributes for hwmon */
10016 static ssize_t tg3_show_temp(struct device *dev,
10017 struct device_attribute *devattr, char *buf)
10018 {
10019 struct pci_dev *pdev = to_pci_dev(dev);
10020 struct net_device *netdev = pci_get_drvdata(pdev);
10021 struct tg3 *tp = netdev_priv(netdev);
10022 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10023 u32 temperature;
10024
10025 spin_lock_bh(&tp->lock);
10026 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10027 sizeof(temperature));
10028 spin_unlock_bh(&tp->lock);
10029 return sprintf(buf, "%u\n", temperature);
10030 }
10031
10032
10033 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10034 TG3_TEMP_SENSOR_OFFSET);
10035 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10036 TG3_TEMP_CAUTION_OFFSET);
10037 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10038 TG3_TEMP_MAX_OFFSET);
10039
10040 static struct attribute *tg3_attributes[] = {
10041 &sensor_dev_attr_temp1_input.dev_attr.attr,
10042 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10043 &sensor_dev_attr_temp1_max.dev_attr.attr,
10044 NULL
10045 };
10046
10047 static const struct attribute_group tg3_group = {
10048 .attrs = tg3_attributes,
10049 };
10050
10051 static void tg3_hwmon_close(struct tg3 *tp)
10052 {
10053 if (tp->hwmon_dev) {
10054 hwmon_device_unregister(tp->hwmon_dev);
10055 tp->hwmon_dev = NULL;
10056 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10057 }
10058 }
10059
10060 static void tg3_hwmon_open(struct tg3 *tp)
10061 {
10062 int i, err;
10063 u32 size = 0;
10064 struct pci_dev *pdev = tp->pdev;
10065 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10066
10067 tg3_sd_scan_scratchpad(tp, ocirs);
10068
10069 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10070 if (!ocirs[i].src_data_length)
10071 continue;
10072
10073 size += ocirs[i].src_hdr_length;
10074 size += ocirs[i].src_data_length;
10075 }
10076
10077 if (!size)
10078 return;
10079
10080 /* Register hwmon sysfs hooks */
10081 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10082 if (err) {
10083 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10084 return;
10085 }
10086
10087 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10088 if (IS_ERR(tp->hwmon_dev)) {
10089 tp->hwmon_dev = NULL;
10090 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10091 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10092 }
10093 }
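/* Usage sketch (path illustrative; the hwmon index is assigned at
 * registration time and may differ):
 *
 *	$ cat /sys/class/hwmon/hwmon0/device/temp1_input
 *
 * Each such read invokes tg3_show_temp(), which fetches the value from
 * the APE scratchpad at the offset stored in the sensor attribute.
 */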
10094
10095
10096 #define TG3_STAT_ADD32(PSTAT, REG) \
10097 do { u32 __val = tr32(REG); \
10098 (PSTAT)->low += __val; \
10099 if ((PSTAT)->low < __val) \
10100 (PSTAT)->high += 1; \
10101 } while (0)
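/* The carry test in TG3_STAT_ADD32 relies on unsigned wraparound: if the
 * 32-bit addition overflows, the wrapped sum is necessarily smaller than
 * the value just added.  Illustrative values:
 *
 *	low = 0xfffffff0; __val = 0x20;
 *	low += __val;		// wraps to 0x00000010
 *	// 0x10 < 0x20, so high is incremented and the
 *	// counter effectively extends to 64 bits.
 */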
10102
10103 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10104 {
10105 struct tg3_hw_stats *sp = tp->hw_stats;
10106
10107 if (!tp->link_up)
10108 return;
10109
10110 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10111 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10112 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10113 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10114 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10115 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10116 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10117 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10118 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10119 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10120 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10121 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10122 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10123 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10124 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10125 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10126 u32 val;
10127
10128 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10129 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10130 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10131 tg3_flag_clear(tp, 5719_RDMA_BUG);
10132 }
10133
10134 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10135 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10136 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10137 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10138 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10139 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10140 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10141 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10142 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10143 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10144 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10145 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10146 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10147 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10148
10149 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10150 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10151 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10152 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10153 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10154 } else {
10155 u32 val = tr32(HOSTCC_FLOW_ATTN);
10156 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10157 if (val) {
10158 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10159 sp->rx_discards.low += val;
10160 if (sp->rx_discards.low < val)
10161 sp->rx_discards.high += 1;
10162 }
10163 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10164 }
10165 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10166 }
10167
10168 static void tg3_chk_missed_msi(struct tg3 *tp)
10169 {
10170 u32 i;
10171
10172 for (i = 0; i < tp->irq_cnt; i++) {
10173 struct tg3_napi *tnapi = &tp->napi[i];
10174
10175 if (tg3_has_work(tnapi)) {
10176 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10177 tnapi->last_tx_cons == tnapi->tx_cons) {
10178 if (tnapi->chk_msi_cnt < 1) {
10179 tnapi->chk_msi_cnt++;
10180 return;
10181 }
10182 tg3_msi(0, tnapi);
10183 }
10184 }
10185 tnapi->chk_msi_cnt = 0;
10186 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10187 tnapi->last_tx_cons = tnapi->tx_cons;
10188 }
10189 }
10190
10191 static void tg3_timer(unsigned long __opaque)
10192 {
10193 struct tg3 *tp = (struct tg3 *) __opaque;
10194
10195 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10196 goto restart_timer;
10197
10198 spin_lock(&tp->lock);
10199
10200 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10201 tg3_flag(tp, 57765_CLASS))
10202 tg3_chk_missed_msi(tp);
10203
10204 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10205 /* BCM4785: Flush posted writes from GbE to host memory. */
10206 tr32(HOSTCC_MODE);
10207 }
10208
10209 if (!tg3_flag(tp, TAGGED_STATUS)) {
10210 /* All of this garbage is because, when using non-tagged
10211 * IRQ status, the mailbox/status_block protocol the chip
10212 * uses with the CPU is race prone.
10213 */
10214 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10215 tw32(GRC_LOCAL_CTRL,
10216 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10217 } else {
10218 tw32(HOSTCC_MODE, tp->coalesce_mode |
10219 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10220 }
10221
10222 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10223 spin_unlock(&tp->lock);
10224 tg3_reset_task_schedule(tp);
10225 goto restart_timer;
10226 }
10227 }
10228
10229 /* This part only runs once per second. */
10230 if (!--tp->timer_counter) {
10231 if (tg3_flag(tp, 5705_PLUS))
10232 tg3_periodic_fetch_stats(tp);
10233
10234 if (tp->setlpicnt && !--tp->setlpicnt)
10235 tg3_phy_eee_enable(tp);
10236
10237 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10238 u32 mac_stat;
10239 int phy_event;
10240
10241 mac_stat = tr32(MAC_STATUS);
10242
10243 phy_event = 0;
10244 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10245 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10246 phy_event = 1;
10247 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10248 phy_event = 1;
10249
10250 if (phy_event)
10251 tg3_setup_phy(tp, 0);
10252 } else if (tg3_flag(tp, POLL_SERDES)) {
10253 u32 mac_stat = tr32(MAC_STATUS);
10254 int need_setup = 0;
10255
10256 if (tp->link_up &&
10257 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10258 need_setup = 1;
10259 }
10260 if (!tp->link_up &&
10261 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10262 MAC_STATUS_SIGNAL_DET))) {
10263 need_setup = 1;
10264 }
10265 if (need_setup) {
10266 if (!tp->serdes_counter) {
10267 tw32_f(MAC_MODE,
10268 (tp->mac_mode &
10269 ~MAC_MODE_PORT_MODE_MASK));
10270 udelay(40);
10271 tw32_f(MAC_MODE, tp->mac_mode);
10272 udelay(40);
10273 }
10274 tg3_setup_phy(tp, 0);
10275 }
10276 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10277 tg3_flag(tp, 5780_CLASS)) {
10278 tg3_serdes_parallel_detect(tp);
10279 }
10280
10281 tp->timer_counter = tp->timer_multiplier;
10282 }
10283
10284 /* Heartbeat is only sent once every 2 seconds.
10285 *
10286 * The heartbeat is to tell the ASF firmware that the host
10287 * driver is still alive. In the event that the OS crashes,
10288 * ASF needs to reset the hardware to free up the FIFO space
10289 * that may be filled with rx packets destined for the host.
10290 * If the FIFO is full, ASF will no longer function properly.
10291 *
10292 * Unintended resets have been reported on real-time kernels
10293 * where the timer doesn't run on time. Netpoll has the
10294 * same problem.
10295 *
10296 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10297 * to check the ring condition when the heartbeat is expiring
10298 * before doing the reset. This will prevent most unintended
10299 * resets.
10300 */
10301 if (!--tp->asf_counter) {
10302 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10303 tg3_wait_for_event_ack(tp);
10304
10305 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10306 FWCMD_NICDRV_ALIVE3);
10307 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10308 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10309 TG3_FW_UPDATE_TIMEOUT_SEC);
10310
10311 tg3_generate_fw_event(tp);
10312 }
10313 tp->asf_counter = tp->asf_multiplier;
10314 }
10315
10316 spin_unlock(&tp->lock);
10317
10318 restart_timer:
10319 tp->timer.expires = jiffies + tp->timer_offset;
10320 add_timer(&tp->timer);
10321 }
10322
10323 static void tg3_timer_init(struct tg3 *tp)
10324 {
10325 if (tg3_flag(tp, TAGGED_STATUS) &&
10326 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10327 !tg3_flag(tp, 57765_CLASS))
10328 tp->timer_offset = HZ;
10329 else
10330 tp->timer_offset = HZ / 10;
10331
10332 BUG_ON(tp->timer_offset > HZ);
10333
10334 tp->timer_multiplier = (HZ / tp->timer_offset);
10335 tp->asf_multiplier = (HZ / tp->timer_offset) *
10336 TG3_FW_UPDATE_FREQ_SEC;
10337
10338 init_timer(&tp->timer);
10339 tp->timer.data = (unsigned long) tp;
10340 tp->timer.function = tg3_timer;
10341 }
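/* Worked example of the cadence set up above: with tagged status on an
 * eligible chip, timer_offset = HZ, so tg3_timer() runs once per second
 * and timer_multiplier = 1; otherwise the timer runs every HZ/10 jiffies
 * with timer_multiplier = 10.  Either way, the "once per second" block
 * in tg3_timer() fires at 1 Hz, and asf_multiplier scales that down to
 * one heartbeat every TG3_FW_UPDATE_FREQ_SEC seconds (2 s, per the
 * heartbeat comment in tg3_timer()).
 */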
10342
10343 static void tg3_timer_start(struct tg3 *tp)
10344 {
10345 tp->asf_counter = tp->asf_multiplier;
10346 tp->timer_counter = tp->timer_multiplier;
10347
10348 tp->timer.expires = jiffies + tp->timer_offset;
10349 add_timer(&tp->timer);
10350 }
10351
10352 static void tg3_timer_stop(struct tg3 *tp)
10353 {
10354 del_timer_sync(&tp->timer);
10355 }
10356
10357 /* Restart hardware after configuration changes, self-test, etc.
10358 * Invoked with tp->lock held.
10359 */
10360 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10361 __releases(tp->lock)
10362 __acquires(tp->lock)
10363 {
10364 int err;
10365
10366 err = tg3_init_hw(tp, reset_phy);
10367 if (err) {
10368 netdev_err(tp->dev,
10369 "Failed to re-initialize device, aborting\n");
10370 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10371 tg3_full_unlock(tp);
10372 tg3_timer_stop(tp);
10373 tp->irq_sync = 0;
10374 tg3_napi_enable(tp);
10375 dev_close(tp->dev);
10376 tg3_full_lock(tp, 0);
10377 }
10378 return err;
10379 }
10380
10381 static void tg3_reset_task(struct work_struct *work)
10382 {
10383 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10384 int err;
10385
10386 tg3_full_lock(tp, 0);
10387
10388 if (!netif_running(tp->dev)) {
10389 tg3_flag_clear(tp, RESET_TASK_PENDING);
10390 tg3_full_unlock(tp);
10391 return;
10392 }
10393
10394 tg3_full_unlock(tp);
10395
10396 tg3_phy_stop(tp);
10397
10398 tg3_netif_stop(tp);
10399
10400 tg3_full_lock(tp, 1);
10401
10402 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10403 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10404 tp->write32_rx_mbox = tg3_write_flush_reg32;
10405 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10406 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10407 }
10408
10409 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10410 err = tg3_init_hw(tp, 1);
10411 if (err)
10412 goto out;
10413
10414 tg3_netif_start(tp);
10415
10416 out:
10417 tg3_full_unlock(tp);
10418
10419 if (!err)
10420 tg3_phy_start(tp);
10421
10422 tg3_flag_clear(tp, RESET_TASK_PENDING);
10423 }
10424
10425 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10426 {
10427 irq_handler_t fn;
10428 unsigned long flags;
10429 char *name;
10430 struct tg3_napi *tnapi = &tp->napi[irq_num];
10431
10432 if (tp->irq_cnt == 1)
10433 name = tp->dev->name;
10434 else {
10435 name = &tnapi->irq_lbl[0];
10436 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10437 name[IFNAMSIZ-1] = 0;
10438 }
10439
10440 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10441 fn = tg3_msi;
10442 if (tg3_flag(tp, 1SHOT_MSI))
10443 fn = tg3_msi_1shot;
10444 flags = 0;
10445 } else {
10446 fn = tg3_interrupt;
10447 if (tg3_flag(tp, TAGGED_STATUS))
10448 fn = tg3_interrupt_tagged;
10449 flags = IRQF_SHARED;
10450 }
10451
10452 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10453 }
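/* With multiple vectors on a device named eth0 (name and count
 * illustrative), the handlers above register as "eth0-0", "eth0-1", ...
 * in /proc/interrupts; with a single vector the bare device name is
 * used.
 */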
10454
10455 static int tg3_test_interrupt(struct tg3 *tp)
10456 {
10457 struct tg3_napi *tnapi = &tp->napi[0];
10458 struct net_device *dev = tp->dev;
10459 int err, i, intr_ok = 0;
10460 u32 val;
10461
10462 if (!netif_running(dev))
10463 return -ENODEV;
10464
10465 tg3_disable_ints(tp);
10466
10467 free_irq(tnapi->irq_vec, tnapi);
10468
10469 /*
10470 * Turn off MSI one-shot mode. Otherwise this test has no
10471 * observable way to know whether the interrupt was delivered.
10472 */
10473 if (tg3_flag(tp, 57765_PLUS)) {
10474 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10475 tw32(MSGINT_MODE, val);
10476 }
10477
10478 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10479 IRQF_SHARED, dev->name, tnapi);
10480 if (err)
10481 return err;
10482
10483 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10484 tg3_enable_ints(tp);
10485
10486 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10487 tnapi->coal_now);
10488
10489 for (i = 0; i < 5; i++) {
10490 u32 int_mbox, misc_host_ctrl;
10491
10492 int_mbox = tr32_mailbox(tnapi->int_mbox);
10493 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10494
10495 if ((int_mbox != 0) ||
10496 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10497 intr_ok = 1;
10498 break;
10499 }
10500
10501 if (tg3_flag(tp, 57765_PLUS) &&
10502 tnapi->hw_status->status_tag != tnapi->last_tag)
10503 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10504
10505 msleep(10);
10506 }
10507
10508 tg3_disable_ints(tp);
10509
10510 free_irq(tnapi->irq_vec, tnapi);
10511
10512 err = tg3_request_irq(tp, 0);
10513
10514 if (err)
10515 return err;
10516
10517 if (intr_ok) {
10518 /* Re-enable MSI one-shot mode. */
10519 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10520 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10521 tw32(MSGINT_MODE, val);
10522 }
10523 return 0;
10524 }
10525
10526 return -EIO;
10527 }
10528
10529 /* Returns 0 if the MSI test succeeds, or if the test fails but
10530 * INTx mode is successfully restored.
10531 */
10532 static int tg3_test_msi(struct tg3 *tp)
10533 {
10534 int err;
10535 u16 pci_cmd;
10536
10537 if (!tg3_flag(tp, USING_MSI))
10538 return 0;
10539
10540 /* Turn off SERR reporting in case MSI terminates with Master
10541 * Abort.
10542 */
10543 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10544 pci_write_config_word(tp->pdev, PCI_COMMAND,
10545 pci_cmd & ~PCI_COMMAND_SERR);
10546
10547 err = tg3_test_interrupt(tp);
10548
10549 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10550
10551 if (!err)
10552 return 0;
10553
10554 /* other failures */
10555 if (err != -EIO)
10556 return err;
10557
10558 /* MSI test failed, go back to INTx mode */
10559 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10560 "to INTx mode. Please report this failure to the PCI "
10561 "maintainer and include system chipset information\n");
10562
10563 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10564
10565 pci_disable_msi(tp->pdev);
10566
10567 tg3_flag_clear(tp, USING_MSI);
10568 tp->napi[0].irq_vec = tp->pdev->irq;
10569
10570 err = tg3_request_irq(tp, 0);
10571 if (err)
10572 return err;
10573
10574 /* Need to reset the chip because the MSI cycle may have terminated
10575 * with Master Abort.
10576 */
10577 tg3_full_lock(tp, 1);
10578
10579 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10580 err = tg3_init_hw(tp, 1);
10581
10582 tg3_full_unlock(tp);
10583
10584 if (err)
10585 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10586
10587 return err;
10588 }
10589
10590 static int tg3_request_firmware(struct tg3 *tp)
10591 {
10592 const struct tg3_firmware_hdr *fw_hdr;
10593
10594 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10595 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10596 tp->fw_needed);
10597 return -ENOENT;
10598 }
10599
10600 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10601
10602 /* The firmware blob starts with version numbers, followed by
10603 * the start address and the _full_ length including BSS sections
10604 * (which must be longer than the actual data, of course).
10605 */
10606
10607 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10608 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10609 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10610 tp->fw_len, tp->fw_needed);
10611 release_firmware(tp->fw);
10612 tp->fw = NULL;
10613 return -EINVAL;
10614 }
10615
10616 /* We no longer need firmware; we have it. */
10617 tp->fw_needed = NULL;
10618 return 0;
10619 }
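/* A sketch of the header layout this function assumes; the real
 * struct tg3_firmware_hdr is defined in tg3.h, and the annotations
 * below are illustrative:
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;
 *		__be32 base_addr;	// load address in NIC memory
 *		__be32 len;		// full image length, incl. BSS
 *	};
 *
 * fw->size - TG3_FW_HDR_LEN is the payload actually present in the
 * file, which is why the length check above compares fw_len against
 * that value rather than against fw->size.
 */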
10620
10621 static u32 tg3_irq_count(struct tg3 *tp)
10622 {
10623 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10624
10625 if (irq_cnt > 1) {
10626 /* We want as many rx rings enabled as there are cpus.
10627 * In multiqueue MSI-X mode, the first MSI-X vector
10628 * only deals with link interrupts, etc., so we add
10629 * one to the number of vectors we are requesting.
10630 */
10631 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10632 }
10633
10634 return irq_cnt;
10635 }
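/* Worked example (CPU count illustrative): on a 4-CPU system with no
 * explicit queue requests, rxq_cnt is typically 4 and txq_cnt is 1, so
 * irq_cnt starts at max(4, 1) = 4 and is then raised to
 * min(4 + 1, tp->irq_max) to reserve vector 0 for link interrupts.
 */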
10636
10637 static bool tg3_enable_msix(struct tg3 *tp)
10638 {
10639 int i, rc;
10640 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10641
10642 tp->txq_cnt = tp->txq_req;
10643 tp->rxq_cnt = tp->rxq_req;
10644 if (!tp->rxq_cnt)
10645 tp->rxq_cnt = netif_get_num_default_rss_queues();
10646 if (tp->rxq_cnt > tp->rxq_max)
10647 tp->rxq_cnt = tp->rxq_max;
10648
10649 /* Disable multiple TX rings by default. Simple round-robin hardware
10650 * scheduling of the TX rings can cause starvation of rings with
10651 * small packets when other rings have TSO or jumbo packets.
10652 */
10653 if (!tp->txq_req)
10654 tp->txq_cnt = 1;
10655
10656 tp->irq_cnt = tg3_irq_count(tp);
10657
10658 for (i = 0; i < tp->irq_max; i++) {
10659 msix_ent[i].entry = i;
10660 msix_ent[i].vector = 0;
10661 }
10662
10663 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10664 if (rc < 0) {
10665 return false;
10666 } else if (rc != 0) {
10667 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10668 return false;
10669 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10670 tp->irq_cnt, rc);
10671 tp->irq_cnt = rc;
10672 tp->rxq_cnt = max(rc - 1, 1);
10673 if (tp->txq_cnt)
10674 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10675 }
10676
10677 for (i = 0; i < tp->irq_max; i++)
10678 tp->napi[i].irq_vec = msix_ent[i].vector;
10679
10680 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10681 pci_disable_msix(tp->pdev);
10682 return false;
10683 }
10684
10685 if (tp->irq_cnt == 1)
10686 return true;
10687
10688 tg3_flag_set(tp, ENABLE_RSS);
10689
10690 if (tp->txq_cnt > 1)
10691 tg3_flag_set(tp, ENABLE_TSS);
10692
10693 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10694
10695 return true;
10696 }
10697
10698 static void tg3_ints_init(struct tg3 *tp)
10699 {
10700 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10701 !tg3_flag(tp, TAGGED_STATUS)) {
10702 /* All MSI-supporting chips should support tagged
10703 * status. Warn and fall back to INTx if not.
10704 */
10705 netdev_warn(tp->dev,
10706 "MSI without TAGGED_STATUS? Not using MSI\n");
10707 goto defcfg;
10708 }
10709
10710 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10711 tg3_flag_set(tp, USING_MSIX);
10712 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10713 tg3_flag_set(tp, USING_MSI);
10714
10715 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10716 u32 msi_mode = tr32(MSGINT_MODE);
10717 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10718 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10719 if (!tg3_flag(tp, 1SHOT_MSI))
10720 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10721 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10722 }
10723 defcfg:
10724 if (!tg3_flag(tp, USING_MSIX)) {
10725 tp->irq_cnt = 1;
10726 tp->napi[0].irq_vec = tp->pdev->irq;
10727 }
10728
10729 if (tp->irq_cnt == 1) {
10730 tp->txq_cnt = 1;
10731 tp->rxq_cnt = 1;
10732 netif_set_real_num_tx_queues(tp->dev, 1);
10733 netif_set_real_num_rx_queues(tp->dev, 1);
10734 }
10735 }
10736
10737 static void tg3_ints_fini(struct tg3 *tp)
10738 {
10739 if (tg3_flag(tp, USING_MSIX))
10740 pci_disable_msix(tp->pdev);
10741 else if (tg3_flag(tp, USING_MSI))
10742 pci_disable_msi(tp->pdev);
10743 tg3_flag_clear(tp, USING_MSI);
10744 tg3_flag_clear(tp, USING_MSIX);
10745 tg3_flag_clear(tp, ENABLE_RSS);
10746 tg3_flag_clear(tp, ENABLE_TSS);
10747 }
10748
10749 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10750 bool init)
10751 {
10752 struct net_device *dev = tp->dev;
10753 int i, err;
10754
10755 /*
10756 * Set up interrupts first so we know how
10757 * many NAPI resources to allocate.
10758 */
10759 tg3_ints_init(tp);
10760
10761 tg3_rss_check_indir_tbl(tp);
10762
10763 /* The placement of this call is tied
10764 * to the setup and use of Host TX descriptors.
10765 */
10766 err = tg3_alloc_consistent(tp);
10767 if (err)
10768 goto err_out1;
10769
10770 tg3_napi_init(tp);
10771
10772 tg3_napi_enable(tp);
10773
10774 for (i = 0; i < tp->irq_cnt; i++) {
10775 struct tg3_napi *tnapi = &tp->napi[i];
10776 err = tg3_request_irq(tp, i);
10777 if (err) {
10778 for (i--; i >= 0; i--) {
10779 tnapi = &tp->napi[i];
10780 free_irq(tnapi->irq_vec, tnapi);
10781 }
10782 goto err_out2;
10783 }
10784 }
10785
10786 tg3_full_lock(tp, 0);
10787
10788 err = tg3_init_hw(tp, reset_phy);
10789 if (err) {
10790 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10791 tg3_free_rings(tp);
10792 }
10793
10794 tg3_full_unlock(tp);
10795
10796 if (err)
10797 goto err_out3;
10798
10799 if (test_irq && tg3_flag(tp, USING_MSI)) {
10800 err = tg3_test_msi(tp);
10801
10802 if (err) {
10803 tg3_full_lock(tp, 0);
10804 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10805 tg3_free_rings(tp);
10806 tg3_full_unlock(tp);
10807
10808 goto err_out2;
10809 }
10810
10811 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10812 u32 val = tr32(PCIE_TRANSACTION_CFG);
10813
10814 tw32(PCIE_TRANSACTION_CFG,
10815 val | PCIE_TRANS_CFG_1SHOT_MSI);
10816 }
10817 }
10818
10819 tg3_phy_start(tp);
10820
10821 tg3_hwmon_open(tp);
10822
10823 tg3_full_lock(tp, 0);
10824
10825 tg3_timer_start(tp);
10826 tg3_flag_set(tp, INIT_COMPLETE);
10827 tg3_enable_ints(tp);
10828
10829 if (init)
10830 tg3_ptp_init(tp);
10831 else
10832 tg3_ptp_resume(tp);
10833
10834
10835 tg3_full_unlock(tp);
10836
10837 netif_tx_start_all_queues(dev);
10838
10839 /*
10840 * Reset the loopback feature if it was turned on while the device
10841 * was down; make sure that it is set up properly now.
10842 */
10843 if (dev->features & NETIF_F_LOOPBACK)
10844 tg3_set_loopback(dev, dev->features);
10845
10846 return 0;
10847
10848 err_out3:
10849 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10850 struct tg3_napi *tnapi = &tp->napi[i];
10851 free_irq(tnapi->irq_vec, tnapi);
10852 }
10853
10854 err_out2:
10855 tg3_napi_disable(tp);
10856 tg3_napi_fini(tp);
10857 tg3_free_consistent(tp);
10858
10859 err_out1:
10860 tg3_ints_fini(tp);
10861
10862 return err;
10863 }
10864
10865 static void tg3_stop(struct tg3 *tp)
10866 {
10867 int i;
10868
10869 tg3_reset_task_cancel(tp);
10870 tg3_netif_stop(tp);
10871
10872 tg3_timer_stop(tp);
10873
10874 tg3_hwmon_close(tp);
10875
10876 tg3_phy_stop(tp);
10877
10878 tg3_full_lock(tp, 1);
10879
10880 tg3_disable_ints(tp);
10881
10882 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10883 tg3_free_rings(tp);
10884 tg3_flag_clear(tp, INIT_COMPLETE);
10885
10886 tg3_full_unlock(tp);
10887
10888 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10889 struct tg3_napi *tnapi = &tp->napi[i];
10890 free_irq(tnapi->irq_vec, tnapi);
10891 }
10892
10893 tg3_ints_fini(tp);
10894
10895 tg3_napi_fini(tp);
10896
10897 tg3_free_consistent(tp);
10898 }
10899
10900 static int tg3_open(struct net_device *dev)
10901 {
10902 struct tg3 *tp = netdev_priv(dev);
10903 int err;
10904
10905 if (tp->fw_needed) {
10906 err = tg3_request_firmware(tp);
10907 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10908 if (err)
10909 return err;
10910 } else if (err) {
10911 netdev_warn(tp->dev, "TSO capability disabled\n");
10912 tg3_flag_clear(tp, TSO_CAPABLE);
10913 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10914 netdev_notice(tp->dev, "TSO capability restored\n");
10915 tg3_flag_set(tp, TSO_CAPABLE);
10916 }
10917 }
10918
10919 tg3_carrier_off(tp);
10920
10921 err = tg3_power_up(tp);
10922 if (err)
10923 return err;
10924
10925 tg3_full_lock(tp, 0);
10926
10927 tg3_disable_ints(tp);
10928 tg3_flag_clear(tp, INIT_COMPLETE);
10929
10930 tg3_full_unlock(tp);
10931
10932 err = tg3_start(tp, true, true, true);
10933 if (err) {
10934 tg3_frob_aux_power(tp, false);
10935 pci_set_power_state(tp->pdev, PCI_D3hot);
10936 }
10937
10938 if (tg3_flag(tp, PTP_CAPABLE)) {
10939 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10940 &tp->pdev->dev);
10941 if (IS_ERR(tp->ptp_clock))
10942 tp->ptp_clock = NULL;
10943 }
10944
10945 return err;
10946 }
10947
10948 static int tg3_close(struct net_device *dev)
10949 {
10950 struct tg3 *tp = netdev_priv(dev);
10951
10952 tg3_ptp_fini(tp);
10953
10954 tg3_stop(tp);
10955
10956 /* Clear stats across close / open calls */
10957 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10958 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10959
10960 tg3_power_down(tp);
10961
10962 tg3_carrier_off(tp);
10963
10964 return 0;
10965 }
10966
10967 static inline u64 get_stat64(tg3_stat64_t *val)
10968 {
10969 return ((u64)val->high << 32) | ((u64)val->low);
10970 }
10971
10972 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10973 {
10974 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10975
10976 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10977 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
10978 tg3_asic_rev(tp) == ASIC_REV_5701)) {
10979 u32 val;
10980
10981 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10982 tg3_writephy(tp, MII_TG3_TEST1,
10983 val | MII_TG3_TEST1_CRC_EN);
10984 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10985 } else
10986 val = 0;
10987
10988 tp->phy_crc_errors += val;
10989
10990 return tp->phy_crc_errors;
10991 }
10992
10993 return get_stat64(&hw_stats->rx_fcs_errors);
10994 }
10995
10996 #define ESTAT_ADD(member) \
10997 estats->member = old_estats->member + \
10998 get_stat64(&hw_stats->member)
10999
11000 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11001 {
11002 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11003 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11004
11005 ESTAT_ADD(rx_octets);
11006 ESTAT_ADD(rx_fragments);
11007 ESTAT_ADD(rx_ucast_packets);
11008 ESTAT_ADD(rx_mcast_packets);
11009 ESTAT_ADD(rx_bcast_packets);
11010 ESTAT_ADD(rx_fcs_errors);
11011 ESTAT_ADD(rx_align_errors);
11012 ESTAT_ADD(rx_xon_pause_rcvd);
11013 ESTAT_ADD(rx_xoff_pause_rcvd);
11014 ESTAT_ADD(rx_mac_ctrl_rcvd);
11015 ESTAT_ADD(rx_xoff_entered);
11016 ESTAT_ADD(rx_frame_too_long_errors);
11017 ESTAT_ADD(rx_jabbers);
11018 ESTAT_ADD(rx_undersize_packets);
11019 ESTAT_ADD(rx_in_length_errors);
11020 ESTAT_ADD(rx_out_length_errors);
11021 ESTAT_ADD(rx_64_or_less_octet_packets);
11022 ESTAT_ADD(rx_65_to_127_octet_packets);
11023 ESTAT_ADD(rx_128_to_255_octet_packets);
11024 ESTAT_ADD(rx_256_to_511_octet_packets);
11025 ESTAT_ADD(rx_512_to_1023_octet_packets);
11026 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11027 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11028 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11029 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11030 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11031
11032 ESTAT_ADD(tx_octets);
11033 ESTAT_ADD(tx_collisions);
11034 ESTAT_ADD(tx_xon_sent);
11035 ESTAT_ADD(tx_xoff_sent);
11036 ESTAT_ADD(tx_flow_control);
11037 ESTAT_ADD(tx_mac_errors);
11038 ESTAT_ADD(tx_single_collisions);
11039 ESTAT_ADD(tx_mult_collisions);
11040 ESTAT_ADD(tx_deferred);
11041 ESTAT_ADD(tx_excessive_collisions);
11042 ESTAT_ADD(tx_late_collisions);
11043 ESTAT_ADD(tx_collide_2times);
11044 ESTAT_ADD(tx_collide_3times);
11045 ESTAT_ADD(tx_collide_4times);
11046 ESTAT_ADD(tx_collide_5times);
11047 ESTAT_ADD(tx_collide_6times);
11048 ESTAT_ADD(tx_collide_7times);
11049 ESTAT_ADD(tx_collide_8times);
11050 ESTAT_ADD(tx_collide_9times);
11051 ESTAT_ADD(tx_collide_10times);
11052 ESTAT_ADD(tx_collide_11times);
11053 ESTAT_ADD(tx_collide_12times);
11054 ESTAT_ADD(tx_collide_13times);
11055 ESTAT_ADD(tx_collide_14times);
11056 ESTAT_ADD(tx_collide_15times);
11057 ESTAT_ADD(tx_ucast_packets);
11058 ESTAT_ADD(tx_mcast_packets);
11059 ESTAT_ADD(tx_bcast_packets);
11060 ESTAT_ADD(tx_carrier_sense_errors);
11061 ESTAT_ADD(tx_discards);
11062 ESTAT_ADD(tx_errors);
11063
11064 ESTAT_ADD(dma_writeq_full);
11065 ESTAT_ADD(dma_write_prioq_full);
11066 ESTAT_ADD(rxbds_empty);
11067 ESTAT_ADD(rx_discards);
11068 ESTAT_ADD(rx_errors);
11069 ESTAT_ADD(rx_threshold_hit);
11070
11071 ESTAT_ADD(dma_readq_full);
11072 ESTAT_ADD(dma_read_prioq_full);
11073 ESTAT_ADD(tx_comp_queue_full);
11074
11075 ESTAT_ADD(ring_set_send_prod_index);
11076 ESTAT_ADD(ring_status_update);
11077 ESTAT_ADD(nic_irqs);
11078 ESTAT_ADD(nic_avoided_irqs);
11079 ESTAT_ADD(nic_tx_threshold_hit);
11080
11081 ESTAT_ADD(mbuf_lwm_thresh_hit);
11082 }
11083
11084 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11085 {
11086 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11087 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11088
11089 stats->rx_packets = old_stats->rx_packets +
11090 get_stat64(&hw_stats->rx_ucast_packets) +
11091 get_stat64(&hw_stats->rx_mcast_packets) +
11092 get_stat64(&hw_stats->rx_bcast_packets);
11093
11094 stats->tx_packets = old_stats->tx_packets +
11095 get_stat64(&hw_stats->tx_ucast_packets) +
11096 get_stat64(&hw_stats->tx_mcast_packets) +
11097 get_stat64(&hw_stats->tx_bcast_packets);
11098
11099 stats->rx_bytes = old_stats->rx_bytes +
11100 get_stat64(&hw_stats->rx_octets);
11101 stats->tx_bytes = old_stats->tx_bytes +
11102 get_stat64(&hw_stats->tx_octets);
11103
11104 stats->rx_errors = old_stats->rx_errors +
11105 get_stat64(&hw_stats->rx_errors);
11106 stats->tx_errors = old_stats->tx_errors +
11107 get_stat64(&hw_stats->tx_errors) +
11108 get_stat64(&hw_stats->tx_mac_errors) +
11109 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11110 get_stat64(&hw_stats->tx_discards);
11111
11112 stats->multicast = old_stats->multicast +
11113 get_stat64(&hw_stats->rx_mcast_packets);
11114 stats->collisions = old_stats->collisions +
11115 get_stat64(&hw_stats->tx_collisions);
11116
11117 stats->rx_length_errors = old_stats->rx_length_errors +
11118 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11119 get_stat64(&hw_stats->rx_undersize_packets);
11120
11121 stats->rx_over_errors = old_stats->rx_over_errors +
11122 get_stat64(&hw_stats->rxbds_empty);
11123 stats->rx_frame_errors = old_stats->rx_frame_errors +
11124 get_stat64(&hw_stats->rx_align_errors);
11125 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11126 get_stat64(&hw_stats->tx_discards);
11127 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11128 get_stat64(&hw_stats->tx_carrier_sense_errors);
11129
11130 stats->rx_crc_errors = old_stats->rx_crc_errors +
11131 tg3_calc_crc_errors(tp);
11132
11133 stats->rx_missed_errors = old_stats->rx_missed_errors +
11134 get_stat64(&hw_stats->rx_discards);
11135
11136 stats->rx_dropped = tp->rx_dropped;
11137 stats->tx_dropped = tp->tx_dropped;
11138 }
11139
11140 static int tg3_get_regs_len(struct net_device *dev)
11141 {
11142 return TG3_REG_BLK_SIZE;
11143 }
11144
11145 static void tg3_get_regs(struct net_device *dev,
11146 struct ethtool_regs *regs, void *_p)
11147 {
11148 struct tg3 *tp = netdev_priv(dev);
11149
11150 regs->version = 0;
11151
11152 memset(_p, 0, TG3_REG_BLK_SIZE);
11153
11154 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11155 return;
11156
11157 tg3_full_lock(tp, 0);
11158
11159 tg3_dump_legacy_regs(tp, (u32 *)_p);
11160
11161 tg3_full_unlock(tp);
11162 }
11163
11164 static int tg3_get_eeprom_len(struct net_device *dev)
11165 {
11166 struct tg3 *tp = netdev_priv(dev);
11167
11168 return tp->nvram_size;
11169 }
11170
11171 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11172 {
11173 struct tg3 *tp = netdev_priv(dev);
11174 int ret;
11175 u8 *pd;
11176 u32 i, offset, len, b_offset, b_count;
11177 __be32 val;
11178
11179 if (tg3_flag(tp, NO_NVRAM))
11180 return -EINVAL;
11181
11182 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11183 return -EAGAIN;
11184
11185 offset = eeprom->offset;
11186 len = eeprom->len;
11187 eeprom->len = 0;
11188
11189 eeprom->magic = TG3_EEPROM_MAGIC;
11190
11191 if (offset & 3) {
11192 /* adjustments to start on required 4 byte boundary */
11193 b_offset = offset & 3;
11194 b_count = 4 - b_offset;
11195 if (b_count > len) {
11196 /* i.e. offset=1 len=2 */
11197 b_count = len;
11198 }
11199 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11200 if (ret)
11201 return ret;
11202 memcpy(data, ((char *)&val) + b_offset, b_count);
11203 len -= b_count;
11204 offset += b_count;
11205 eeprom->len += b_count;
11206 }
11207
11208 /* read bytes up to the last 4 byte boundary */
11209 pd = &data[eeprom->len];
11210 for (i = 0; i < (len - (len & 3)); i += 4) {
11211 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11212 if (ret) {
11213 eeprom->len += i;
11214 return ret;
11215 }
11216 memcpy(pd + i, &val, 4);
11217 }
11218 eeprom->len += i;
11219
11220 if (len & 3) {
11221 /* read last bytes not ending on 4 byte boundary */
11222 pd = &data[eeprom->len];
11223 b_count = len & 3;
11224 b_offset = offset + len - b_count;
11225 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11226 if (ret)
11227 return ret;
11228 memcpy(pd, &val, b_count);
11229 eeprom->len += b_count;
11230 }
11231 return 0;
11232 }
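/* Worked example (values illustrative): offset = 5 and len = 10
 * (bytes 5..14) splits into a 3-byte head taken from the word at
 * offset 4, one aligned word covering bytes 8..11, and a 3-byte tail
 * taken from the word at offset 12, so every NVRAM access above is a
 * whole, 4-byte-aligned read.
 */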
11233
11234 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11235 {
11236 struct tg3 *tp = netdev_priv(dev);
11237 int ret;
11238 u32 offset, len, b_offset, odd_len;
11239 u8 *buf;
11240 __be32 start, end;
11241
11242 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11243 return -EAGAIN;
11244
11245 if (tg3_flag(tp, NO_NVRAM) ||
11246 eeprom->magic != TG3_EEPROM_MAGIC)
11247 return -EINVAL;
11248
11249 offset = eeprom->offset;
11250 len = eeprom->len;
11251
11252 if ((b_offset = (offset & 3))) {
11253 /* adjustments to start on required 4 byte boundary */
11254 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11255 if (ret)
11256 return ret;
11257 len += b_offset;
11258 offset &= ~3;
11259 if (len < 4)
11260 len = 4;
11261 }
11262
11263 odd_len = 0;
11264 if (len & 3) {
11265 /* adjustments to end on required 4 byte boundary */
11266 odd_len = 1;
11267 len = (len + 3) & ~3;
11268 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11269 if (ret)
11270 return ret;
11271 }
11272
11273 buf = data;
11274 if (b_offset || odd_len) {
11275 buf = kmalloc(len, GFP_KERNEL);
11276 if (!buf)
11277 return -ENOMEM;
11278 if (b_offset)
11279 memcpy(buf, &start, 4);
11280 if (odd_len)
11281 memcpy(buf+len-4, &end, 4);
11282 memcpy(buf + b_offset, data, eeprom->len);
11283 }
11284
11285 ret = tg3_nvram_write_block(tp, offset, len, buf);
11286
11287 if (buf != data)
11288 kfree(buf);
11289
11290 return ret;
11291 }
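/* Worked example (values illustrative): offset = 6 and len = 3
 * (bytes 6..8) is widened to the enclosing words, offset 4 and len 8.
 * The first and last words are pre-read into start/end and copied into
 * the bounce buffer, the caller's 3 bytes are laid over them at
 * buf + 2, and the whole 8-byte block is written back, so bytes 4, 5
 * and 9..11 keep their old contents.
 */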
11292
11293 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11294 {
11295 struct tg3 *tp = netdev_priv(dev);
11296
11297 if (tg3_flag(tp, USE_PHYLIB)) {
11298 struct phy_device *phydev;
11299 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11300 return -EAGAIN;
11301 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11302 return phy_ethtool_gset(phydev, cmd);
11303 }
11304
11305 cmd->supported = (SUPPORTED_Autoneg);
11306
11307 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11308 cmd->supported |= (SUPPORTED_1000baseT_Half |
11309 SUPPORTED_1000baseT_Full);
11310
11311 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11312 cmd->supported |= (SUPPORTED_100baseT_Half |
11313 SUPPORTED_100baseT_Full |
11314 SUPPORTED_10baseT_Half |
11315 SUPPORTED_10baseT_Full |
11316 SUPPORTED_TP);
11317 cmd->port = PORT_TP;
11318 } else {
11319 cmd->supported |= SUPPORTED_FIBRE;
11320 cmd->port = PORT_FIBRE;
11321 }
11322
11323 cmd->advertising = tp->link_config.advertising;
11324 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11325 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11326 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11327 cmd->advertising |= ADVERTISED_Pause;
11328 } else {
11329 cmd->advertising |= ADVERTISED_Pause |
11330 ADVERTISED_Asym_Pause;
11331 }
11332 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11333 cmd->advertising |= ADVERTISED_Asym_Pause;
11334 }
11335 }
11336 if (netif_running(dev) && tp->link_up) {
11337 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11338 cmd->duplex = tp->link_config.active_duplex;
11339 cmd->lp_advertising = tp->link_config.rmt_adv;
11340 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11341 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11342 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11343 else
11344 cmd->eth_tp_mdix = ETH_TP_MDI;
11345 }
11346 } else {
11347 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11348 cmd->duplex = DUPLEX_UNKNOWN;
11349 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11350 }
11351 cmd->phy_address = tp->phy_addr;
11352 cmd->transceiver = XCVR_INTERNAL;
11353 cmd->autoneg = tp->link_config.autoneg;
11354 cmd->maxtxpkt = 0;
11355 cmd->maxrxpkt = 0;
11356 return 0;
11357 }
11358
11359 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11360 {
11361 struct tg3 *tp = netdev_priv(dev);
11362 u32 speed = ethtool_cmd_speed(cmd);
11363
11364 if (tg3_flag(tp, USE_PHYLIB)) {
11365 struct phy_device *phydev;
11366 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11367 return -EAGAIN;
11368 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11369 return phy_ethtool_sset(phydev, cmd);
11370 }
11371
11372 if (cmd->autoneg != AUTONEG_ENABLE &&
11373 cmd->autoneg != AUTONEG_DISABLE)
11374 return -EINVAL;
11375
11376 if (cmd->autoneg == AUTONEG_DISABLE &&
11377 cmd->duplex != DUPLEX_FULL &&
11378 cmd->duplex != DUPLEX_HALF)
11379 return -EINVAL;
11380
11381 if (cmd->autoneg == AUTONEG_ENABLE) {
11382 u32 mask = ADVERTISED_Autoneg |
11383 ADVERTISED_Pause |
11384 ADVERTISED_Asym_Pause;
11385
11386 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11387 mask |= ADVERTISED_1000baseT_Half |
11388 ADVERTISED_1000baseT_Full;
11389
11390 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11391 mask |= ADVERTISED_100baseT_Half |
11392 ADVERTISED_100baseT_Full |
11393 ADVERTISED_10baseT_Half |
11394 ADVERTISED_10baseT_Full |
11395 ADVERTISED_TP;
11396 else
11397 mask |= ADVERTISED_FIBRE;
11398
11399 if (cmd->advertising & ~mask)
11400 return -EINVAL;
11401
11402 mask &= (ADVERTISED_1000baseT_Half |
11403 ADVERTISED_1000baseT_Full |
11404 ADVERTISED_100baseT_Half |
11405 ADVERTISED_100baseT_Full |
11406 ADVERTISED_10baseT_Half |
11407 ADVERTISED_10baseT_Full);
11408
11409 cmd->advertising &= mask;
11410 } else {
11411 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11412 if (speed != SPEED_1000)
11413 return -EINVAL;
11414
11415 if (cmd->duplex != DUPLEX_FULL)
11416 return -EINVAL;
11417 } else {
11418 if (speed != SPEED_100 &&
11419 speed != SPEED_10)
11420 return -EINVAL;
11421 }
11422 }
11423
11424 tg3_full_lock(tp, 0);
11425
11426 tp->link_config.autoneg = cmd->autoneg;
11427 if (cmd->autoneg == AUTONEG_ENABLE) {
11428 tp->link_config.advertising = (cmd->advertising |
11429 ADVERTISED_Autoneg);
11430 tp->link_config.speed = SPEED_UNKNOWN;
11431 tp->link_config.duplex = DUPLEX_UNKNOWN;
11432 } else {
11433 tp->link_config.advertising = 0;
11434 tp->link_config.speed = speed;
11435 tp->link_config.duplex = cmd->duplex;
11436 }
11437
11438 if (netif_running(dev))
11439 tg3_setup_phy(tp, 1);
11440
11441 tg3_full_unlock(tp);
11442
11443 return 0;
11444 }
11445
11446 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11447 {
11448 struct tg3 *tp = netdev_priv(dev);
11449
11450 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11451 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11452 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11453 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11454 }
11455
11456 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11457 {
11458 struct tg3 *tp = netdev_priv(dev);
11459
11460 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11461 wol->supported = WAKE_MAGIC;
11462 else
11463 wol->supported = 0;
11464 wol->wolopts = 0;
11465 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11466 wol->wolopts = WAKE_MAGIC;
11467 memset(&wol->sopass, 0, sizeof(wol->sopass));
11468 }
11469
11470 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11471 {
11472 struct tg3 *tp = netdev_priv(dev);
11473 struct device *dp = &tp->pdev->dev;
11474
11475 if (wol->wolopts & ~WAKE_MAGIC)
11476 return -EINVAL;
11477 if ((wol->wolopts & WAKE_MAGIC) &&
11478 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11479 return -EINVAL;
11480
11481 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11482
11483 spin_lock_bh(&tp->lock);
11484 if (device_may_wakeup(dp))
11485 tg3_flag_set(tp, WOL_ENABLE);
11486 else
11487 tg3_flag_clear(tp, WOL_ENABLE);
11488 spin_unlock_bh(&tp->lock);
11489
11490 return 0;
11491 }
11492
11493 static u32 tg3_get_msglevel(struct net_device *dev)
11494 {
11495 struct tg3 *tp = netdev_priv(dev);
11496 return tp->msg_enable;
11497 }
11498
11499 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11500 {
11501 struct tg3 *tp = netdev_priv(dev);
11502 tp->msg_enable = value;
11503 }
11504
11505 static int tg3_nway_reset(struct net_device *dev)
11506 {
11507 struct tg3 *tp = netdev_priv(dev);
11508 int r;
11509
11510 if (!netif_running(dev))
11511 return -EAGAIN;
11512
11513 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11514 return -EINVAL;
11515
11516 if (tg3_flag(tp, USE_PHYLIB)) {
11517 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11518 return -EAGAIN;
11519 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11520 } else {
11521 u32 bmcr;
11522
11523 spin_lock_bh(&tp->lock);
11524 r = -EINVAL;
11525 tg3_readphy(tp, MII_BMCR, &bmcr);
11526 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11527 ((bmcr & BMCR_ANENABLE) ||
11528 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11529 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11530 BMCR_ANENABLE);
11531 r = 0;
11532 }
11533 spin_unlock_bh(&tp->lock);
11534 }
11535
11536 return r;
11537 }
11538
11539 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11540 {
11541 struct tg3 *tp = netdev_priv(dev);
11542
11543 ering->rx_max_pending = tp->rx_std_ring_mask;
11544 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11545 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11546 else
11547 ering->rx_jumbo_max_pending = 0;
11548
11549 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11550
11551 ering->rx_pending = tp->rx_pending;
11552 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11553 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11554 else
11555 ering->rx_jumbo_pending = 0;
11556
11557 ering->tx_pending = tp->napi[0].tx_pending;
11558 }
11559
11560 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11561 {
11562 struct tg3 *tp = netdev_priv(dev);
11563 int i, irq_sync = 0, err = 0;
11564
11565 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11566 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11567 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11568 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11569 (tg3_flag(tp, TSO_BUG) &&
11570 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11571 return -EINVAL;
11572
11573 if (netif_running(dev)) {
11574 tg3_phy_stop(tp);
11575 tg3_netif_stop(tp);
11576 irq_sync = 1;
11577 }
11578
11579 tg3_full_lock(tp, irq_sync);
11580
11581 tp->rx_pending = ering->rx_pending;
11582
11583 if (tg3_flag(tp, MAX_RXPEND_64) &&
11584 tp->rx_pending > 63)
11585 tp->rx_pending = 63;
11586 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11587
11588 for (i = 0; i < tp->irq_max; i++)
11589 tp->napi[i].tx_pending = ering->tx_pending;
11590
11591 if (netif_running(dev)) {
11592 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11593 err = tg3_restart_hw(tp, 1);
11594 if (!err)
11595 tg3_netif_start(tp);
11596 }
11597
11598 tg3_full_unlock(tp);
11599
11600 if (irq_sync && !err)
11601 tg3_phy_start(tp);
11602
11603 return err;
11604 }
11605
11606 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11607 {
11608 struct tg3 *tp = netdev_priv(dev);
11609
11610 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11611
11612 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11613 epause->rx_pause = 1;
11614 else
11615 epause->rx_pause = 0;
11616
11617 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11618 epause->tx_pause = 1;
11619 else
11620 epause->tx_pause = 0;
11621 }
11622
11623 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11624 {
11625 struct tg3 *tp = netdev_priv(dev);
11626 int err = 0;
11627
11628 if (tg3_flag(tp, USE_PHYLIB)) {
11629 u32 newadv;
11630 struct phy_device *phydev;
11631
11632 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11633
11634 if (!(phydev->supported & SUPPORTED_Pause) ||
11635 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11636 (epause->rx_pause != epause->tx_pause)))
11637 return -EINVAL;
11638
11639 tp->link_config.flowctrl = 0;
11640 if (epause->rx_pause) {
11641 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11642
11643 if (epause->tx_pause) {
11644 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11645 newadv = ADVERTISED_Pause;
11646 } else
11647 newadv = ADVERTISED_Pause |
11648 ADVERTISED_Asym_Pause;
11649 } else if (epause->tx_pause) {
11650 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11651 newadv = ADVERTISED_Asym_Pause;
11652 } else
11653 newadv = 0;
11654
11655 if (epause->autoneg)
11656 tg3_flag_set(tp, PAUSE_AUTONEG);
11657 else
11658 tg3_flag_clear(tp, PAUSE_AUTONEG);
11659
11660 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11661 u32 oldadv = phydev->advertising &
11662 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11663 if (oldadv != newadv) {
11664 phydev->advertising &=
11665 ~(ADVERTISED_Pause |
11666 ADVERTISED_Asym_Pause);
11667 phydev->advertising |= newadv;
11668 if (phydev->autoneg) {
11669 /*
11670 * Always renegotiate the link to
11671 * inform our link partner of our
11672 * flow control settings, even if the
11673 * flow control is forced. Let
11674 * tg3_adjust_link() do the final
11675 * flow control setup.
11676 */
11677 return phy_start_aneg(phydev);
11678 }
11679 }
11680
11681 if (!epause->autoneg)
11682 tg3_setup_flow_control(tp, 0, 0);
11683 } else {
11684 tp->link_config.advertising &=
11685 ~(ADVERTISED_Pause |
11686 ADVERTISED_Asym_Pause);
11687 tp->link_config.advertising |= newadv;
11688 }
11689 } else {
11690 int irq_sync = 0;
11691
11692 if (netif_running(dev)) {
11693 tg3_netif_stop(tp);
11694 irq_sync = 1;
11695 }
11696
11697 tg3_full_lock(tp, irq_sync);
11698
11699 if (epause->autoneg)
11700 tg3_flag_set(tp, PAUSE_AUTONEG);
11701 else
11702 tg3_flag_clear(tp, PAUSE_AUTONEG);
11703 if (epause->rx_pause)
11704 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11705 else
11706 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11707 if (epause->tx_pause)
11708 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11709 else
11710 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11711
11712 if (netif_running(dev)) {
11713 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11714 err = tg3_restart_hw(tp, 1);
11715 if (!err)
11716 tg3_netif_start(tp);
11717 }
11718
11719 tg3_full_unlock(tp);
11720 }
11721
11722 return err;
11723 }
11724
11725 static int tg3_get_sset_count(struct net_device *dev, int sset)
11726 {
11727 switch (sset) {
11728 case ETH_SS_TEST:
11729 return TG3_NUM_TEST;
11730 case ETH_SS_STATS:
11731 return TG3_NUM_STATS;
11732 default:
11733 return -EOPNOTSUPP;
11734 }
11735 }
11736
11737 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11738 u32 *rules __always_unused)
11739 {
11740 struct tg3 *tp = netdev_priv(dev);
11741
11742 if (!tg3_flag(tp, SUPPORT_MSIX))
11743 return -EOPNOTSUPP;
11744
11745 switch (info->cmd) {
11746 case ETHTOOL_GRXRINGS:
11747 if (netif_running(tp->dev))
11748 info->data = tp->rxq_cnt;
11749 else {
11750 info->data = num_online_cpus();
11751 if (info->data > TG3_RSS_MAX_NUM_QS)
11752 info->data = TG3_RSS_MAX_NUM_QS;
11753 }
11754
11755 /* The first interrupt vector only
11756 * handles link interrupts.
11757 */
11758 info->data -= 1;
11759 return 0;
11760
11761 default:
11762 return -EOPNOTSUPP;
11763 }
11764 }
11765
11766 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11767 {
11768 u32 size = 0;
11769 struct tg3 *tp = netdev_priv(dev);
11770
11771 if (tg3_flag(tp, SUPPORT_MSIX))
11772 size = TG3_RSS_INDIR_TBL_SIZE;
11773
11774 return size;
11775 }
11776
11777 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11778 {
11779 struct tg3 *tp = netdev_priv(dev);
11780 int i;
11781
11782 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11783 indir[i] = tp->rss_ind_tbl[i];
11784
11785 return 0;
11786 }
11787
11788 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11789 {
11790 struct tg3 *tp = netdev_priv(dev);
11791 size_t i;
11792
11793 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11794 tp->rss_ind_tbl[i] = indir[i];
11795
11796 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11797 return 0;
11798
11799 /* It is legal to write the indirection
11800 * table while the device is running.
11801 */
11802 tg3_full_lock(tp, 0);
11803 tg3_rss_write_indir_tbl(tp);
11804 tg3_full_unlock(tp);
11805
11806 return 0;
11807 }
11808
11809 static void tg3_get_channels(struct net_device *dev,
11810 struct ethtool_channels *channel)
11811 {
11812 struct tg3 *tp = netdev_priv(dev);
11813 u32 deflt_qs = netif_get_num_default_rss_queues();
11814
11815 channel->max_rx = tp->rxq_max;
11816 channel->max_tx = tp->txq_max;
11817
11818 if (netif_running(dev)) {
11819 channel->rx_count = tp->rxq_cnt;
11820 channel->tx_count = tp->txq_cnt;
11821 } else {
11822 if (tp->rxq_req)
11823 channel->rx_count = tp->rxq_req;
11824 else
11825 channel->rx_count = min(deflt_qs, tp->rxq_max);
11826
11827 if (tp->txq_req)
11828 channel->tx_count = tp->txq_req;
11829 else
11830 channel->tx_count = min(deflt_qs, tp->txq_max);
11831 }
11832 }
11833
11834 static int tg3_set_channels(struct net_device *dev,
11835 struct ethtool_channels *channel)
11836 {
11837 struct tg3 *tp = netdev_priv(dev);
11838
11839 if (!tg3_flag(tp, SUPPORT_MSIX))
11840 return -EOPNOTSUPP;
11841
11842 if (channel->rx_count > tp->rxq_max ||
11843 channel->tx_count > tp->txq_max)
11844 return -EINVAL;
11845
11846 tp->rxq_req = channel->rx_count;
11847 tp->txq_req = channel->tx_count;
11848
11849 if (!netif_running(dev))
11850 return 0;
11851
11852 tg3_stop(tp);
11853
11854 tg3_carrier_off(tp);
11855
11856 tg3_start(tp, true, false, false);
11857
11858 return 0;
11859 }
11860
11861 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11862 {
11863 switch (stringset) {
11864 case ETH_SS_STATS:
11865 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11866 break;
11867 case ETH_SS_TEST:
11868 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11869 break;
11870 default:
11871 WARN_ON(1); /* we need a WARN() */
11872 break;
11873 }
11874 }
11875
11876 static int tg3_set_phys_id(struct net_device *dev,
11877 enum ethtool_phys_id_state state)
11878 {
11879 struct tg3 *tp = netdev_priv(dev);
11880
11881 if (!netif_running(tp->dev))
11882 return -EAGAIN;
11883
11884 switch (state) {
11885 case ETHTOOL_ID_ACTIVE:
11886 return 1; /* cycle on/off once per second */
11887
11888 case ETHTOOL_ID_ON:
11889 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11890 LED_CTRL_1000MBPS_ON |
11891 LED_CTRL_100MBPS_ON |
11892 LED_CTRL_10MBPS_ON |
11893 LED_CTRL_TRAFFIC_OVERRIDE |
11894 LED_CTRL_TRAFFIC_BLINK |
11895 LED_CTRL_TRAFFIC_LED);
11896 break;
11897
11898 case ETHTOOL_ID_OFF:
11899 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11900 LED_CTRL_TRAFFIC_OVERRIDE);
11901 break;
11902
11903 case ETHTOOL_ID_INACTIVE:
11904 tw32(MAC_LED_CTRL, tp->led_ctrl);
11905 break;
11906 }
11907
11908 return 0;
11909 }
11910
11911 static void tg3_get_ethtool_stats(struct net_device *dev,
11912 struct ethtool_stats *estats, u64 *tmp_stats)
11913 {
11914 struct tg3 *tp = netdev_priv(dev);
11915
11916 if (tp->hw_stats)
11917 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11918 else
11919 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11920 }
11921
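/* Read the device's VPD block.  On TG3_EEPROM_MAGIC parts this looks for
 * an extended-VPD directory entry in NVRAM, falling back to the default
 * VPD offset/length; on everything else it reads through the PCI VPD
 * capability.  Returns a kmalloc()ed buffer the caller must free, or
 * NULL on failure.
 */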
11922 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11923 {
11924 int i;
11925 __be32 *buf;
11926 u32 offset = 0, len = 0;
11927 u32 magic, val;
11928
11929 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11930 return NULL;
11931
11932 if (magic == TG3_EEPROM_MAGIC) {
11933 for (offset = TG3_NVM_DIR_START;
11934 offset < TG3_NVM_DIR_END;
11935 offset += TG3_NVM_DIRENT_SIZE) {
11936 if (tg3_nvram_read(tp, offset, &val))
11937 return NULL;
11938
11939 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11940 TG3_NVM_DIRTYPE_EXTVPD)
11941 break;
11942 }
11943
11944 if (offset != TG3_NVM_DIR_END) {
11945 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11946 if (tg3_nvram_read(tp, offset + 4, &offset))
11947 return NULL;
11948
11949 offset = tg3_nvram_logical_addr(tp, offset);
11950 }
11951 }
11952
11953 if (!offset || !len) {
11954 offset = TG3_NVM_VPD_OFF;
11955 len = TG3_NVM_VPD_LEN;
11956 }
11957
11958 buf = kmalloc(len, GFP_KERNEL);
11959 if (buf == NULL)
11960 return NULL;
11961
11962 if (magic == TG3_EEPROM_MAGIC) {
11963 for (i = 0; i < len; i += 4) {
11964 /* The data is in little-endian format in NVRAM.
11965 * Use the big-endian read routines to preserve
11966 * the byte order as it exists in NVRAM.
11967 */
11968 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11969 goto error;
11970 }
11971 } else {
11972 u8 *ptr;
11973 ssize_t cnt;
11974 unsigned int pos = 0;
11975
11976 ptr = (u8 *)&buf[0];
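/* Pull the VPD through the PCI VPD capability in up to three
 * chunks, treating timeouts and signals as empty reads rather
 * than hard errors.
 */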
11977 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11978 cnt = pci_read_vpd(tp->pdev, pos,
11979 len - pos, ptr);
11980 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11981 cnt = 0;
11982 else if (cnt < 0)
11983 goto error;
11984 }
11985 if (pos != len)
11986 goto error;
11987 }
11988
11989 *vpdlen = len;
11990
11991 return buf;
11992
11993 error:
11994 kfree(buf);
11995 return NULL;
11996 }
11997
11998 #define NVRAM_TEST_SIZE 0x100
11999 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12000 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12001 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12002 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12003 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12004 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12005 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12006 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12007
12008 static int tg3_test_nvram(struct tg3 *tp)
12009 {
12010 u32 csum, magic, len;
12011 __be32 *buf;
12012 int i, j, k, err = 0, size;
12013
12014 if (tg3_flag(tp, NO_NVRAM))
12015 return 0;
12016
12017 if (tg3_nvram_read(tp, 0, &magic) != 0)
12018 return -EIO;
12019
12020 if (magic == TG3_EEPROM_MAGIC)
12021 size = NVRAM_TEST_SIZE;
12022 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12023 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12024 TG3_EEPROM_SB_FORMAT_1) {
12025 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12026 case TG3_EEPROM_SB_REVISION_0:
12027 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12028 break;
12029 case TG3_EEPROM_SB_REVISION_2:
12030 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12031 break;
12032 case TG3_EEPROM_SB_REVISION_3:
12033 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12034 break;
12035 case TG3_EEPROM_SB_REVISION_4:
12036 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12037 break;
12038 case TG3_EEPROM_SB_REVISION_5:
12039 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12040 break;
12041 case TG3_EEPROM_SB_REVISION_6:
12042 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12043 break;
12044 default:
12045 return -EIO;
12046 }
12047 } else
12048 return 0;
12049 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12050 size = NVRAM_SELFBOOT_HW_SIZE;
12051 else
12052 return -EIO;
12053
12054 buf = kmalloc(size, GFP_KERNEL);
12055 if (buf == NULL)
12056 return -ENOMEM;
12057
12058 err = -EIO;
12059 for (i = 0, j = 0; i < size; i += 4, j++) {
12060 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12061 if (err)
12062 break;
12063 }
12064 if (i < size)
12065 goto out;
12066
12067 /* Selfboot format */
12068 magic = be32_to_cpu(buf[0]);
12069 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12070 TG3_EEPROM_MAGIC_FW) {
12071 u8 *buf8 = (u8 *) buf, csum8 = 0;
12072
12073 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12074 TG3_EEPROM_SB_REVISION_2) {
12075 /* For rev 2, the csum doesn't include the MBA. */
12076 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12077 csum8 += buf8[i];
12078 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12079 csum8 += buf8[i];
12080 } else {
12081 for (i = 0; i < size; i++)
12082 csum8 += buf8[i];
12083 }
12084
12085 if (csum8 == 0) {
12086 err = 0;
12087 goto out;
12088 }
12089
12090 err = -EIO;
12091 goto out;
12092 }
12093
12094 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12095 TG3_EEPROM_MAGIC_HW) {
12096 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12097 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12098 u8 *buf8 = (u8 *) buf;
12099
12100 /* Separate the parity bits and the data bytes. */
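/* Bytes 0, 8, 16 and 17 carry the parity bits
 * (7 + 7 + 6 + 8 = 28, one per data byte); the remaining
 * 28 bytes are data.
 */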
12101 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12102 if ((i == 0) || (i == 8)) {
12103 int l;
12104 u8 msk;
12105
12106 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12107 parity[k++] = buf8[i] & msk;
12108 i++;
12109 } else if (i == 16) {
12110 int l;
12111 u8 msk;
12112
12113 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12114 parity[k++] = buf8[i] & msk;
12115 i++;
12116
12117 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12118 parity[k++] = buf8[i] & msk;
12119 i++;
12120 }
12121 data[j++] = buf8[i];
12122 }
12123
12124 err = -EIO;
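/* Odd parity: a data byte with an even number of set bits must
 * have its parity bit set, and one with an odd count must have
 * it clear.
 */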
12125 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12126 u8 hw8 = hweight8(data[i]);
12127
12128 if ((hw8 & 0x1) && parity[i])
12129 goto out;
12130 else if (!(hw8 & 0x1) && !parity[i])
12131 goto out;
12132 }
12133 err = 0;
12134 goto out;
12135 }
12136
12137 err = -EIO;
12138
12139 /* Bootstrap checksum at offset 0x10 */
12140 csum = calc_crc((unsigned char *) buf, 0x10);
12141 if (csum != le32_to_cpu(buf[0x10/4]))
12142 goto out;
12143
12144 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12145 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12146 if (csum != le32_to_cpu(buf[0xfc/4]))
12147 goto out;
12148
12149 kfree(buf);
12150
12151 buf = tg3_vpd_readblock(tp, &len);
12152 if (!buf)
12153 return -ENOMEM;
12154
12155 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12156 if (i > 0) {
12157 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12158 if (j < 0)
12159 goto out;
12160
12161 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12162 goto out;
12163
12164 i += PCI_VPD_LRDT_TAG_SIZE;
12165 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12166 PCI_VPD_RO_KEYWORD_CHKSUM);
12167 if (j > 0) {
12168 u8 csum8 = 0;
12169
12170 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12171
12172 for (i = 0; i <= j; i++)
12173 csum8 += ((u8 *)buf)[i];
12174
12175 if (csum8)
12176 goto out;
12177 }
12178 }
12179
12180 err = 0;
12181
12182 out:
12183 kfree(buf);
12184 return err;
12185 }
12186
12187 #define TG3_SERDES_TIMEOUT_SEC 2
12188 #define TG3_COPPER_TIMEOUT_SEC 6
12189
12190 static int tg3_test_link(struct tg3 *tp)
12191 {
12192 int i, max;
12193
12194 if (!netif_running(tp->dev))
12195 return -ENODEV;
12196
12197 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12198 max = TG3_SERDES_TIMEOUT_SEC;
12199 else
12200 max = TG3_COPPER_TIMEOUT_SEC;
12201
12202 for (i = 0; i < max; i++) {
12203 if (tp->link_up)
12204 return 0;
12205
12206 if (msleep_interruptible(1000))
12207 break;
12208 }
12209
12210 return -EIO;
12211 }
12212
12213 /* Only test the commonly used registers */
12214 static int tg3_test_registers(struct tg3 *tp)
12215 {
12216 int i, is_5705, is_5750;
12217 u32 offset, read_mask, write_mask, val, save_val, read_val;
12218 static struct {
12219 u16 offset;
12220 u16 flags;
12221 #define TG3_FL_5705 0x1
12222 #define TG3_FL_NOT_5705 0x2
12223 #define TG3_FL_NOT_5788 0x4
12224 #define TG3_FL_NOT_5750 0x8
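/* read_mask selects read-only bits that must hold their value
 * across writes; write_mask selects read/write bits that must
 * accept both all-zeros and all-ones.
 */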
12225 u32 read_mask;
12226 u32 write_mask;
12227 } reg_tbl[] = {
12228 /* MAC Control Registers */
12229 { MAC_MODE, TG3_FL_NOT_5705,
12230 0x00000000, 0x00ef6f8c },
12231 { MAC_MODE, TG3_FL_5705,
12232 0x00000000, 0x01ef6b8c },
12233 { MAC_STATUS, TG3_FL_NOT_5705,
12234 0x03800107, 0x00000000 },
12235 { MAC_STATUS, TG3_FL_5705,
12236 0x03800100, 0x00000000 },
12237 { MAC_ADDR_0_HIGH, 0x0000,
12238 0x00000000, 0x0000ffff },
12239 { MAC_ADDR_0_LOW, 0x0000,
12240 0x00000000, 0xffffffff },
12241 { MAC_RX_MTU_SIZE, 0x0000,
12242 0x00000000, 0x0000ffff },
12243 { MAC_TX_MODE, 0x0000,
12244 0x00000000, 0x00000070 },
12245 { MAC_TX_LENGTHS, 0x0000,
12246 0x00000000, 0x00003fff },
12247 { MAC_RX_MODE, TG3_FL_NOT_5705,
12248 0x00000000, 0x000007fc },
12249 { MAC_RX_MODE, TG3_FL_5705,
12250 0x00000000, 0x000007dc },
12251 { MAC_HASH_REG_0, 0x0000,
12252 0x00000000, 0xffffffff },
12253 { MAC_HASH_REG_1, 0x0000,
12254 0x00000000, 0xffffffff },
12255 { MAC_HASH_REG_2, 0x0000,
12256 0x00000000, 0xffffffff },
12257 { MAC_HASH_REG_3, 0x0000,
12258 0x00000000, 0xffffffff },
12259
12260 /* Receive Data and Receive BD Initiator Control Registers. */
12261 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12262 0x00000000, 0xffffffff },
12263 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12264 0x00000000, 0xffffffff },
12265 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12266 0x00000000, 0x00000003 },
12267 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12268 0x00000000, 0xffffffff },
12269 { RCVDBDI_STD_BD+0, 0x0000,
12270 0x00000000, 0xffffffff },
12271 { RCVDBDI_STD_BD+4, 0x0000,
12272 0x00000000, 0xffffffff },
12273 { RCVDBDI_STD_BD+8, 0x0000,
12274 0x00000000, 0xffff0002 },
12275 { RCVDBDI_STD_BD+0xc, 0x0000,
12276 0x00000000, 0xffffffff },
12277
12278 /* Receive BD Initiator Control Registers. */
12279 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12280 0x00000000, 0xffffffff },
12281 { RCVBDI_STD_THRESH, TG3_FL_5705,
12282 0x00000000, 0x000003ff },
12283 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12284 0x00000000, 0xffffffff },
12285
12286 /* Host Coalescing Control Registers. */
12287 { HOSTCC_MODE, TG3_FL_NOT_5705,
12288 0x00000000, 0x00000004 },
12289 { HOSTCC_MODE, TG3_FL_5705,
12290 0x00000000, 0x000000f6 },
12291 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12292 0x00000000, 0xffffffff },
12293 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12294 0x00000000, 0x000003ff },
12295 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12296 0x00000000, 0xffffffff },
12297 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12298 0x00000000, 0x000003ff },
12299 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12300 0x00000000, 0xffffffff },
12301 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12302 0x00000000, 0x000000ff },
12303 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12304 0x00000000, 0xffffffff },
12305 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12306 0x00000000, 0x000000ff },
12307 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12308 0x00000000, 0xffffffff },
12309 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12310 0x00000000, 0xffffffff },
12311 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12312 0x00000000, 0xffffffff },
12313 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12314 0x00000000, 0x000000ff },
12315 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12316 0x00000000, 0xffffffff },
12317 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12318 0x00000000, 0x000000ff },
12319 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12320 0x00000000, 0xffffffff },
12321 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12322 0x00000000, 0xffffffff },
12323 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12324 0x00000000, 0xffffffff },
12325 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12326 0x00000000, 0xffffffff },
12327 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12328 0x00000000, 0xffffffff },
12329 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12330 0xffffffff, 0x00000000 },
12331 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12332 0xffffffff, 0x00000000 },
12333
12334 /* Buffer Manager Control Registers. */
12335 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12336 0x00000000, 0x007fff80 },
12337 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12338 0x00000000, 0x007fffff },
12339 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12340 0x00000000, 0x0000003f },
12341 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12342 0x00000000, 0x000001ff },
12343 { BUFMGR_MB_HIGH_WATER, 0x0000,
12344 0x00000000, 0x000001ff },
12345 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12346 0xffffffff, 0x00000000 },
12347 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12348 0xffffffff, 0x00000000 },
12349
12350 /* Mailbox Registers */
12351 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12352 0x00000000, 0x000001ff },
12353 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12354 0x00000000, 0x000001ff },
12355 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12356 0x00000000, 0x000007ff },
12357 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12358 0x00000000, 0x000001ff },
12359
12360 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12361 };
12362
12363 is_5705 = is_5750 = 0;
12364 if (tg3_flag(tp, 5705_PLUS)) {
12365 is_5705 = 1;
12366 if (tg3_flag(tp, 5750_PLUS))
12367 is_5750 = 1;
12368 }
12369
12370 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12371 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12372 continue;
12373
12374 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12375 continue;
12376
12377 if (tg3_flag(tp, IS_5788) &&
12378 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12379 continue;
12380
12381 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12382 continue;
12383
12384 offset = (u32) reg_tbl[i].offset;
12385 read_mask = reg_tbl[i].read_mask;
12386 write_mask = reg_tbl[i].write_mask;
12387
12388 /* Save the original register content */
12389 save_val = tr32(offset);
12390
12391 /* Determine the read-only value. */
12392 read_val = save_val & read_mask;
12393
12394 /* Write zero to the register, then make sure the read-only bits
12395 * are not changed and the read/write bits are all zeros.
12396 */
12397 tw32(offset, 0);
12398
12399 val = tr32(offset);
12400
12401 /* Test the read-only and read/write bits. */
12402 if (((val & read_mask) != read_val) || (val & write_mask))
12403 goto out;
12404
12405 /* Write ones to all the bits defined by RdMask and WrMask, then
12406 * make sure the read-only bits are not changed and the
12407 * read/write bits are all ones.
12408 */
12409 tw32(offset, read_mask | write_mask);
12410
12411 val = tr32(offset);
12412
12413 /* Test the read-only bits. */
12414 if ((val & read_mask) != read_val)
12415 goto out;
12416
12417 /* Test the read/write bits. */
12418 if ((val & write_mask) != write_mask)
12419 goto out;
12420
12421 tw32(offset, save_val);
12422 }
12423
12424 return 0;
12425
12426 out:
12427 if (netif_msg_hw(tp))
12428 netdev_err(tp->dev,
12429 "Register test failed at offset %x\n", offset);
12430 tw32(offset, save_val);
12431 return -EIO;
12432 }
12433
12434 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12435 {
12436 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12437 int i;
12438 u32 j;
12439
12440 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12441 for (j = 0; j < len; j += 4) {
12442 u32 val;
12443
12444 tg3_write_mem(tp, offset + j, test_pattern[i]);
12445 tg3_read_mem(tp, offset + j, &val);
12446 if (val != test_pattern[i])
12447 return -EIO;
12448 }
12449 }
12450 return 0;
12451 }
12452
12453 static int tg3_test_memory(struct tg3 *tp)
12454 {
12455 static struct mem_entry {
12456 u32 offset;
12457 u32 len;
12458 } mem_tbl_570x[] = {
12459 { 0x00000000, 0x00b50},
12460 { 0x00002000, 0x1c000},
12461 { 0xffffffff, 0x00000}
12462 }, mem_tbl_5705[] = {
12463 { 0x00000100, 0x0000c},
12464 { 0x00000200, 0x00008},
12465 { 0x00004000, 0x00800},
12466 { 0x00006000, 0x01000},
12467 { 0x00008000, 0x02000},
12468 { 0x00010000, 0x0e000},
12469 { 0xffffffff, 0x00000}
12470 }, mem_tbl_5755[] = {
12471 { 0x00000200, 0x00008},
12472 { 0x00004000, 0x00800},
12473 { 0x00006000, 0x00800},
12474 { 0x00008000, 0x02000},
12475 { 0x00010000, 0x0c000},
12476 { 0xffffffff, 0x00000}
12477 }, mem_tbl_5906[] = {
12478 { 0x00000200, 0x00008},
12479 { 0x00004000, 0x00400},
12480 { 0x00006000, 0x00400},
12481 { 0x00008000, 0x01000},
12482 { 0x00010000, 0x01000},
12483 { 0xffffffff, 0x00000}
12484 }, mem_tbl_5717[] = {
12485 { 0x00000200, 0x00008},
12486 { 0x00010000, 0x0a000},
12487 { 0x00020000, 0x13c00},
12488 { 0xffffffff, 0x00000}
12489 }, mem_tbl_57765[] = {
12490 { 0x00000200, 0x00008},
12491 { 0x00004000, 0x00800},
12492 { 0x00006000, 0x09800},
12493 { 0x00010000, 0x0a000},
12494 { 0xffffffff, 0x00000}
12495 };
12496 struct mem_entry *mem_tbl;
12497 int err = 0;
12498 int i;
12499
12500 if (tg3_flag(tp, 5717_PLUS))
12501 mem_tbl = mem_tbl_5717;
12502 else if (tg3_flag(tp, 57765_CLASS) ||
12503 tg3_asic_rev(tp) == ASIC_REV_5762)
12504 mem_tbl = mem_tbl_57765;
12505 else if (tg3_flag(tp, 5755_PLUS))
12506 mem_tbl = mem_tbl_5755;
12507 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12508 mem_tbl = mem_tbl_5906;
12509 else if (tg3_flag(tp, 5705_PLUS))
12510 mem_tbl = mem_tbl_5705;
12511 else
12512 mem_tbl = mem_tbl_570x;
12513
12514 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12515 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12516 if (err)
12517 break;
12518 }
12519
12520 return err;
12521 }
12522
12523 #define TG3_TSO_MSS 500
12524
12525 #define TG3_TSO_IP_HDR_LEN 20
12526 #define TG3_TSO_TCP_HDR_LEN 20
12527 #define TG3_TSO_TCP_OPT_LEN 12
12528
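/* Canned 54-byte frame header for the TSO loopback test: EtherType
 * 0x0800, a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP),
 * and a 20-byte TCP header followed by two NOPs and a 10-byte
 * timestamp option.
 */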
12529 static const u8 tg3_tso_header[] = {
12530 0x08, 0x00,
12531 0x45, 0x00, 0x00, 0x00,
12532 0x00, 0x00, 0x40, 0x00,
12533 0x40, 0x06, 0x00, 0x00,
12534 0x0a, 0x00, 0x00, 0x01,
12535 0x0a, 0x00, 0x00, 0x02,
12536 0x0d, 0x00, 0xe0, 0x00,
12537 0x00, 0x00, 0x01, 0x00,
12538 0x00, 0x00, 0x02, 0x00,
12539 0x80, 0x10, 0x10, 0x00,
12540 0x14, 0x09, 0x00, 0x00,
12541 0x01, 0x01, 0x08, 0x0a,
12542 0x11, 0x11, 0x11, 0x11,
12543 0x11, 0x11, 0x11, 0x11,
12544 };
12545
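/* Transmit a single test frame (optionally a TSO super-packet) on the
 * tx ring, poll the status block until it has been sent and looped
 * back, then verify the receive descriptor and the payload pattern
 * byte for byte.
 */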
12546 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12547 {
12548 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12549 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12550 u32 budget;
12551 struct sk_buff *skb;
12552 u8 *tx_data, *rx_data;
12553 dma_addr_t map;
12554 int num_pkts, tx_len, rx_len, i, err;
12555 struct tg3_rx_buffer_desc *desc;
12556 struct tg3_napi *tnapi, *rnapi;
12557 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12558
12559 tnapi = &tp->napi[0];
12560 rnapi = &tp->napi[0];
12561 if (tp->irq_cnt > 1) {
12562 if (tg3_flag(tp, ENABLE_RSS))
12563 rnapi = &tp->napi[1];
12564 if (tg3_flag(tp, ENABLE_TSS))
12565 tnapi = &tp->napi[1];
12566 }
12567 coal_now = tnapi->coal_now | rnapi->coal_now;
12568
12569 err = -EIO;
12570
12571 tx_len = pktsz;
12572 skb = netdev_alloc_skb(tp->dev, tx_len);
12573 if (!skb)
12574 return -ENOMEM;
12575
12576 tx_data = skb_put(skb, tx_len);
12577 memcpy(tx_data, tp->dev->dev_addr, 6);
12578 memset(tx_data + 6, 0x0, 8);
12579
12580 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12581
12582 if (tso_loopback) {
12583 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12584
12585 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12586 TG3_TSO_TCP_OPT_LEN;
12587
12588 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12589 sizeof(tg3_tso_header));
12590 mss = TG3_TSO_MSS;
12591
12592 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12593 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12594
12595 /* Set the total length field in the IP header */
12596 iph->tot_len = htons((u16)(mss + hdr_len));
12597
12598 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12599 TXD_FLAG_CPU_POST_DMA);
12600
12601 if (tg3_flag(tp, HW_TSO_1) ||
12602 tg3_flag(tp, HW_TSO_2) ||
12603 tg3_flag(tp, HW_TSO_3)) {
12604 struct tcphdr *th;
12605 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12606 th = (struct tcphdr *)&tx_data[val];
12607 th->check = 0;
12608 } else
12609 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12610
12611 if (tg3_flag(tp, HW_TSO_3)) {
12612 mss |= (hdr_len & 0xc) << 12;
12613 if (hdr_len & 0x10)
12614 base_flags |= 0x00000010;
12615 base_flags |= (hdr_len & 0x3e0) << 5;
12616 } else if (tg3_flag(tp, HW_TSO_2))
12617 mss |= hdr_len << 9;
12618 else if (tg3_flag(tp, HW_TSO_1) ||
12619 tg3_asic_rev(tp) == ASIC_REV_5705) {
12620 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12621 } else {
12622 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12623 }
12624
12625 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12626 } else {
12627 num_pkts = 1;
12628 data_off = ETH_HLEN;
12629
12630 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12631 tx_len > VLAN_ETH_FRAME_LEN)
12632 base_flags |= TXD_FLAG_JMB_PKT;
12633 }
12634
12635 for (i = data_off; i < tx_len; i++)
12636 tx_data[i] = (u8) (i & 0xff);
12637
12638 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12639 if (pci_dma_mapping_error(tp->pdev, map)) {
12640 dev_kfree_skb(skb);
12641 return -EIO;
12642 }
12643
12644 val = tnapi->tx_prod;
12645 tnapi->tx_buffers[val].skb = skb;
12646 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12647
12648 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12649 rnapi->coal_now);
12650
12651 udelay(10);
12652
12653 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12654
12655 budget = tg3_tx_avail(tnapi);
12656 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12657 base_flags | TXD_FLAG_END, mss, 0)) {
12658 tnapi->tx_buffers[val].skb = NULL;
12659 dev_kfree_skb(skb);
12660 return -EIO;
12661 }
12662
12663 tnapi->tx_prod++;
12664
12665 /* Sync BD data before updating mailbox */
12666 wmb();
12667
12668 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12669 tr32_mailbox(tnapi->prodmbox);
12670
12671 udelay(10);
12672
12673 /* Poll for up to 350 usec to allow enough time on some 10/100 Mbps devices. */
12674 for (i = 0; i < 35; i++) {
12675 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12676 coal_now);
12677
12678 udelay(10);
12679
12680 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12681 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12682 if ((tx_idx == tnapi->tx_prod) &&
12683 (rx_idx == (rx_start_idx + num_pkts)))
12684 break;
12685 }
12686
12687 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12688 dev_kfree_skb(skb);
12689
12690 if (tx_idx != tnapi->tx_prod)
12691 goto out;
12692
12693 if (rx_idx != rx_start_idx + num_pkts)
12694 goto out;
12695
12696 val = data_off;
12697 while (rx_idx != rx_start_idx) {
12698 desc = &rnapi->rx_rcb[rx_start_idx++];
12699 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12700 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12701
12702 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12703 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12704 goto out;
12705
12706 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12707 - ETH_FCS_LEN;
12708
12709 if (!tso_loopback) {
12710 if (rx_len != tx_len)
12711 goto out;
12712
12713 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12714 if (opaque_key != RXD_OPAQUE_RING_STD)
12715 goto out;
12716 } else {
12717 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12718 goto out;
12719 }
12720 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12721 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12722 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12723 goto out;
12724 }
12725
12726 if (opaque_key == RXD_OPAQUE_RING_STD) {
12727 rx_data = tpr->rx_std_buffers[desc_idx].data;
12728 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12729 mapping);
12730 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12731 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12732 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12733 mapping);
12734 } else
12735 goto out;
12736
12737 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12738 PCI_DMA_FROMDEVICE);
12739
12740 rx_data += TG3_RX_OFFSET(tp);
12741 for (i = data_off; i < rx_len; i++, val++) {
12742 if (*(rx_data + i) != (u8) (val & 0xff))
12743 goto out;
12744 }
12745 }
12746
12747 err = 0;
12748
12749 /* tg3_free_rings will unmap and free the rx_data */
12750 out:
12751 return err;
12752 }
12753
12754 #define TG3_STD_LOOPBACK_FAILED 1
12755 #define TG3_JMB_LOOPBACK_FAILED 2
12756 #define TG3_TSO_LOOPBACK_FAILED 4
12757 #define TG3_LOOPBACK_FAILED \
12758 (TG3_STD_LOOPBACK_FAILED | \
12759 TG3_JMB_LOOPBACK_FAILED | \
12760 TG3_TSO_LOOPBACK_FAILED)
12761
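/* Exercise the internal MAC loopback (skipped on 5780 and on
 * CPMU-equipped parts, where it is unreliable or deprecated), the
 * internal PHY loopback, and optionally the external loopback,
 * accumulating TG3_*_LOOPBACK_FAILED bits in data[].
 */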
12762 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12763 {
12764 int err = -EIO;
12765 u32 eee_cap;
12766 u32 jmb_pkt_sz = 9000;
12767
12768 if (tp->dma_limit)
12769 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12770
12771 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12772 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12773
12774 if (!netif_running(tp->dev)) {
12775 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12776 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12777 if (do_extlpbk)
12778 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12779 goto done;
12780 }
12781
12782 err = tg3_reset_hw(tp, 1);
12783 if (err) {
12784 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12785 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12786 if (do_extlpbk)
12787 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12788 goto done;
12789 }
12790
12791 if (tg3_flag(tp, ENABLE_RSS)) {
12792 int i;
12793
12794 /* Reroute all rx packets to the 1st queue */
12795 for (i = MAC_RSS_INDIR_TBL_0;
12796 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12797 tw32(i, 0x0);
12798 }
12799
12800 /* HW errata - MAC loopback fails in some cases on 5780.
12801 * Normal traffic and PHY loopback are not affected by
12802 * the errata.  Also, the MAC loopback test is deprecated
12803 * for all newer ASIC revisions.
12804 */
12805 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12806 !tg3_flag(tp, CPMU_PRESENT)) {
12807 tg3_mac_loopback(tp, true);
12808
12809 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12810 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12811
12812 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12813 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12814 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12815
12816 tg3_mac_loopback(tp, false);
12817 }
12818
12819 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12820 !tg3_flag(tp, USE_PHYLIB)) {
12821 int i;
12822
12823 tg3_phy_lpbk_set(tp, 0, false);
12824
12825 /* Wait for link */
12826 for (i = 0; i < 100; i++) {
12827 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12828 break;
12829 mdelay(1);
12830 }
12831
12832 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12833 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12834 if (tg3_flag(tp, TSO_CAPABLE) &&
12835 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12836 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12837 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12838 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12839 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12840
12841 if (do_extlpbk) {
12842 tg3_phy_lpbk_set(tp, 0, true);
12843
12844 /* All link indications report up, but the hardware
12845 * isn't really ready for about 20 msec. Double it
12846 * to be sure.
12847 */
12848 mdelay(40);
12849
12850 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12851 data[TG3_EXT_LOOPB_TEST] |=
12852 TG3_STD_LOOPBACK_FAILED;
12853 if (tg3_flag(tp, TSO_CAPABLE) &&
12854 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12855 data[TG3_EXT_LOOPB_TEST] |=
12856 TG3_TSO_LOOPBACK_FAILED;
12857 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12858 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12859 data[TG3_EXT_LOOPB_TEST] |=
12860 TG3_JMB_LOOPBACK_FAILED;
12861 }
12862
12863 /* Re-enable gphy autopowerdown. */
12864 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12865 tg3_phy_toggle_apd(tp, true);
12866 }
12867
12868 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12869 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12870
12871 done:
12872 tp->phy_flags |= eee_cap;
12873
12874 return err;
12875 }
12876
12877 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12878 u64 *data)
12879 {
12880 struct tg3 *tp = netdev_priv(dev);
12881 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12882
12883 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12884 tg3_power_up(tp)) {
12885 etest->flags |= ETH_TEST_FL_FAILED;
12886 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12887 return;
12888 }
12889
12890 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12891
12892 if (tg3_test_nvram(tp) != 0) {
12893 etest->flags |= ETH_TEST_FL_FAILED;
12894 data[TG3_NVRAM_TEST] = 1;
12895 }
12896 if (!doextlpbk && tg3_test_link(tp)) {
12897 etest->flags |= ETH_TEST_FL_FAILED;
12898 data[TG3_LINK_TEST] = 1;
12899 }
12900 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12901 int err, err2 = 0, irq_sync = 0;
12902
12903 if (netif_running(dev)) {
12904 tg3_phy_stop(tp);
12905 tg3_netif_stop(tp);
12906 irq_sync = 1;
12907 }
12908
12909 tg3_full_lock(tp, irq_sync);
12910 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12911 err = tg3_nvram_lock(tp);
12912 tg3_halt_cpu(tp, RX_CPU_BASE);
12913 if (!tg3_flag(tp, 5705_PLUS))
12914 tg3_halt_cpu(tp, TX_CPU_BASE);
12915 if (!err)
12916 tg3_nvram_unlock(tp);
12917
12918 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12919 tg3_phy_reset(tp);
12920
12921 if (tg3_test_registers(tp) != 0) {
12922 etest->flags |= ETH_TEST_FL_FAILED;
12923 data[TG3_REGISTER_TEST] = 1;
12924 }
12925
12926 if (tg3_test_memory(tp) != 0) {
12927 etest->flags |= ETH_TEST_FL_FAILED;
12928 data[TG3_MEMORY_TEST] = 1;
12929 }
12930
12931 if (doextlpbk)
12932 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12933
12934 if (tg3_test_loopback(tp, data, doextlpbk))
12935 etest->flags |= ETH_TEST_FL_FAILED;
12936
12937 tg3_full_unlock(tp);
12938
12939 if (tg3_test_interrupt(tp) != 0) {
12940 etest->flags |= ETH_TEST_FL_FAILED;
12941 data[TG3_INTERRUPT_TEST] = 1;
12942 }
12943
12944 tg3_full_lock(tp, 0);
12945
12946 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12947 if (netif_running(dev)) {
12948 tg3_flag_set(tp, INIT_COMPLETE);
12949 err2 = tg3_restart_hw(tp, 1);
12950 if (!err2)
12951 tg3_netif_start(tp);
12952 }
12953
12954 tg3_full_unlock(tp);
12955
12956 if (irq_sync && !err2)
12957 tg3_phy_start(tp);
12958 }
12959 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12960 tg3_power_down(tp);
12961
12962 }
12963
12964 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12965 struct ifreq *ifr, int cmd)
12966 {
12967 struct tg3 *tp = netdev_priv(dev);
12968 struct hwtstamp_config stmpconf;
12969
12970 if (!tg3_flag(tp, PTP_CAPABLE))
12971 return -EINVAL;
12972
12973 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12974 return -EFAULT;
12975
12976 if (stmpconf.flags)
12977 return -EINVAL;
12978
12979 switch (stmpconf.tx_type) {
12980 case HWTSTAMP_TX_ON:
12981 tg3_flag_set(tp, TX_TSTAMP_EN);
12982 break;
12983 case HWTSTAMP_TX_OFF:
12984 tg3_flag_clear(tp, TX_TSTAMP_EN);
12985 break;
12986 default:
12987 return -ERANGE;
12988 }
12989
12990 switch (stmpconf.rx_filter) {
12991 case HWTSTAMP_FILTER_NONE:
12992 tp->rxptpctl = 0;
12993 break;
12994 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12995 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12996 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12997 break;
12998 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12999 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13000 TG3_RX_PTP_CTL_SYNC_EVNT;
13001 break;
13002 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13003 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13004 TG3_RX_PTP_CTL_DELAY_REQ;
13005 break;
13006 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13007 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13008 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13009 break;
13010 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13011 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13012 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13013 break;
13014 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13015 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13016 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13017 break;
13018 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13019 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13020 TG3_RX_PTP_CTL_SYNC_EVNT;
13021 break;
13022 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13023 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13024 TG3_RX_PTP_CTL_SYNC_EVNT;
13025 break;
13026 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13027 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13028 TG3_RX_PTP_CTL_SYNC_EVNT;
13029 break;
13030 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13031 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13032 TG3_RX_PTP_CTL_DELAY_REQ;
13033 break;
13034 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13035 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13036 TG3_RX_PTP_CTL_DELAY_REQ;
13037 break;
13038 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13039 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13040 TG3_RX_PTP_CTL_DELAY_REQ;
13041 break;
13042 default:
13043 return -ERANGE;
13044 }
13045
13046 if (netif_running(dev) && tp->rxptpctl)
13047 tw32(TG3_RX_PTP_CTL,
13048 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13049
13050 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13051 -EFAULT : 0;
13052 }
13053
13054 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13055 {
13056 struct mii_ioctl_data *data = if_mii(ifr);
13057 struct tg3 *tp = netdev_priv(dev);
13058 int err;
13059
13060 if (tg3_flag(tp, USE_PHYLIB)) {
13061 struct phy_device *phydev;
13062 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13063 return -EAGAIN;
13064 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13065 return phy_mii_ioctl(phydev, ifr, cmd);
13066 }
13067
13068 switch (cmd) {
13069 case SIOCGMIIPHY:
13070 data->phy_id = tp->phy_addr;
13071
13072 /* fall through */
13073 case SIOCGMIIREG: {
13074 u32 mii_regval;
13075
13076 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13077 break; /* We have no PHY */
13078
13079 if (!netif_running(dev))
13080 return -EAGAIN;
13081
13082 spin_lock_bh(&tp->lock);
13083 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13084 data->reg_num & 0x1f, &mii_regval);
13085 spin_unlock_bh(&tp->lock);
13086
13087 data->val_out = mii_regval;
13088
13089 return err;
13090 }
13091
13092 case SIOCSMIIREG:
13093 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13094 break; /* We have no PHY */
13095
13096 if (!netif_running(dev))
13097 return -EAGAIN;
13098
13099 spin_lock_bh(&tp->lock);
13100 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13101 data->reg_num & 0x1f, data->val_in);
13102 spin_unlock_bh(&tp->lock);
13103
13104 return err;
13105
13106 case SIOCSHWTSTAMP:
13107 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13108
13109 default:
13110 /* do nothing */
13111 break;
13112 }
13113 return -EOPNOTSUPP;
13114 }
13115
13116 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13117 {
13118 struct tg3 *tp = netdev_priv(dev);
13119
13120 memcpy(ec, &tp->coal, sizeof(*ec));
13121 return 0;
13122 }
13123
13124 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13125 {
13126 struct tg3 *tp = netdev_priv(dev);
13127 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13128 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13129
13130 if (!tg3_flag(tp, 5705_PLUS)) {
13131 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13132 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13133 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13134 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13135 }
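/* On 5705 and newer parts the _irq and stats limits above remain
 * zero, so any nonzero request for those parameters is rejected
 * below.
 */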
13136
13137 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13138 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13139 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13140 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13141 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13142 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13143 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13144 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13145 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13146 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13147 return -EINVAL;
13148
13149 /* No rx interrupts will be generated if both are zero */
13150 if ((ec->rx_coalesce_usecs == 0) &&
13151 (ec->rx_max_coalesced_frames == 0))
13152 return -EINVAL;
13153
13154 /* No tx interrupts will be generated if both are zero */
13155 if ((ec->tx_coalesce_usecs == 0) &&
13156 (ec->tx_max_coalesced_frames == 0))
13157 return -EINVAL;
13158
13159 /* Only copy relevant parameters, ignore all others. */
13160 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13161 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13162 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13163 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13164 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13165 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13166 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13167 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13168 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13169
13170 if (netif_running(dev)) {
13171 tg3_full_lock(tp, 0);
13172 __tg3_set_coalesce(tp, &tp->coal);
13173 tg3_full_unlock(tp);
13174 }
13175 return 0;
13176 }
13177
13178 static const struct ethtool_ops tg3_ethtool_ops = {
13179 .get_settings = tg3_get_settings,
13180 .set_settings = tg3_set_settings,
13181 .get_drvinfo = tg3_get_drvinfo,
13182 .get_regs_len = tg3_get_regs_len,
13183 .get_regs = tg3_get_regs,
13184 .get_wol = tg3_get_wol,
13185 .set_wol = tg3_set_wol,
13186 .get_msglevel = tg3_get_msglevel,
13187 .set_msglevel = tg3_set_msglevel,
13188 .nway_reset = tg3_nway_reset,
13189 .get_link = ethtool_op_get_link,
13190 .get_eeprom_len = tg3_get_eeprom_len,
13191 .get_eeprom = tg3_get_eeprom,
13192 .set_eeprom = tg3_set_eeprom,
13193 .get_ringparam = tg3_get_ringparam,
13194 .set_ringparam = tg3_set_ringparam,
13195 .get_pauseparam = tg3_get_pauseparam,
13196 .set_pauseparam = tg3_set_pauseparam,
13197 .self_test = tg3_self_test,
13198 .get_strings = tg3_get_strings,
13199 .set_phys_id = tg3_set_phys_id,
13200 .get_ethtool_stats = tg3_get_ethtool_stats,
13201 .get_coalesce = tg3_get_coalesce,
13202 .set_coalesce = tg3_set_coalesce,
13203 .get_sset_count = tg3_get_sset_count,
13204 .get_rxnfc = tg3_get_rxnfc,
13205 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13206 .get_rxfh_indir = tg3_get_rxfh_indir,
13207 .set_rxfh_indir = tg3_set_rxfh_indir,
13208 .get_channels = tg3_get_channels,
13209 .set_channels = tg3_set_channels,
13210 .get_ts_info = tg3_get_ts_info,
13211 };
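/* These hooks back the standard ethtool commands; e.g. "ethtool -t"
 * reaches .self_test, "ethtool -C" .set_coalesce, "ethtool -L"
 * .set_channels and "ethtool -X" .set_rxfh_indir.
 */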
13212
13213 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13214 struct rtnl_link_stats64 *stats)
13215 {
13216 struct tg3 *tp = netdev_priv(dev);
13217
13218 spin_lock_bh(&tp->lock);
13219 if (!tp->hw_stats) {
13220 spin_unlock_bh(&tp->lock);
13221 return &tp->net_stats_prev;
13222 }
13223
13224 tg3_get_nstats(tp, stats);
13225 spin_unlock_bh(&tp->lock);
13226
13227 return stats;
13228 }
13229
13230 static void tg3_set_rx_mode(struct net_device *dev)
13231 {
13232 struct tg3 *tp = netdev_priv(dev);
13233
13234 if (!netif_running(dev))
13235 return;
13236
13237 tg3_full_lock(tp, 0);
13238 __tg3_set_rx_mode(dev);
13239 tg3_full_unlock(tp);
13240 }
13241
13242 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13243 int new_mtu)
13244 {
13245 dev->mtu = new_mtu;
13246
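/* Jumbo frames and TSO cannot be enabled together on 5780-class
 * devices, so TSO capability is toggled along with the MTU on
 * those parts.
 */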
13247 if (new_mtu > ETH_DATA_LEN) {
13248 if (tg3_flag(tp, 5780_CLASS)) {
13249 netdev_update_features(dev);
13250 tg3_flag_clear(tp, TSO_CAPABLE);
13251 } else {
13252 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13253 }
13254 } else {
13255 if (tg3_flag(tp, 5780_CLASS)) {
13256 tg3_flag_set(tp, TSO_CAPABLE);
13257 netdev_update_features(dev);
13258 }
13259 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13260 }
13261 }
13262
13263 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13264 {
13265 struct tg3 *tp = netdev_priv(dev);
13266 int err, reset_phy = 0;
13267
13268 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13269 return -EINVAL;
13270
13271 if (!netif_running(dev)) {
13272 /* We'll just catch it later when the
13273 * device is brought up.
13274 */
13275 tg3_set_mtu(dev, tp, new_mtu);
13276 return 0;
13277 }
13278
13279 tg3_phy_stop(tp);
13280
13281 tg3_netif_stop(tp);
13282
13283 tg3_full_lock(tp, 1);
13284
13285 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13286
13287 tg3_set_mtu(dev, tp, new_mtu);
13288
13289 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13290 * breaks all of its requests down to 256 bytes.
13291 */
13292 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13293 reset_phy = 1;
13294
13295 err = tg3_restart_hw(tp, reset_phy);
13296
13297 if (!err)
13298 tg3_netif_start(tp);
13299
13300 tg3_full_unlock(tp);
13301
13302 if (!err)
13303 tg3_phy_start(tp);
13304
13305 return err;
13306 }
13307
13308 static const struct net_device_ops tg3_netdev_ops = {
13309 .ndo_open = tg3_open,
13310 .ndo_stop = tg3_close,
13311 .ndo_start_xmit = tg3_start_xmit,
13312 .ndo_get_stats64 = tg3_get_stats64,
13313 .ndo_validate_addr = eth_validate_addr,
13314 .ndo_set_rx_mode = tg3_set_rx_mode,
13315 .ndo_set_mac_address = tg3_set_mac_addr,
13316 .ndo_do_ioctl = tg3_ioctl,
13317 .ndo_tx_timeout = tg3_tx_timeout,
13318 .ndo_change_mtu = tg3_change_mtu,
13319 .ndo_fix_features = tg3_fix_features,
13320 .ndo_set_features = tg3_set_features,
13321 #ifdef CONFIG_NET_POLL_CONTROLLER
13322 .ndo_poll_controller = tg3_poll_controller,
13323 #endif
13324 };
13325
13326 static void tg3_get_eeprom_size(struct tg3 *tp)
13327 {
13328 u32 cursize, val, magic;
13329
13330 tp->nvram_size = EEPROM_CHIP_SIZE;
13331
13332 if (tg3_nvram_read(tp, 0, &magic) != 0)
13333 return;
13334
13335 if ((magic != TG3_EEPROM_MAGIC) &&
13336 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13337 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13338 return;
13339
13340 /*
13341 * Size the chip by reading offsets at increasing powers of two.
13342 * When we encounter our validation signature, we know the addressing
13343 * has wrapped around, and thus have our chip size.
13344 */
13345 cursize = 0x10;
13346
13347 while (cursize < tp->nvram_size) {
13348 if (tg3_nvram_read(tp, cursize, &val) != 0)
13349 return;
13350
13351 if (val == magic)
13352 break;
13353
13354 cursize <<= 1;
13355 }
13356
13357 tp->nvram_size = cursize;
13358 }
13359
13360 static void tg3_get_nvram_size(struct tg3 *tp)
13361 {
13362 u32 val;
13363
13364 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13365 return;
13366
13367 /* Selfboot format */
13368 if (val != TG3_EEPROM_MAGIC) {
13369 tg3_get_eeprom_size(tp);
13370 return;
13371 }
13372
13373 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13374 if (val != 0) {
13375 /* This is confusing. We want to operate on the
13376 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13377 * call will read from NVRAM and byteswap the data
13378 * according to the byteswapping settings for all
13379 * other register accesses. This ensures the data we
13380 * want will always reside in the lower 16-bits.
13381 * However, the data in NVRAM is in LE format, which
13382 * means the data from the NVRAM read will always be
13383 * opposite the endianness of the CPU. The 16-bit
13384 * byteswap then brings the data to CPU endianness.
13385 */
13386 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13387 return;
13388 }
13389 }
13390 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13391 }
13392
13393 static void tg3_get_nvram_info(struct tg3 *tp)
13394 {
13395 u32 nvcfg1;
13396
13397 nvcfg1 = tr32(NVRAM_CFG1);
13398 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13399 tg3_flag_set(tp, FLASH);
13400 } else {
13401 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13402 tw32(NVRAM_CFG1, nvcfg1);
13403 }
13404
13405 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13406 tg3_flag(tp, 5780_CLASS)) {
13407 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13408 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13409 tp->nvram_jedecnum = JEDEC_ATMEL;
13410 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13411 tg3_flag_set(tp, NVRAM_BUFFERED);
13412 break;
13413 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13414 tp->nvram_jedecnum = JEDEC_ATMEL;
13415 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13416 break;
13417 case FLASH_VENDOR_ATMEL_EEPROM:
13418 tp->nvram_jedecnum = JEDEC_ATMEL;
13419 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13420 tg3_flag_set(tp, NVRAM_BUFFERED);
13421 break;
13422 case FLASH_VENDOR_ST:
13423 tp->nvram_jedecnum = JEDEC_ST;
13424 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13425 tg3_flag_set(tp, NVRAM_BUFFERED);
13426 break;
13427 case FLASH_VENDOR_SAIFUN:
13428 tp->nvram_jedecnum = JEDEC_SAIFUN;
13429 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13430 break;
13431 case FLASH_VENDOR_SST_SMALL:
13432 case FLASH_VENDOR_SST_LARGE:
13433 tp->nvram_jedecnum = JEDEC_SST;
13434 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13435 break;
13436 }
13437 } else {
13438 tp->nvram_jedecnum = JEDEC_ATMEL;
13439 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13440 tg3_flag_set(tp, NVRAM_BUFFERED);
13441 }
13442 }
13443
13444 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13445 {
13446 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13447 case FLASH_5752PAGE_SIZE_256:
13448 tp->nvram_pagesize = 256;
13449 break;
13450 case FLASH_5752PAGE_SIZE_512:
13451 tp->nvram_pagesize = 512;
13452 break;
13453 case FLASH_5752PAGE_SIZE_1K:
13454 tp->nvram_pagesize = 1024;
13455 break;
13456 case FLASH_5752PAGE_SIZE_2K:
13457 tp->nvram_pagesize = 2048;
13458 break;
13459 case FLASH_5752PAGE_SIZE_4K:
13460 tp->nvram_pagesize = 4096;
13461 break;
13462 case FLASH_5752PAGE_SIZE_264:
13463 tp->nvram_pagesize = 264;
13464 break;
13465 case FLASH_5752PAGE_SIZE_528:
13466 tp->nvram_pagesize = 528;
13467 break;
13468 }
13469 }
13470
13471 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13472 {
13473 u32 nvcfg1;
13474
13475 nvcfg1 = tr32(NVRAM_CFG1);
13476
13477 /* NVRAM protection for TPM */
13478 if (nvcfg1 & (1 << 27))
13479 tg3_flag_set(tp, PROTECTED_NVRAM);
13480
13481 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13482 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13483 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13484 tp->nvram_jedecnum = JEDEC_ATMEL;
13485 tg3_flag_set(tp, NVRAM_BUFFERED);
13486 break;
13487 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13488 tp->nvram_jedecnum = JEDEC_ATMEL;
13489 tg3_flag_set(tp, NVRAM_BUFFERED);
13490 tg3_flag_set(tp, FLASH);
13491 break;
13492 case FLASH_5752VENDOR_ST_M45PE10:
13493 case FLASH_5752VENDOR_ST_M45PE20:
13494 case FLASH_5752VENDOR_ST_M45PE40:
13495 tp->nvram_jedecnum = JEDEC_ST;
13496 tg3_flag_set(tp, NVRAM_BUFFERED);
13497 tg3_flag_set(tp, FLASH);
13498 break;
13499 }
13500
13501 if (tg3_flag(tp, FLASH)) {
13502 tg3_nvram_get_pagesize(tp, nvcfg1);
13503 } else {
13504 /* For eeprom, set pagesize to maximum eeprom size */
13505 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13506
13507 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13508 tw32(NVRAM_CFG1, nvcfg1);
13509 }
13510 }
13511
13512 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13513 {
13514 u32 nvcfg1, protect = 0;
13515
13516 nvcfg1 = tr32(NVRAM_CFG1);
13517
13518 /* NVRAM protection for TPM */
13519 if (nvcfg1 & (1 << 27)) {
13520 tg3_flag_set(tp, PROTECTED_NVRAM);
13521 protect = 1;
13522 }
13523
13524 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13525 switch (nvcfg1) {
13526 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13527 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13528 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13529 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13530 tp->nvram_jedecnum = JEDEC_ATMEL;
13531 tg3_flag_set(tp, NVRAM_BUFFERED);
13532 tg3_flag_set(tp, FLASH);
13533 tp->nvram_pagesize = 264;
13534 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13535 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13536 tp->nvram_size = (protect ? 0x3e200 :
13537 TG3_NVRAM_SIZE_512KB);
13538 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13539 tp->nvram_size = (protect ? 0x1f200 :
13540 TG3_NVRAM_SIZE_256KB);
13541 else
13542 tp->nvram_size = (protect ? 0x1f200 :
13543 TG3_NVRAM_SIZE_128KB);
13544 break;
13545 case FLASH_5752VENDOR_ST_M45PE10:
13546 case FLASH_5752VENDOR_ST_M45PE20:
13547 case FLASH_5752VENDOR_ST_M45PE40:
13548 tp->nvram_jedecnum = JEDEC_ST;
13549 tg3_flag_set(tp, NVRAM_BUFFERED);
13550 tg3_flag_set(tp, FLASH);
13551 tp->nvram_pagesize = 256;
13552 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13553 tp->nvram_size = (protect ?
13554 TG3_NVRAM_SIZE_64KB :
13555 TG3_NVRAM_SIZE_128KB);
13556 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13557 tp->nvram_size = (protect ?
13558 TG3_NVRAM_SIZE_64KB :
13559 TG3_NVRAM_SIZE_256KB);
13560 else
13561 tp->nvram_size = (protect ?
13562 TG3_NVRAM_SIZE_128KB :
13563 TG3_NVRAM_SIZE_512KB);
13564 break;
13565 }
13566 }
13567
13568 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13569 {
13570 u32 nvcfg1;
13571
13572 nvcfg1 = tr32(NVRAM_CFG1);
13573
13574 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13575 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13576 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13577 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13578 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13579 tp->nvram_jedecnum = JEDEC_ATMEL;
13580 tg3_flag_set(tp, NVRAM_BUFFERED);
13581 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13582
13583 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13584 tw32(NVRAM_CFG1, nvcfg1);
13585 break;
13586 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13587 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13588 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13589 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13590 tp->nvram_jedecnum = JEDEC_ATMEL;
13591 tg3_flag_set(tp, NVRAM_BUFFERED);
13592 tg3_flag_set(tp, FLASH);
13593 tp->nvram_pagesize = 264;
13594 break;
13595 case FLASH_5752VENDOR_ST_M45PE10:
13596 case FLASH_5752VENDOR_ST_M45PE20:
13597 case FLASH_5752VENDOR_ST_M45PE40:
13598 tp->nvram_jedecnum = JEDEC_ST;
13599 tg3_flag_set(tp, NVRAM_BUFFERED);
13600 tg3_flag_set(tp, FLASH);
13601 tp->nvram_pagesize = 256;
13602 break;
13603 }
13604 }
13605
13606 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13607 {
13608 u32 nvcfg1, protect = 0;
13609
13610 nvcfg1 = tr32(NVRAM_CFG1);
13611
13612 /* NVRAM protection for TPM */
13613 if (nvcfg1 & (1 << 27)) {
13614 tg3_flag_set(tp, PROTECTED_NVRAM);
13615 protect = 1;
13616 }
13617
13618 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13619 switch (nvcfg1) {
13620 case FLASH_5761VENDOR_ATMEL_ADB021D:
13621 case FLASH_5761VENDOR_ATMEL_ADB041D:
13622 case FLASH_5761VENDOR_ATMEL_ADB081D:
13623 case FLASH_5761VENDOR_ATMEL_ADB161D:
13624 case FLASH_5761VENDOR_ATMEL_MDB021D:
13625 case FLASH_5761VENDOR_ATMEL_MDB041D:
13626 case FLASH_5761VENDOR_ATMEL_MDB081D:
13627 case FLASH_5761VENDOR_ATMEL_MDB161D:
13628 tp->nvram_jedecnum = JEDEC_ATMEL;
13629 tg3_flag_set(tp, NVRAM_BUFFERED);
13630 tg3_flag_set(tp, FLASH);
13631 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13632 tp->nvram_pagesize = 256;
13633 break;
13634 case FLASH_5761VENDOR_ST_A_M45PE20:
13635 case FLASH_5761VENDOR_ST_A_M45PE40:
13636 case FLASH_5761VENDOR_ST_A_M45PE80:
13637 case FLASH_5761VENDOR_ST_A_M45PE16:
13638 case FLASH_5761VENDOR_ST_M_M45PE20:
13639 case FLASH_5761VENDOR_ST_M_M45PE40:
13640 case FLASH_5761VENDOR_ST_M_M45PE80:
13641 case FLASH_5761VENDOR_ST_M_M45PE16:
13642 tp->nvram_jedecnum = JEDEC_ST;
13643 tg3_flag_set(tp, NVRAM_BUFFERED);
13644 tg3_flag_set(tp, FLASH);
13645 tp->nvram_pagesize = 256;
13646 break;
13647 }
13648
13649 if (protect) {
13650 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13651 } else {
13652 switch (nvcfg1) {
13653 case FLASH_5761VENDOR_ATMEL_ADB161D:
13654 case FLASH_5761VENDOR_ATMEL_MDB161D:
13655 case FLASH_5761VENDOR_ST_A_M45PE16:
13656 case FLASH_5761VENDOR_ST_M_M45PE16:
13657 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13658 break;
13659 case FLASH_5761VENDOR_ATMEL_ADB081D:
13660 case FLASH_5761VENDOR_ATMEL_MDB081D:
13661 case FLASH_5761VENDOR_ST_A_M45PE80:
13662 case FLASH_5761VENDOR_ST_M_M45PE80:
13663 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13664 break;
13665 case FLASH_5761VENDOR_ATMEL_ADB041D:
13666 case FLASH_5761VENDOR_ATMEL_MDB041D:
13667 case FLASH_5761VENDOR_ST_A_M45PE40:
13668 case FLASH_5761VENDOR_ST_M_M45PE40:
13669 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13670 break;
13671 case FLASH_5761VENDOR_ATMEL_ADB021D:
13672 case FLASH_5761VENDOR_ATMEL_MDB021D:
13673 case FLASH_5761VENDOR_ST_A_M45PE20:
13674 case FLASH_5761VENDOR_ST_M_M45PE20:
13675 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13676 break;
13677 }
13678 }
13679 }
13680
13681 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13682 {
13683 tp->nvram_jedecnum = JEDEC_ATMEL;
13684 tg3_flag_set(tp, NVRAM_BUFFERED);
13685 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13686 }
13687
13688 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13689 {
13690 u32 nvcfg1;
13691
13692 nvcfg1 = tr32(NVRAM_CFG1);
13693
13694 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13695 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13696 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13697 tp->nvram_jedecnum = JEDEC_ATMEL;
13698 tg3_flag_set(tp, NVRAM_BUFFERED);
13699 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13700
13701 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13702 tw32(NVRAM_CFG1, nvcfg1);
13703 return;
13704 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13705 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13706 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13707 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13708 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13709 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13710 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13711 tp->nvram_jedecnum = JEDEC_ATMEL;
13712 tg3_flag_set(tp, NVRAM_BUFFERED);
13713 tg3_flag_set(tp, FLASH);
13714
13715 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13716 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13717 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13718 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13719 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13720 break;
13721 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13722 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13723 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13724 break;
13725 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13726 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13727 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13728 break;
13729 }
13730 break;
13731 case FLASH_5752VENDOR_ST_M45PE10:
13732 case FLASH_5752VENDOR_ST_M45PE20:
13733 case FLASH_5752VENDOR_ST_M45PE40:
13734 tp->nvram_jedecnum = JEDEC_ST;
13735 tg3_flag_set(tp, NVRAM_BUFFERED);
13736 tg3_flag_set(tp, FLASH);
13737
13738 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13739 case FLASH_5752VENDOR_ST_M45PE10:
13740 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13741 break;
13742 case FLASH_5752VENDOR_ST_M45PE20:
13743 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13744 break;
13745 case FLASH_5752VENDOR_ST_M45PE40:
13746 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13747 break;
13748 }
13749 break;
13750 default:
13751 tg3_flag_set(tp, NO_NVRAM);
13752 return;
13753 }
13754
13755 tg3_nvram_get_pagesize(tp, nvcfg1);
13756 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13757 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13758 }
13759
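/* Decode NVRAM_CFG1 for the 5717/5719 family: pick the JEDEC vendor,
 * mark buffered/flash parts and, where the strapping encodes it, the
 * device size (otherwise tg3_nvram_get_size() detects it later).
 */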
13761 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13762 {
13763 u32 nvcfg1;
13764
13765 nvcfg1 = tr32(NVRAM_CFG1);
13766
13767 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13768 case FLASH_5717VENDOR_ATMEL_EEPROM:
13769 case FLASH_5717VENDOR_MICRO_EEPROM:
13770 tp->nvram_jedecnum = JEDEC_ATMEL;
13771 tg3_flag_set(tp, NVRAM_BUFFERED);
13772 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13773
13774 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13775 tw32(NVRAM_CFG1, nvcfg1);
13776 return;
13777 case FLASH_5717VENDOR_ATMEL_MDB011D:
13778 case FLASH_5717VENDOR_ATMEL_ADB011B:
13779 case FLASH_5717VENDOR_ATMEL_ADB011D:
13780 case FLASH_5717VENDOR_ATMEL_MDB021D:
13781 case FLASH_5717VENDOR_ATMEL_ADB021B:
13782 case FLASH_5717VENDOR_ATMEL_ADB021D:
13783 case FLASH_5717VENDOR_ATMEL_45USPT:
13784 tp->nvram_jedecnum = JEDEC_ATMEL;
13785 tg3_flag_set(tp, NVRAM_BUFFERED);
13786 tg3_flag_set(tp, FLASH);
13787
13788 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13789 case FLASH_5717VENDOR_ATMEL_MDB021D:
13790 /* Detect size with tg3_nvram_get_size() */
13791 break;
13792 case FLASH_5717VENDOR_ATMEL_ADB021B:
13793 case FLASH_5717VENDOR_ATMEL_ADB021D:
13794 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13795 break;
13796 default:
13797 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13798 break;
13799 }
13800 break;
13801 case FLASH_5717VENDOR_ST_M_M25PE10:
13802 case FLASH_5717VENDOR_ST_A_M25PE10:
13803 case FLASH_5717VENDOR_ST_M_M45PE10:
13804 case FLASH_5717VENDOR_ST_A_M45PE10:
13805 case FLASH_5717VENDOR_ST_M_M25PE20:
13806 case FLASH_5717VENDOR_ST_A_M25PE20:
13807 case FLASH_5717VENDOR_ST_M_M45PE20:
13808 case FLASH_5717VENDOR_ST_A_M45PE20:
13809 case FLASH_5717VENDOR_ST_25USPT:
13810 case FLASH_5717VENDOR_ST_45USPT:
13811 tp->nvram_jedecnum = JEDEC_ST;
13812 tg3_flag_set(tp, NVRAM_BUFFERED);
13813 tg3_flag_set(tp, FLASH);
13814
13815 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13816 case FLASH_5717VENDOR_ST_M_M25PE20:
13817 case FLASH_5717VENDOR_ST_M_M45PE20:
13818 /* Detect size with tg3_nvram_get_size() */
13819 break;
13820 case FLASH_5717VENDOR_ST_A_M25PE20:
13821 case FLASH_5717VENDOR_ST_A_M45PE20:
13822 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13823 break;
13824 default:
13825 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13826 break;
13827 }
13828 break;
13829 default:
13830 tg3_flag_set(tp, NO_NVRAM);
13831 return;
13832 }
13833
13834 tg3_nvram_get_pagesize(tp, nvcfg1);
13835 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13836 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13837 }
13838
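/* Same decode as tg3_get_5717_nvram_info(), but for the 5720/5762.
 * The 5762 encodes its EEPROM straps differently, so those values are
 * remapped onto their 5720 equivalents first.
 */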
13839 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13840 {
13841 u32 nvcfg1, nvmpinstrp;
13842
13843 nvcfg1 = tr32(NVRAM_CFG1);
13844 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13845
13846 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13847 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13848 tg3_flag_set(tp, NO_NVRAM);
13849 return;
13850 }
13851
13852 switch (nvmpinstrp) {
13853 case FLASH_5762_EEPROM_HD:
13854 nvmpinstrp = FLASH_5720_EEPROM_HD;
13855 break;
13856 case FLASH_5762_EEPROM_LD:
13857 nvmpinstrp = FLASH_5720_EEPROM_LD;
13858 break;
13859 }
13860 }
13861
13862 switch (nvmpinstrp) {
13863 case FLASH_5720_EEPROM_HD:
13864 case FLASH_5720_EEPROM_LD:
13865 tp->nvram_jedecnum = JEDEC_ATMEL;
13866 tg3_flag_set(tp, NVRAM_BUFFERED);
13867
13868 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13869 tw32(NVRAM_CFG1, nvcfg1);
13870 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13871 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13872 else
13873 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13874 return;
13875 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13876 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13877 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13878 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13879 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13880 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13881 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13882 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13883 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13884 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13885 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13886 case FLASH_5720VENDOR_ATMEL_45USPT:
13887 tp->nvram_jedecnum = JEDEC_ATMEL;
13888 tg3_flag_set(tp, NVRAM_BUFFERED);
13889 tg3_flag_set(tp, FLASH);
13890
13891 switch (nvmpinstrp) {
13892 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13893 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13894 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13895 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13896 break;
13897 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13898 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13899 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13900 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13901 break;
13902 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13903 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13904 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13905 break;
13906 default:
13907 if (tg3_asic_rev(tp) != ASIC_REV_5762)
13908 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13909 break;
13910 }
13911 break;
13912 case FLASH_5720VENDOR_M_ST_M25PE10:
13913 case FLASH_5720VENDOR_M_ST_M45PE10:
13914 case FLASH_5720VENDOR_A_ST_M25PE10:
13915 case FLASH_5720VENDOR_A_ST_M45PE10:
13916 case FLASH_5720VENDOR_M_ST_M25PE20:
13917 case FLASH_5720VENDOR_M_ST_M45PE20:
13918 case FLASH_5720VENDOR_A_ST_M25PE20:
13919 case FLASH_5720VENDOR_A_ST_M45PE20:
13920 case FLASH_5720VENDOR_M_ST_M25PE40:
13921 case FLASH_5720VENDOR_M_ST_M45PE40:
13922 case FLASH_5720VENDOR_A_ST_M25PE40:
13923 case FLASH_5720VENDOR_A_ST_M45PE40:
13924 case FLASH_5720VENDOR_M_ST_M25PE80:
13925 case FLASH_5720VENDOR_M_ST_M45PE80:
13926 case FLASH_5720VENDOR_A_ST_M25PE80:
13927 case FLASH_5720VENDOR_A_ST_M45PE80:
13928 case FLASH_5720VENDOR_ST_25USPT:
13929 case FLASH_5720VENDOR_ST_45USPT:
13930 tp->nvram_jedecnum = JEDEC_ST;
13931 tg3_flag_set(tp, NVRAM_BUFFERED);
13932 tg3_flag_set(tp, FLASH);
13933
13934 switch (nvmpinstrp) {
13935 case FLASH_5720VENDOR_M_ST_M25PE20:
13936 case FLASH_5720VENDOR_M_ST_M45PE20:
13937 case FLASH_5720VENDOR_A_ST_M25PE20:
13938 case FLASH_5720VENDOR_A_ST_M45PE20:
13939 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13940 break;
13941 case FLASH_5720VENDOR_M_ST_M25PE40:
13942 case FLASH_5720VENDOR_M_ST_M45PE40:
13943 case FLASH_5720VENDOR_A_ST_M25PE40:
13944 case FLASH_5720VENDOR_A_ST_M45PE40:
13945 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13946 break;
13947 case FLASH_5720VENDOR_M_ST_M25PE80:
13948 case FLASH_5720VENDOR_M_ST_M45PE80:
13949 case FLASH_5720VENDOR_A_ST_M25PE80:
13950 case FLASH_5720VENDOR_A_ST_M45PE80:
13951 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13952 break;
13953 default:
13954 if (tg3_asic_rev(tp) != ASIC_REV_5762)
13955 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13956 break;
13957 }
13958 break;
13959 default:
13960 tg3_flag_set(tp, NO_NVRAM);
13961 return;
13962 }
13963
13964 tg3_nvram_get_pagesize(tp, nvcfg1);
13965 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13966 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13967
13968 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13969 u32 val;
13970
13971 if (tg3_nvram_read(tp, 0, &val))
13972 return;
13973
13974 if (val != TG3_EEPROM_MAGIC &&
13975 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13976 tg3_flag_set(tp, NO_NVRAM);
13977 }
13978 }
13979
13980 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13981 static void tg3_nvram_init(struct tg3 *tp)
13982 {
13983 if (tg3_flag(tp, IS_SSB_CORE)) {
13984 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
13985 tg3_flag_clear(tp, NVRAM);
13986 tg3_flag_clear(tp, NVRAM_BUFFERED);
13987 tg3_flag_set(tp, NO_NVRAM);
13988 return;
13989 }
13990
13991 tw32_f(GRC_EEPROM_ADDR,
13992 (EEPROM_ADDR_FSM_RESET |
13993 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13994 EEPROM_ADDR_CLKPERD_SHIFT)));
13995
13996 msleep(1);
13997
13998 /* Enable seeprom accesses. */
13999 tw32_f(GRC_LOCAL_CTRL,
14000 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14001 udelay(100);
14002
14003 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14004 tg3_asic_rev(tp) != ASIC_REV_5701) {
14005 tg3_flag_set(tp, NVRAM);
14006
14007 if (tg3_nvram_lock(tp)) {
14008 netdev_warn(tp->dev,
14009 "Cannot get nvram lock, %s failed\n",
14010 __func__);
14011 return;
14012 }
14013 tg3_enable_nvram_access(tp);
14014
14015 tp->nvram_size = 0;
14016
14017 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14018 tg3_get_5752_nvram_info(tp);
14019 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14020 tg3_get_5755_nvram_info(tp);
14021 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14022 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14023 tg3_asic_rev(tp) == ASIC_REV_5785)
14024 tg3_get_5787_nvram_info(tp);
14025 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14026 tg3_get_5761_nvram_info(tp);
14027 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14028 tg3_get_5906_nvram_info(tp);
14029 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14030 tg3_flag(tp, 57765_CLASS))
14031 tg3_get_57780_nvram_info(tp);
14032 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14033 tg3_asic_rev(tp) == ASIC_REV_5719)
14034 tg3_get_5717_nvram_info(tp);
14035 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14036 tg3_asic_rev(tp) == ASIC_REV_5762)
14037 tg3_get_5720_nvram_info(tp);
14038 else
14039 tg3_get_nvram_info(tp);
14040
14041 if (tp->nvram_size == 0)
14042 tg3_get_nvram_size(tp);
14043
14044 tg3_disable_nvram_access(tp);
14045 tg3_nvram_unlock(tp);
14046
14047 } else {
14048 tg3_flag_clear(tp, NVRAM);
14049 tg3_flag_clear(tp, NVRAM_BUFFERED);
14050
14051 tg3_get_eeprom_size(tp);
14052 }
14053 }
14054
14055 struct subsys_tbl_ent {
14056 u16 subsys_vendor, subsys_devid;
14057 u32 phy_id;
14058 };
14059
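/* Boards whose NVRAM carries no usable PHY ID, keyed by PCI subsystem
 * vendor/device ID. Entries with a phy_id of 0 are treated as SerDes
 * boards by tg3_phy_probe().
 */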
14060 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14061 /* Broadcom boards. */
14062 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14063 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14064 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14065 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14066 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14067 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14068 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14069 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14070 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14071 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14072 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14073 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14074 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14075 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14076 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14077 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14078 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14079 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14080 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14081 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14082 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14083 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14084
14085 /* 3com boards. */
14086 { TG3PCI_SUBVENDOR_ID_3COM,
14087 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14088 { TG3PCI_SUBVENDOR_ID_3COM,
14089 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14090 { TG3PCI_SUBVENDOR_ID_3COM,
14091 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14092 { TG3PCI_SUBVENDOR_ID_3COM,
14093 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14094 { TG3PCI_SUBVENDOR_ID_3COM,
14095 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14096
14097 /* DELL boards. */
14098 { TG3PCI_SUBVENDOR_ID_DELL,
14099 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14100 { TG3PCI_SUBVENDOR_ID_DELL,
14101 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14102 { TG3PCI_SUBVENDOR_ID_DELL,
14103 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14104 { TG3PCI_SUBVENDOR_ID_DELL,
14105 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14106
14107 /* Compaq boards. */
14108 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14109 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14110 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14111 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14112 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14113 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14114 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14115 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14116 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14117 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14118
14119 /* IBM boards. */
14120 { TG3PCI_SUBVENDOR_ID_IBM,
14121 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14122 };
14123
14124 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14125 {
14126 int i;
14127
14128 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14129 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14130 tp->pdev->subsystem_vendor) &&
14131 (subsys_id_to_phy_id[i].subsys_devid ==
14132 tp->pdev->subsystem_device))
14133 return &subsys_id_to_phy_id[i];
14134 }
14135 return NULL;
14136 }
14137
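/* Read the hardware configuration that bootcode shadowed from the
 * EEPROM into NIC SRAM: PHY type and ID, LED mode, and the WOL, ASF
 * and APE enable bits, among others.
 */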
14138 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14139 {
14140 u32 val;
14141
14142 tp->phy_id = TG3_PHY_ID_INVALID;
14143 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14144
14145 /* Assume an onboard device that is WOL-capable by default. */
14146 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14147 tg3_flag_set(tp, WOL_CAP);
14148
14149 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14150 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14151 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14152 tg3_flag_set(tp, IS_NIC);
14153 }
14154 val = tr32(VCPU_CFGSHDW);
14155 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14156 tg3_flag_set(tp, ASPM_WORKAROUND);
14157 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14158 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14159 tg3_flag_set(tp, WOL_ENABLE);
14160 device_set_wakeup_enable(&tp->pdev->dev, true);
14161 }
14162 goto done;
14163 }
14164
14165 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14166 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14167 u32 nic_cfg, led_cfg;
14168 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14169 int eeprom_phy_serdes = 0;
14170
14171 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14172 tp->nic_sram_data_cfg = nic_cfg;
14173
14174 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14175 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14176 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14177 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14178 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14179 (ver > 0) && (ver < 0x100))
14180 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14181
14182 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14183 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14184
14185 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14186 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14187 eeprom_phy_serdes = 1;
14188
14189 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14190 if (nic_phy_id != 0) {
14191 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14192 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14193
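/* Repack the two NIC SRAM PHY ID words into the
 * driver's internal TG3_PHY_ID_* layout.
 */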
14194 eeprom_phy_id = (id1 >> 16) << 10;
14195 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14196 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14197 } else
14198 eeprom_phy_id = 0;
14199
14200 tp->phy_id = eeprom_phy_id;
14201 if (eeprom_phy_serdes) {
14202 if (!tg3_flag(tp, 5705_PLUS))
14203 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14204 else
14205 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14206 }
14207
14208 if (tg3_flag(tp, 5750_PLUS))
14209 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14210 SHASTA_EXT_LED_MODE_MASK);
14211 else
14212 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14213
14214 switch (led_cfg) {
14215 default:
14216 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14217 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14218 break;
14219
14220 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14221 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14222 break;
14223
14224 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14225 tp->led_ctrl = LED_CTRL_MODE_MAC;
14226
14227 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14228 * read, as happens with some older 5700/5701 bootcode.
14229 */
14230 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14231 tg3_asic_rev(tp) == ASIC_REV_5701)
14232 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14233
14234 break;
14235
14236 case SHASTA_EXT_LED_SHARED:
14237 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14238 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14239 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14240 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14241 LED_CTRL_MODE_PHY_2);
14242 break;
14243
14244 case SHASTA_EXT_LED_MAC:
14245 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14246 break;
14247
14248 case SHASTA_EXT_LED_COMBO:
14249 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14250 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14251 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14252 LED_CTRL_MODE_PHY_2);
14253 break;
14254
14255 }
14256
14257 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14258 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14259 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14260 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14261
14262 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14263 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14264
14265 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14266 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14267 if ((tp->pdev->subsystem_vendor ==
14268 PCI_VENDOR_ID_ARIMA) &&
14269 (tp->pdev->subsystem_device == 0x205a ||
14270 tp->pdev->subsystem_device == 0x2063))
14271 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14272 } else {
14273 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14274 tg3_flag_set(tp, IS_NIC);
14275 }
14276
14277 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14278 tg3_flag_set(tp, ENABLE_ASF);
14279 if (tg3_flag(tp, 5750_PLUS))
14280 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14281 }
14282
14283 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14284 tg3_flag(tp, 5750_PLUS))
14285 tg3_flag_set(tp, ENABLE_APE);
14286
14287 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14288 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14289 tg3_flag_clear(tp, WOL_CAP);
14290
14291 if (tg3_flag(tp, WOL_CAP) &&
14292 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14293 tg3_flag_set(tp, WOL_ENABLE);
14294 device_set_wakeup_enable(&tp->pdev->dev, true);
14295 }
14296
14297 if (cfg2 & (1 << 17))
14298 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14299
14300 /* SerDes signal pre-emphasis in register 0x590 is set by the
14301 * bootcode if bit 18 is set. */
14302 if (cfg2 & (1 << 18))
14303 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14304
14305 if ((tg3_flag(tp, 57765_PLUS) ||
14306 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14307 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14308 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14309 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14310
14311 if (tg3_flag(tp, PCI_EXPRESS) &&
14312 tg3_asic_rev(tp) != ASIC_REV_5785 &&
14313 !tg3_flag(tp, 57765_PLUS)) {
14314 u32 cfg3;
14315
14316 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14317 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14318 tg3_flag_set(tp, ASPM_WORKAROUND);
14319 }
14320
14321 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14322 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14323 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14324 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14325 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14326 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14327 }
14328 done:
14329 if (tg3_flag(tp, WOL_CAP))
14330 device_set_wakeup_enable(&tp->pdev->dev,
14331 tg3_flag(tp, WOL_ENABLE));
14332 else
14333 device_set_wakeup_capable(&tp->pdev->dev, false);
14334 }
14335
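/* Read one 32-bit word from the chip's OTP area via the APE. The byte
 * offset handed to the APE is offset * 8; completion is polled for up
 * to 1ms, and -EBUSY is returned on timeout.
 */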
14336 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14337 {
14338 int i, err;
14339 u32 val2, off = offset * 8;
14340
14341 err = tg3_nvram_lock(tp);
14342 if (err)
14343 return err;
14344
14345 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14346 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14347 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14348 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14349 udelay(10);
14350
14351 for (i = 0; i < 100; i++) {
14352 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14353 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14354 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14355 break;
14356 }
14357 udelay(10);
14358 }
14359
14360 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14361
14362 tg3_nvram_unlock(tp);
14363 if (val2 & APE_OTP_STATUS_CMD_DONE)
14364 return 0;
14365
14366 return -EBUSY;
14367 }
14368
14369 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14370 {
14371 int i;
14372 u32 val;
14373
14374 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14375 tw32(OTP_CTRL, cmd);
14376
14377 /* Wait for up to 1 ms for command to execute. */
14378 for (i = 0; i < 100; i++) {
14379 val = tr32(OTP_STATUS);
14380 if (val & OTP_STATUS_CMD_DONE)
14381 break;
14382 udelay(10);
14383 }
14384
14385 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14386 }
14387
14388 /* Read the gphy configuration from the OTP region of the chip. The gphy
14389 * configuration is a 32-bit value that straddles the alignment boundary.
14390 * We do two 32-bit reads and then shift and merge the results.
14391 */
14392 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14393 {
14394 u32 bhalf_otp, thalf_otp;
14395
14396 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14397
14398 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14399 return 0;
14400
14401 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14402
14403 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14404 return 0;
14405
14406 thalf_otp = tr32(OTP_READ_DATA);
14407
14408 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14409
14410 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14411 return 0;
14412
14413 bhalf_otp = tr32(OTP_READ_DATA);
14414
14415 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14416 }
14417
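/* Build the default link configuration: advertise every mode the PHY
 * supports (subject to the 10/100-only and SerDes restrictions), then
 * enable autoneg with the active speed/duplex still unknown.
 */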
14418 static void tg3_phy_init_link_config(struct tg3 *tp)
14419 {
14420 u32 adv = ADVERTISED_Autoneg;
14421
14422 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14423 adv |= ADVERTISED_1000baseT_Half |
14424 ADVERTISED_1000baseT_Full;
14425
14426 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14427 adv |= ADVERTISED_100baseT_Half |
14428 ADVERTISED_100baseT_Full |
14429 ADVERTISED_10baseT_Half |
14430 ADVERTISED_10baseT_Full |
14431 ADVERTISED_TP;
14432 else
14433 adv |= ADVERTISED_FIBRE;
14434
14435 tp->link_config.advertising = adv;
14436 tp->link_config.speed = SPEED_UNKNOWN;
14437 tp->link_config.duplex = DUPLEX_UNKNOWN;
14438 tp->link_config.autoneg = AUTONEG_ENABLE;
14439 tp->link_config.active_speed = SPEED_UNKNOWN;
14440 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14441
14442 tp->old_link = -1;
14443 }
14444
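/* Identify the PHY. When ASF/APE firmware owns the PHY, the MII ID
 * registers cannot be read safely, so fall back to the EEPROM value or
 * the subsystem-ID table above. Finishes by installing the default
 * link configuration.
 */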
14445 static int tg3_phy_probe(struct tg3 *tp)
14446 {
14447 u32 hw_phy_id_1, hw_phy_id_2;
14448 u32 hw_phy_id, hw_phy_id_masked;
14449 int err;
14450
14451 /* flow control autonegotiation is default behavior */
14452 tg3_flag_set(tp, PAUSE_AUTONEG);
14453 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14454
14455 if (tg3_flag(tp, ENABLE_APE)) {
14456 switch (tp->pci_fn) {
14457 case 0:
14458 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14459 break;
14460 case 1:
14461 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14462 break;
14463 case 2:
14464 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14465 break;
14466 case 3:
14467 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14468 break;
14469 }
14470 }
14471
14472 if (tg3_flag(tp, USE_PHYLIB))
14473 return tg3_phy_init(tp);
14474
14475 /* Reading the PHY ID register can conflict with ASF
14476 * firmware access to the PHY hardware.
14477 */
14478 err = 0;
14479 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14480 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14481 } else {
14482 /* Now read the physical PHY_ID from the chip and verify
14483 * that it is sane. If it doesn't look good, we fall back
14484 * to the hard-coded, table-based PHY_ID or, failing
14485 * that, the value found in the EEPROM area.
14486 */
14487 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14488 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14489
14490 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14491 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14492 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14493
14494 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14495 }
14496
14497 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14498 tp->phy_id = hw_phy_id;
14499 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14500 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14501 else
14502 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14503 } else {
14504 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14505 /* Do nothing; the PHY ID was already set up in
14506 * tg3_get_eeprom_hw_cfg().
14507 */
14508 } else {
14509 struct subsys_tbl_ent *p;
14510
14511 /* No eeprom signature? Try the hardcoded
14512 * subsys device table.
14513 */
14514 p = tg3_lookup_by_subsys(tp);
14515 if (p) {
14516 tp->phy_id = p->phy_id;
14517 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14518 /* So far we have seen the IDs 0xbc050cd0,
14519 * 0xbc050f80 and 0xbc050c30 on devices
14520 * connected to a BCM4785, and there are
14521 * probably more. For now, just assume that
14522 * the PHY is supported when it is connected
14523 * to an SSB core.
14524 */
14525 return -ENODEV;
14526 }
14527
14528 if (!tp->phy_id ||
14529 tp->phy_id == TG3_PHY_ID_BCM8002)
14530 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14531 }
14532 }
14533
14534 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14535 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14536 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14537 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14538 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14539 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14540 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14541 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14542 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14543
14544 tg3_phy_init_link_config(tp);
14545
14546 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14547 !tg3_flag(tp, ENABLE_APE) &&
14548 !tg3_flag(tp, ENABLE_ASF)) {
14549 u32 bmsr, dummy;
14550
14551 tg3_readphy(tp, MII_BMSR, &bmsr);
14552 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14553 (bmsr & BMSR_LSTATUS))
14554 goto skip_phy_reset;
14555
14556 err = tg3_phy_reset(tp);
14557 if (err)
14558 return err;
14559
14560 tg3_phy_set_wirespeed(tp);
14561
14562 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14563 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14564 tp->link_config.flowctrl);
14565
14566 tg3_writephy(tp, MII_BMCR,
14567 BMCR_ANENABLE | BMCR_ANRESTART);
14568 }
14569 }
14570
14571 skip_phy_reset:
14572 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14573 err = tg3_init_5401phy_dsp(tp);
14574 if (err)
14575 return err;
14576
14577 err = tg3_init_5401phy_dsp(tp);
14578 }
14579
14580 return err;
14581 }
14582
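/* Extract the board part number, and on boards with a Dell MFR_ID
 * ("1028") the bootcode version, from the PCI VPD read-only section.
 * If no usable VPD exists, fall back to a name derived from the PCI
 * device ID.
 */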
14583 static void tg3_read_vpd(struct tg3 *tp)
14584 {
14585 u8 *vpd_data;
14586 unsigned int block_end, rosize, len;
14587 u32 vpdlen;
14588 int j, i = 0;
14589
14590 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14591 if (!vpd_data)
14592 goto out_no_vpd;
14593
14594 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14595 if (i < 0)
14596 goto out_not_found;
14597
14598 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14599 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14600 i += PCI_VPD_LRDT_TAG_SIZE;
14601
14602 if (block_end > vpdlen)
14603 goto out_not_found;
14604
14605 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14606 PCI_VPD_RO_KEYWORD_MFR_ID);
14607 if (j > 0) {
14608 len = pci_vpd_info_field_size(&vpd_data[j]);
14609
14610 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14611 if (j + len > block_end || len != 4 ||
14612 memcmp(&vpd_data[j], "1028", 4))
14613 goto partno;
14614
14615 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14616 PCI_VPD_RO_KEYWORD_VENDOR0);
14617 if (j < 0)
14618 goto partno;
14619
14620 len = pci_vpd_info_field_size(&vpd_data[j]);
14621
14622 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14623 if (j + len > block_end)
14624 goto partno;
14625
14626 memcpy(tp->fw_ver, &vpd_data[j], len);
14627 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14628 }
14629
14630 partno:
14631 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14632 PCI_VPD_RO_KEYWORD_PARTNO);
14633 if (i < 0)
14634 goto out_not_found;
14635
14636 len = pci_vpd_info_field_size(&vpd_data[i]);
14637
14638 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14639 if (len > TG3_BPN_SIZE ||
14640 (len + i) > vpdlen)
14641 goto out_not_found;
14642
14643 memcpy(tp->board_part_number, &vpd_data[i], len);
14644
14645 out_not_found:
14646 kfree(vpd_data);
14647 if (tp->board_part_number[0])
14648 return;
14649
14650 out_no_vpd:
14651 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14652 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14653 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14654 strcpy(tp->board_part_number, "BCM5717");
14655 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14656 strcpy(tp->board_part_number, "BCM5718");
14657 else
14658 goto nomatch;
14659 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14660 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14661 strcpy(tp->board_part_number, "BCM57780");
14662 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14663 strcpy(tp->board_part_number, "BCM57760");
14664 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14665 strcpy(tp->board_part_number, "BCM57790");
14666 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14667 strcpy(tp->board_part_number, "BCM57788");
14668 else
14669 goto nomatch;
14670 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14671 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14672 strcpy(tp->board_part_number, "BCM57761");
14673 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14674 strcpy(tp->board_part_number, "BCM57765");
14675 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14676 strcpy(tp->board_part_number, "BCM57781");
14677 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14678 strcpy(tp->board_part_number, "BCM57785");
14679 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14680 strcpy(tp->board_part_number, "BCM57791");
14681 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14682 strcpy(tp->board_part_number, "BCM57795");
14683 else
14684 goto nomatch;
14685 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14686 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14687 strcpy(tp->board_part_number, "BCM57762");
14688 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14689 strcpy(tp->board_part_number, "BCM57766");
14690 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14691 strcpy(tp->board_part_number, "BCM57782");
14692 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14693 strcpy(tp->board_part_number, "BCM57786");
14694 else
14695 goto nomatch;
14696 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14697 strcpy(tp->board_part_number, "BCM95906");
14698 } else {
14699 nomatch:
14700 strcpy(tp->board_part_number, "none");
14701 }
14702 }
14703
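/* A valid firmware image starts with a word matching 0x0c000000 under
 * the 0xfc000000 mask, followed by a zero word.
 */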
14704 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14705 {
14706 u32 val;
14707
14708 if (tg3_nvram_read(tp, offset, &val) ||
14709 (val & 0xfc000000) != 0x0c000000 ||
14710 tg3_nvram_read(tp, offset + 4, &val) ||
14711 val != 0)
14712 return 0;
14713
14714 return 1;
14715 }
14716
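/* Append the bootcode version to fw_ver. Newer images carry a pointer
 * (at image offset 8) to a 16-byte version string; older images only
 * store packed major/minor numbers at TG3_NVM_PTREV_BCVER.
 */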
14717 static void tg3_read_bc_ver(struct tg3 *tp)
14718 {
14719 u32 val, offset, start, ver_offset;
14720 int i, dst_off;
14721 bool newver = false;
14722
14723 if (tg3_nvram_read(tp, 0xc, &offset) ||
14724 tg3_nvram_read(tp, 0x4, &start))
14725 return;
14726
14727 offset = tg3_nvram_logical_addr(tp, offset);
14728
14729 if (tg3_nvram_read(tp, offset, &val))
14730 return;
14731
14732 if ((val & 0xfc000000) == 0x0c000000) {
14733 if (tg3_nvram_read(tp, offset + 4, &val))
14734 return;
14735
14736 if (val == 0)
14737 newver = true;
14738 }
14739
14740 dst_off = strlen(tp->fw_ver);
14741
14742 if (newver) {
14743 if (TG3_VER_SIZE - dst_off < 16 ||
14744 tg3_nvram_read(tp, offset + 8, &ver_offset))
14745 return;
14746
14747 offset = offset + ver_offset - start;
14748 for (i = 0; i < 16; i += 4) {
14749 __be32 v;
14750 if (tg3_nvram_read_be32(tp, offset + i, &v))
14751 return;
14752
14753 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14754 }
14755 } else {
14756 u32 major, minor;
14757
14758 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14759 return;
14760
14761 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14762 TG3_NVM_BCVER_MAJSFT;
14763 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14764 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14765 "v%d.%02d", major, minor);
14766 }
14767 }
14768
14769 static void tg3_read_hwsb_ver(struct tg3 *tp)
14770 {
14771 u32 val, major, minor;
14772
14773 /* Use native endian representation */
14774 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14775 return;
14776
14777 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14778 TG3_NVM_HWSB_CFG1_MAJSFT;
14779 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14780 TG3_NVM_HWSB_CFG1_MINSFT;
14781
14782 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14783 }
14784
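/* Append the selfboot format version to fw_ver. The location of the
 * EDH (build/major/minor) word depends on the selfboot revision.
 */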
14785 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14786 {
14787 u32 offset, major, minor, build;
14788
14789 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14790
14791 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14792 return;
14793
14794 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14795 case TG3_EEPROM_SB_REVISION_0:
14796 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14797 break;
14798 case TG3_EEPROM_SB_REVISION_2:
14799 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14800 break;
14801 case TG3_EEPROM_SB_REVISION_3:
14802 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14803 break;
14804 case TG3_EEPROM_SB_REVISION_4:
14805 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14806 break;
14807 case TG3_EEPROM_SB_REVISION_5:
14808 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14809 break;
14810 case TG3_EEPROM_SB_REVISION_6:
14811 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14812 break;
14813 default:
14814 return;
14815 }
14816
14817 if (tg3_nvram_read(tp, offset, &val))
14818 return;
14819
14820 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14821 TG3_EEPROM_SB_EDH_BLD_SHFT;
14822 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14823 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14824 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14825
14826 if (minor > 99 || build > 26)
14827 return;
14828
14829 offset = strlen(tp->fw_ver);
14830 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14831 " v%d.%02d", major, minor);
14832
14833 if (build > 0) {
14834 offset = strlen(tp->fw_ver);
14835 if (offset < TG3_VER_SIZE - 1)
14836 tp->fw_ver[offset] = 'a' + build - 1;
14837 }
14838 }
14839
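/* Walk the NVRAM directory for the ASF initialization image and, if a
 * valid image is found, append its version string to fw_ver.
 */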
14840 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14841 {
14842 u32 val, offset, start;
14843 int i, vlen;
14844
14845 for (offset = TG3_NVM_DIR_START;
14846 offset < TG3_NVM_DIR_END;
14847 offset += TG3_NVM_DIRENT_SIZE) {
14848 if (tg3_nvram_read(tp, offset, &val))
14849 return;
14850
14851 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14852 break;
14853 }
14854
14855 if (offset == TG3_NVM_DIR_END)
14856 return;
14857
14858 if (!tg3_flag(tp, 5705_PLUS))
14859 start = 0x08000000;
14860 else if (tg3_nvram_read(tp, offset - 4, &start))
14861 return;
14862
14863 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14864 !tg3_fw_img_is_valid(tp, offset) ||
14865 tg3_nvram_read(tp, offset + 8, &val))
14866 return;
14867
14868 offset += val - start;
14869
14870 vlen = strlen(tp->fw_ver);
14871
14872 tp->fw_ver[vlen++] = ',';
14873 tp->fw_ver[vlen++] = ' ';
14874
14875 for (i = 0; i < 4; i++) {
14876 __be32 v;
14877 if (tg3_nvram_read_be32(tp, offset, &v))
14878 return;
14879
14880 offset += sizeof(v);
14881
14882 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14883 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14884 break;
14885 }
14886
14887 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14888 vlen += sizeof(v);
14889 }
14890 }
14891
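/* Note whether the APE firmware is up and advertises NCSI, so that the
 * version string appended later is labeled with the right firmware
 * type.
 */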
14892 static void tg3_probe_ncsi(struct tg3 *tp)
14893 {
14894 u32 apedata;
14895
14896 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14897 if (apedata != APE_SEG_SIG_MAGIC)
14898 return;
14899
14900 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14901 if (!(apedata & APE_FW_STATUS_READY))
14902 return;
14903
14904 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14905 tg3_flag_set(tp, APE_HAS_NCSI);
14906 }
14907
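/* Append the APE management firmware (NCSI/SMASH/DASH) version. */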
14908 static void tg3_read_dash_ver(struct tg3 *tp)
14909 {
14910 int vlen;
14911 u32 apedata;
14912 char *fwtype;
14913
14914 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14915
14916 if (tg3_flag(tp, APE_HAS_NCSI))
14917 fwtype = "NCSI";
14918 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14919 fwtype = "SMASH";
14920 else
14921 fwtype = "DASH";
14922
14923 vlen = strlen(tp->fw_ver);
14924
14925 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14926 fwtype,
14927 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14928 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14929 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14930 (apedata & APE_FW_VERSION_BLDMSK));
14931 }
14932
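/* 5762 only: the firmware version may also live in OTP. Scan the
 * packed bytes at OTP_ADDRESS_MAGIC0 and append the last nonzero byte
 * found as the version number.
 */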
14933 static void tg3_read_otp_ver(struct tg3 *tp)
14934 {
14935 u32 val, val2;
14936
14937 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14938 return;
14939
14940 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14941 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14942 TG3_OTP_MAGIC0_VALID(val)) {
14943 u64 val64 = (u64) val << 32 | val2;
14944 u32 ver = 0;
14945 int i, vlen;
14946
14947 for (i = 0; i < 7; i++) {
14948 if ((val64 & 0xff) == 0)
14949 break;
14950 ver = val64 & 0xff;
14951 val64 >>= 8;
14952 }
14953 vlen = strlen(tp->fw_ver);
14954 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14955 }
14956 }
14957
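/* Top-level firmware version probe: dispatch on the NVRAM magic value
 * to the bootcode, selfboot or hardware-selfboot readers, then append
 * any management firmware (ASF/APE) version.
 */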
14958 static void tg3_read_fw_ver(struct tg3 *tp)
14959 {
14960 u32 val;
14961 bool vpd_vers = false;
14962
14963 if (tp->fw_ver[0] != 0)
14964 vpd_vers = true;
14965
14966 if (tg3_flag(tp, NO_NVRAM)) {
14967 strcat(tp->fw_ver, "sb");
14968 tg3_read_otp_ver(tp);
14969 return;
14970 }
14971
14972 if (tg3_nvram_read(tp, 0, &val))
14973 return;
14974
14975 if (val == TG3_EEPROM_MAGIC)
14976 tg3_read_bc_ver(tp);
14977 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14978 tg3_read_sb_ver(tp, val);
14979 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14980 tg3_read_hwsb_ver(tp);
14981
14982 if (tg3_flag(tp, ENABLE_ASF)) {
14983 if (tg3_flag(tp, ENABLE_APE)) {
14984 tg3_probe_ncsi(tp);
14985 if (!vpd_vers)
14986 tg3_read_dash_ver(tp);
14987 } else if (!vpd_vers) {
14988 tg3_read_mgmtfw_ver(tp);
14989 }
14990 }
14991
14992 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14993 }
14994
14995 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14996 {
14997 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14998 return TG3_RX_RET_MAX_SIZE_5717;
14999 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15000 return TG3_RX_RET_MAX_SIZE_5700;
15001 else
15002 return TG3_RX_RET_MAX_SIZE_5705;
15003 }
15004
15005 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15006 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15007 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15008 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15009 { },
15010 };
15011
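/* On dual-port devices (5704/5714) find the PCI function backing the
 * other port. In single-port configurations the device is its own
 * peer.
 */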
15012 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15013 {
15014 struct pci_dev *peer;
15015 unsigned int func, devnr = tp->pdev->devfn & ~7;
15016
15017 for (func = 0; func < 8; func++) {
15018 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15019 if (peer && peer != tp->pdev)
15020 break;
15021 pci_dev_put(peer);
15022 }
15023 /* 5704 can be configured in single-port mode; set peer to
15024 * tp->pdev in that case.
15025 */
15026 if (!peer) {
15027 peer = tp->pdev;
15028 return peer;
15029 }
15030
15031 /*
15032 * We don't need to keep the refcount elevated; there's no way
15033 * to remove one half of this device without removing the other
15034 */
15035 pci_dev_put(peer);
15036
15037 return peer;
15038 }
15039
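/* Recover the chip revision ID. Newer chips report a placeholder in
 * MISC_HOST_CTRL and keep the real ASIC revision in a device-specific
 * product-ID config register; once known, derive the chip-family
 * feature flags from it.
 */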
15040 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15041 {
15042 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15043 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15044 u32 reg;
15045
15046 /* All devices that use the alternate
15047 * ASIC REV location have a CPMU.
15048 */
15049 tg3_flag_set(tp, CPMU_PRESENT);
15050
15051 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15053 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15054 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15055 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15056 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15057 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15058 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15059 reg = TG3PCI_GEN2_PRODID_ASICREV;
15060 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15061 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15062 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15063 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15064 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15065 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15066 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15067 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15068 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15069 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15070 reg = TG3PCI_GEN15_PRODID_ASICREV;
15071 else
15072 reg = TG3PCI_PRODID_ASICREV;
15073
15074 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15075 }
15076
15077 /* Wrong chip ID in 5752 A0. This code can be removed later
15078 * as A0 is not in production.
15079 */
15080 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15081 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15082
15083 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15084 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15085
15086 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15087 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15088 tg3_asic_rev(tp) == ASIC_REV_5720)
15089 tg3_flag_set(tp, 5717_PLUS);
15090
15091 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15092 tg3_asic_rev(tp) == ASIC_REV_57766)
15093 tg3_flag_set(tp, 57765_CLASS);
15094
15095 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15096 tg3_asic_rev(tp) == ASIC_REV_5762)
15097 tg3_flag_set(tp, 57765_PLUS);
15098
15099 /* Intentionally exclude ASIC_REV_5906 */
15100 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15101 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15102 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15103 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15104 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15105 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15106 tg3_flag(tp, 57765_PLUS))
15107 tg3_flag_set(tp, 5755_PLUS);
15108
15109 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15110 tg3_asic_rev(tp) == ASIC_REV_5714)
15111 tg3_flag_set(tp, 5780_CLASS);
15112
15113 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15114 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15115 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15116 tg3_flag(tp, 5755_PLUS) ||
15117 tg3_flag(tp, 5780_CLASS))
15118 tg3_flag_set(tp, 5750_PLUS);
15119
15120 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15121 tg3_flag(tp, 5750_PLUS))
15122 tg3_flag_set(tp, 5705_PLUS);
15123 }
15124
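/* Decide whether this device is 10/100 only, either from the board ID
 * straps/PHY type or from the match entry's driver_data flags.
 */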
15125 static bool tg3_10_100_only_device(struct tg3 *tp,
15126 const struct pci_device_id *ent)
15127 {
15128 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15129
15130 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15131 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15132 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15133 return true;
15134
15135 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15136 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15137 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15138 return true;
15139 } else {
15140 return true;
15141 }
15142 }
15143
15144 return false;
15145 }
15146
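/* Probe-time discovery of chip capabilities, bus type and register
 * access quirks. The access-method selection done here must happen
 * before the first MMIO register access (see the PCI-X note below).
 */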
15147 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15148 {
15149 u32 misc_ctrl_reg;
15150 u32 pci_state_reg, grc_misc_cfg;
15151 u32 val;
15152 u16 pci_cmd;
15153 int err;
15154
15155 /* Force memory write invalidate off. If we leave it on,
15156 * then on 5700_BX chips we have to enable a workaround.
15157 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15158 * to match the cacheline size. The Broadcom driver has this
15159 * workaround but turns MWI off all the time, so it never uses
15160 * it. This seems to suggest that the workaround is insufficient.
15161 */
15162 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15163 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15164 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15165
15166 /* Important! -- Make sure register accesses are byteswapped
15167 * correctly. Also, for those chips that require it, make
15168 * sure that indirect register accesses are enabled before
15169 * the first operation.
15170 */
15171 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15172 &misc_ctrl_reg);
15173 tp->misc_host_ctrl |= (misc_ctrl_reg &
15174 MISC_HOST_CTRL_CHIPREV);
15175 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15176 tp->misc_host_ctrl);
15177
15178 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15179
15180 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15181 * we need to disable memory space and use config cycles
15182 * only to access all registers. The 5702/03 chips
15183 * can mistakenly decode the special cycles from the
15184 * ICH chipsets as memory write cycles, causing corruption
15185 * of register and memory space. Only certain ICH bridges
15186 * will drive special cycles with non-zero data during the
15187 * address phase which can fall within the 5703's address
15188 * range. This is not an ICH bug as the PCI spec allows
15189 * non-zero address during special cycles. However, only
15190 * these ICH bridges are known to drive non-zero addresses
15191 * during special cycles.
15192 *
15193 * Since special cycles do not cross PCI bridges, we only
15194 * enable this workaround if the 5703 is on the secondary
15195 * bus of these ICH bridges.
15196 */
15197 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15198 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15199 static struct tg3_dev_id {
15200 u32 vendor;
15201 u32 device;
15202 u32 rev;
15203 } ich_chipsets[] = {
15204 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15205 PCI_ANY_ID },
15206 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15207 PCI_ANY_ID },
15208 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15209 0xa },
15210 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15211 PCI_ANY_ID },
15212 { },
15213 };
15214 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15215 struct pci_dev *bridge = NULL;
15216
15217 while (pci_id->vendor != 0) {
15218 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15219 bridge);
15220 if (!bridge) {
15221 pci_id++;
15222 continue;
15223 }
15224 if (pci_id->rev != PCI_ANY_ID) {
15225 if (bridge->revision > pci_id->rev)
15226 continue;
15227 }
15228 if (bridge->subordinate &&
15229 (bridge->subordinate->number ==
15230 tp->pdev->bus->number)) {
15231 tg3_flag_set(tp, ICH_WORKAROUND);
15232 pci_dev_put(bridge);
15233 break;
15234 }
15235 }
15236 }
15237
15238 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15239 static struct tg3_dev_id {
15240 u32 vendor;
15241 u32 device;
15242 } bridge_chipsets[] = {
15243 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15244 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15245 { },
15246 };
15247 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15248 struct pci_dev *bridge = NULL;
15249
15250 while (pci_id->vendor != 0) {
15251 bridge = pci_get_device(pci_id->vendor,
15252 pci_id->device,
15253 bridge);
15254 if (!bridge) {
15255 pci_id++;
15256 continue;
15257 }
15258 if (bridge->subordinate &&
15259 (bridge->subordinate->number <=
15260 tp->pdev->bus->number) &&
15261 (bridge->subordinate->busn_res.end >=
15262 tp->pdev->bus->number)) {
15263 tg3_flag_set(tp, 5701_DMA_BUG);
15264 pci_dev_put(bridge);
15265 break;
15266 }
15267 }
15268 }
15269
15270 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15271 * DMA addresses > 40-bit. This bridge may have additional
15272 * 57xx devices behind it, in some 4-port NIC designs for example.
15273 * Any tg3 device found behind the bridge will also need the 40-bit
15274 * DMA workaround.
15275 */
15276 if (tg3_flag(tp, 5780_CLASS)) {
15277 tg3_flag_set(tp, 40BIT_DMA_BUG);
15278 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15279 } else {
15280 struct pci_dev *bridge = NULL;
15281
15282 do {
15283 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15284 PCI_DEVICE_ID_SERVERWORKS_EPB,
15285 bridge);
15286 if (bridge && bridge->subordinate &&
15287 (bridge->subordinate->number <=
15288 tp->pdev->bus->number) &&
15289 (bridge->subordinate->busn_res.end >=
15290 tp->pdev->bus->number)) {
15291 tg3_flag_set(tp, 40BIT_DMA_BUG);
15292 pci_dev_put(bridge);
15293 break;
15294 }
15295 } while (bridge);
15296 }
15297
15298 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15299 tg3_asic_rev(tp) == ASIC_REV_5714)
15300 tp->pdev_peer = tg3_find_peer(tp);
15301
15302 /* Determine TSO capabilities */
15303 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15304 ; /* Do nothing. HW bug. */
15305 else if (tg3_flag(tp, 57765_PLUS))
15306 tg3_flag_set(tp, HW_TSO_3);
15307 else if (tg3_flag(tp, 5755_PLUS) ||
15308 tg3_asic_rev(tp) == ASIC_REV_5906)
15309 tg3_flag_set(tp, HW_TSO_2);
15310 else if (tg3_flag(tp, 5750_PLUS)) {
15311 tg3_flag_set(tp, HW_TSO_1);
15312 tg3_flag_set(tp, TSO_BUG);
15313 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15314 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15315 tg3_flag_clear(tp, TSO_BUG);
15316 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15317 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15318 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15319 tg3_flag_set(tp, FW_TSO);
15320 tg3_flag_set(tp, TSO_BUG);
15321 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15322 tp->fw_needed = FIRMWARE_TG3TSO5;
15323 else
15324 tp->fw_needed = FIRMWARE_TG3TSO;
15325 }
15326
15327 /* Selectively allow TSO based on operating conditions */
15328 if (tg3_flag(tp, HW_TSO_1) ||
15329 tg3_flag(tp, HW_TSO_2) ||
15330 tg3_flag(tp, HW_TSO_3) ||
15331 tg3_flag(tp, FW_TSO)) {
15332 /* For firmware TSO, assume ASF is disabled.
15333 * We'll disable TSO later if we discover ASF
15334 * is enabled in tg3_get_eeprom_hw_cfg().
15335 */
15336 tg3_flag_set(tp, TSO_CAPABLE);
15337 } else {
15338 tg3_flag_clear(tp, TSO_CAPABLE);
15339 tg3_flag_clear(tp, TSO_BUG);
15340 tp->fw_needed = NULL;
15341 }
15342
15343 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15344 tp->fw_needed = FIRMWARE_TG3;
15345
15346 tp->irq_max = 1;
15347
15348 if (tg3_flag(tp, 5750_PLUS)) {
15349 tg3_flag_set(tp, SUPPORT_MSI);
15350 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15351 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15352 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15353 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15354 tp->pdev_peer == tp->pdev))
15355 tg3_flag_clear(tp, SUPPORT_MSI);
15356
15357 if (tg3_flag(tp, 5755_PLUS) ||
15358 tg3_asic_rev(tp) == ASIC_REV_5906) {
15359 tg3_flag_set(tp, 1SHOT_MSI);
15360 }
15361
15362 if (tg3_flag(tp, 57765_PLUS)) {
15363 tg3_flag_set(tp, SUPPORT_MSIX);
15364 tp->irq_max = TG3_IRQ_MAX_VECS;
15365 }
15366 }
15367
15368 tp->txq_max = 1;
15369 tp->rxq_max = 1;
15370 if (tp->irq_max > 1) {
15371 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15372 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15373
15374 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15375 tg3_asic_rev(tp) == ASIC_REV_5720)
15376 tp->txq_max = tp->irq_max - 1;
15377 }
15378
15379 if (tg3_flag(tp, 5755_PLUS) ||
15380 tg3_asic_rev(tp) == ASIC_REV_5906)
15381 tg3_flag_set(tp, SHORT_DMA_BUG);
15382
15383 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15384 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15385
15386 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15387 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15388 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15389 tg3_asic_rev(tp) == ASIC_REV_5762)
15390 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15391
15392 if (tg3_flag(tp, 57765_PLUS) &&
15393 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15394 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15395
15396 if (!tg3_flag(tp, 5705_PLUS) ||
15397 tg3_flag(tp, 5780_CLASS) ||
15398 tg3_flag(tp, USE_JUMBO_BDFLAG))
15399 tg3_flag_set(tp, JUMBO_CAPABLE);
15400
15401 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15402 &pci_state_reg);
15403
15404 if (pci_is_pcie(tp->pdev)) {
15405 u16 lnkctl;
15406
15407 tg3_flag_set(tp, PCI_EXPRESS);
15408
15409 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15410 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15411 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15412 tg3_flag_clear(tp, HW_TSO_2);
15413 tg3_flag_clear(tp, TSO_CAPABLE);
15414 }
15415 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15416 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15417 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15418 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15419 tg3_flag_set(tp, CLKREQ_BUG);
15420 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15421 tg3_flag_set(tp, L1PLLPD_EN);
15422 }
15423 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15424 /* BCM5785 devices are effectively PCIe devices, and should
15425 * follow PCIe codepaths, but do not have a PCIe capabilities
15426 * section.
15427 */
15428 tg3_flag_set(tp, PCI_EXPRESS);
15429 } else if (!tg3_flag(tp, 5705_PLUS) ||
15430 tg3_flag(tp, 5780_CLASS)) {
15431 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15432 if (!tp->pcix_cap) {
15433 dev_err(&tp->pdev->dev,
15434 "Cannot find PCI-X capability, aborting\n");
15435 return -EIO;
15436 }
15437
15438 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15439 tg3_flag_set(tp, PCIX_MODE);
15440 }
15441
15442 /* If we have an AMD 762 or VIA K8T800 chipset, write
15443 * reordering to the mailbox registers done by the host
15444 * controller can cause major trouble. We read back from
15445 * every mailbox register write to force the writes to be
15446 * posted to the chip in order.
15447 */
15448 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15449 !tg3_flag(tp, PCI_EXPRESS))
15450 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15451
15452 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15453 &tp->pci_cacheline_sz);
15454 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15455 &tp->pci_lat_timer);
15456 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15457 tp->pci_lat_timer < 64) {
15458 tp->pci_lat_timer = 64;
15459 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15460 tp->pci_lat_timer);
15461 }
15462
15463 /* Important! -- It is critical that the PCI-X hw workaround
15464 * situation is decided before the first MMIO register access.
15465 */
15466 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15467 /* 5700 BX chips need to have their TX producer index
15468 * mailboxes written twice to work around a bug.
15469 */
15470 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15471
15472 /* If we are in PCI-X mode, enable the register write workaround.
15473 *
15474 * The workaround is to use indirect register accesses
15475 * for all chip writes not to mailbox registers.
15476 */
15477 if (tg3_flag(tp, PCIX_MODE)) {
15478 u32 pm_reg;
15479
15480 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15481
15482 /* The chip can have its power management PCI config
15483 * space registers clobbered due to this bug.
15484 * So explicitly force the chip into D0 here.
15485 */
15486 pci_read_config_dword(tp->pdev,
15487 tp->pm_cap + PCI_PM_CTRL,
15488 &pm_reg);
15489 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15490 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15491 pci_write_config_dword(tp->pdev,
15492 tp->pm_cap + PCI_PM_CTRL,
15493 pm_reg);
15494
15495 /* Also, force SERR#/PERR# in PCI command. */
15496 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15497 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15498 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15499 }
15500 }
15501
15502 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15503 tg3_flag_set(tp, PCI_HIGH_SPEED);
15504 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15505 tg3_flag_set(tp, PCI_32BIT);
15506
15507 /* Chip-specific fixup from Broadcom driver */
15508 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15509 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15510 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15511 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15512 }
15513
15514 /* Default fast path register access methods */
15515 tp->read32 = tg3_read32;
15516 tp->write32 = tg3_write32;
15517 tp->read32_mbox = tg3_read32;
15518 tp->write32_mbox = tg3_write32;
15519 tp->write32_tx_mbox = tg3_write32;
15520 tp->write32_rx_mbox = tg3_write32;
15521
15522 /* Various workaround register access methods */
15523 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15524 tp->write32 = tg3_write_indirect_reg32;
15525 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15526 (tg3_flag(tp, PCI_EXPRESS) &&
15527 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15528 /*
15529 * Back to back register writes can cause problems on these
15530 * chips, the workaround is to read back all reg writes
15531 * except those to mailbox regs.
15532 *
15533 * See tg3_write_indirect_reg32().
15534 */
15535 tp->write32 = tg3_write_flush_reg32;
15536 }
15537
15538 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15539 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15540 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15541 tp->write32_rx_mbox = tg3_write_flush_reg32;
15542 }
15543
15544 if (tg3_flag(tp, ICH_WORKAROUND)) {
15545 tp->read32 = tg3_read_indirect_reg32;
15546 tp->write32 = tg3_write_indirect_reg32;
15547 tp->read32_mbox = tg3_read_indirect_mbox;
15548 tp->write32_mbox = tg3_write_indirect_mbox;
15549 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15550 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15551
15552 iounmap(tp->regs);
15553 tp->regs = NULL;
15554
15555 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15556 pci_cmd &= ~PCI_COMMAND_MEMORY;
15557 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15558 }
15559 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15560 tp->read32_mbox = tg3_read32_mbox_5906;
15561 tp->write32_mbox = tg3_write32_mbox_5906;
15562 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15563 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15564 }
15565
15566 if (tp->write32 == tg3_write_indirect_reg32 ||
15567 (tg3_flag(tp, PCIX_MODE) &&
15568 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15569 tg3_asic_rev(tp) == ASIC_REV_5701)))
15570 tg3_flag_set(tp, SRAM_USE_CONFIG);
15571
15572 /* The memory arbiter has to be enabled in order for SRAM accesses
15573 * to succeed. Normally on powerup the tg3 chip firmware will make
15574 * sure it is enabled, but other entities such as system netboot
15575 * code might disable it.
15576 */
15577 val = tr32(MEMARB_MODE);
15578 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15579
15580 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15581 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15582 tg3_flag(tp, 5780_CLASS)) {
15583 if (tg3_flag(tp, PCIX_MODE)) {
15584 pci_read_config_dword(tp->pdev,
15585 tp->pcix_cap + PCI_X_STATUS,
15586 &val);
15587 tp->pci_fn = val & 0x7;
15588 }
15589 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15590 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15591 tg3_asic_rev(tp) == ASIC_REV_5720) {
15592 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15593 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15594 val = tr32(TG3_CPMU_STATUS);
15595
15596 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15597 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15598 else
15599 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15600 TG3_CPMU_STATUS_FSHFT_5719;
15601 }
15602
15603 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15604 tp->write32_tx_mbox = tg3_write_flush_reg32;
15605 tp->write32_rx_mbox = tg3_write_flush_reg32;
15606 }
15607
15608 /* Get eeprom hw config before calling tg3_set_power_state().
15609 * In particular, the TG3_FLAG_IS_NIC flag must be
15610 * determined before calling tg3_set_power_state() so that
15611 * we know whether or not to switch out of Vaux power.
15612 * When the flag is set, it means that GPIO1 is used for eeprom
15613 * write protect and also implies that it is a LOM where GPIOs
15614 * are not used to switch power.
15615 */
15616 tg3_get_eeprom_hw_cfg(tp);
15617
15618 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15619 tg3_flag_clear(tp, TSO_CAPABLE);
15620 tg3_flag_clear(tp, TSO_BUG);
15621 tp->fw_needed = NULL;
15622 }
15623
15624 if (tg3_flag(tp, ENABLE_APE)) {
15625 /* Allow reads and writes to the
15626 * APE register and memory space.
15627 */
15628 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15629 PCISTATE_ALLOW_APE_SHMEM_WR |
15630 PCISTATE_ALLOW_APE_PSPACE_WR;
15631 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15632 pci_state_reg);
15633
15634 tg3_ape_lock_init(tp);
15635 }
15636
15637 /* Set up tp->grc_local_ctrl before calling
15638 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15639 * will bring 5700's external PHY out of reset.
15640 * It is also used as eeprom write protect on LOMs.
15641 */
15642 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15643 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15644 tg3_flag(tp, EEPROM_WRITE_PROT))
15645 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15646 GRC_LCLCTRL_GPIO_OUTPUT1);
15647 /* Unused GPIO3 must be driven as output on 5752 because there
15648 * are no pull-up resistors on unused GPIO pins.
15649 */
15650 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15651 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15652
15653 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15654 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15655 tg3_flag(tp, 57765_CLASS))
15656 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15657
15658 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15659 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15660 /* Turn off the debug UART. */
15661 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15662 if (tg3_flag(tp, IS_NIC))
15663 /* Keep VMain power. */
15664 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15665 GRC_LCLCTRL_GPIO_OUTPUT0;
15666 }
15667
15668 if (tg3_asic_rev(tp) == ASIC_REV_5762)
15669 tp->grc_local_ctrl |=
15670 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15671
15672 /* Switch out of Vaux if it is a NIC */
15673 tg3_pwrsrc_switch_to_vmain(tp);
15674
15675 /* Derive initial jumbo mode from MTU assigned in
15676 * ether_setup() via the alloc_etherdev() call
15677 */
15678 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15679 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15680
15681 /* Determine WakeOnLan speed to use. */
15682 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15683 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15684 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15685 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15686 tg3_flag_clear(tp, WOL_SPEED_100MB);
15687 } else {
15688 tg3_flag_set(tp, WOL_SPEED_100MB);
15689 }
15690
15691 if (tg3_asic_rev(tp) == ASIC_REV_5906)
15692 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15693
15694 /* A few boards don't want Ethernet@WireSpeed phy feature */
15695 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15696 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15697 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15698 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15699 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15700 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15701 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15702
15703 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15704 tg3_chip_rev(tp) == CHIPREV_5704_AX)
15705 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15706 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15707 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15708
15709 if (tg3_flag(tp, 5705_PLUS) &&
15710 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15711 tg3_asic_rev(tp) != ASIC_REV_5785 &&
15712 tg3_asic_rev(tp) != ASIC_REV_57780 &&
15713 !tg3_flag(tp, 57765_PLUS)) {
15714 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15715 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15716 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15717 tg3_asic_rev(tp) == ASIC_REV_5761) {
15718 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15719 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15720 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15721 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15722 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15723 } else
15724 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15725 }
15726
15727 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15728 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15729 tp->phy_otp = tg3_read_otp_phycfg(tp);
15730 if (tp->phy_otp == 0)
15731 tp->phy_otp = TG3_OTP_DEFAULT;
15732 }
15733
15734 if (tg3_flag(tp, CPMU_PRESENT))
15735 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15736 else
15737 tp->mi_mode = MAC_MI_MODE_BASE;
15738
15739 tp->coalesce_mode = 0;
15740 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15741 tg3_chip_rev(tp) != CHIPREV_5700_BX)
15742 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15743
15744 /* Set these bits to enable statistics workaround. */
15745 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15746 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15747 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15748 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15749 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15750 }
15751
15752 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15753 tg3_asic_rev(tp) == ASIC_REV_57780)
15754 tg3_flag_set(tp, USE_PHYLIB);
15755
15756 err = tg3_mdio_init(tp);
15757 if (err)
15758 return err;
15759
15760 /* Initialize data/descriptor byte/word swapping. */
15761 val = tr32(GRC_MODE);
15762 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15763 tg3_asic_rev(tp) == ASIC_REV_5762)
15764 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15765 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15766 GRC_MODE_B2HRX_ENABLE |
15767 GRC_MODE_HTX2B_ENABLE |
15768 GRC_MODE_HOST_STACKUP);
15769 else
15770 val &= GRC_MODE_HOST_STACKUP;
15771
15772 tw32(GRC_MODE, val | tp->grc_mode);
15773
15774 tg3_switch_clocks(tp);
15775
15776 /* Clear this out for sanity. */
15777 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15778
15779 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15780 &pci_state_reg);
15781 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15782 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15783 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15784 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15785 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15786 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15787 void __iomem *sram_base;
15788
15789 /* Write some dummy words into the SRAM status block
15790 * area, see if it reads back correctly. If the return
15791 * value is bad, force enable the PCIX workaround.
15792 */
15793 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15794
15795 writel(0x00000000, sram_base);
15796 writel(0x00000000, sram_base + 4);
15797 writel(0xffffffff, sram_base + 4);
15798 if (readl(sram_base) != 0x00000000)
15799 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15800 }
15801 }
15802
15803 udelay(50);
15804 tg3_nvram_init(tp);
15805
15806 grc_misc_cfg = tr32(GRC_MISC_CFG);
15807 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15808
15809 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15810 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15811 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15812 tg3_flag_set(tp, IS_5788);
15813
15814 if (!tg3_flag(tp, IS_5788) &&
15815 tg3_asic_rev(tp) != ASIC_REV_5700)
15816 tg3_flag_set(tp, TAGGED_STATUS);
15817 if (tg3_flag(tp, TAGGED_STATUS)) {
15818 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15819 HOSTCC_MODE_CLRTICK_TXBD);
15820
15821 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15822 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15823 tp->misc_host_ctrl);
15824 }
15825
15826 /* Preserve the APE MAC_MODE bits */
15827 if (tg3_flag(tp, ENABLE_APE))
15828 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15829 else
15830 tp->mac_mode = 0;
15831
15832 if (tg3_10_100_only_device(tp, ent))
15833 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15834
15835 err = tg3_phy_probe(tp);
15836 if (err) {
15837 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15838 /* ... but do not return immediately ... */
15839 tg3_mdio_fini(tp);
15840 }
15841
15842 tg3_read_vpd(tp);
15843 tg3_read_fw_ver(tp);
15844
15845 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15846 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15847 } else {
15848 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15849 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15850 else
15851 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15852 }
15853
15854 /* 5700 {AX,BX} chips have a broken status block link
15855 * change bit implementation, so we must use the
15856 * status register in those cases.
15857 */
15858 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15859 tg3_flag_set(tp, USE_LINKCHG_REG);
15860 else
15861 tg3_flag_clear(tp, USE_LINKCHG_REG);
15862
15863 /* The led_ctrl is set during tg3_phy_probe; here we might
15864 * have to force the link status polling mechanism based
15865 * upon subsystem IDs.
15866 */
15867 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15868 tg3_asic_rev(tp) == ASIC_REV_5701 &&
15869 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15870 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15871 tg3_flag_set(tp, USE_LINKCHG_REG);
15872 }
15873
15874 /* For all SERDES we poll the MAC status register. */
15875 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15876 tg3_flag_set(tp, POLL_SERDES);
15877 else
15878 tg3_flag_clear(tp, POLL_SERDES);
15879
15880 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15881 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15882 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
15883 tg3_flag(tp, PCIX_MODE)) {
15884 tp->rx_offset = NET_SKB_PAD;
15885 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15886 tp->rx_copy_thresh = ~(u16)0;
15887 #endif
15888 }
15889
15890 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15891 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15892 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15893
15894 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15895
15896 /* Increment the rx prod index on the rx std ring by at most
15897 * 8 for these chips to work around hw errata.
15898 */
15899 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15900 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15901 tg3_asic_rev(tp) == ASIC_REV_5755)
15902 tp->rx_std_max_post = 8;
15903
15904 if (tg3_flag(tp, ASPM_WORKAROUND))
15905 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15906 PCIE_PWR_MGMT_L1_THRESH_MSK;
15907
15908 return err;
15909 }
15910
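/* Editor's illustrative sketch, not part of the driver: the read-back
 * flush pattern that tg3_write_flush_reg32() (selected above for the
 * MBOX_WRITE_REORDER and 5701/5750-A0 cases) relies on. Issuing a
 * read of the same register after the write forces host bridges such
 * as the AMD 762 to retire the posted write before any later access.
 * The helper name is hypothetical.
 */
static inline void example_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);	/* post the write */
	readl(tp->regs + off);		/* read back to force completion */
}
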
15911 #ifdef CONFIG_SPARC
15912 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15913 {
15914 struct net_device *dev = tp->dev;
15915 struct pci_dev *pdev = tp->pdev;
15916 struct device_node *dp = pci_device_to_OF_node(pdev);
15917 const unsigned char *addr;
15918 int len;
15919
15920 addr = of_get_property(dp, "local-mac-address", &len);
15921 if (addr && len == 6) {
15922 memcpy(dev->dev_addr, addr, 6);
15923 return 0;
15924 }
15925 return -ENODEV;
15926 }
15927
15928 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15929 {
15930 struct net_device *dev = tp->dev;
15931
15932 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15933 return 0;
15934 }
15935 #endif
15936
15937 static int tg3_get_device_address(struct tg3 *tp)
15938 {
15939 struct net_device *dev = tp->dev;
15940 u32 hi, lo, mac_offset;
15941 int addr_ok = 0;
15942 int err;
15943
15944 #ifdef CONFIG_SPARC
15945 if (!tg3_get_macaddr_sparc(tp))
15946 return 0;
15947 #endif
15948
15949 if (tg3_flag(tp, IS_SSB_CORE)) {
15950 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
15951 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
15952 return 0;
15953 }
15954
15955 mac_offset = 0x7c;
15956 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15957 tg3_flag(tp, 5780_CLASS)) {
15958 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15959 mac_offset = 0xcc;
15960 if (tg3_nvram_lock(tp))
15961 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15962 else
15963 tg3_nvram_unlock(tp);
15964 } else if (tg3_flag(tp, 5717_PLUS)) {
15965 if (tp->pci_fn & 1)
15966 mac_offset = 0xcc;
15967 if (tp->pci_fn > 1)
15968 mac_offset += 0x18c;
15969 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15970 mac_offset = 0x10;
15971
15972 /* First try to get it from MAC address mailbox. */
15973 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15974 if ((hi >> 16) == 0x484b) {
15975 dev->dev_addr[0] = (hi >> 8) & 0xff;
15976 dev->dev_addr[1] = (hi >> 0) & 0xff;
15977
15978 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15979 dev->dev_addr[2] = (lo >> 24) & 0xff;
15980 dev->dev_addr[3] = (lo >> 16) & 0xff;
15981 dev->dev_addr[4] = (lo >> 8) & 0xff;
15982 dev->dev_addr[5] = (lo >> 0) & 0xff;
15983
15984 /* Some old bootcode may report a 0 MAC address in SRAM */
15985 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15986 }
15987 if (!addr_ok) {
15988 /* Next, try NVRAM. */
15989 if (!tg3_flag(tp, NO_NVRAM) &&
15990 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15991 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15992 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15993 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15994 }
15995 /* Finally just fetch it out of the MAC control regs. */
15996 else {
15997 hi = tr32(MAC_ADDR_0_HIGH);
15998 lo = tr32(MAC_ADDR_0_LOW);
15999
16000 dev->dev_addr[5] = lo & 0xff;
16001 dev->dev_addr[4] = (lo >> 8) & 0xff;
16002 dev->dev_addr[3] = (lo >> 16) & 0xff;
16003 dev->dev_addr[2] = (lo >> 24) & 0xff;
16004 dev->dev_addr[1] = hi & 0xff;
16005 dev->dev_addr[0] = (hi >> 8) & 0xff;
16006 }
16007 }
16008
16009 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16010 #ifdef CONFIG_SPARC
16011 if (!tg3_get_default_macaddr_sparc(tp))
16012 return 0;
16013 #endif
16014 return -EINVAL;
16015 }
16016 return 0;
16017 }
16018
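/* Editor's illustrative sketch, not part of the driver: how the six
 * address bytes are unpacked from the two SRAM mailbox words read in
 * tg3_get_device_address() above. The top 16 bits of the high word
 * carry the 0x484b ("HK") signature; the low 16 bits plus the low
 * word hold the address in network byte order. The helper name is
 * hypothetical.
 */
static void example_unpack_mac(u32 hi, u32 lo, u8 addr[6])
{
	addr[0] = (hi >> 8) & 0xff;
	addr[1] = hi & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >> 8) & 0xff;
	addr[5] = lo & 0xff;
}
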
16019 #define BOUNDARY_SINGLE_CACHELINE 1
16020 #define BOUNDARY_MULTI_CACHELINE 2
16021
16022 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16023 {
16024 int cacheline_size;
16025 u8 byte;
16026 int goal;
16027
16028 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16029 if (byte == 0)
16030 cacheline_size = 1024;
16031 else
16032 cacheline_size = (int) byte * 4;
16033
16034 /* On 5703 and later chips, the boundary bits have no
16035 * effect.
16036 */
16037 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16038 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16039 !tg3_flag(tp, PCI_EXPRESS))
16040 goto out;
16041
16042 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16043 goal = BOUNDARY_MULTI_CACHELINE;
16044 #else
16045 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16046 goal = BOUNDARY_SINGLE_CACHELINE;
16047 #else
16048 goal = 0;
16049 #endif
16050 #endif
16051
16052 if (tg3_flag(tp, 57765_PLUS)) {
16053 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16054 goto out;
16055 }
16056
16057 if (!goal)
16058 goto out;
16059
16060 /* PCI controllers on most RISC systems tend to disconnect
16061 * when a device tries to burst across a cache-line boundary.
16062 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16063 *
16064 * Unfortunately, for PCI-E there are only limited
16065 * write-side controls for this, and thus for reads
16066 * we will still get the disconnects. We'll also waste
16067 * these PCI cycles for both read and write for chips
16068 * other than 5700 and 5701 which do not implement the
16069 * boundary bits.
16070 */
16071 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16072 switch (cacheline_size) {
16073 case 16:
16074 case 32:
16075 case 64:
16076 case 128:
16077 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16078 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16079 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16080 } else {
16081 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16082 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16083 }
16084 break;
16085
16086 case 256:
16087 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16088 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16089 break;
16090
16091 default:
16092 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16093 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16094 break;
16095 }
16096 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16097 switch (cacheline_size) {
16098 case 16:
16099 case 32:
16100 case 64:
16101 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16102 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16103 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16104 break;
16105 }
16106 /* fallthrough */
16107 case 128:
16108 default:
16109 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16110 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16111 break;
16112 }
16113 } else {
16114 switch (cacheline_size) {
16115 case 16:
16116 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16117 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16118 DMA_RWCTRL_WRITE_BNDRY_16);
16119 break;
16120 }
16121 /* fallthrough */
16122 case 32:
16123 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16124 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16125 DMA_RWCTRL_WRITE_BNDRY_32);
16126 break;
16127 }
16128 /* fallthrough */
16129 case 64:
16130 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16131 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16132 DMA_RWCTRL_WRITE_BNDRY_64);
16133 break;
16134 }
16135 /* fallthrough */
16136 case 128:
16137 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16138 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16139 DMA_RWCTRL_WRITE_BNDRY_128);
16140 break;
16141 }
16142 /* fallthrough */
16143 case 256:
16144 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16145 DMA_RWCTRL_WRITE_BNDRY_256);
16146 break;
16147 case 512:
16148 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16149 DMA_RWCTRL_WRITE_BNDRY_512);
16150 break;
16151 case 1024:
16152 default:
16153 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16154 DMA_RWCTRL_WRITE_BNDRY_1024);
16155 break;
16156 }
16157 }
16158
16159 out:
16160 return val;
16161 }
16162
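/* Editor's illustrative sketch, not part of the driver:
 * PCI_CACHE_LINE_SIZE holds the line size in units of 32-bit words,
 * which is why tg3_calc_dma_bndry() multiplies the byte it reads by
 * four. A value of zero typically means the register was left
 * unprogrammed, and the code above then assumes a conservative 1024
 * bytes. The helper name is hypothetical.
 */
static int example_cacheline_bytes(struct pci_dev *pdev)
{
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	return byte ? byte * 4 : 1024;	/* 0 => unprogrammed, assume 1024 */
}
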
16163 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16164 int size, int to_device)
16165 {
16166 struct tg3_internal_buffer_desc test_desc;
16167 u32 sram_dma_descs;
16168 int i, ret;
16169
16170 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16171
16172 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16173 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16174 tw32(RDMAC_STATUS, 0);
16175 tw32(WDMAC_STATUS, 0);
16176
16177 tw32(BUFMGR_MODE, 0);
16178 tw32(FTQ_RESET, 0);
16179
16180 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16181 test_desc.addr_lo = buf_dma & 0xffffffff;
16182 test_desc.nic_mbuf = 0x00002100;
16183 test_desc.len = size;
16184
16185 /*
16186 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz,
16187 * the *second* time the tg3 driver was getting loaded after an
16188 * initial scan.
16189 *
16190 * Broadcom tells me:
16191 * ...the DMA engine is connected to the GRC block and a DMA
16192 * reset may affect the GRC block in some unpredictable way...
16193 * The behavior of resets to individual blocks has not been tested.
16194 *
16195 * Broadcom noted the GRC reset will also reset all sub-components.
16196 */
16197 if (to_device) {
16198 test_desc.cqid_sqid = (13 << 8) | 2;
16199
16200 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16201 udelay(40);
16202 } else {
16203 test_desc.cqid_sqid = (16 << 8) | 7;
16204
16205 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16206 udelay(40);
16207 }
16208 test_desc.flags = 0x00000005;
16209
16210 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16211 u32 val;
16212
16213 val = *(((u32 *)&test_desc) + i);
16214 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16215 sram_dma_descs + (i * sizeof(u32)));
16216 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16217 }
16218 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16219
16220 if (to_device)
16221 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16222 else
16223 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16224
16225 ret = -ENODEV;
16226 for (i = 0; i < 40; i++) {
16227 u32 val;
16228
16229 if (to_device)
16230 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16231 else
16232 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16233 if ((val & 0xffff) == sram_dma_descs) {
16234 ret = 0;
16235 break;
16236 }
16237
16238 udelay(100);
16239 }
16240
16241 return ret;
16242 }
16243
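/* Editor's illustrative sketch, not part of the driver: the
 * config-space memory window tg3_do_test_dma() uses above to stage
 * its descriptor in NIC SRAM. A write to TG3PCI_MEM_WIN_BASE_ADDR
 * aims the window at an SRAM address, the following write to
 * TG3PCI_MEM_WIN_DATA lands at that address, and the window is
 * parked back at zero afterwards. The helper name is hypothetical.
 */
static void example_sram_write(struct tg3 *tp, u32 sram_addr, u32 val)
{
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, sram_addr);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
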
16244 #define TEST_BUFFER_SIZE 0x2000
16245
16246 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16247 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16248 { },
16249 };
16250
16251 static int tg3_test_dma(struct tg3 *tp)
16252 {
16253 dma_addr_t buf_dma;
16254 u32 *buf, saved_dma_rwctrl;
16255 int ret = 0;
16256
16257 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16258 &buf_dma, GFP_KERNEL);
16259 if (!buf) {
16260 ret = -ENOMEM;
16261 goto out_nofree;
16262 }
16263
16264 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16265 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16266
16267 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16268
16269 if (tg3_flag(tp, 57765_PLUS))
16270 goto out;
16271
16272 if (tg3_flag(tp, PCI_EXPRESS)) {
16273 /* DMA read watermark not used on PCIE */
16274 tp->dma_rwctrl |= 0x00180000;
16275 } else if (!tg3_flag(tp, PCIX_MODE)) {
16276 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16277 tg3_asic_rev(tp) == ASIC_REV_5750)
16278 tp->dma_rwctrl |= 0x003f0000;
16279 else
16280 tp->dma_rwctrl |= 0x003f000f;
16281 } else {
16282 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16283 tg3_asic_rev(tp) == ASIC_REV_5704) {
16284 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16285 u32 read_water = 0x7;
16286
16287 /* If the 5704 is behind the EPB bridge, we can
16288 * do the less restrictive ONE_DMA workaround for
16289 * better performance.
16290 */
16291 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16292 tg3_asic_rev(tp) == ASIC_REV_5704)
16293 tp->dma_rwctrl |= 0x8000;
16294 else if (ccval == 0x6 || ccval == 0x7)
16295 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16296
16297 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16298 read_water = 4;
16299 /* Set bit 23 to enable PCIX hw bug fix */
16300 tp->dma_rwctrl |=
16301 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16302 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16303 (1 << 23);
16304 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16305 /* 5780 always in PCIX mode */
16306 tp->dma_rwctrl |= 0x00144000;
16307 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16308 /* 5714 always in PCIX mode */
16309 tp->dma_rwctrl |= 0x00148000;
16310 } else {
16311 tp->dma_rwctrl |= 0x001b000f;
16312 }
16313 }
16314 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16315 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16316
16317 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16318 tg3_asic_rev(tp) == ASIC_REV_5704)
16319 tp->dma_rwctrl &= 0xfffffff0;
16320
16321 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16322 tg3_asic_rev(tp) == ASIC_REV_5701) {
16323 /* Remove this if it causes problems for some boards. */
16324 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16325
16326 /* On 5700/5701 chips, we need to set this bit.
16327 * Otherwise the chip will issue cacheline transactions
16328 * to streamable DMA memory with not all the byte
16329 * enables turned on. This is an error on several
16330 * RISC PCI controllers, in particular sparc64.
16331 *
16332 * On 5703/5704 chips, this bit has been reassigned
16333 * a different meaning. In particular, it is used
16334 * on those chips to enable a PCI-X workaround.
16335 */
16336 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16337 }
16338
16339 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16340
16341 #if 0
16342 /* Unneeded, already done by tg3_get_invariants. */
16343 tg3_switch_clocks(tp);
16344 #endif
16345
16346 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16347 tg3_asic_rev(tp) != ASIC_REV_5701)
16348 goto out;
16349
16350 /* It is best to perform the DMA test with the maximum write
16351 * burst size to expose the 5700/5701 write DMA bug.
16352 */
16353 saved_dma_rwctrl = tp->dma_rwctrl;
16354 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16355 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16356
16357 while (1) {
16358 u32 *p = buf, i;
16359
16360 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16361 p[i] = i;
16362
16363 /* Send the buffer to the chip. */
16364 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16365 if (ret) {
16366 dev_err(&tp->pdev->dev,
16367 "%s: Buffer write failed. err = %d\n",
16368 __func__, ret);
16369 break;
16370 }
16371
16372 #if 0
16373 /* validate data reached card RAM correctly. */
16374 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16375 u32 val;
16376 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16377 if (le32_to_cpu(val) != p[i]) {
16378 dev_err(&tp->pdev->dev,
16379 "%s: Buffer corrupted on device! "
16380 "(%d != %d)\n", __func__, val, i);
16381 /* ret = -ENODEV here? */
16382 }
16383 p[i] = 0;
16384 }
16385 #endif
16386 /* Now read it back. */
16387 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16388 if (ret) {
16389 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16390 "err = %d\n", __func__, ret);
16391 break;
16392 }
16393
16394 /* Verify it. */
16395 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16396 if (p[i] == i)
16397 continue;
16398
16399 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16400 DMA_RWCTRL_WRITE_BNDRY_16) {
16401 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16402 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16403 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16404 break;
16405 } else {
16406 dev_err(&tp->pdev->dev,
16407 "%s: Buffer corrupted on read back! "
16408 "(%d != %d)\n", __func__, p[i], i);
16409 ret = -ENODEV;
16410 goto out;
16411 }
16412 }
16413
16414 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16415 /* Success. */
16416 ret = 0;
16417 break;
16418 }
16419 }
16420 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16421 DMA_RWCTRL_WRITE_BNDRY_16) {
16422 /* DMA test passed without adjusting DMA boundary,
16423 * now look for chipsets that are known to expose the
16424 * DMA bug without failing the test.
16425 */
16426 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16427 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16428 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16429 } else {
16430 /* Safe to use the calculated DMA boundary. */
16431 tp->dma_rwctrl = saved_dma_rwctrl;
16432 }
16433
16434 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16435 }
16436
16437 out:
16438 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16439 out_nofree:
16440 return ret;
16441 }
16442
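/* Editor's illustrative sketch, not part of the driver: the
 * fill-and-verify pattern tg3_test_dma() uses above. Every 32-bit
 * word carries its own index, so a dropped or reordered write by the
 * 5700/5701 DMA engine shows up as p[i] != i on read back. The
 * helper name is hypothetical.
 */
static int example_verify_pattern(const u32 *p, u32 words)
{
	u32 i;

	for (i = 0; i < words; i++)
		if (p[i] != i)
			return -EIO;	/* corruption at word i */
	return 0;
}
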
16443 static void tg3_init_bufmgr_config(struct tg3 *tp)
16444 {
16445 if (tg3_flag(tp, 57765_PLUS)) {
16446 tp->bufmgr_config.mbuf_read_dma_low_water =
16447 DEFAULT_MB_RDMA_LOW_WATER_5705;
16448 tp->bufmgr_config.mbuf_mac_rx_low_water =
16449 DEFAULT_MB_MACRX_LOW_WATER_57765;
16450 tp->bufmgr_config.mbuf_high_water =
16451 DEFAULT_MB_HIGH_WATER_57765;
16452
16453 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16454 DEFAULT_MB_RDMA_LOW_WATER_5705;
16455 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16456 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16457 tp->bufmgr_config.mbuf_high_water_jumbo =
16458 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16459 } else if (tg3_flag(tp, 5705_PLUS)) {
16460 tp->bufmgr_config.mbuf_read_dma_low_water =
16461 DEFAULT_MB_RDMA_LOW_WATER_5705;
16462 tp->bufmgr_config.mbuf_mac_rx_low_water =
16463 DEFAULT_MB_MACRX_LOW_WATER_5705;
16464 tp->bufmgr_config.mbuf_high_water =
16465 DEFAULT_MB_HIGH_WATER_5705;
16466 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16467 tp->bufmgr_config.mbuf_mac_rx_low_water =
16468 DEFAULT_MB_MACRX_LOW_WATER_5906;
16469 tp->bufmgr_config.mbuf_high_water =
16470 DEFAULT_MB_HIGH_WATER_5906;
16471 }
16472
16473 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16474 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16475 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16476 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16477 tp->bufmgr_config.mbuf_high_water_jumbo =
16478 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16479 } else {
16480 tp->bufmgr_config.mbuf_read_dma_low_water =
16481 DEFAULT_MB_RDMA_LOW_WATER;
16482 tp->bufmgr_config.mbuf_mac_rx_low_water =
16483 DEFAULT_MB_MACRX_LOW_WATER;
16484 tp->bufmgr_config.mbuf_high_water =
16485 DEFAULT_MB_HIGH_WATER;
16486
16487 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16488 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16489 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16490 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16491 tp->bufmgr_config.mbuf_high_water_jumbo =
16492 DEFAULT_MB_HIGH_WATER_JUMBO;
16493 }
16494
16495 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16496 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16497 }
16498
16499 static char *tg3_phy_string(struct tg3 *tp)
16500 {
16501 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16502 case TG3_PHY_ID_BCM5400: return "5400";
16503 case TG3_PHY_ID_BCM5401: return "5401";
16504 case TG3_PHY_ID_BCM5411: return "5411";
16505 case TG3_PHY_ID_BCM5701: return "5701";
16506 case TG3_PHY_ID_BCM5703: return "5703";
16507 case TG3_PHY_ID_BCM5704: return "5704";
16508 case TG3_PHY_ID_BCM5705: return "5705";
16509 case TG3_PHY_ID_BCM5750: return "5750";
16510 case TG3_PHY_ID_BCM5752: return "5752";
16511 case TG3_PHY_ID_BCM5714: return "5714";
16512 case TG3_PHY_ID_BCM5780: return "5780";
16513 case TG3_PHY_ID_BCM5755: return "5755";
16514 case TG3_PHY_ID_BCM5787: return "5787";
16515 case TG3_PHY_ID_BCM5784: return "5784";
16516 case TG3_PHY_ID_BCM5756: return "5722/5756";
16517 case TG3_PHY_ID_BCM5906: return "5906";
16518 case TG3_PHY_ID_BCM5761: return "5761";
16519 case TG3_PHY_ID_BCM5718C: return "5718C";
16520 case TG3_PHY_ID_BCM5718S: return "5718S";
16521 case TG3_PHY_ID_BCM57765: return "57765";
16522 case TG3_PHY_ID_BCM5719C: return "5719C";
16523 case TG3_PHY_ID_BCM5720C: return "5720C";
16524 case TG3_PHY_ID_BCM5762: return "5762C";
16525 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16526 case 0: return "serdes";
16527 default: return "unknown";
16528 }
16529 }
16530
16531 static char *tg3_bus_string(struct tg3 *tp, char *str)
16532 {
16533 if (tg3_flag(tp, PCI_EXPRESS)) {
16534 strcpy(str, "PCI Express");
16535 return str;
16536 } else if (tg3_flag(tp, PCIX_MODE)) {
16537 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16538
16539 strcpy(str, "PCIX:");
16540
16541 if ((clock_ctrl == 7) ||
16542 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16543 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16544 strcat(str, "133MHz");
16545 else if (clock_ctrl == 0)
16546 strcat(str, "33MHz");
16547 else if (clock_ctrl == 2)
16548 strcat(str, "50MHz");
16549 else if (clock_ctrl == 4)
16550 strcat(str, "66MHz");
16551 else if (clock_ctrl == 6)
16552 strcat(str, "100MHz");
16553 } else {
16554 strcpy(str, "PCI:");
16555 if (tg3_flag(tp, PCI_HIGH_SPEED))
16556 strcat(str, "66MHz");
16557 else
16558 strcat(str, "33MHz");
16559 }
16560 if (tg3_flag(tp, PCI_32BIT))
16561 strcat(str, ":32-bit");
16562 else
16563 strcat(str, ":64-bit");
16564 return str;
16565 }
16566
16567 static void tg3_init_coal(struct tg3 *tp)
16568 {
16569 struct ethtool_coalesce *ec = &tp->coal;
16570
16571 memset(ec, 0, sizeof(*ec));
16572 ec->cmd = ETHTOOL_GCOALESCE;
16573 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16574 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16575 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16576 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16577 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16578 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16579 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16580 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16581 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16582
16583 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16584 HOSTCC_MODE_CLRTICK_TXBD)) {
16585 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16586 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16587 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16588 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16589 }
16590
16591 if (tg3_flag(tp, 5705_PLUS)) {
16592 ec->rx_coalesce_usecs_irq = 0;
16593 ec->tx_coalesce_usecs_irq = 0;
16594 ec->stats_block_coalesce_usecs = 0;
16595 }
16596 }
16597
16598 static int tg3_init_one(struct pci_dev *pdev,
16599 const struct pci_device_id *ent)
16600 {
16601 struct net_device *dev;
16602 struct tg3 *tp;
16603 int i, err, pm_cap;
16604 u32 sndmbx, rcvmbx, intmbx;
16605 char str[40];
16606 u64 dma_mask, persist_dma_mask;
16607 netdev_features_t features = 0;
16608
16609 printk_once(KERN_INFO "%s\n", version);
16610
16611 err = pci_enable_device(pdev);
16612 if (err) {
16613 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16614 return err;
16615 }
16616
16617 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16618 if (err) {
16619 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16620 goto err_out_disable_pdev;
16621 }
16622
16623 pci_set_master(pdev);
16624
16625 /* Find power-management capability. */
16626 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16627 if (pm_cap == 0) {
16628 dev_err(&pdev->dev,
16629 "Cannot find Power Management capability, aborting\n");
16630 err = -EIO;
16631 goto err_out_free_res;
16632 }
16633
16634 err = pci_set_power_state(pdev, PCI_D0);
16635 if (err) {
16636 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16637 goto err_out_free_res;
16638 }
16639
16640 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16641 if (!dev) {
16642 err = -ENOMEM;
16643 goto err_out_power_down;
16644 }
16645
16646 SET_NETDEV_DEV(dev, &pdev->dev);
16647
16648 tp = netdev_priv(dev);
16649 tp->pdev = pdev;
16650 tp->dev = dev;
16651 tp->pm_cap = pm_cap;
16652 tp->rx_mode = TG3_DEF_RX_MODE;
16653 tp->tx_mode = TG3_DEF_TX_MODE;
16654 tp->irq_sync = 1;
16655
16656 if (tg3_debug > 0)
16657 tp->msg_enable = tg3_debug;
16658 else
16659 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16660
16661 if (pdev_is_ssb_gige_core(pdev)) {
16662 tg3_flag_set(tp, IS_SSB_CORE);
16663 if (ssb_gige_must_flush_posted_writes(pdev))
16664 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16665 if (ssb_gige_one_dma_at_once(pdev))
16666 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16667 if (ssb_gige_have_roboswitch(pdev))
16668 tg3_flag_set(tp, ROBOSWITCH);
16669 if (ssb_gige_is_rgmii(pdev))
16670 tg3_flag_set(tp, RGMII_MODE);
16671 }
16672
16673 /* The word/byte swap controls here control register access byte
16674 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16675 * setting below.
16676 */
16677 tp->misc_host_ctrl =
16678 MISC_HOST_CTRL_MASK_PCI_INT |
16679 MISC_HOST_CTRL_WORD_SWAP |
16680 MISC_HOST_CTRL_INDIR_ACCESS |
16681 MISC_HOST_CTRL_PCISTATE_RW;
16682
16683 /* The NONFRM (non-frame) byte/word swap controls take effect
16684 * on descriptor entries, anything which isn't packet data.
16685 *
16686 * The StrongARM chips on the board (one for tx, one for rx)
16687 * are running in big-endian mode.
16688 */
16689 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16690 GRC_MODE_WSWAP_NONFRM_DATA);
16691 #ifdef __BIG_ENDIAN
16692 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16693 #endif
16694 spin_lock_init(&tp->lock);
16695 spin_lock_init(&tp->indirect_lock);
16696 INIT_WORK(&tp->reset_task, tg3_reset_task);
16697
16698 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16699 if (!tp->regs) {
16700 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16701 err = -ENOMEM;
16702 goto err_out_free_dev;
16703 }
16704
16705 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16706 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16707 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16708 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16711 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16712 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16713 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16714 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16715 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16716 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16717 tg3_flag_set(tp, ENABLE_APE);
16718 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16719 if (!tp->aperegs) {
16720 dev_err(&pdev->dev,
16721 "Cannot map APE registers, aborting\n");
16722 err = -ENOMEM;
16723 goto err_out_iounmap;
16724 }
16725 }
16726
16727 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16728 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16729
16730 dev->ethtool_ops = &tg3_ethtool_ops;
16731 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16732 dev->netdev_ops = &tg3_netdev_ops;
16733 dev->irq = pdev->irq;
16734
16735 err = tg3_get_invariants(tp, ent);
16736 if (err) {
16737 dev_err(&pdev->dev,
16738 "Problem fetching invariants of chip, aborting\n");
16739 goto err_out_apeunmap;
16740 }
16741
16742 /* The EPB bridge inside 5714, 5715, and 5780 and any
16743 * device behind the EPB cannot support DMA addresses > 40-bit.
16744 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16745 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16746 * do the DMA address check in tg3_start_xmit().
16747 */
16748 if (tg3_flag(tp, IS_5788))
16749 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16750 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16751 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16752 #ifdef CONFIG_HIGHMEM
16753 dma_mask = DMA_BIT_MASK(64);
16754 #endif
16755 } else
16756 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16757
16758 /* Configure DMA attributes. */
16759 if (dma_mask > DMA_BIT_MASK(32)) {
16760 err = pci_set_dma_mask(pdev, dma_mask);
16761 if (!err) {
16762 features |= NETIF_F_HIGHDMA;
16763 err = pci_set_consistent_dma_mask(pdev,
16764 persist_dma_mask);
16765 if (err < 0) {
16766 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16767 "DMA for consistent allocations\n");
16768 goto err_out_apeunmap;
16769 }
16770 }
16771 }
16772 if (err || dma_mask == DMA_BIT_MASK(32)) {
16773 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16774 if (err) {
16775 dev_err(&pdev->dev,
16776 "No usable DMA configuration, aborting\n");
16777 goto err_out_apeunmap;
16778 }
16779 }
16780
16781 tg3_init_bufmgr_config(tp);
16782
16783 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16784
16785 /* 5700 B0 chips do not support checksumming correctly due
16786 * to hardware bugs.
16787 */
16788 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16789 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16790
16791 if (tg3_flag(tp, 5755_PLUS))
16792 features |= NETIF_F_IPV6_CSUM;
16793 }
16794
16795 /* TSO is on by default on chips that support hardware TSO.
16796 * Firmware TSO on older chips gives lower performance, so it
16797 * is off by default, but can be enabled using ethtool.
16798 */
16799 if ((tg3_flag(tp, HW_TSO_1) ||
16800 tg3_flag(tp, HW_TSO_2) ||
16801 tg3_flag(tp, HW_TSO_3)) &&
16802 (features & NETIF_F_IP_CSUM))
16803 features |= NETIF_F_TSO;
16804 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16805 if (features & NETIF_F_IPV6_CSUM)
16806 features |= NETIF_F_TSO6;
16807 if (tg3_flag(tp, HW_TSO_3) ||
16808 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16809 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16810 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16811 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16812 tg3_asic_rev(tp) == ASIC_REV_57780)
16813 features |= NETIF_F_TSO_ECN;
16814 }
16815
16816 dev->features |= features;
16817 dev->vlan_features |= features;
16818
16819 /*
16820 * Add loopback capability only for a subset of devices that support
16821 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16822 * loopback for the remaining devices.
16823 */
16824 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16825 !tg3_flag(tp, CPMU_PRESENT))
16826 /* Add the loopback capability */
16827 features |= NETIF_F_LOOPBACK;
16828
16829 dev->hw_features |= features;
16830
16831 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16832 !tg3_flag(tp, TSO_CAPABLE) &&
16833 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16834 tg3_flag_set(tp, MAX_RXPEND_64);
16835 tp->rx_pending = 63;
16836 }
16837
16838 err = tg3_get_device_address(tp);
16839 if (err) {
16840 dev_err(&pdev->dev,
16841 "Could not obtain valid ethernet address, aborting\n");
16842 goto err_out_apeunmap;
16843 }
16844
16845 /*
16846 * Reset the chip in case a UNDI or EFI driver did not shut it down;
16847 * the DMA self test will enable WDMAC and we would then see (spurious)
16848 * pending DMA on the PCI bus at that point.
16849 */
16850 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16851 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16852 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16853 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16854 }
16855
16856 err = tg3_test_dma(tp);
16857 if (err) {
16858 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16859 goto err_out_apeunmap;
16860 }
16861
16862 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16863 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16864 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16865 for (i = 0; i < tp->irq_max; i++) {
16866 struct tg3_napi *tnapi = &tp->napi[i];
16867
16868 tnapi->tp = tp;
16869 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16870
16871 tnapi->int_mbox = intmbx;
16872 if (i <= 4)
16873 intmbx += 0x8;
16874 else
16875 intmbx += 0x4;
16876
16877 tnapi->consmbox = rcvmbx;
16878 tnapi->prodmbox = sndmbx;
16879
16880 if (i)
16881 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16882 else
16883 tnapi->coal_now = HOSTCC_MODE_NOW;
16884
16885 if (!tg3_flag(tp, SUPPORT_MSIX))
16886 break;
16887
16888 /*
16889 * If we support MSIX, we'll be using RSS. If we're using
16890 * RSS, the first vector only handles link interrupts and the
16891 * remaining vectors handle rx and tx interrupts. Reuse the
16892 * mailbox values for the next iteration. The values we set up
16893 * above are still useful for the single vectored mode.
16894 */
16895 if (!i)
16896 continue;
16897
16898 rcvmbx += 0x8;
16899
16900 if (sndmbx & 0x4)
16901 sndmbx -= 0x4;
16902 else
16903 sndmbx += 0xc;
16904 }
16905
16906 tg3_init_coal(tp);
16907
16908 pci_set_drvdata(pdev, dev);
16909
16910 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16911 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16912 tg3_asic_rev(tp) == ASIC_REV_5762)
16913 tg3_flag_set(tp, PTP_CAPABLE);
16914
16915 if (tg3_flag(tp, 5717_PLUS)) {
16916 /* Resume from a low-power mode */
16917 tg3_frob_aux_power(tp, false);
16918 }
16919
16920 tg3_timer_init(tp);
16921
16922 tg3_carrier_off(tp);
16923
16924 err = register_netdev(dev);
16925 if (err) {
16926 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16927 goto err_out_apeunmap;
16928 }
16929
16930 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16931 tp->board_part_number,
16932 tg3_chip_rev_id(tp),
16933 tg3_bus_string(tp, str),
16934 dev->dev_addr);
16935
16936 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16937 struct phy_device *phydev;
16938 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16939 netdev_info(dev,
16940 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16941 phydev->drv->name, dev_name(&phydev->dev));
16942 } else {
16943 char *ethtype;
16944
16945 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16946 ethtype = "10/100Base-TX";
16947 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16948 ethtype = "1000Base-SX";
16949 else
16950 ethtype = "10/100/1000Base-T";
16951
16952 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16953 "(WireSpeed[%d], EEE[%d])\n",
16954 tg3_phy_string(tp), ethtype,
16955 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16956 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16957 }
16958
16959 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16960 (dev->features & NETIF_F_RXCSUM) != 0,
16961 tg3_flag(tp, USE_LINKCHG_REG) != 0,
16962 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16963 tg3_flag(tp, ENABLE_ASF) != 0,
16964 tg3_flag(tp, TSO_CAPABLE) != 0);
16965 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16966 tp->dma_rwctrl,
16967 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16968 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16969
16970 pci_save_state(pdev);
16971
16972 return 0;
16973
16974 err_out_apeunmap:
16975 if (tp->aperegs) {
16976 iounmap(tp->aperegs);
16977 tp->aperegs = NULL;
16978 }
16979
16980 err_out_iounmap:
16981 if (tp->regs) {
16982 iounmap(tp->regs);
16983 tp->regs = NULL;
16984 }
16985
16986 err_out_free_dev:
16987 free_netdev(dev);
16988
16989 err_out_power_down:
16990 pci_set_power_state(pdev, PCI_D3hot);
16991
16992 err_out_free_res:
16993 pci_release_regions(pdev);
16994
16995 err_out_disable_pdev:
16996 pci_disable_device(pdev);
16997 pci_set_drvdata(pdev, NULL);
16998 return err;
16999 }
17000
17001 static void tg3_remove_one(struct pci_dev *pdev)
17002 {
17003 struct net_device *dev = pci_get_drvdata(pdev);
17004
17005 if (dev) {
17006 struct tg3 *tp = netdev_priv(dev);
17007
17008 release_firmware(tp->fw);
17009
17010 tg3_reset_task_cancel(tp);
17011
17012 if (tg3_flag(tp, USE_PHYLIB)) {
17013 tg3_phy_fini(tp);
17014 tg3_mdio_fini(tp);
17015 }
17016
17017 unregister_netdev(dev);
17018 if (tp->aperegs) {
17019 iounmap(tp->aperegs);
17020 tp->aperegs = NULL;
17021 }
17022 if (tp->regs) {
17023 iounmap(tp->regs);
17024 tp->regs = NULL;
17025 }
17026 free_netdev(dev);
17027 pci_release_regions(pdev);
17028 pci_disable_device(pdev);
17029 pci_set_drvdata(pdev, NULL);
17030 }
17031 }
17032
17033 #ifdef CONFIG_PM_SLEEP
17034 static int tg3_suspend(struct device *device)
17035 {
17036 struct pci_dev *pdev = to_pci_dev(device);
17037 struct net_device *dev = pci_get_drvdata(pdev);
17038 struct tg3 *tp = netdev_priv(dev);
17039 int err;
17040
17041 if (!netif_running(dev))
17042 return 0;
17043
17044 tg3_reset_task_cancel(tp);
17045 tg3_phy_stop(tp);
17046 tg3_netif_stop(tp);
17047
17048 tg3_timer_stop(tp);
17049
17050 tg3_full_lock(tp, 1);
17051 tg3_disable_ints(tp);
17052 tg3_full_unlock(tp);
17053
17054 netif_device_detach(dev);
17055
17056 tg3_full_lock(tp, 0);
17057 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17058 tg3_flag_clear(tp, INIT_COMPLETE);
17059 tg3_full_unlock(tp);
17060
17061 err = tg3_power_down_prepare(tp);
17062 if (err) {
17063 int err2;
17064
17065 tg3_full_lock(tp, 0);
17066
17067 tg3_flag_set(tp, INIT_COMPLETE);
17068 err2 = tg3_restart_hw(tp, 1);
17069 if (err2)
17070 goto out;
17071
17072 tg3_timer_start(tp);
17073
17074 netif_device_attach(dev);
17075 tg3_netif_start(tp);
17076
17077 out:
17078 tg3_full_unlock(tp);
17079
17080 if (!err2)
17081 tg3_phy_start(tp);
17082 }
17083
17084 return err;
17085 }
17086
17087 static int tg3_resume(struct device *device)
17088 {
17089 struct pci_dev *pdev = to_pci_dev(device);
17090 struct net_device *dev = pci_get_drvdata(pdev);
17091 struct tg3 *tp = netdev_priv(dev);
17092 int err;
17093
17094 if (!netif_running(dev))
17095 return 0;
17096
17097 netif_device_attach(dev);
17098
17099 tg3_full_lock(tp, 0);
17100
17101 tg3_flag_set(tp, INIT_COMPLETE);
17102 err = tg3_restart_hw(tp, 1);
17103 if (err)
17104 goto out;
17105
17106 tg3_timer_start(tp);
17107
17108 tg3_netif_start(tp);
17109
17110 out:
17111 tg3_full_unlock(tp);
17112
17113 if (!err)
17114 tg3_phy_start(tp);
17115
17116 return err;
17117 }
17118
17119 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17120 #define TG3_PM_OPS (&tg3_pm_ops)
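
/* Editor's note, an approximate expansion for illustration only:
 * SIMPLE_DEV_PM_OPS() wires the same callback pair to every system
 * sleep transition, roughly equivalent to:
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		.suspend	= tg3_suspend,
 *		.resume		= tg3_resume,
 *		.freeze		= tg3_suspend,
 *		.thaw		= tg3_resume,
 *		.poweroff	= tg3_suspend,
 *		.restore	= tg3_resume,
 *	};
 */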
17121
17122 #else
17123
17124 #define TG3_PM_OPS NULL
17125
17126 #endif /* CONFIG_PM_SLEEP */
17127
17128 /**
17129 * tg3_io_error_detected - called when PCI error is detected
17130 * @pdev: Pointer to PCI device
17131 * @state: The current pci connection state
17132 *
17133 * This function is called after a PCI bus error affecting
17134 * this device has been detected.
17135 */
17136 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17137 pci_channel_state_t state)
17138 {
17139 struct net_device *netdev = pci_get_drvdata(pdev);
17140 struct tg3 *tp = netdev_priv(netdev);
17141 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17142
17143 netdev_info(netdev, "PCI I/O error detected\n");
17144
17145 rtnl_lock();
17146
17147 if (!netif_running(netdev))
17148 goto done;
17149
17150 tg3_phy_stop(tp);
17151
17152 tg3_netif_stop(tp);
17153
17154 tg3_timer_stop(tp);
17155
17156 /* Want to make sure that the reset task doesn't run */
17157 tg3_reset_task_cancel(tp);
17158
17159 netif_device_detach(netdev);
17160
17161 /* Clean up software state, even if MMIO is blocked */
17162 tg3_full_lock(tp, 0);
17163 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17164 tg3_full_unlock(tp);
17165
17166 done:
17167 if (state == pci_channel_io_perm_failure)
17168 err = PCI_ERS_RESULT_DISCONNECT;
17169 else
17170 pci_disable_device(pdev);
17171
17172 rtnl_unlock();
17173
17174 return err;
17175 }
17176
17177 /**
17178 * tg3_io_slot_reset - called after the pci bus has been reset.
17179 * @pdev: Pointer to PCI device
17180 *
17181 * Restart the card from scratch, as if from a cold-boot.
17182 * At this point, the card has experienced a hard reset,
17183 * followed by fixups by BIOS, and has its config space
17184 * set up identically to what it was at cold boot.
17185 */
17186 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17187 {
17188 struct net_device *netdev = pci_get_drvdata(pdev);
17189 struct tg3 *tp = netdev_priv(netdev);
17190 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17191 int err;
17192
17193 rtnl_lock();
17194
17195 if (pci_enable_device(pdev)) {
17196 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17197 goto done;
17198 }
17199
17200 pci_set_master(pdev);
17201 pci_restore_state(pdev);
17202 pci_save_state(pdev);
17203
17204 if (!netif_running(netdev)) {
17205 rc = PCI_ERS_RESULT_RECOVERED;
17206 goto done;
17207 }
17208
17209 err = tg3_power_up(tp);
17210 if (err)
17211 goto done;
17212
17213 rc = PCI_ERS_RESULT_RECOVERED;
17214
17215 done:
17216 rtnl_unlock();
17217
17218 return rc;
17219 }
17220
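/* Editor's illustrative sketch, not part of the driver: the
 * config-space sequence tg3_io_slot_reset() performs above. After a
 * slot reset the device comes back with default config space, so the
 * copy captured by pci_save_state() at probe time is replayed, then
 * saved again so a fresh copy is available for any later recovery
 * pass. The helper name is hypothetical.
 */
static void example_post_reset_config(struct pci_dev *pdev)
{
	pci_set_master(pdev);		/* bus mastering is cleared by reset */
	pci_restore_state(pdev);	/* replay config space saved at probe */
	pci_save_state(pdev);		/* keep a fresh copy for next time */
}
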
17221 /**
17222 * tg3_io_resume - called when traffic can start flowing again.
17223 * @pdev: Pointer to PCI device
17224 *
17225 * This callback is called when the error recovery driver tells
17226 * us that it's OK to resume normal operation.
17227 */
17228 static void tg3_io_resume(struct pci_dev *pdev)
17229 {
17230 struct net_device *netdev = pci_get_drvdata(pdev);
17231 struct tg3 *tp = netdev_priv(netdev);
17232 int err;
17233
17234 rtnl_lock();
17235
17236 if (!netif_running(netdev))
17237 goto done;
17238
17239 tg3_full_lock(tp, 0);
17240 tg3_flag_set(tp, INIT_COMPLETE);
17241 err = tg3_restart_hw(tp, 1);
17242 if (err) {
17243 tg3_full_unlock(tp);
17244 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17245 goto done;
17246 }
17247
17248 netif_device_attach(netdev);
17249
17250 tg3_timer_start(tp);
17251
17252 tg3_netif_start(tp);
17253
17254 tg3_full_unlock(tp);
17255
17256 tg3_phy_start(tp);
17257
17258 done:
17259 rtnl_unlock();
17260 }
17261
17262 static const struct pci_error_handlers tg3_err_handler = {
17263 .error_detected = tg3_io_error_detected,
17264 .slot_reset = tg3_io_slot_reset,
17265 .resume = tg3_io_resume
17266 };
17267
17268 static struct pci_driver tg3_driver = {
17269 .name = DRV_MODULE_NAME,
17270 .id_table = tg3_pci_tbl,
17271 .probe = tg3_init_one,
17272 .remove = tg3_remove_one,
17273 .err_handler = &tg3_err_handler,
17274 .driver.pm = TG3_PM_OPS,
17275 };
17276
17277 static int __init tg3_init(void)
17278 {
17279 return pci_register_driver(&tg3_driver);
17280 }
17281
17282 static void __exit tg3_cleanup(void)
17283 {
17284 pci_unregister_driver(&tg3_driver);
17285 }
17286
17287 module_init(tg3_init);
17288 module_exit(tg3_cleanup);