tg3: Add new FW_TSO flag
drivers/net/ethernet/broadcom/tg3.c
1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
8 *
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
16 */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0 0
67 #define BAR_2 2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 return test_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 set_bit(flag, bits);
81 }
82
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 clear_bit(flag, bits);
86 }
87
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
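
These wrappers give call sites a compact, type-checked way to test and
update bits in the tp->tg3_flags bitmap; the token pasting adds the
TG3_FLAG_ prefix automatically. A minimal usage sketch (assumes a valid
struct tg3 *tp; JUMBO_CAPABLE and JUMBO_RING_ENABLE are existing flags
from tg3.h, used here purely for illustration):

static void example_flag_usage(struct tg3 *tp)
{
	/* Illustration only: mirror a capability bit into an enable bit. */
	if (tg3_flag(tp, JUMBO_CAPABLE))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);
	else
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
}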
94
95 #define DRV_MODULE_NAME "tg3"
96 #define TG3_MAJ_NUM 3
97 #define TG3_MIN_NUM 130
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "February 14, 2013"
101
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
105
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
109 (NETIF_MSG_DRV | \
110 NETIF_MSG_PROBE | \
111 NETIF_MSG_LINK | \
112 NETIF_MSG_TIMER | \
113 NETIF_MSG_IFDOWN | \
114 NETIF_MSG_IFUP | \
115 NETIF_MSG_RX_ERR | \
116 NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
119
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
122 */
123
124 #define TG3_TX_TIMEOUT (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
134 */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
143
144 /* Do not place this n-ring entries value into the tp struct itself;
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
149 */
150
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
161 TG3_TX_RING_SIZE)
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
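
NEXT_TX is the payoff of the comment above: because TG3_TX_RING_SIZE is a
compile-time power of two, '(N + 1) & (TG3_TX_RING_SIZE - 1)' computes
'(N + 1) % TG3_TX_RING_SIZE' without a divide. A small standalone check
of that equivalence (user-space C, illustration only):

#include <assert.h>

#define RING_SIZE 512	/* power of two, like TG3_TX_RING_SIZE */

static unsigned int next_mod(unsigned int n)  { return (n + 1) % RING_SIZE; }
static unsigned int next_mask(unsigned int n) { return (n + 1) & (RING_SIZE - 1); }

int main(void)
{
	unsigned int n;

	/* The two forms agree everywhere, including the wrap at 511. */
	for (n = 0; n < 4 * RING_SIZE; n++)
		assert(next_mod(n) == next_mask(n));
	return 0;
}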
163
164 #define TG3_DMA_BYTE_ENAB 64
165
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
184 *
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
190 */
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
194 #else
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
196 #endif
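
On the rx path, frames at or below this threshold are copied into a small
fresh buffer (which also sidesteps the 5701 alignment bug described
above), while larger frames hand their DMA buffer up the stack. A hedged
sketch of the decision; tg3_rx_should_copy() is a hypothetical helper,
not the driver's actual rx code:

/* Hypothetical illustration of how the threshold is consulted. */
static bool tg3_rx_should_copy(struct tg3 *tp, unsigned int pkt_len)
{
	return pkt_len <= TG3_RX_COPY_THRESH(tp);
}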
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
216 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
217
218 static char version[] =
219 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
220
221 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
222 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
223 MODULE_LICENSE("GPL");
224 MODULE_VERSION(DRV_MODULE_VERSION);
225 MODULE_FIRMWARE(FIRMWARE_TG3);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
228
229 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
230 module_param(tg3_debug, int, 0);
231 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
232
233 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
234 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
235
236 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
256 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
257 TG3_DRV_DATA_FLAG_5705_10_100},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
259 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 TG3_DRV_DATA_FLAG_5705_10_100},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
263 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 TG3_DRV_DATA_FLAG_5705_10_100},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
271 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
277 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
285 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
286 PCI_VENDOR_ID_LENOVO,
287 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
288 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
291 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
310 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
311 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
312 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
313 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
315 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
319 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
329 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
331 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
339 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
345 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
346 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
347 {}
348 };
349
350 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
351
352 static const struct {
353 const char string[ETH_GSTRING_LEN];
354 } ethtool_stats_keys[] = {
355 { "rx_octets" },
356 { "rx_fragments" },
357 { "rx_ucast_packets" },
358 { "rx_mcast_packets" },
359 { "rx_bcast_packets" },
360 { "rx_fcs_errors" },
361 { "rx_align_errors" },
362 { "rx_xon_pause_rcvd" },
363 { "rx_xoff_pause_rcvd" },
364 { "rx_mac_ctrl_rcvd" },
365 { "rx_xoff_entered" },
366 { "rx_frame_too_long_errors" },
367 { "rx_jabbers" },
368 { "rx_undersize_packets" },
369 { "rx_in_length_errors" },
370 { "rx_out_length_errors" },
371 { "rx_64_or_less_octet_packets" },
372 { "rx_65_to_127_octet_packets" },
373 { "rx_128_to_255_octet_packets" },
374 { "rx_256_to_511_octet_packets" },
375 { "rx_512_to_1023_octet_packets" },
376 { "rx_1024_to_1522_octet_packets" },
377 { "rx_1523_to_2047_octet_packets" },
378 { "rx_2048_to_4095_octet_packets" },
379 { "rx_4096_to_8191_octet_packets" },
380 { "rx_8192_to_9022_octet_packets" },
381
382 { "tx_octets" },
383 { "tx_collisions" },
384
385 { "tx_xon_sent" },
386 { "tx_xoff_sent" },
387 { "tx_flow_control" },
388 { "tx_mac_errors" },
389 { "tx_single_collisions" },
390 { "tx_mult_collisions" },
391 { "tx_deferred" },
392 { "tx_excessive_collisions" },
393 { "tx_late_collisions" },
394 { "tx_collide_2times" },
395 { "tx_collide_3times" },
396 { "tx_collide_4times" },
397 { "tx_collide_5times" },
398 { "tx_collide_6times" },
399 { "tx_collide_7times" },
400 { "tx_collide_8times" },
401 { "tx_collide_9times" },
402 { "tx_collide_10times" },
403 { "tx_collide_11times" },
404 { "tx_collide_12times" },
405 { "tx_collide_13times" },
406 { "tx_collide_14times" },
407 { "tx_collide_15times" },
408 { "tx_ucast_packets" },
409 { "tx_mcast_packets" },
410 { "tx_bcast_packets" },
411 { "tx_carrier_sense_errors" },
412 { "tx_discards" },
413 { "tx_errors" },
414
415 { "dma_writeq_full" },
416 { "dma_write_prioq_full" },
417 { "rxbds_empty" },
418 { "rx_discards" },
419 { "rx_errors" },
420 { "rx_threshold_hit" },
421
422 { "dma_readq_full" },
423 { "dma_read_prioq_full" },
424 { "tx_comp_queue_full" },
425
426 { "ring_set_send_prod_index" },
427 { "ring_status_update" },
428 { "nic_irqs" },
429 { "nic_avoided_irqs" },
430 { "nic_tx_threshold_hit" },
431
432 { "mbuf_lwm_thresh_hit" },
433 };
434
435 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
436 #define TG3_NVRAM_TEST 0
437 #define TG3_LINK_TEST 1
438 #define TG3_REGISTER_TEST 2
439 #define TG3_MEMORY_TEST 3
440 #define TG3_MAC_LOOPB_TEST 4
441 #define TG3_PHY_LOOPB_TEST 5
442 #define TG3_EXT_LOOPB_TEST 6
443 #define TG3_INTERRUPT_TEST 7
444
445
446 static const struct {
447 const char string[ETH_GSTRING_LEN];
448 } ethtool_test_keys[] = {
449 [TG3_NVRAM_TEST] = { "nvram test (online) " },
450 [TG3_LINK_TEST] = { "link test (online) " },
451 [TG3_REGISTER_TEST] = { "register test (offline)" },
452 [TG3_MEMORY_TEST] = { "memory test (offline)" },
453 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
454 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
455 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
456 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
457 };
458
459 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
460
461
462 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
463 {
464 writel(val, tp->regs + off);
465 }
466
467 static u32 tg3_read32(struct tg3 *tp, u32 off)
468 {
469 return readl(tp->regs + off);
470 }
471
472 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
473 {
474 writel(val, tp->aperegs + off);
475 }
476
477 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
478 {
479 return readl(tp->aperegs + off);
480 }
481
482 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
483 {
484 unsigned long flags;
485
486 spin_lock_irqsave(&tp->indirect_lock, flags);
487 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
489 spin_unlock_irqrestore(&tp->indirect_lock, flags);
490 }
491
492 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
493 {
494 writel(val, tp->regs + off);
495 readl(tp->regs + off);
496 }
497
498 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
499 {
500 unsigned long flags;
501 u32 val;
502
503 spin_lock_irqsave(&tp->indirect_lock, flags);
504 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
505 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
506 spin_unlock_irqrestore(&tp->indirect_lock, flags);
507 return val;
508 }
509
510 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
511 {
512 unsigned long flags;
513
514 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
515 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
516 TG3_64BIT_REG_LOW, val);
517 return;
518 }
519 if (off == TG3_RX_STD_PROD_IDX_REG) {
520 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
521 TG3_64BIT_REG_LOW, val);
522 return;
523 }
524
525 spin_lock_irqsave(&tp->indirect_lock, flags);
526 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
528 spin_unlock_irqrestore(&tp->indirect_lock, flags);
529
530 /* In indirect mode when disabling interrupts, we also need
531 * to clear the interrupt bit in the GRC local ctrl register.
532 */
533 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
534 (val == 0x1)) {
535 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
536 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
537 }
538 }
539
540 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
541 {
542 unsigned long flags;
543 u32 val;
544
545 spin_lock_irqsave(&tp->indirect_lock, flags);
546 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
547 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
548 spin_unlock_irqrestore(&tp->indirect_lock, flags);
549 return val;
550 }
551
552 /* usec_wait specifies the wait time in usec when writing to certain registers
553 * where it is unsafe to read back the register without some delay.
554 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
555 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
556 */
557 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
558 {
559 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
560 /* Non-posted methods */
561 tp->write32(tp, off, val);
562 else {
563 /* Posted method */
564 tg3_write32(tp, off, val);
565 if (usec_wait)
566 udelay(usec_wait);
567 tp->read32(tp, off);
568 }
569 /* Wait again after the read for the posted method to guarantee that
570 * the wait time is met.
571 */
572 if (usec_wait)
573 udelay(usec_wait);
574 }
575
576 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
577 {
578 tp->write32_mbox(tp, off, val);
579 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
580 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
581 !tg3_flag(tp, ICH_WORKAROUND)))
582 tp->read32_mbox(tp, off);
583 }
584
585 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
586 {
587 void __iomem *mbox = tp->regs + off;
588 writel(val, mbox);
589 if (tg3_flag(tp, TXD_MBOX_HWBUG))
590 writel(val, mbox);
591 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
592 tg3_flag(tp, FLUSH_POSTED_WRITES))
593 readl(mbox);
594 }
595
596 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
597 {
598 return readl(tp->regs + off + GRCMBOX_BASE);
599 }
600
601 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
602 {
603 writel(val, tp->regs + off + GRCMBOX_BASE);
604 }
605
606 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
607 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
608 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
609 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
610 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
611
612 #define tw32(reg, val) tp->write32(tp, reg, val)
613 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
614 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
615 #define tr32(reg) tp->read32(tp, reg)
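
A usage sketch of the flushing variants defined above: pulse a GPIO
through GRC_LOCAL_CTRL and let _tw32_flush() enforce the settling delay
(GRC_LCLCTRL_GPIO_OUTPUT1 is an existing tg3.h bit, chosen here only for
illustration; real call sites pick the GPIO bits per board):

static void example_gpio_pulse(struct tg3 *tp)
{
	/* Posted write, read back, then wait TG3_GRC_LCLCTL_PWRSW_DELAY
	 * usecs so the power switch behind the GPIO can settle.
	 */
	tw32_wait_f(GRC_LOCAL_CTRL,
		    tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}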
616
617 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
618 {
619 unsigned long flags;
620
621 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
622 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
623 return;
624
625 spin_lock_irqsave(&tp->indirect_lock, flags);
626 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
627 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
629
630 /* Always leave this as zero. */
631 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
632 } else {
633 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
634 tw32_f(TG3PCI_MEM_WIN_DATA, val);
635
636 /* Always leave this as zero. */
637 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
638 }
639 spin_unlock_irqrestore(&tp->indirect_lock, flags);
640 }
641
642 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
643 {
644 unsigned long flags;
645
646 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
647 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
648 *val = 0;
649 return;
650 }
651
652 spin_lock_irqsave(&tp->indirect_lock, flags);
653 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
654 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
655 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
656
657 /* Always leave this as zero. */
658 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
659 } else {
660 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
661 *val = tr32(TG3PCI_MEM_WIN_DATA);
662
663 /* Always leave this as zero. */
664 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
665 }
666 spin_unlock_irqrestore(&tp->indirect_lock, flags);
667 }
668
669 static void tg3_ape_lock_init(struct tg3 *tp)
670 {
671 int i;
672 u32 regbase, bit;
673
674 if (tg3_asic_rev(tp) == ASIC_REV_5761)
675 regbase = TG3_APE_LOCK_GRANT;
676 else
677 regbase = TG3_APE_PER_LOCK_GRANT;
678
679 /* Make sure the driver doesn't hold any stale locks. */
680 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
681 switch (i) {
682 case TG3_APE_LOCK_PHY0:
683 case TG3_APE_LOCK_PHY1:
684 case TG3_APE_LOCK_PHY2:
685 case TG3_APE_LOCK_PHY3:
686 bit = APE_LOCK_GRANT_DRIVER;
687 break;
688 default:
689 if (!tp->pci_fn)
690 bit = APE_LOCK_GRANT_DRIVER;
691 else
692 bit = 1 << tp->pci_fn;
693 }
694 tg3_ape_write32(tp, regbase + 4 * i, bit);
695 }
697 }
698
699 static int tg3_ape_lock(struct tg3 *tp, int locknum)
700 {
701 int i, off;
702 int ret = 0;
703 u32 status, req, gnt, bit;
704
705 if (!tg3_flag(tp, ENABLE_APE))
706 return 0;
707
708 switch (locknum) {
709 case TG3_APE_LOCK_GPIO:
710 if (tg3_asic_rev(tp) == ASIC_REV_5761)
711 return 0;
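/* 5761 needs no GPIO lock; otherwise fall through */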
712 case TG3_APE_LOCK_GRC:
713 case TG3_APE_LOCK_MEM:
714 if (!tp->pci_fn)
715 bit = APE_LOCK_REQ_DRIVER;
716 else
717 bit = 1 << tp->pci_fn;
718 break;
719 case TG3_APE_LOCK_PHY0:
720 case TG3_APE_LOCK_PHY1:
721 case TG3_APE_LOCK_PHY2:
722 case TG3_APE_LOCK_PHY3:
723 bit = APE_LOCK_REQ_DRIVER;
724 break;
725 default:
726 return -EINVAL;
727 }
728
729 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
730 req = TG3_APE_LOCK_REQ;
731 gnt = TG3_APE_LOCK_GRANT;
732 } else {
733 req = TG3_APE_PER_LOCK_REQ;
734 gnt = TG3_APE_PER_LOCK_GRANT;
735 }
736
737 off = 4 * locknum;
738
739 tg3_ape_write32(tp, req + off, bit);
740
741 /* Wait for up to 1 millisecond to acquire lock. */
742 for (i = 0; i < 100; i++) {
743 status = tg3_ape_read32(tp, gnt + off);
744 if (status == bit)
745 break;
746 udelay(10);
747 }
748
749 if (status != bit) {
750 /* Revoke the lock request. */
751 tg3_ape_write32(tp, gnt + off, bit);
752 ret = -EBUSY;
753 }
754
755 return ret;
756 }
757
758 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
759 {
760 u32 gnt, bit;
761
762 if (!tg3_flag(tp, ENABLE_APE))
763 return;
764
765 switch (locknum) {
766 case TG3_APE_LOCK_GPIO:
767 if (tg3_asic_rev(tp) == ASIC_REV_5761)
768 return;
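/* 5761 needs no GPIO lock; otherwise fall through */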
769 case TG3_APE_LOCK_GRC:
770 case TG3_APE_LOCK_MEM:
771 if (!tp->pci_fn)
772 bit = APE_LOCK_GRANT_DRIVER;
773 else
774 bit = 1 << tp->pci_fn;
775 break;
776 case TG3_APE_LOCK_PHY0:
777 case TG3_APE_LOCK_PHY1:
778 case TG3_APE_LOCK_PHY2:
779 case TG3_APE_LOCK_PHY3:
780 bit = APE_LOCK_GRANT_DRIVER;
781 break;
782 default:
783 return;
784 }
785
786 if (tg3_asic_rev(tp) == ASIC_REV_5761)
787 gnt = TG3_APE_LOCK_GRANT;
788 else
789 gnt = TG3_APE_PER_LOCK_GRANT;
790
791 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
792 }
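
Every access to a resource shared with the APE firmware is meant to be
bracketed by this lock/unlock pair. A minimal usage sketch (assumes APE
support is enabled; example_ape_mem_access() is illustrative, not a real
driver function):

static int example_ape_mem_access(struct tg3 *tp)
{
	/* tg3_ape_lock() polls the grant register for up to ~1 ms and
	 * returns -EBUSY if the APE never grants the lock.
	 */
	int err = tg3_ape_lock(tp, TG3_APE_LOCK_MEM);

	if (err)
		return err;

	/* ... touch APE shared memory here ... */

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	return 0;
}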
793
794 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
795 {
796 u32 apedata;
797
798 while (timeout_us) {
799 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
800 return -EBUSY;
801
802 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
803 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
804 break;
805
806 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
807
808 udelay(10);
809 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
810 }
811
812 return timeout_us ? 0 : -EBUSY;
813 }
814
815 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
816 {
817 u32 i, apedata;
818
819 for (i = 0; i < timeout_us / 10; i++) {
820 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
821
822 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
823 break;
824
825 udelay(10);
826 }
827
828 return i == timeout_us / 10;
829 }
830
831 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
832 u32 len)
833 {
834 int err;
835 u32 i, bufoff, msgoff, maxlen, apedata;
836
837 if (!tg3_flag(tp, APE_HAS_NCSI))
838 return 0;
839
840 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
841 if (apedata != APE_SEG_SIG_MAGIC)
842 return -ENODEV;
843
844 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
845 if (!(apedata & APE_FW_STATUS_READY))
846 return -EAGAIN;
847
848 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
849 TG3_APE_SHMEM_BASE;
850 msgoff = bufoff + 2 * sizeof(u32);
851 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
852
853 while (len) {
854 u32 length;
855
856 /* Cap xfer sizes to scratchpad limits. */
857 length = (len > maxlen) ? maxlen : len;
858 len -= length;
859
860 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
861 if (!(apedata & APE_FW_STATUS_READY))
862 return -EAGAIN;
863
864 /* Wait for up to 1 msec for APE to service previous event. */
865 err = tg3_ape_event_lock(tp, 1000);
866 if (err)
867 return err;
868
869 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
870 APE_EVENT_STATUS_SCRTCHPD_READ |
871 APE_EVENT_STATUS_EVENT_PENDING;
872 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
873
874 tg3_ape_write32(tp, bufoff, base_off);
875 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
876
877 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
878 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
879
880 base_off += length;
881
882 if (tg3_ape_wait_for_event(tp, 30000))
883 return -EAGAIN;
884
885 for (i = 0; length; i += 4, length -= 4) {
886 u32 val = tg3_ape_read32(tp, msgoff + i);
887 memcpy(data, &val, sizeof(u32));
888 data++;
889 }
890 }
891
892 return 0;
893 }
894
895 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
896 {
897 int err;
898 u32 apedata;
899
900 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
901 if (apedata != APE_SEG_SIG_MAGIC)
902 return -EAGAIN;
903
904 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
905 if (!(apedata & APE_FW_STATUS_READY))
906 return -EAGAIN;
907
908 /* Wait for up to 1 millisecond for APE to service previous event. */
909 err = tg3_ape_event_lock(tp, 1000);
910 if (err)
911 return err;
912
913 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
914 event | APE_EVENT_STATUS_EVENT_PENDING);
915
916 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
917 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
918
919 return 0;
920 }
921
922 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
923 {
924 u32 event;
925 u32 apedata;
926
927 if (!tg3_flag(tp, ENABLE_APE))
928 return;
929
930 switch (kind) {
931 case RESET_KIND_INIT:
932 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
933 APE_HOST_SEG_SIG_MAGIC);
934 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
935 APE_HOST_SEG_LEN_MAGIC);
936 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
937 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
938 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
939 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
940 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
941 APE_HOST_BEHAV_NO_PHYLOCK);
942 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
943 TG3_APE_HOST_DRVR_STATE_START);
944
945 event = APE_EVENT_STATUS_STATE_START;
946 break;
947 case RESET_KIND_SHUTDOWN:
948 /* With the interface we are currently using,
949 * APE does not track driver state. Wiping
950 * out the HOST SEGMENT SIGNATURE forces
951 * the APE to assume OS absent status.
952 */
953 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
954
955 if (device_may_wakeup(&tp->pdev->dev) &&
956 tg3_flag(tp, WOL_ENABLE)) {
957 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
958 TG3_APE_HOST_WOL_SPEED_AUTO);
959 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
960 } else
961 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
962
963 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
964
965 event = APE_EVENT_STATUS_STATE_UNLOAD;
966 break;
967 case RESET_KIND_SUSPEND:
968 event = APE_EVENT_STATUS_STATE_SUSPEND;
969 break;
970 default:
971 return;
972 }
973
974 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
975
976 tg3_ape_send_event(tp, event);
977 }
978
979 static void tg3_disable_ints(struct tg3 *tp)
980 {
981 int i;
982
983 tw32(TG3PCI_MISC_HOST_CTRL,
984 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
985 for (i = 0; i < tp->irq_max; i++)
986 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
987 }
988
989 static void tg3_enable_ints(struct tg3 *tp)
990 {
991 int i;
992
993 tp->irq_sync = 0;
994 wmb();
995
996 tw32(TG3PCI_MISC_HOST_CTRL,
997 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
998
999 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1000 for (i = 0; i < tp->irq_cnt; i++) {
1001 struct tg3_napi *tnapi = &tp->napi[i];
1002
1003 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1004 if (tg3_flag(tp, 1SHOT_MSI))
1005 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1006
1007 tp->coal_now |= tnapi->coal_now;
1008 }
1009
1010 /* Force an initial interrupt */
1011 if (!tg3_flag(tp, TAGGED_STATUS) &&
1012 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1013 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1014 else
1015 tw32(HOSTCC_MODE, tp->coal_now);
1016
1017 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1018 }
1019
1020 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1021 {
1022 struct tg3 *tp = tnapi->tp;
1023 struct tg3_hw_status *sblk = tnapi->hw_status;
1024 unsigned int work_exists = 0;
1025
1026 /* check for phy events */
1027 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1028 if (sblk->status & SD_STATUS_LINK_CHG)
1029 work_exists = 1;
1030 }
1031
1032 /* check for TX work to do */
1033 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1034 work_exists = 1;
1035
1036 /* check for RX work to do */
1037 if (tnapi->rx_rcb_prod_idx &&
1038 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1039 work_exists = 1;
1040
1041 return work_exists;
1042 }
1043
1044 /* tg3_int_reenable
1045 * Similar to tg3_enable_ints, but it accurately determines whether there
1046 * is new work pending and can return without flushing the PIO write
1047 * which re-enables interrupts.
1048 */
1049 static void tg3_int_reenable(struct tg3_napi *tnapi)
1050 {
1051 struct tg3 *tp = tnapi->tp;
1052
1053 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1054 mmiowb();
1055
1056 /* When doing tagged status, this work check is unnecessary.
1057 * The last_tag we write above tells the chip which piece of
1058 * work we've completed.
1059 */
1060 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1061 tw32(HOSTCC_MODE, tp->coalesce_mode |
1062 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1063 }
1064
1065 static void tg3_switch_clocks(struct tg3 *tp)
1066 {
1067 u32 clock_ctrl;
1068 u32 orig_clock_ctrl;
1069
1070 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1071 return;
1072
1073 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1074
1075 orig_clock_ctrl = clock_ctrl;
1076 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1077 CLOCK_CTRL_CLKRUN_OENABLE |
1078 0x1f);
1079 tp->pci_clock_ctrl = clock_ctrl;
1080
1081 if (tg3_flag(tp, 5705_PLUS)) {
1082 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1083 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1084 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1085 }
1086 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1087 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1088 clock_ctrl |
1089 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1090 40);
1091 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1092 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1093 40);
1094 }
1095 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1096 }
1097
1098 #define PHY_BUSY_LOOPS 5000
1099
1100 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1101 u32 *val)
1102 {
1103 u32 frame_val;
1104 unsigned int loops;
1105 int ret;
1106
1107 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1108 tw32_f(MAC_MI_MODE,
1109 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1110 udelay(80);
1111 }
1112
1113 tg3_ape_lock(tp, tp->phy_ape_lock);
1114
1115 *val = 0x0;
1116
1117 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1118 MI_COM_PHY_ADDR_MASK);
1119 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1120 MI_COM_REG_ADDR_MASK);
1121 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1122
1123 tw32_f(MAC_MI_COM, frame_val);
1124
1125 loops = PHY_BUSY_LOOPS;
1126 while (loops != 0) {
1127 udelay(10);
1128 frame_val = tr32(MAC_MI_COM);
1129
1130 if ((frame_val & MI_COM_BUSY) == 0) {
1131 udelay(5);
1132 frame_val = tr32(MAC_MI_COM);
1133 break;
1134 }
1135 loops -= 1;
1136 }
1137
1138 ret = -EBUSY;
1139 if (loops != 0) {
1140 *val = frame_val & MI_COM_DATA_MASK;
1141 ret = 0;
1142 }
1143
1144 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1145 tw32_f(MAC_MI_MODE, tp->mi_mode);
1146 udelay(80);
1147 }
1148
1149 tg3_ape_unlock(tp, tp->phy_ape_lock);
1150
1151 return ret;
1152 }
1153
1154 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1155 {
1156 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1157 }
1158
1159 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1160 u32 val)
1161 {
1162 u32 frame_val;
1163 unsigned int loops;
1164 int ret;
1165
1166 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1167 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1168 return 0;
1169
1170 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1171 tw32_f(MAC_MI_MODE,
1172 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1173 udelay(80);
1174 }
1175
1176 tg3_ape_lock(tp, tp->phy_ape_lock);
1177
1178 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1179 MI_COM_PHY_ADDR_MASK);
1180 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1181 MI_COM_REG_ADDR_MASK);
1182 frame_val |= (val & MI_COM_DATA_MASK);
1183 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1184
1185 tw32_f(MAC_MI_COM, frame_val);
1186
1187 loops = PHY_BUSY_LOOPS;
1188 while (loops != 0) {
1189 udelay(10);
1190 frame_val = tr32(MAC_MI_COM);
1191 if ((frame_val & MI_COM_BUSY) == 0) {
1192 udelay(5);
1193 frame_val = tr32(MAC_MI_COM);
1194 break;
1195 }
1196 loops -= 1;
1197 }
1198
1199 ret = -EBUSY;
1200 if (loops != 0)
1201 ret = 0;
1202
1203 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1204 tw32_f(MAC_MI_MODE, tp->mi_mode);
1205 udelay(80);
1206 }
1207
1208 tg3_ape_unlock(tp, tp->phy_ape_lock);
1209
1210 return ret;
1211 }
1212
1213 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1214 {
1215 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1216 }
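
Both accessors return 0 on success and -EBUSY if MI_COM stays busy for
all of PHY_BUSY_LOOPS iterations, so callers check the return value
before trusting the data, as the rest of this file does. A minimal
sketch (example_read_bmcr() is illustrative only):

static int example_read_bmcr(struct tg3 *tp, u32 *bmcr)
{
	/* On failure, *bmcr is left at the 0 that __tg3_readphy() stores. */
	if (tg3_readphy(tp, MII_BMCR, bmcr))
		return -EBUSY;
	return 0;
}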
1217
1218 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1219 {
1220 int err;
1221
1222 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1223 if (err)
1224 goto done;
1225
1226 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1227 if (err)
1228 goto done;
1229
1230 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1231 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1232 if (err)
1233 goto done;
1234
1235 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1236
1237 done:
1238 return err;
1239 }
1240
1241 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1242 {
1243 int err;
1244
1245 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1246 if (err)
1247 goto done;
1248
1249 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1250 if (err)
1251 goto done;
1252
1253 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1254 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1255 if (err)
1256 goto done;
1257
1258 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1259
1260 done:
1261 return err;
1262 }
1263
1264 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1265 {
1266 int err;
1267
1268 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1269 if (!err)
1270 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1271
1272 return err;
1273 }
1274
1275 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1276 {
1277 int err;
1278
1279 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1280 if (!err)
1281 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1282
1283 return err;
1284 }
1285
1286 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1287 {
1288 int err;
1289
1290 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1291 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1292 MII_TG3_AUXCTL_SHDWSEL_MISC);
1293 if (!err)
1294 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1295
1296 return err;
1297 }
1298
1299 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1300 {
1301 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1302 set |= MII_TG3_AUXCTL_MISC_WREN;
1303
1304 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1305 }
1306
1307 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1308 {
1309 u32 val;
1310 int err;
1311
1312 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1313
1314 if (err)
1315 return err;
1316 if (enable)
1318 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1319 else
1320 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1321
1322 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1323 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1324
1325 return err;
1326 }
1327
1328 static int tg3_bmcr_reset(struct tg3 *tp)
1329 {
1330 u32 phy_control;
1331 int limit, err;
1332
1333 /* OK, reset it, and poll the BMCR_RESET bit until it
1334 * clears or we time out.
1335 */
1336 phy_control = BMCR_RESET;
1337 err = tg3_writephy(tp, MII_BMCR, phy_control);
1338 if (err != 0)
1339 return -EBUSY;
1340
1341 limit = 5000;
1342 while (limit--) {
1343 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1344 if (err != 0)
1345 return -EBUSY;
1346
1347 if ((phy_control & BMCR_RESET) == 0) {
1348 udelay(40);
1349 break;
1350 }
1351 udelay(10);
1352 }
1353 if (limit < 0)
1354 return -EBUSY;
1355
1356 return 0;
1357 }
1358
1359 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1360 {
1361 struct tg3 *tp = bp->priv;
1362 u32 val;
1363
1364 spin_lock_bh(&tp->lock);
1365
1366 if (tg3_readphy(tp, reg, &val))
1367 val = -EIO;
1368
1369 spin_unlock_bh(&tp->lock);
1370
1371 return val;
1372 }
1373
1374 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1375 {
1376 struct tg3 *tp = bp->priv;
1377 u32 ret = 0;
1378
1379 spin_lock_bh(&tp->lock);
1380
1381 if (tg3_writephy(tp, reg, val))
1382 ret = -EIO;
1383
1384 spin_unlock_bh(&tp->lock);
1385
1386 return ret;
1387 }
1388
1389 static int tg3_mdio_reset(struct mii_bus *bp)
1390 {
1391 return 0;
1392 }
1393
1394 static void tg3_mdio_config_5785(struct tg3 *tp)
1395 {
1396 u32 val;
1397 struct phy_device *phydev;
1398
1399 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1400 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1401 case PHY_ID_BCM50610:
1402 case PHY_ID_BCM50610M:
1403 val = MAC_PHYCFG2_50610_LED_MODES;
1404 break;
1405 case PHY_ID_BCMAC131:
1406 val = MAC_PHYCFG2_AC131_LED_MODES;
1407 break;
1408 case PHY_ID_RTL8211C:
1409 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1410 break;
1411 case PHY_ID_RTL8201E:
1412 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1413 break;
1414 default:
1415 return;
1416 }
1417
1418 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1419 tw32(MAC_PHYCFG2, val);
1420
1421 val = tr32(MAC_PHYCFG1);
1422 val &= ~(MAC_PHYCFG1_RGMII_INT |
1423 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1424 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1425 tw32(MAC_PHYCFG1, val);
1426
1427 return;
1428 }
1429
1430 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1431 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1432 MAC_PHYCFG2_FMODE_MASK_MASK |
1433 MAC_PHYCFG2_GMODE_MASK_MASK |
1434 MAC_PHYCFG2_ACT_MASK_MASK |
1435 MAC_PHYCFG2_QUAL_MASK_MASK |
1436 MAC_PHYCFG2_INBAND_ENABLE;
1437
1438 tw32(MAC_PHYCFG2, val);
1439
1440 val = tr32(MAC_PHYCFG1);
1441 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1442 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1443 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1444 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1445 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1446 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1447 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1448 }
1449 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1450 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1451 tw32(MAC_PHYCFG1, val);
1452
1453 val = tr32(MAC_EXT_RGMII_MODE);
1454 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1455 MAC_RGMII_MODE_RX_QUALITY |
1456 MAC_RGMII_MODE_RX_ACTIVITY |
1457 MAC_RGMII_MODE_RX_ENG_DET |
1458 MAC_RGMII_MODE_TX_ENABLE |
1459 MAC_RGMII_MODE_TX_LOWPWR |
1460 MAC_RGMII_MODE_TX_RESET);
1461 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463 val |= MAC_RGMII_MODE_RX_INT_B |
1464 MAC_RGMII_MODE_RX_QUALITY |
1465 MAC_RGMII_MODE_RX_ACTIVITY |
1466 MAC_RGMII_MODE_RX_ENG_DET;
1467 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1468 val |= MAC_RGMII_MODE_TX_ENABLE |
1469 MAC_RGMII_MODE_TX_LOWPWR |
1470 MAC_RGMII_MODE_TX_RESET;
1471 }
1472 tw32(MAC_EXT_RGMII_MODE, val);
1473 }
1474
1475 static void tg3_mdio_start(struct tg3 *tp)
1476 {
1477 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1478 tw32_f(MAC_MI_MODE, tp->mi_mode);
1479 udelay(80);
1480
1481 if (tg3_flag(tp, MDIOBUS_INITED) &&
1482 tg3_asic_rev(tp) == ASIC_REV_5785)
1483 tg3_mdio_config_5785(tp);
1484 }
1485
1486 static int tg3_mdio_init(struct tg3 *tp)
1487 {
1488 int i;
1489 u32 reg;
1490 struct phy_device *phydev;
1491
1492 if (tg3_flag(tp, 5717_PLUS)) {
1493 u32 is_serdes;
1494
1495 tp->phy_addr = tp->pci_fn + 1;
1496
1497 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1498 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1499 else
1500 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1501 TG3_CPMU_PHY_STRAP_IS_SERDES;
1502 if (is_serdes)
1503 tp->phy_addr += 7;
1504 } else
1505 tp->phy_addr = TG3_PHY_MII_ADDR;
1506
1507 tg3_mdio_start(tp);
1508
1509 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1510 return 0;
1511
1512 tp->mdio_bus = mdiobus_alloc();
1513 if (tp->mdio_bus == NULL)
1514 return -ENOMEM;
1515
1516 tp->mdio_bus->name = "tg3 mdio bus";
1517 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1518 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1519 tp->mdio_bus->priv = tp;
1520 tp->mdio_bus->parent = &tp->pdev->dev;
1521 tp->mdio_bus->read = &tg3_mdio_read;
1522 tp->mdio_bus->write = &tg3_mdio_write;
1523 tp->mdio_bus->reset = &tg3_mdio_reset;
1524 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1525 tp->mdio_bus->irq = &tp->mdio_irq[0];
1526
1527 for (i = 0; i < PHY_MAX_ADDR; i++)
1528 tp->mdio_bus->irq[i] = PHY_POLL;
1529
1530 /* The bus registration will look for all the PHYs on the mdio bus.
1531 * Unfortunately, it does not ensure the PHY is powered up before
1532 * accessing the PHY ID registers. A chip reset is the
1533 * quickest way to bring the device back to an operational state.
1534 */
1535 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1536 tg3_bmcr_reset(tp);
1537
1538 i = mdiobus_register(tp->mdio_bus);
1539 if (i) {
1540 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1541 mdiobus_free(tp->mdio_bus);
1542 return i;
1543 }
1544
1545 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1546
1547 if (!phydev || !phydev->drv) {
1548 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1549 mdiobus_unregister(tp->mdio_bus);
1550 mdiobus_free(tp->mdio_bus);
1551 return -ENODEV;
1552 }
1553
1554 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1555 case PHY_ID_BCM57780:
1556 phydev->interface = PHY_INTERFACE_MODE_GMII;
1557 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1558 break;
1559 case PHY_ID_BCM50610:
1560 case PHY_ID_BCM50610M:
1561 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1562 PHY_BRCM_RX_REFCLK_UNUSED |
1563 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1564 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1565 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1566 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1567 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1568 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1569 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1570 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1571 /* fallthru */
1572 case PHY_ID_RTL8211C:
1573 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1574 break;
1575 case PHY_ID_RTL8201E:
1576 case PHY_ID_BCMAC131:
1577 phydev->interface = PHY_INTERFACE_MODE_MII;
1578 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1579 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1580 break;
1581 }
1582
1583 tg3_flag_set(tp, MDIOBUS_INITED);
1584
1585 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1586 tg3_mdio_config_5785(tp);
1587
1588 return 0;
1589 }
1590
1591 static void tg3_mdio_fini(struct tg3 *tp)
1592 {
1593 if (tg3_flag(tp, MDIOBUS_INITED)) {
1594 tg3_flag_clear(tp, MDIOBUS_INITED);
1595 mdiobus_unregister(tp->mdio_bus);
1596 mdiobus_free(tp->mdio_bus);
1597 }
1598 }
1599
1600 /* tp->lock is held. */
1601 static inline void tg3_generate_fw_event(struct tg3 *tp)
1602 {
1603 u32 val;
1604
1605 val = tr32(GRC_RX_CPU_EVENT);
1606 val |= GRC_RX_CPU_DRIVER_EVENT;
1607 tw32_f(GRC_RX_CPU_EVENT, val);
1608
1609 tp->last_event_jiffies = jiffies;
1610 }
1611
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1613
1614 /* tp->lock is held. */
1615 static void tg3_wait_for_event_ack(struct tg3 *tp)
1616 {
1617 int i;
1618 unsigned int delay_cnt;
1619 long time_remain;
1620
1621 /* If enough time has passed, no wait is necessary. */
1622 time_remain = (long)(tp->last_event_jiffies + 1 +
1623 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1624 (long)jiffies;
1625 if (time_remain < 0)
1626 return;
1627
1628 /* Check if we can shorten the wait time. */
1629 delay_cnt = jiffies_to_usecs(time_remain);
1630 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1631 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
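/* udelay(8) per loop iteration below: '>> 3' converts usecs to iterations, '+ 1' rounds up */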
1632 delay_cnt = (delay_cnt >> 3) + 1;
1633
1634 for (i = 0; i < delay_cnt; i++) {
1635 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1636 break;
1637 udelay(8);
1638 }
1639 }
1640
1641 /* tp->lock is held. */
1642 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1643 {
1644 u32 reg, val;
1645
1646 val = 0;
1647 if (!tg3_readphy(tp, MII_BMCR, &reg))
1648 val = reg << 16;
1649 if (!tg3_readphy(tp, MII_BMSR, &reg))
1650 val |= (reg & 0xffff);
1651 *data++ = val;
1652
1653 val = 0;
1654 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1655 val = reg << 16;
1656 if (!tg3_readphy(tp, MII_LPA, &reg))
1657 val |= (reg & 0xffff);
1658 *data++ = val;
1659
1660 val = 0;
1661 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1662 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1663 val = reg << 16;
1664 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1665 val |= (reg & 0xffff);
1666 }
1667 *data++ = val;
1668
1669 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1670 val = reg << 16;
1671 else
1672 val = 0;
1673 *data++ = val;
1674 }
1675
1676 /* tp->lock is held. */
1677 static void tg3_ump_link_report(struct tg3 *tp)
1678 {
1679 u32 data[4];
1680
1681 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1682 return;
1683
1684 tg3_phy_gather_ump_data(tp, data);
1685
1686 tg3_wait_for_event_ack(tp);
1687
1688 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1694
1695 tg3_generate_fw_event(tp);
1696 }
1697
1698 /* tp->lock is held. */
1699 static void tg3_stop_fw(struct tg3 *tp)
1700 {
1701 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1702 /* Wait for RX cpu to ACK the previous event. */
1703 tg3_wait_for_event_ack(tp);
1704
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1706
1707 tg3_generate_fw_event(tp);
1708
1709 /* Wait for RX cpu to ACK this event. */
1710 tg3_wait_for_event_ack(tp);
1711 }
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1716 {
1717 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1718 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1719
1720 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1721 switch (kind) {
1722 case RESET_KIND_INIT:
1723 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1724 DRV_STATE_START);
1725 break;
1726
1727 case RESET_KIND_SHUTDOWN:
1728 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1729 DRV_STATE_UNLOAD);
1730 break;
1731
1732 case RESET_KIND_SUSPEND:
1733 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734 DRV_STATE_SUSPEND);
1735 break;
1736
1737 default:
1738 break;
1739 }
1740 }
1741
1742 if (kind == RESET_KIND_INIT ||
1743 kind == RESET_KIND_SUSPEND)
1744 tg3_ape_driver_state_change(tp, kind);
1745 }
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751 switch (kind) {
1752 case RESET_KIND_INIT:
1753 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754 DRV_STATE_START_DONE);
1755 break;
1756
1757 case RESET_KIND_SHUTDOWN:
1758 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 DRV_STATE_UNLOAD_DONE);
1760 break;
1761
1762 default:
1763 break;
1764 }
1765 }
1766
1767 if (kind == RESET_KIND_SHUTDOWN)
1768 tg3_ape_driver_state_change(tp, kind);
1769 }
1770
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1773 {
1774 if (tg3_flag(tp, ENABLE_ASF)) {
1775 switch (kind) {
1776 case RESET_KIND_INIT:
1777 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1778 DRV_STATE_START);
1779 break;
1780
1781 case RESET_KIND_SHUTDOWN:
1782 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783 DRV_STATE_UNLOAD);
1784 break;
1785
1786 case RESET_KIND_SUSPEND:
1787 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 DRV_STATE_SUSPEND);
1789 break;
1790
1791 default:
1792 break;
1793 }
1794 }
1795 }
1796
1797 static int tg3_poll_fw(struct tg3 *tp)
1798 {
1799 int i;
1800 u32 val;
1801
1802 if (tg3_flag(tp, IS_SSB_CORE)) {
1803 /* We don't use firmware. */
1804 return 0;
1805 }
1806
1807 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1808 /* Wait up to 20ms for init done. */
1809 for (i = 0; i < 200; i++) {
1810 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811 return 0;
1812 udelay(100);
1813 }
1814 return -ENODEV;
1815 }
1816
1817 /* Wait for firmware initialization to complete. */
1818 for (i = 0; i < 100000; i++) {
1819 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1820 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1821 break;
1822 udelay(10);
1823 }
1824
1825 /* Chip might not be fitted with firmware. Some Sun onboard
1826 * parts are configured like that. So don't signal the timeout
1827 * of the above loop as an error, but do report the lack of
1828 * running firmware once.
1829 */
1830 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1831 tg3_flag_set(tp, NO_FWARE_REPORTED);
1832
1833 netdev_info(tp->dev, "No firmware running\n");
1834 }
1835
1836 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1837 /* The 57765 A0 needs a little more
1838 * time to do some important work.
1839 */
1840 mdelay(10);
1841 }
1842
1843 return 0;
1844 }
1845
1846 static void tg3_link_report(struct tg3 *tp)
1847 {
1848 if (!netif_carrier_ok(tp->dev)) {
1849 netif_info(tp, link, tp->dev, "Link is down\n");
1850 tg3_ump_link_report(tp);
1851 } else if (netif_msg_link(tp)) {
1852 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1853 (tp->link_config.active_speed == SPEED_1000 ?
1854 1000 :
1855 (tp->link_config.active_speed == SPEED_100 ?
1856 100 : 10)),
1857 (tp->link_config.active_duplex == DUPLEX_FULL ?
1858 "full" : "half"));
1859
1860 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1861 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1862 "on" : "off",
1863 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1864 "on" : "off");
1865
1866 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1867 netdev_info(tp->dev, "EEE is %s\n",
1868 tp->setlpicnt ? "enabled" : "disabled");
1869
1870 tg3_ump_link_report(tp);
1871 }
1872 }
1873
1874 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1875 {
1876 u16 miireg;
1877
1878 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1879 miireg = ADVERTISE_1000XPAUSE;
1880 else if (flow_ctrl & FLOW_CTRL_TX)
1881 miireg = ADVERTISE_1000XPSE_ASYM;
1882 else if (flow_ctrl & FLOW_CTRL_RX)
1883 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1884 else
1885 miireg = 0;
1886
1887 return miireg;
1888 }
1889
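/* Resolve the negotiated TX/RX pause settings for a 1000BASE-X link
 * from the local and remote pause advertisements, following the
 * pause resolution rules of IEEE 802.3 Annex 28B.
 */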
1890 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1891 {
1892 u8 cap = 0;
1893
1894 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1895 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1896 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1897 if (lcladv & ADVERTISE_1000XPAUSE)
1898 cap = FLOW_CTRL_RX;
1899 if (rmtadv & ADVERTISE_1000XPAUSE)
1900 cap = FLOW_CTRL_TX;
1901 }
1902
1903 return cap;
1904 }
1905
1906 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1907 {
1908 u8 autoneg;
1909 u8 flowctrl = 0;
1910 u32 old_rx_mode = tp->rx_mode;
1911 u32 old_tx_mode = tp->tx_mode;
1912
1913 if (tg3_flag(tp, USE_PHYLIB))
1914 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1915 else
1916 autoneg = tp->link_config.autoneg;
1917
1918 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1919 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1920 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1921 else
1922 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1923 } else
1924 flowctrl = tp->link_config.flowctrl;
1925
1926 tp->link_config.active_flowctrl = flowctrl;
1927
1928 if (flowctrl & FLOW_CTRL_RX)
1929 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1930 else
1931 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1932
1933 if (old_rx_mode != tp->rx_mode)
1934 tw32_f(MAC_RX_MODE, tp->rx_mode);
1935
1936 if (flowctrl & FLOW_CTRL_TX)
1937 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1938 else
1939 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1940
1941 if (old_tx_mode != tp->tx_mode)
1942 tw32_f(MAC_TX_MODE, tp->tx_mode);
1943 }
1944
1945 static void tg3_adjust_link(struct net_device *dev)
1946 {
1947 u8 oldflowctrl, linkmesg = 0;
1948 u32 mac_mode, lcl_adv, rmt_adv;
1949 struct tg3 *tp = netdev_priv(dev);
1950 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1951
1952 spin_lock_bh(&tp->lock);
1953
1954 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1955 MAC_MODE_HALF_DUPLEX);
1956
1957 oldflowctrl = tp->link_config.active_flowctrl;
1958
1959 if (phydev->link) {
1960 lcl_adv = 0;
1961 rmt_adv = 0;
1962
1963 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1964 mac_mode |= MAC_MODE_PORT_MODE_MII;
1965 else if (phydev->speed == SPEED_1000 ||
1966 tg3_asic_rev(tp) != ASIC_REV_5785)
1967 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1968 else
1969 mac_mode |= MAC_MODE_PORT_MODE_MII;
1970
1971 if (phydev->duplex == DUPLEX_HALF)
1972 mac_mode |= MAC_MODE_HALF_DUPLEX;
1973 else {
1974 lcl_adv = mii_advertise_flowctrl(
1975 tp->link_config.flowctrl);
1976
1977 if (phydev->pause)
1978 rmt_adv = LPA_PAUSE_CAP;
1979 if (phydev->asym_pause)
1980 rmt_adv |= LPA_PAUSE_ASYM;
1981 }
1982
1983 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1984 } else
1985 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1986
1987 if (mac_mode != tp->mac_mode) {
1988 tp->mac_mode = mac_mode;
1989 tw32_f(MAC_MODE, tp->mac_mode);
1990 udelay(40);
1991 }
1992
1993 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1994 if (phydev->speed == SPEED_10)
1995 tw32(MAC_MI_STAT,
1996 MAC_MI_STAT_10MBPS_MODE |
1997 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1998 else
1999 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2000 }
2001
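/* Half-duplex gigabit needs the extended slot time used for carrier
 * extension; everything else runs with the standard slot time.  The
 * SLOT_TIME field values (0xff vs. 32) are taken to be the hardware
 * encodings of those two slot times.
 */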
2002 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2003 tw32(MAC_TX_LENGTHS,
2004 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2005 (6 << TX_LENGTHS_IPG_SHIFT) |
2006 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2007 else
2008 tw32(MAC_TX_LENGTHS,
2009 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2010 (6 << TX_LENGTHS_IPG_SHIFT) |
2011 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2012
2013 if (phydev->link != tp->old_link ||
2014 phydev->speed != tp->link_config.active_speed ||
2015 phydev->duplex != tp->link_config.active_duplex ||
2016 oldflowctrl != tp->link_config.active_flowctrl)
2017 linkmesg = 1;
2018
2019 tp->old_link = phydev->link;
2020 tp->link_config.active_speed = phydev->speed;
2021 tp->link_config.active_duplex = phydev->duplex;
2022
2023 spin_unlock_bh(&tp->lock);
2024
2025 if (linkmesg)
2026 tg3_link_report(tp);
2027 }
2028
2029 static int tg3_phy_init(struct tg3 *tp)
2030 {
2031 struct phy_device *phydev;
2032
2033 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2034 return 0;
2035
2036 /* Bring the PHY back to a known state. */
2037 tg3_bmcr_reset(tp);
2038
2039 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2040
2041 /* Attach the MAC to the PHY. */
2042 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2043 tg3_adjust_link, phydev->interface);
2044 if (IS_ERR(phydev)) {
2045 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2046 return PTR_ERR(phydev);
2047 }
2048
2049 /* Mask with MAC supported features. */
2050 switch (phydev->interface) {
2051 case PHY_INTERFACE_MODE_GMII:
2052 case PHY_INTERFACE_MODE_RGMII:
2053 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2054 phydev->supported &= (PHY_GBIT_FEATURES |
2055 SUPPORTED_Pause |
2056 SUPPORTED_Asym_Pause);
2057 break;
2058 }
2059 /* fall through */
2060 case PHY_INTERFACE_MODE_MII:
2061 phydev->supported &= (PHY_BASIC_FEATURES |
2062 SUPPORTED_Pause |
2063 SUPPORTED_Asym_Pause);
2064 break;
2065 default:
2066 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2067 return -EINVAL;
2068 }
2069
2070 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2071
2072 phydev->advertising = phydev->supported;
2073
2074 return 0;
2075 }
2076
2077 static void tg3_phy_start(struct tg3 *tp)
2078 {
2079 struct phy_device *phydev;
2080
2081 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2082 return;
2083
2084 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2085
2086 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2087 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2088 phydev->speed = tp->link_config.speed;
2089 phydev->duplex = tp->link_config.duplex;
2090 phydev->autoneg = tp->link_config.autoneg;
2091 phydev->advertising = tp->link_config.advertising;
2092 }
2093
2094 phy_start(phydev);
2095
2096 phy_start_aneg(phydev);
2097 }
2098
2099 static void tg3_phy_stop(struct tg3 *tp)
2100 {
2101 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2102 return;
2103
2104 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2105 }
2106
2107 static void tg3_phy_fini(struct tg3 *tp)
2108 {
2109 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2110 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2111 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2112 }
2113 }
2114
2115 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2116 {
2117 int err;
2118 u32 val;
2119
2120 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2121 return 0;
2122
2123 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2124 /* Cannot do read-modify-write on 5401 */
2125 err = tg3_phy_auxctl_write(tp,
2126 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2127 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2128 0x4c20);
2129 goto done;
2130 }
2131
2132 err = tg3_phy_auxctl_read(tp,
2133 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2134 if (err)
2135 return err;
2136
2137 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2138 err = tg3_phy_auxctl_write(tp,
2139 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2140
2141 done:
2142 return err;
2143 }
2144
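/* Toggle auto power-down (APD) on FET-style PHYs.  The shadow
 * register set is only reachable while MII_TG3_FET_SHADOW_EN is set
 * in the test register, so the access is bracketed by it.
 */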
2145 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2146 {
2147 u32 phytest;
2148
2149 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2150 u32 phy;
2151
2152 tg3_writephy(tp, MII_TG3_FET_TEST,
2153 phytest | MII_TG3_FET_SHADOW_EN);
2154 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2155 if (enable)
2156 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2157 else
2158 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2159 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2160 }
2161 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2162 }
2163 }
2164
2165 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2166 {
2167 u32 reg;
2168
2169 if (!tg3_flag(tp, 5705_PLUS) ||
2170 (tg3_flag(tp, 5717_PLUS) &&
2171 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2172 return;
2173
2174 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2175 tg3_phy_fet_toggle_apd(tp, enable);
2176 return;
2177 }
2178
2179 reg = MII_TG3_MISC_SHDW_WREN |
2180 MII_TG3_MISC_SHDW_SCR5_SEL |
2181 MII_TG3_MISC_SHDW_SCR5_LPED |
2182 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2183 MII_TG3_MISC_SHDW_SCR5_SDTL |
2184 MII_TG3_MISC_SHDW_SCR5_C125OE;
2185 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2186 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2187
2188 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2189
2191 reg = MII_TG3_MISC_SHDW_WREN |
2192 MII_TG3_MISC_SHDW_APD_SEL |
2193 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2194 if (enable)
2195 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2196
2197 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2198 }
2199
2200 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2201 {
2202 u32 phy;
2203
2204 if (!tg3_flag(tp, 5705_PLUS) ||
2205 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2206 return;
2207
2208 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2209 u32 ephy;
2210
2211 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2212 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2213
2214 tg3_writephy(tp, MII_TG3_FET_TEST,
2215 ephy | MII_TG3_FET_SHADOW_EN);
2216 if (!tg3_readphy(tp, reg, &phy)) {
2217 if (enable)
2218 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2219 else
2220 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2221 tg3_writephy(tp, reg, phy);
2222 }
2223 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2224 }
2225 } else {
2226 int ret;
2227
2228 ret = tg3_phy_auxctl_read(tp,
2229 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2230 if (!ret) {
2231 if (enable)
2232 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2233 else
2234 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2235 tg3_phy_auxctl_write(tp,
2236 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2237 }
2238 }
2239 }
2240
2241 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2242 {
2243 int ret;
2244 u32 val;
2245
2246 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2247 return;
2248
2249 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2250 if (!ret)
2251 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2252 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2253 }
2254
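/* Apply PHY DSP coefficients recovered from the chip's one-time
 * programmable (OTP) fuses, if the probe code found any.
 */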
2255 static void tg3_phy_apply_otp(struct tg3 *tp)
2256 {
2257 u32 otp, phy;
2258
2259 if (!tp->phy_otp)
2260 return;
2261
2262 otp = tp->phy_otp;
2263
2264 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2265 return;
2266
2267 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2268 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2269 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2270
2271 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2272 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2273 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2274
2275 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2276 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2277 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2278
2279 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2280 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2281
2282 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2283 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2284
2285 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2286 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2287 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2288
2289 tg3_phy_toggle_auxctl_smdsp(tp, false);
2290 }
2291
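/* Re-evaluate Energy Efficient Ethernet (EEE, IEEE 802.3az) state
 * after a link change.  tp->setlpicnt arms a short countdown before
 * LPI is enabled; it is assumed to be serviced by the periodic timer.
 */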
2292 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2293 {
2294 u32 val;
2295
2296 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2297 return;
2298
2299 tp->setlpicnt = 0;
2300
2301 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2302 current_link_up == 1 &&
2303 tp->link_config.active_duplex == DUPLEX_FULL &&
2304 (tp->link_config.active_speed == SPEED_100 ||
2305 tp->link_config.active_speed == SPEED_1000)) {
2306 u32 eeectl;
2307
2308 if (tp->link_config.active_speed == SPEED_1000)
2309 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2310 else
2311 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2312
2313 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2314
2315 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2316 TG3_CL45_D7_EEERES_STAT, &val);
2317
2318 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2319 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2320 tp->setlpicnt = 2;
2321 }
2322
2323 if (!tp->setlpicnt) {
2324 if (current_link_up == 1 &&
2325 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2326 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2327 tg3_phy_toggle_auxctl_smdsp(tp, false);
2328 }
2329
2330 val = tr32(TG3_CPMU_EEE_MODE);
2331 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2332 }
2333 }
2334
2335 static void tg3_phy_eee_enable(struct tg3 *tp)
2336 {
2337 u32 val;
2338
2339 if (tp->link_config.active_speed == SPEED_1000 &&
2340 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2341 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2342 tg3_flag(tp, 57765_CLASS)) &&
2343 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2344 val = MII_TG3_DSP_TAP26_ALNOKO |
2345 MII_TG3_DSP_TAP26_RMRXSTO;
2346 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2347 tg3_phy_toggle_auxctl_smdsp(tp, false);
2348 }
2349
2350 val = tr32(TG3_CPMU_EEE_MODE);
2351 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2352 }
2353
2354 static int tg3_wait_macro_done(struct tg3 *tp)
2355 {
2356 int limit = 100;
2357
2358 while (limit--) {
2359 u32 tmp32;
2360
2361 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2362 if ((tmp32 & 0x1000) == 0)
2363 break;
2364 }
2365 }
2366 if (limit < 0)
2367 return -EBUSY;
2368
2369 return 0;
2370 }
2371
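/* Write a known test pattern to all four DSP channels and read it
 * back to verify.  A macro-wait failure asks the caller to reset the
 * PHY via *resetp; a data mismatch just fails with -EBUSY.
 */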
2372 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2373 {
2374 static const u32 test_pat[4][6] = {
2375 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2376 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2377 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2378 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2379 };
2380 int chan;
2381
2382 for (chan = 0; chan < 4; chan++) {
2383 int i;
2384
2385 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2386 (chan * 0x2000) | 0x0200);
2387 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2388
2389 for (i = 0; i < 6; i++)
2390 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2391 test_pat[chan][i]);
2392
2393 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2394 if (tg3_wait_macro_done(tp)) {
2395 *resetp = 1;
2396 return -EBUSY;
2397 }
2398
2399 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2400 (chan * 0x2000) | 0x0200);
2401 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2402 if (tg3_wait_macro_done(tp)) {
2403 *resetp = 1;
2404 return -EBUSY;
2405 }
2406
2407 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2408 if (tg3_wait_macro_done(tp)) {
2409 *resetp = 1;
2410 return -EBUSY;
2411 }
2412
2413 for (i = 0; i < 6; i += 2) {
2414 u32 low, high;
2415
2416 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2417 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2418 tg3_wait_macro_done(tp)) {
2419 *resetp = 1;
2420 return -EBUSY;
2421 }
2422 low &= 0x7fff;
2423 high &= 0x000f;
2424 if (low != test_pat[chan][i] ||
2425 high != test_pat[chan][i+1]) {
2426 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2427 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2428 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2429
2430 return -EBUSY;
2431 }
2432 }
2433 }
2434
2435 return 0;
2436 }
2437
2438 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2439 {
2440 int chan;
2441
2442 for (chan = 0; chan < 4; chan++) {
2443 int i;
2444
2445 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2446 (chan * 0x2000) | 0x0200);
2447 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2448 for (i = 0; i < 6; i++)
2449 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2450 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2451 if (tg3_wait_macro_done(tp))
2452 return -EBUSY;
2453 }
2454
2455 return 0;
2456 }
2457
2458 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2459 {
2460 u32 reg32, phy9_orig;
2461 int retries, do_phy_reset, err;
2462
2463 retries = 10;
2464 do_phy_reset = 1;
2465 do {
2466 if (do_phy_reset) {
2467 err = tg3_bmcr_reset(tp);
2468 if (err)
2469 return err;
2470 do_phy_reset = 0;
2471 }
2472
2473 /* Disable transmitter and interrupt. */
2474 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2475 continue;
2476
2477 reg32 |= 0x3000;
2478 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2479
2480 /* Set full-duplex, 1000 mbps. */
2481 tg3_writephy(tp, MII_BMCR,
2482 BMCR_FULLDPLX | BMCR_SPEED1000);
2483
2484 /* Set to master mode. */
2485 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2486 continue;
2487
2488 tg3_writephy(tp, MII_CTRL1000,
2489 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2490
2491 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2492 if (err)
2493 return err;
2494
2495 /* Block the PHY control access. */
2496 tg3_phydsp_write(tp, 0x8005, 0x0800);
2497
2498 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2499 if (!err)
2500 break;
2501 } while (--retries);
2502
2503 err = tg3_phy_reset_chanpat(tp);
2504 if (err)
2505 return err;
2506
2507 tg3_phydsp_write(tp, 0x8005, 0x0000);
2508
2509 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2510 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2511
2512 tg3_phy_toggle_auxctl_smdsp(tp, false);
2513
2514 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2515
2516 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2517 reg32 &= ~0x3000;
2518 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2519 } else if (!err)
2520 err = -EBUSY;
2521
2522 return err;
2523 }
2524
2525 static void tg3_carrier_on(struct tg3 *tp)
2526 {
2527 netif_carrier_on(tp->dev);
2528 tp->link_up = true;
2529 }
2530
2531 static void tg3_carrier_off(struct tg3 *tp)
2532 {
2533 netif_carrier_off(tp->dev);
2534 tp->link_up = false;
2535 }
2536
2537 /* This will reset the tigon3 PHY unconditionally. */
2540 static int tg3_phy_reset(struct tg3 *tp)
2541 {
2542 u32 val, cpmuctrl;
2543 int err;
2544
2545 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2546 val = tr32(GRC_MISC_CFG);
2547 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2548 udelay(40);
2549 }
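/* The link status bit in BMSR is latched-low, so read the register
 * twice to get the current link state.
 */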
2550 err = tg3_readphy(tp, MII_BMSR, &val);
2551 err |= tg3_readphy(tp, MII_BMSR, &val);
2552 if (err != 0)
2553 return -EBUSY;
2554
2555 if (netif_running(tp->dev) && tp->link_up) {
2556 tg3_carrier_off(tp);
2557 tg3_link_report(tp);
2558 }
2559
2560 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2561 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2562 tg3_asic_rev(tp) == ASIC_REV_5705) {
2563 err = tg3_phy_reset_5703_4_5(tp);
2564 if (err)
2565 return err;
2566 goto out;
2567 }
2568
2569 cpmuctrl = 0;
2570 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2571 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2572 cpmuctrl = tr32(TG3_CPMU_CTRL);
2573 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2574 tw32(TG3_CPMU_CTRL,
2575 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2576 }
2577
2578 err = tg3_bmcr_reset(tp);
2579 if (err)
2580 return err;
2581
2582 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2583 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2584 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2585
2586 tw32(TG3_CPMU_CTRL, cpmuctrl);
2587 }
2588
2589 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2590 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2591 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2592 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2593 CPMU_LSPD_1000MB_MACCLK_12_5) {
2594 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2595 udelay(40);
2596 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2597 }
2598 }
2599
2600 if (tg3_flag(tp, 5717_PLUS) &&
2601 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2602 return 0;
2603
2604 tg3_phy_apply_otp(tp);
2605
2606 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2607 tg3_phy_toggle_apd(tp, true);
2608 else
2609 tg3_phy_toggle_apd(tp, false);
2610
2611 out:
2612 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2613 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2614 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2615 tg3_phydsp_write(tp, 0x000a, 0x0323);
2616 tg3_phy_toggle_auxctl_smdsp(tp, false);
2617 }
2618
2619 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2620 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2621 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2622 }
2623
2624 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2625 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2626 tg3_phydsp_write(tp, 0x000a, 0x310b);
2627 tg3_phydsp_write(tp, 0x201f, 0x9506);
2628 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2629 tg3_phy_toggle_auxctl_smdsp(tp, false);
2630 }
2631 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2632 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2633 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2634 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2635 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2636 tg3_writephy(tp, MII_TG3_TEST1,
2637 MII_TG3_TEST1_TRIM_EN | 0x4);
2638 } else
2639 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2640
2641 tg3_phy_toggle_auxctl_smdsp(tp, false);
2642 }
2643 }
2644
2645 /* Set the extended packet length bit (bit 14) on all chips
2646 * that support jumbo frames. */
2647 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2648 /* Cannot do read-modify-write on 5401 */
2649 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2650 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2651 /* Set bit 14 with read-modify-write to preserve other bits */
2652 err = tg3_phy_auxctl_read(tp,
2653 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2654 if (!err)
2655 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2656 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2657 }
2658
2659 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2660 * jumbo frames transmission.
2661 */
2662 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2663 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2664 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2665 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2666 }
2667
2668 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2669 /* adjust output voltage */
2670 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2671 }
2672
2673 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2674 tg3_phydsp_write(tp, 0xffb, 0x4000);
2675
2676 tg3_phy_toggle_automdix(tp, 1);
2677 tg3_phy_set_wirespeed(tp);
2678 return 0;
2679 }
2680
2681 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2682 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2683 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2684 TG3_GPIO_MSG_NEED_VAUX)
2685 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2686 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2687 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2688 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2689 (TG3_GPIO_MSG_DRVR_PRES << 12))
2690
2691 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2692 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2693 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2694 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2695 (TG3_GPIO_MSG_NEED_VAUX << 12))
2696
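/* Publish this PCI function's GPIO power state in the status word
 * shared by all functions; each function owns a 4-bit field selected
 * by tp->pci_fn.  Returns the updated status for every function.
 */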
2697 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2698 {
2699 u32 status, shift;
2700
2701 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2702 tg3_asic_rev(tp) == ASIC_REV_5719)
2703 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2704 else
2705 status = tr32(TG3_CPMU_DRV_STATUS);
2706
2707 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2708 status &= ~(TG3_GPIO_MSG_MASK << shift);
2709 status |= (newstat << shift);
2710
2711 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2712 tg3_asic_rev(tp) == ASIC_REV_5719)
2713 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2714 else
2715 tw32(TG3_CPMU_DRV_STATUS, status);
2716
2717 return status >> TG3_APE_GPIO_MSG_SHIFT;
2718 }
2719
2720 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2721 {
2722 if (!tg3_flag(tp, IS_NIC))
2723 return 0;
2724
2725 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2726 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2727 tg3_asic_rev(tp) == ASIC_REV_5720) {
2728 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2729 return -EIO;
2730
2731 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2732
2733 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2734 TG3_GRC_LCLCTL_PWRSW_DELAY);
2735
2736 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2737 } else {
2738 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2739 TG3_GRC_LCLCTL_PWRSW_DELAY);
2740 }
2741
2742 return 0;
2743 }
2744
2745 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2746 {
2747 u32 grc_local_ctrl;
2748
2749 if (!tg3_flag(tp, IS_NIC) ||
2750 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2751 tg3_asic_rev(tp) == ASIC_REV_5701)
2752 return;
2753
2754 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2755
2756 tw32_wait_f(GRC_LOCAL_CTRL,
2757 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2758 TG3_GRC_LCLCTL_PWRSW_DELAY);
2759
2760 tw32_wait_f(GRC_LOCAL_CTRL,
2761 grc_local_ctrl,
2762 TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764 tw32_wait_f(GRC_LOCAL_CTRL,
2765 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2767 }
2768
2769 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2770 {
2771 if (!tg3_flag(tp, IS_NIC))
2772 return;
2773
2774 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2775 tg3_asic_rev(tp) == ASIC_REV_5701) {
2776 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2777 (GRC_LCLCTRL_GPIO_OE0 |
2778 GRC_LCLCTRL_GPIO_OE1 |
2779 GRC_LCLCTRL_GPIO_OE2 |
2780 GRC_LCLCTRL_GPIO_OUTPUT0 |
2781 GRC_LCLCTRL_GPIO_OUTPUT1),
2782 TG3_GRC_LCLCTL_PWRSW_DELAY);
2783 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2784 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2785 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2786 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2787 GRC_LCLCTRL_GPIO_OE1 |
2788 GRC_LCLCTRL_GPIO_OE2 |
2789 GRC_LCLCTRL_GPIO_OUTPUT0 |
2790 GRC_LCLCTRL_GPIO_OUTPUT1 |
2791 tp->grc_local_ctrl;
2792 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2793 TG3_GRC_LCLCTL_PWRSW_DELAY);
2794
2795 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2796 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY);
2798
2799 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2800 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801 TG3_GRC_LCLCTL_PWRSW_DELAY);
2802 } else {
2803 u32 no_gpio2;
2804 u32 grc_local_ctrl = 0;
2805
2806 /* Workaround to prevent the part from drawing too much current. */
2807 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2808 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2809 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2810 grc_local_ctrl,
2811 TG3_GRC_LCLCTL_PWRSW_DELAY);
2812 }
2813
2814 /* On 5753 and variants, GPIO2 cannot be used. */
2815 no_gpio2 = tp->nic_sram_data_cfg &
2816 NIC_SRAM_DATA_CFG_NO_GPIO2;
2817
2818 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT1 |
2822 GRC_LCLCTRL_GPIO_OUTPUT2;
2823 if (no_gpio2) {
2824 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2825 GRC_LCLCTRL_GPIO_OUTPUT2);
2826 }
2827 tw32_wait_f(GRC_LOCAL_CTRL,
2828 tp->grc_local_ctrl | grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830
2831 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2832
2833 tw32_wait_f(GRC_LOCAL_CTRL,
2834 tp->grc_local_ctrl | grc_local_ctrl,
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836
2837 if (!no_gpio2) {
2838 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2839 tw32_wait_f(GRC_LOCAL_CTRL,
2840 tp->grc_local_ctrl | grc_local_ctrl,
2841 TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 }
2843 }
2844 }
2845
2846 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2847 {
2848 u32 msg = 0;
2849
2850 /* Serialize power state transitions */
2851 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2852 return;
2853
2854 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2855 msg = TG3_GPIO_MSG_NEED_VAUX;
2856
2857 msg = tg3_set_function_status(tp, msg);
2858
2859 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2860 goto done;
2861
2862 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2863 tg3_pwrsrc_switch_to_vaux(tp);
2864 else
2865 tg3_pwrsrc_die_with_vmain(tp);
2866
2867 done:
2868 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2869 }
2870
2871 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2872 {
2873 bool need_vaux = false;
2874
2875 /* The GPIOs do something completely different on 57765. */
2876 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2877 return;
2878
2879 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2880 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2881 tg3_asic_rev(tp) == ASIC_REV_5720) {
2882 tg3_frob_aux_power_5717(tp, include_wol ?
2883 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2884 return;
2885 }
2886
2887 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2888 struct net_device *dev_peer;
2889
2890 dev_peer = pci_get_drvdata(tp->pdev_peer);
2891
2892 /* remove_one() may have been run on the peer. */
2893 if (dev_peer) {
2894 struct tg3 *tp_peer = netdev_priv(dev_peer);
2895
2896 if (tg3_flag(tp_peer, INIT_COMPLETE))
2897 return;
2898
2899 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2900 tg3_flag(tp_peer, ENABLE_ASF))
2901 need_vaux = true;
2902 }
2903 }
2904
2905 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2906 tg3_flag(tp, ENABLE_ASF))
2907 need_vaux = true;
2908
2909 if (need_vaux)
2910 tg3_pwrsrc_switch_to_vaux(tp);
2911 else
2912 tg3_pwrsrc_die_with_vmain(tp);
2913 }
2914
2915 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2916 {
2917 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2918 return 1;
2919 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2920 if (speed != SPEED_10)
2921 return 1;
2922 } else if (speed == SPEED_10)
2923 return 1;
2924
2925 return 0;
2926 }
2927
2928 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2929 {
2930 u32 val;
2931
2932 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2933 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2934 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2935 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2936
2937 sg_dig_ctrl |=
2938 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2939 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2940 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2941 }
2942 return;
2943 }
2944
2945 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2946 tg3_bmcr_reset(tp);
2947 val = tr32(GRC_MISC_CFG);
2948 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2949 udelay(40);
2950 return;
2951 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2952 u32 phytest;
2953 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2954 u32 phy;
2955
2956 tg3_writephy(tp, MII_ADVERTISE, 0);
2957 tg3_writephy(tp, MII_BMCR,
2958 BMCR_ANENABLE | BMCR_ANRESTART);
2959
2960 tg3_writephy(tp, MII_TG3_FET_TEST,
2961 phytest | MII_TG3_FET_SHADOW_EN);
2962 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2963 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2964 tg3_writephy(tp,
2965 MII_TG3_FET_SHDW_AUXMODE4,
2966 phy);
2967 }
2968 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2969 }
2970 return;
2971 } else if (do_low_power) {
2972 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2973 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2974
2975 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2976 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2977 MII_TG3_AUXCTL_PCTL_VREG_11V;
2978 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2979 }
2980
2981 /* The PHY should not be powered down on some chips because
2982 * of bugs.
2983 */
2984 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2985 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2986 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2987 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2988 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2989 !tp->pci_fn))
2990 return;
2991
2992 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2993 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2994 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2995 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2996 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2997 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2998 }
2999
3000 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3001 }
3002
3003 /* tp->lock is held. */
3004 static int tg3_nvram_lock(struct tg3 *tp)
3005 {
3006 if (tg3_flag(tp, NVRAM)) {
3007 int i;
3008
3009 if (tp->nvram_lock_cnt == 0) {
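/* Request software arbitration slot 1 and poll up to
 * 8000 * 20us = 160ms for the grant.
 */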
3010 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3011 for (i = 0; i < 8000; i++) {
3012 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3013 break;
3014 udelay(20);
3015 }
3016 if (i == 8000) {
3017 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3018 return -ENODEV;
3019 }
3020 }
3021 tp->nvram_lock_cnt++;
3022 }
3023 return 0;
3024 }
3025
3026 /* tp->lock is held. */
3027 static void tg3_nvram_unlock(struct tg3 *tp)
3028 {
3029 if (tg3_flag(tp, NVRAM)) {
3030 if (tp->nvram_lock_cnt > 0)
3031 tp->nvram_lock_cnt--;
3032 if (tp->nvram_lock_cnt == 0)
3033 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3034 }
3035 }
3036
3037 /* tp->lock is held. */
3038 static void tg3_enable_nvram_access(struct tg3 *tp)
3039 {
3040 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3041 u32 nvaccess = tr32(NVRAM_ACCESS);
3042
3043 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3044 }
3045 }
3046
3047 /* tp->lock is held. */
3048 static void tg3_disable_nvram_access(struct tg3 *tp)
3049 {
3050 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3051 u32 nvaccess = tr32(NVRAM_ACCESS);
3052
3053 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3054 }
3055 }
3056
3057 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3058 u32 offset, u32 *val)
3059 {
3060 u32 tmp;
3061 int i;
3062
3063 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3064 return -EINVAL;
3065
3066 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3067 EEPROM_ADDR_DEVID_MASK |
3068 EEPROM_ADDR_READ);
3069 tw32(GRC_EEPROM_ADDR,
3070 tmp |
3071 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3072 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3073 EEPROM_ADDR_ADDR_MASK) |
3074 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3075
3076 for (i = 0; i < 1000; i++) {
3077 tmp = tr32(GRC_EEPROM_ADDR);
3078
3079 if (tmp & EEPROM_ADDR_COMPLETE)
3080 break;
3081 msleep(1);
3082 }
3083 if (!(tmp & EEPROM_ADDR_COMPLETE))
3084 return -EBUSY;
3085
3086 tmp = tr32(GRC_EEPROM_DATA);
3087
3088 /*
3089 * The data will always be opposite the native endian
3090 * format. Perform a blind byteswap to compensate.
3091 */
3092 *val = swab32(tmp);
3093
3094 return 0;
3095 }
3096
3097 #define NVRAM_CMD_TIMEOUT 10000
3098
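/* Kick off the NVRAM command and poll the DONE bit for up to
 * NVRAM_CMD_TIMEOUT * 10us = 100ms.
 */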
3099 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3100 {
3101 int i;
3102
3103 tw32(NVRAM_CMD, nvram_cmd);
3104 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3105 udelay(10);
3106 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3107 udelay(10);
3108 break;
3109 }
3110 }
3111
3112 if (i == NVRAM_CMD_TIMEOUT)
3113 return -EBUSY;
3114
3115 return 0;
3116 }
3117
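/* Atmel AT45DB0x1B flashes are page-addressed: the controller wants
 * (page << ATMEL_AT45DB0X1B_PAGE_POS) + byte-within-page instead of
 * a flat byte offset.  For illustration, with the part's 264-byte
 * pages a linear offset of 600 becomes page 2, byte 72.
 */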
3118 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3119 {
3120 if (tg3_flag(tp, NVRAM) &&
3121 tg3_flag(tp, NVRAM_BUFFERED) &&
3122 tg3_flag(tp, FLASH) &&
3123 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3124 (tp->nvram_jedecnum == JEDEC_ATMEL))
3125
3126 addr = ((addr / tp->nvram_pagesize) <<
3127 ATMEL_AT45DB0X1B_PAGE_POS) +
3128 (addr % tp->nvram_pagesize);
3129
3130 return addr;
3131 }
3132
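/* Inverse of tg3_nvram_phys_addr(): fold a page/offset pair back
 * into a flat byte address.
 */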
3133 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3134 {
3135 if (tg3_flag(tp, NVRAM) &&
3136 tg3_flag(tp, NVRAM_BUFFERED) &&
3137 tg3_flag(tp, FLASH) &&
3138 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3139 (tp->nvram_jedecnum == JEDEC_ATMEL))
3140
3141 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3142 tp->nvram_pagesize) +
3143 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3144
3145 return addr;
3146 }
3147
3148 /* NOTE: Data read in from NVRAM is byteswapped according to
3149 * the byteswapping settings for all other register accesses.
3150 * tg3 devices are BE devices, so on a BE machine, the data
3151 * returned will be exactly as it is seen in NVRAM. On a LE
3152 * machine, the 32-bit value will be byteswapped.
3153 */
3154 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3155 {
3156 int ret;
3157
3158 if (!tg3_flag(tp, NVRAM))
3159 return tg3_nvram_read_using_eeprom(tp, offset, val);
3160
3161 offset = tg3_nvram_phys_addr(tp, offset);
3162
3163 if (offset > NVRAM_ADDR_MSK)
3164 return -EINVAL;
3165
3166 ret = tg3_nvram_lock(tp);
3167 if (ret)
3168 return ret;
3169
3170 tg3_enable_nvram_access(tp);
3171
3172 tw32(NVRAM_ADDR, offset);
3173 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3174 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3175
3176 if (ret == 0)
3177 *val = tr32(NVRAM_RDDATA);
3178
3179 tg3_disable_nvram_access(tp);
3180
3181 tg3_nvram_unlock(tp);
3182
3183 return ret;
3184 }
3185
3186 /* Ensures NVRAM data is in bytestream format. */
3187 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3188 {
3189 u32 v;
3190 int res = tg3_nvram_read(tp, offset, &v);
3191 if (!res)
3192 *val = cpu_to_be32(v);
3193 return res;
3194 }
3195
3196 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3197 u32 offset, u32 len, u8 *buf)
3198 {
3199 int i, j, rc = 0;
3200 u32 val;
3201
3202 for (i = 0; i < len; i += 4) {
3203 u32 addr;
3204 __be32 data;
3205
3206 addr = offset + i;
3207
3208 memcpy(&data, buf + i, 4);
3209
3210 /*
3211 * The SEEPROM interface expects the data to always be opposite
3212 * the native endian format. We accomplish this by reversing
3213 * all the operations that would have been performed on the
3214 * data from a call to tg3_nvram_read_be32().
3215 */
3216 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3217
3218 val = tr32(GRC_EEPROM_ADDR);
3219 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3220
3221 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3222 EEPROM_ADDR_READ);
3223 tw32(GRC_EEPROM_ADDR, val |
3224 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3225 (addr & EEPROM_ADDR_ADDR_MASK) |
3226 EEPROM_ADDR_START |
3227 EEPROM_ADDR_WRITE);
3228
3229 for (j = 0; j < 1000; j++) {
3230 val = tr32(GRC_EEPROM_ADDR);
3231
3232 if (val & EEPROM_ADDR_COMPLETE)
3233 break;
3234 msleep(1);
3235 }
3236 if (!(val & EEPROM_ADDR_COMPLETE)) {
3237 rc = -EBUSY;
3238 break;
3239 }
3240 }
3241
3242 return rc;
3243 }
3244
3245 /* offset and length are dword aligned */
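/* Unbuffered flash parts must be erased before they are rewritten,
 * and only at page granularity: read the whole page into a bounce
 * buffer, merge in the caller's data, erase the page, then program
 * it back one word at a time.
 */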
3246 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3247 u8 *buf)
3248 {
3249 int ret = 0;
3250 u32 pagesize = tp->nvram_pagesize;
3251 u32 pagemask = pagesize - 1;
3252 u32 nvram_cmd;
3253 u8 *tmp;
3254
3255 tmp = kmalloc(pagesize, GFP_KERNEL);
3256 if (tmp == NULL)
3257 return -ENOMEM;
3258
3259 while (len) {
3260 int j;
3261 u32 phy_addr, page_off, size;
3262
3263 phy_addr = offset & ~pagemask;
3264
3265 for (j = 0; j < pagesize; j += 4) {
3266 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3267 (__be32 *) (tmp + j));
3268 if (ret)
3269 break;
3270 }
3271 if (ret)
3272 break;
3273
3274 page_off = offset & pagemask;
3275 size = pagesize;
3276 if (len < size)
3277 size = len;
3278
3279 len -= size;
3280
3281 memcpy(tmp + page_off, buf, size);
3282
3283 offset = offset + (pagesize - page_off);
3284
3285 tg3_enable_nvram_access(tp);
3286
3287 /*
3288 * Before we can erase the flash page, we need
3289 * to issue a special "write enable" command.
3290 */
3291 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3292
3293 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3294 break;
3295
3296 /* Erase the target page */
3297 tw32(NVRAM_ADDR, phy_addr);
3298
3299 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3301
3302 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3303 break;
3304
3305 /* Issue another write enable to start the write. */
3306 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3307
3308 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3309 break;
3310
3311 for (j = 0; j < pagesize; j += 4) {
3312 __be32 data;
3313
3314 data = *((__be32 *) (tmp + j));
3315
3316 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3317
3318 tw32(NVRAM_ADDR, phy_addr + j);
3319
3320 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3321 NVRAM_CMD_WR;
3322
3323 if (j == 0)
3324 nvram_cmd |= NVRAM_CMD_FIRST;
3325 else if (j == (pagesize - 4))
3326 nvram_cmd |= NVRAM_CMD_LAST;
3327
3328 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3329 if (ret)
3330 break;
3331 }
3332 if (ret)
3333 break;
3334 }
3335
3336 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3337 tg3_nvram_exec_cmd(tp, nvram_cmd);
3338
3339 kfree(tmp);
3340
3341 return ret;
3342 }
3343
3344 /* offset and length are dword aligned */
3345 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3346 u8 *buf)
3347 {
3348 int i, ret = 0;
3349
3350 for (i = 0; i < len; i += 4, offset += 4) {
3351 u32 page_off, phy_addr, nvram_cmd;
3352 __be32 data;
3353
3354 memcpy(&data, buf + i, 4);
3355 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3356
3357 page_off = offset % tp->nvram_pagesize;
3358
3359 phy_addr = tg3_nvram_phys_addr(tp, offset);
3360
3361 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3362
3363 if (page_off == 0 || i == 0)
3364 nvram_cmd |= NVRAM_CMD_FIRST;
3365 if (page_off == (tp->nvram_pagesize - 4))
3366 nvram_cmd |= NVRAM_CMD_LAST;
3367
3368 if (i == (len - 4))
3369 nvram_cmd |= NVRAM_CMD_LAST;
3370
3371 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3372 !tg3_flag(tp, FLASH) ||
3373 !tg3_flag(tp, 57765_PLUS))
3374 tw32(NVRAM_ADDR, phy_addr);
3375
3376 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3377 !tg3_flag(tp, 5755_PLUS) &&
3378 (tp->nvram_jedecnum == JEDEC_ST) &&
3379 (nvram_cmd & NVRAM_CMD_FIRST)) {
3380 u32 cmd;
3381
3382 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3383 ret = tg3_nvram_exec_cmd(tp, cmd);
3384 if (ret)
3385 break;
3386 }
3387 if (!tg3_flag(tp, FLASH)) {
3388 /* We always do complete word writes to eeprom. */
3389 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3390 }
3391
3392 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3393 if (ret)
3394 break;
3395 }
3396 return ret;
3397 }
3398
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3401 {
3402 int ret;
3403
3404 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3405 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3406 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3407 udelay(40);
3408 }
3409
3410 if (!tg3_flag(tp, NVRAM)) {
3411 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3412 } else {
3413 u32 grc_mode;
3414
3415 ret = tg3_nvram_lock(tp);
3416 if (ret)
3417 return ret;
3418
3419 tg3_enable_nvram_access(tp);
3420 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3421 tw32(NVRAM_WRITE1, 0x406);
3422
3423 grc_mode = tr32(GRC_MODE);
3424 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3425
3426 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3427 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3428 buf);
3429 } else {
3430 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3431 buf);
3432 }
3433
3434 grc_mode = tr32(GRC_MODE);
3435 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3436
3437 tg3_disable_nvram_access(tp);
3438 tg3_nvram_unlock(tp);
3439 }
3440
3441 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3442 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3443 udelay(40);
3444 }
3445
3446 return ret;
3447 }
3448
3449 #define RX_CPU_SCRATCH_BASE 0x30000
3450 #define RX_CPU_SCRATCH_SIZE 0x04000
3451 #define TX_CPU_SCRATCH_BASE 0x34000
3452 #define TX_CPU_SCRATCH_SIZE 0x04000
3453
3454 /* tp->lock is held. */
3455 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3456 {
3457 int i;
3458
3459 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3460
3461 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3462 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3463
3464 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3465 return 0;
3466 }
3467 if (offset == RX_CPU_BASE) {
3468 for (i = 0; i < 10000; i++) {
3469 tw32(offset + CPU_STATE, 0xffffffff);
3470 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3471 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3472 break;
3473 }
3474
3475 tw32(offset + CPU_STATE, 0xffffffff);
3476 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3477 udelay(10);
3478 } else {
3479 /*
3480 * There is only an Rx CPU for the 5750 derivative in the
3481 * BCM4785.
3482 */
3483 if (tg3_flag(tp, IS_SSB_CORE))
3484 return 0;
3485
3486 for (i = 0; i < 10000; i++) {
3487 tw32(offset + CPU_STATE, 0xffffffff);
3488 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3489 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3490 break;
3491 }
3492 }
3493
3494 if (i >= 10000) {
3495 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3496 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3497 return -ENODEV;
3498 }
3499
3500 /* Clear firmware's nvram arbitration. */
3501 if (tg3_flag(tp, NVRAM))
3502 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3503 return 0;
3504 }
3505
3506 struct fw_info {
3507 unsigned int fw_base;
3508 unsigned int fw_len;
3509 const __be32 *fw_data;
3510 };
3511
3512 /* tp->lock is held. */
3513 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3514 u32 cpu_scratch_base, int cpu_scratch_size,
3515 struct fw_info *info)
3516 {
3517 int err, lock_err, i;
3518 void (*write_op)(struct tg3 *, u32, u32);
3519
3520 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3521 netdev_err(tp->dev,
3522 "%s: Trying to load TX cpu firmware which is 5705\n",
3523 __func__);
3524 return -EINVAL;
3525 }
3526
3527 if (tg3_flag(tp, 5705_PLUS))
3528 write_op = tg3_write_mem;
3529 else
3530 write_op = tg3_write_indirect_reg32;
3531
3532 /* It is possible that bootcode is still loading at this point.
3533 * Get the nvram lock before halting the cpu.
3534 */
3535 lock_err = tg3_nvram_lock(tp);
3536 err = tg3_halt_cpu(tp, cpu_base);
3537 if (!lock_err)
3538 tg3_nvram_unlock(tp);
3539 if (err)
3540 goto out;
3541
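/* Zero the scratch area, make sure the CPU stays halted, then copy
 * the image in.  The low 16 bits of the load address give the offset
 * into scratch memory.
 */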
3542 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3543 write_op(tp, cpu_scratch_base + i, 0);
3544 tw32(cpu_base + CPU_STATE, 0xffffffff);
3545 tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3546 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3547 write_op(tp, (cpu_scratch_base +
3548 (info->fw_base & 0xffff) +
3549 (i * sizeof(u32))),
3550 be32_to_cpu(info->fw_data[i]));
3551
3552 err = 0;
3553
3554 out:
3555 return err;
3556 }
3557
3558 /* tp->lock is held. */
3559 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3560 {
3561 struct fw_info info;
3562 const __be32 *fw_data;
3563 int err, i;
3564
3565 fw_data = (void *)tp->fw->data;
3566
3567 /* Firmware blob starts with version numbers, followed by
3568 * start address and length. We are setting complete length.
3569 * length = end_address_of_bss - start_address_of_text.
3570 * Remainder is the blob to be loaded contiguously
3571 * from start address. */
3572
3573 info.fw_base = be32_to_cpu(fw_data[1]);
3574 info.fw_len = tp->fw->size - 12;
3575 info.fw_data = &fw_data[3];
3576
3577 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3578 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3579 &info);
3580 if (err)
3581 return err;
3582
3583 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3584 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3585 &info);
3586 if (err)
3587 return err;
3588
3589 /* Now startup only the RX cpu. */
3590 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3591 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3592
3593 for (i = 0; i < 5; i++) {
3594 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3595 break;
3596 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3597 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3598 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3599 udelay(1000);
3600 }
3601 if (i >= 5) {
3602 netdev_err(tp->dev, "%s failed to set RX CPU PC: got %08x, "
3603 "expected %08x\n", __func__,
3604 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3605 return -ENODEV;
3606 }
3607 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3608 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3609
3610 return 0;
3611 }
3612
3613 /* tp->lock is held. */
3614 static int tg3_load_tso_firmware(struct tg3 *tp)
3615 {
3616 struct fw_info info;
3617 const __be32 *fw_data;
3618 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3619 int err, i;
3620
3621 if (!tg3_flag(tp, FW_TSO))
3622 return 0;
3623
3624 fw_data = (void *)tp->fw->data;
3625
3626 /* Firmware blob starts with version numbers, followed by
3627 * start address and length. We are setting complete length.
3628 * length = end_address_of_bss - start_address_of_text.
3629 * Remainder is the blob to be loaded contiguously
3630 * from start address. */
3631
3632 info.fw_base = be32_to_cpu(fw_data[1]);
3633 cpu_scratch_size = tp->fw_len;
3634 info.fw_len = tp->fw->size - 12;
3635 info.fw_data = &fw_data[3];
3636
3637 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3638 cpu_base = RX_CPU_BASE;
3639 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3640 } else {
3641 cpu_base = TX_CPU_BASE;
3642 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3643 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3644 }
3645
3646 err = tg3_load_firmware_cpu(tp, cpu_base,
3647 cpu_scratch_base, cpu_scratch_size,
3648 &info);
3649 if (err)
3650 return err;
3651
3652 /* Now startup the cpu. */
3653 tw32(cpu_base + CPU_STATE, 0xffffffff);
3654 tw32_f(cpu_base + CPU_PC, info.fw_base);
3655
3656 for (i = 0; i < 5; i++) {
3657 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3658 break;
3659 tw32(cpu_base + CPU_STATE, 0xffffffff);
3660 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3661 tw32_f(cpu_base + CPU_PC, info.fw_base);
3662 udelay(1000);
3663 }
3664 if (i >= 5) {
3665 netdev_err(tp->dev,
3666 "%s fails to set CPU PC, is %08x should be %08x\n",
3667 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3668 return -ENODEV;
3669 }
3670 tw32(cpu_base + CPU_STATE, 0xffffffff);
3671 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3672 return 0;
3673 }
3675
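/* Program the station address into all four MAC address slots (slot
 * 1 is skipped when, e.g., ASF firmware is using it) and seed the TX
 * backoff generator from the address bytes so that different NICs
 * pick different backoff slots.
 */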
3676 /* tp->lock is held. */
3677 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3678 {
3679 u32 addr_high, addr_low;
3680 int i;
3681
3682 addr_high = ((tp->dev->dev_addr[0] << 8) |
3683 tp->dev->dev_addr[1]);
3684 addr_low = ((tp->dev->dev_addr[2] << 24) |
3685 (tp->dev->dev_addr[3] << 16) |
3686 (tp->dev->dev_addr[4] << 8) |
3687 (tp->dev->dev_addr[5] << 0));
3688 for (i = 0; i < 4; i++) {
3689 if (i == 1 && skip_mac_1)
3690 continue;
3691 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3692 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3693 }
3694
3695 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3696 tg3_asic_rev(tp) == ASIC_REV_5704) {
3697 for (i = 0; i < 12; i++) {
3698 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3699 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3700 }
3701 }
3702
3703 addr_high = (tp->dev->dev_addr[0] +
3704 tp->dev->dev_addr[1] +
3705 tp->dev->dev_addr[2] +
3706 tp->dev->dev_addr[3] +
3707 tp->dev->dev_addr[4] +
3708 tp->dev->dev_addr[5]) &
3709 TX_BACKOFF_SEED_MASK;
3710 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3711 }
3712
3713 static void tg3_enable_register_access(struct tg3 *tp)
3714 {
3715 /*
3716 * Make sure register accesses (indirect or otherwise) will function
3717 * correctly.
3718 */
3719 pci_write_config_dword(tp->pdev,
3720 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3721 }
3722
3723 static int tg3_power_up(struct tg3 *tp)
3724 {
3725 int err;
3726
3727 tg3_enable_register_access(tp);
3728
3729 err = pci_set_power_state(tp->pdev, PCI_D0);
3730 if (!err) {
3731 /* Switch out of Vaux if it is a NIC */
3732 tg3_pwrsrc_switch_to_vmain(tp);
3733 } else {
3734 netdev_err(tp->dev, "Transition to D0 failed\n");
3735 }
3736
3737 return err;
3738 }
3739
3740 static int tg3_setup_phy(struct tg3 *, int);
3741
3742 static int tg3_power_down_prepare(struct tg3 *tp)
3743 {
3744 u32 misc_host_ctrl;
3745 bool device_should_wake, do_low_power;
3746
3747 tg3_enable_register_access(tp);
3748
3749 /* Restore the CLKREQ setting. */
3750 if (tg3_flag(tp, CLKREQ_BUG))
3751 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3752 PCI_EXP_LNKCTL_CLKREQ_EN);
3753
3754 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3755 tw32(TG3PCI_MISC_HOST_CTRL,
3756 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3757
3758 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3759 tg3_flag(tp, WOL_ENABLE);
3760
3761 if (tg3_flag(tp, USE_PHYLIB)) {
3762 do_low_power = false;
3763 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3764 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3765 struct phy_device *phydev;
3766 u32 phyid, advertising;
3767
3768 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3769
3770 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3771
3772 tp->link_config.speed = phydev->speed;
3773 tp->link_config.duplex = phydev->duplex;
3774 tp->link_config.autoneg = phydev->autoneg;
3775 tp->link_config.advertising = phydev->advertising;
3776
3777 advertising = ADVERTISED_TP |
3778 ADVERTISED_Pause |
3779 ADVERTISED_Autoneg |
3780 ADVERTISED_10baseT_Half;
3781
3782 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3783 if (tg3_flag(tp, WOL_SPEED_100MB))
3784 advertising |=
3785 ADVERTISED_100baseT_Half |
3786 ADVERTISED_100baseT_Full |
3787 ADVERTISED_10baseT_Full;
3788 else
3789 advertising |= ADVERTISED_10baseT_Full;
3790 }
3791
3792 phydev->advertising = advertising;
3793
3794 phy_start_aneg(phydev);
3795
3796 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3797 if (phyid != PHY_ID_BCMAC131) {
3798 phyid &= PHY_BCM_OUI_MASK;
3799 if (phyid == PHY_BCM_OUI_1 ||
3800 phyid == PHY_BCM_OUI_2 ||
3801 phyid == PHY_BCM_OUI_3)
3802 do_low_power = true;
3803 }
3804 }
3805 } else {
3806 do_low_power = true;
3807
3808 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3809 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3810
3811 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3812 tg3_setup_phy(tp, 0);
3813 }
3814
3815 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3816 u32 val;
3817
3818 val = tr32(GRC_VCPU_EXT_CTRL);
3819 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3820 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3821 int i;
3822 u32 val;
3823
3824 for (i = 0; i < 200; i++) {
3825 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3826 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3827 break;
3828 msleep(1);
3829 }
3830 }
3831 if (tg3_flag(tp, WOL_CAP))
3832 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3833 WOL_DRV_STATE_SHUTDOWN |
3834 WOL_DRV_WOL |
3835 WOL_SET_MAGIC_PKT);
3836
3837 if (device_should_wake) {
3838 u32 mac_mode;
3839
3840 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3841 if (do_low_power &&
3842 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3843 tg3_phy_auxctl_write(tp,
3844 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3845 MII_TG3_AUXCTL_PCTL_WOL_EN |
3846 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3847 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3848 udelay(40);
3849 }
3850
3851 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3852 mac_mode = MAC_MODE_PORT_MODE_GMII;
3853 else
3854 mac_mode = MAC_MODE_PORT_MODE_MII;
3855
3856 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3857 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3858 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3859 SPEED_100 : SPEED_10;
3860 if (tg3_5700_link_polarity(tp, speed))
3861 mac_mode |= MAC_MODE_LINK_POLARITY;
3862 else
3863 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3864 }
3865 } else {
3866 mac_mode = MAC_MODE_PORT_MODE_TBI;
3867 }
3868
3869 if (!tg3_flag(tp, 5750_PLUS))
3870 tw32(MAC_LED_CTRL, tp->led_ctrl);
3871
3872 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3873 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3874 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3875 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3876
3877 if (tg3_flag(tp, ENABLE_APE))
3878 mac_mode |= MAC_MODE_APE_TX_EN |
3879 MAC_MODE_APE_RX_EN |
3880 MAC_MODE_TDE_ENABLE;
3881
3882 tw32_f(MAC_MODE, mac_mode);
3883 udelay(100);
3884
3885 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3886 udelay(10);
3887 }
3888
3889 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3890 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3891 tg3_asic_rev(tp) == ASIC_REV_5701)) {
3892 u32 base_val;
3893
3894 base_val = tp->pci_clock_ctrl;
3895 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3896 CLOCK_CTRL_TXCLK_DISABLE);
3897
3898 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3899 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3900 } else if (tg3_flag(tp, 5780_CLASS) ||
3901 tg3_flag(tp, CPMU_PRESENT) ||
3902 tg3_asic_rev(tp) == ASIC_REV_5906) {
3903 /* do nothing */
3904 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3905 u32 newbits1, newbits2;
3906
3907 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3908 tg3_asic_rev(tp) == ASIC_REV_5701) {
3909 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3910 CLOCK_CTRL_TXCLK_DISABLE |
3911 CLOCK_CTRL_ALTCLK);
3912 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3913 } else if (tg3_flag(tp, 5705_PLUS)) {
3914 newbits1 = CLOCK_CTRL_625_CORE;
3915 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3916 } else {
3917 newbits1 = CLOCK_CTRL_ALTCLK;
3918 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3919 }
3920
3921 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3922 40);
3923
3924 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3925 40);
3926
3927 if (!tg3_flag(tp, 5705_PLUS)) {
3928 u32 newbits3;
3929
3930 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3931 tg3_asic_rev(tp) == ASIC_REV_5701) {
3932 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3933 CLOCK_CTRL_TXCLK_DISABLE |
3934 CLOCK_CTRL_44MHZ_CORE);
3935 } else {
3936 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3937 }
3938
3939 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3940 tp->pci_clock_ctrl | newbits3, 40);
3941 }
3942 }
3943
3944 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3945 tg3_power_down_phy(tp, do_low_power);
3946
3947 tg3_frob_aux_power(tp, true);
3948
3949 /* Workaround for unstable PLL clock */
3950 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
3951 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
3952 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
3953 u32 val = tr32(0x7d00);
3954
3955 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3956 tw32(0x7d00, val);
3957 if (!tg3_flag(tp, ENABLE_ASF)) {
3958 int err;
3959
3960 err = tg3_nvram_lock(tp);
3961 tg3_halt_cpu(tp, RX_CPU_BASE);
3962 if (!err)
3963 tg3_nvram_unlock(tp);
3964 }
3965 }
3966
3967 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3968
3969 return 0;
3970 }
3971
3972 static void tg3_power_down(struct tg3 *tp)
3973 {
3974 tg3_power_down_prepare(tp);
3975
3976 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3977 pci_set_power_state(tp->pdev, PCI_D3hot);
3978 }
3979
3980 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3981 {
3982 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3983 case MII_TG3_AUX_STAT_10HALF:
3984 *speed = SPEED_10;
3985 *duplex = DUPLEX_HALF;
3986 break;
3987
3988 case MII_TG3_AUX_STAT_10FULL:
3989 *speed = SPEED_10;
3990 *duplex = DUPLEX_FULL;
3991 break;
3992
3993 case MII_TG3_AUX_STAT_100HALF:
3994 *speed = SPEED_100;
3995 *duplex = DUPLEX_HALF;
3996 break;
3997
3998 case MII_TG3_AUX_STAT_100FULL:
3999 *speed = SPEED_100;
4000 *duplex = DUPLEX_FULL;
4001 break;
4002
4003 case MII_TG3_AUX_STAT_1000HALF:
4004 *speed = SPEED_1000;
4005 *duplex = DUPLEX_HALF;
4006 break;
4007
4008 case MII_TG3_AUX_STAT_1000FULL:
4009 *speed = SPEED_1000;
4010 *duplex = DUPLEX_FULL;
4011 break;
4012
4013 default:
4014 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4015 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4016 SPEED_10;
4017 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4018 DUPLEX_HALF;
4019 break;
4020 }
4021 *speed = SPEED_UNKNOWN;
4022 *duplex = DUPLEX_UNKNOWN;
4023 break;
4024 }
4025 }
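/* Usage sketch (illustrative, not part of the driver): decoding a raw
 * aux-stat value read from the PHY. The "example_" name and locals are
 * hypothetical; the real caller is tg3_setup_copper_phy() below.
 */
static inline void example_decode_aux_stat(struct tg3 *tp, u32 aux_stat)
{
	u16 speed;
	u8 duplex;

	tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);
	/* e.g. MII_TG3_AUX_STAT_1000FULL yields SPEED_1000 / DUPLEX_FULL */
}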
4026
4027 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4028 {
4029 int err = 0;
4030 u32 val, new_adv;
4031
4032 new_adv = ADVERTISE_CSMA;
4033 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4034 new_adv |= mii_advertise_flowctrl(flowctrl);
4035
4036 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4037 if (err)
4038 goto done;
4039
4040 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4041 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4042
4043 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4044 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4045 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4046
4047 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4048 if (err)
4049 goto done;
4050 }
4051
4052 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4053 goto done;
4054
4055 tw32(TG3_CPMU_EEE_MODE,
4056 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4057
4058 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4059 if (!err) {
4060 int err2;
4061
4062 val = 0;
4063 /* Advertise 100-BaseTX EEE ability */
4064 if (advertise & ADVERTISED_100baseT_Full)
4065 val |= MDIO_AN_EEE_ADV_100TX;
4066 /* Advertise 1000-BaseT EEE ability */
4067 if (advertise & ADVERTISED_1000baseT_Full)
4068 val |= MDIO_AN_EEE_ADV_1000T;
4069 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4070 if (err)
4071 val = 0;
4072
4073 switch (tg3_asic_rev(tp)) {
4074 case ASIC_REV_5717:
4075 case ASIC_REV_57765:
4076 case ASIC_REV_57766:
4077 case ASIC_REV_5719:
4078 /* If we advertised any EEE abilities above... */
4079 if (val)
4080 val = MII_TG3_DSP_TAP26_ALNOKO |
4081 MII_TG3_DSP_TAP26_RMRXSTO |
4082 MII_TG3_DSP_TAP26_OPCSINPT;
4083 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4084 /* Fall through */
4085 case ASIC_REV_5720:
4086 case ASIC_REV_5762:
4087 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4088 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4089 MII_TG3_DSP_CH34TP2_HIBW01);
4090 }
4091
4092 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4093 if (!err)
4094 err = err2;
4095 }
4096
4097 done:
4098 return err;
4099 }
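/* Worked example (illustrative, not part of the driver): a typical call
 * into tg3_phy_autoneg_cfg(). The inputs are sample values, not driver
 * defaults, and the helper name is hypothetical.
 */
static inline int example_autoneg_cfg(struct tg3 *tp)
{
	u32 adv = ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full;
	u32 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Writes ADVERTISE_CSMA | ADVERTISE_100FULL plus the pause bits
	 * from mii_advertise_flowctrl() to MII_ADVERTISE, and
	 * ADVERTISE_1000FULL to MII_CTRL1000 (master bits added on
	 * 5701 A0/B0).
	 */
	return tg3_phy_autoneg_cfg(tp, adv, fc);
}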
4100
4101 static void tg3_phy_copper_begin(struct tg3 *tp)
4102 {
4103 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4104 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4105 u32 adv, fc;
4106
4107 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4108 adv = ADVERTISED_10baseT_Half |
4109 ADVERTISED_10baseT_Full;
4110 if (tg3_flag(tp, WOL_SPEED_100MB))
4111 adv |= ADVERTISED_100baseT_Half |
4112 ADVERTISED_100baseT_Full;
4113
4114 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4115 } else {
4116 adv = tp->link_config.advertising;
4117 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4118 adv &= ~(ADVERTISED_1000baseT_Half |
4119 ADVERTISED_1000baseT_Full);
4120
4121 fc = tp->link_config.flowctrl;
4122 }
4123
4124 tg3_phy_autoneg_cfg(tp, adv, fc);
4125
4126 tg3_writephy(tp, MII_BMCR,
4127 BMCR_ANENABLE | BMCR_ANRESTART);
4128 } else {
4129 int i;
4130 u32 bmcr, orig_bmcr;
4131
4132 tp->link_config.active_speed = tp->link_config.speed;
4133 tp->link_config.active_duplex = tp->link_config.duplex;
4134
4135 bmcr = 0;
4136 switch (tp->link_config.speed) {
4137 default:
4138 case SPEED_10:
4139 break;
4140
4141 case SPEED_100:
4142 bmcr |= BMCR_SPEED100;
4143 break;
4144
4145 case SPEED_1000:
4146 bmcr |= BMCR_SPEED1000;
4147 break;
4148 }
4149
4150 if (tp->link_config.duplex == DUPLEX_FULL)
4151 bmcr |= BMCR_FULLDPLX;
4152
4153 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4154 (bmcr != orig_bmcr)) {
4155 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4156 for (i = 0; i < 1500; i++) {
4157 u32 tmp;
4158
4159 udelay(10);
4160 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4161 tg3_readphy(tp, MII_BMSR, &tmp))
4162 continue;
4163 if (!(tmp & BMSR_LSTATUS)) {
4164 udelay(40);
4165 break;
4166 }
4167 }
4168 tg3_writephy(tp, MII_BMCR, bmcr);
4169 udelay(40);
4170 }
4171 }
4172 }
4173
4174 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4175 {
4176 int err;
4177
4178 /* Turn off tap power management and set the
4179  * extended packet length bit. */
4180 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4181
4182 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4183 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4184 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4185 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4186 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4187
4188 udelay(40);
4189
4190 return err;
4191 }
4192
4193 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4194 {
4195 u32 advmsk, tgtadv, advertising;
4196
4197 advertising = tp->link_config.advertising;
4198 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4199
4200 advmsk = ADVERTISE_ALL;
4201 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4202 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4203 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4204 }
4205
4206 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4207 return false;
4208
4209 if ((*lcladv & advmsk) != tgtadv)
4210 return false;
4211
4212 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4213 u32 tg3_ctrl;
4214
4215 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4216
4217 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4218 return false;
4219
4220 if (tgtadv &&
4221 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4222 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4223 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4224 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4225 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4226 } else {
4227 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4228 }
4229
4230 if (tg3_ctrl != tgtadv)
4231 return false;
4232 }
4233
4234 return true;
4235 }
4236
4237 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4238 {
4239 u32 lpeth = 0;
4240
4241 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4242 u32 val;
4243
4244 if (tg3_readphy(tp, MII_STAT1000, &val))
4245 return false;
4246
4247 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4248 }
4249
4250 if (tg3_readphy(tp, MII_LPA, rmtadv))
4251 return false;
4252
4253 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4254 tp->link_config.rmt_adv = lpeth;
4255
4256 return true;
4257 }
4258
4259 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4260 {
4261 if (curr_link_up != tp->link_up) {
4262 if (curr_link_up) {
4263 tg3_carrier_on(tp);
4264 } else {
4265 tg3_carrier_off(tp);
4266 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4267 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4268 }
4269
4270 tg3_link_report(tp);
4271 return true;
4272 }
4273
4274 return false;
4275 }
4276
4277 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4278 {
4279 int current_link_up;
4280 u32 bmsr, val;
4281 u32 lcl_adv, rmt_adv;
4282 u16 current_speed;
4283 u8 current_duplex;
4284 int i, err;
4285
4286 tw32(MAC_EVENT, 0);
4287
4288 tw32_f(MAC_STATUS,
4289 (MAC_STATUS_SYNC_CHANGED |
4290 MAC_STATUS_CFG_CHANGED |
4291 MAC_STATUS_MI_COMPLETION |
4292 MAC_STATUS_LNKSTATE_CHANGED));
4293 udelay(40);
4294
4295 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4296 tw32_f(MAC_MI_MODE,
4297 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4298 udelay(80);
4299 }
4300
4301 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4302
4303 /* Some third-party PHYs need to be reset on link going
4304 * down.
4305 */
4306 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4307 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4308 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4309 tp->link_up) {
4310 tg3_readphy(tp, MII_BMSR, &bmsr);
4311 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4312 !(bmsr & BMSR_LSTATUS))
4313 force_reset = 1;
4314 }
4315 if (force_reset)
4316 tg3_phy_reset(tp);
4317
4318 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4319 tg3_readphy(tp, MII_BMSR, &bmsr);
4320 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4321 !tg3_flag(tp, INIT_COMPLETE))
4322 bmsr = 0;
4323
4324 if (!(bmsr & BMSR_LSTATUS)) {
4325 err = tg3_init_5401phy_dsp(tp);
4326 if (err)
4327 return err;
4328
4329 tg3_readphy(tp, MII_BMSR, &bmsr);
4330 for (i = 0; i < 1000; i++) {
4331 udelay(10);
4332 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4333 (bmsr & BMSR_LSTATUS)) {
4334 udelay(40);
4335 break;
4336 }
4337 }
4338
4339 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4340 TG3_PHY_REV_BCM5401_B0 &&
4341 !(bmsr & BMSR_LSTATUS) &&
4342 tp->link_config.active_speed == SPEED_1000) {
4343 err = tg3_phy_reset(tp);
4344 if (!err)
4345 err = tg3_init_5401phy_dsp(tp);
4346 if (err)
4347 return err;
4348 }
4349 }
4350 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4351 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4352 /* 5701 {A0,B0} CRC bug workaround */
4353 tg3_writephy(tp, 0x15, 0x0a75);
4354 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4355 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4356 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4357 }
4358
4359 /* Clear pending interrupts... */
4360 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4361 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4362
4363 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4364 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4365 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4366 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4367
4368 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4369 tg3_asic_rev(tp) == ASIC_REV_5701) {
4370 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4371 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4372 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4373 else
4374 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4375 }
4376
4377 current_link_up = 0;
4378 current_speed = SPEED_UNKNOWN;
4379 current_duplex = DUPLEX_UNKNOWN;
4380 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4381 tp->link_config.rmt_adv = 0;
4382
4383 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4384 err = tg3_phy_auxctl_read(tp,
4385 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4386 &val);
4387 if (!err && !(val & (1 << 10))) {
4388 tg3_phy_auxctl_write(tp,
4389 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4390 val | (1 << 10));
4391 goto relink;
4392 }
4393 }
4394
4395 bmsr = 0;
4396 for (i = 0; i < 100; i++) {
4397 tg3_readphy(tp, MII_BMSR, &bmsr);
4398 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4399 (bmsr & BMSR_LSTATUS))
4400 break;
4401 udelay(40);
4402 }
4403
4404 if (bmsr & BMSR_LSTATUS) {
4405 u32 aux_stat, bmcr;
4406
4407 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4408 for (i = 0; i < 2000; i++) {
4409 udelay(10);
4410 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4411 aux_stat)
4412 break;
4413 }
4414
4415 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4416 &current_speed,
4417 &current_duplex);
4418
4419 bmcr = 0;
4420 for (i = 0; i < 200; i++) {
4421 tg3_readphy(tp, MII_BMCR, &bmcr);
4422 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4423 continue;
4424 if (bmcr && bmcr != 0x7fff)
4425 break;
4426 udelay(10);
4427 }
4428
4429 lcl_adv = 0;
4430 rmt_adv = 0;
4431
4432 tp->link_config.active_speed = current_speed;
4433 tp->link_config.active_duplex = current_duplex;
4434
4435 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4436 if ((bmcr & BMCR_ANENABLE) &&
4437 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4438 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4439 current_link_up = 1;
4440 } else {
4441 if (!(bmcr & BMCR_ANENABLE) &&
4442 tp->link_config.speed == current_speed &&
4443 tp->link_config.duplex == current_duplex &&
4444 tp->link_config.flowctrl ==
4445 tp->link_config.active_flowctrl) {
4446 current_link_up = 1;
4447 }
4448 }
4449
4450 if (current_link_up == 1 &&
4451 tp->link_config.active_duplex == DUPLEX_FULL) {
4452 u32 reg, bit;
4453
4454 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4455 reg = MII_TG3_FET_GEN_STAT;
4456 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4457 } else {
4458 reg = MII_TG3_EXT_STAT;
4459 bit = MII_TG3_EXT_STAT_MDIX;
4460 }
4461
4462 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4463 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4464
4465 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4466 }
4467 }
4468
4469 relink:
4470 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4471 tg3_phy_copper_begin(tp);
4472
4473 if (tg3_flag(tp, ROBOSWITCH)) {
4474 current_link_up = 1;
4475 /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4476 current_speed = SPEED_1000;
4477 current_duplex = DUPLEX_FULL;
4478 tp->link_config.active_speed = current_speed;
4479 tp->link_config.active_duplex = current_duplex;
4480 }
4481
4482 tg3_readphy(tp, MII_BMSR, &bmsr);
4483 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4484 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4485 current_link_up = 1;
4486 }
4487
4488 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4489 if (current_link_up == 1) {
4490 if (tp->link_config.active_speed == SPEED_100 ||
4491 tp->link_config.active_speed == SPEED_10)
4492 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4493 else
4494 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4495 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4496 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4497 else
4498 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4499
4500 /* For the 5750 core in the BCM4785 chip to work properly
4501  * in RGMII mode, the LED Control Register must be set up.
4502  */
4503 if (tg3_flag(tp, RGMII_MODE)) {
4504 u32 led_ctrl = tr32(MAC_LED_CTRL);
4505 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4506
4507 if (tp->link_config.active_speed == SPEED_10)
4508 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4509 else if (tp->link_config.active_speed == SPEED_100)
4510 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4511 LED_CTRL_100MBPS_ON);
4512 else if (tp->link_config.active_speed == SPEED_1000)
4513 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4514 LED_CTRL_1000MBPS_ON);
4515
4516 tw32(MAC_LED_CTRL, led_ctrl);
4517 udelay(40);
4518 }
4519
4520 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4521 if (tp->link_config.active_duplex == DUPLEX_HALF)
4522 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4523
4524 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4525 if (current_link_up == 1 &&
4526 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4527 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4528 else
4529 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4530 }
4531
4532 /* Without this setting the Netgear GA302T PHY does not
4533  * send or receive packets; the reason is unknown.
4534  */
4535 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4536 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4537 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4538 tw32_f(MAC_MI_MODE, tp->mi_mode);
4539 udelay(80);
4540 }
4541
4542 tw32_f(MAC_MODE, tp->mac_mode);
4543 udelay(40);
4544
4545 tg3_phy_eee_adjust(tp, current_link_up);
4546
4547 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4548 /* Polled via timer. */
4549 tw32_f(MAC_EVENT, 0);
4550 } else {
4551 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4552 }
4553 udelay(40);
4554
4555 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4556 current_link_up == 1 &&
4557 tp->link_config.active_speed == SPEED_1000 &&
4558 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4559 udelay(120);
4560 tw32_f(MAC_STATUS,
4561 (MAC_STATUS_SYNC_CHANGED |
4562 MAC_STATUS_CFG_CHANGED));
4563 udelay(40);
4564 tg3_write_mem(tp,
4565 NIC_SRAM_FIRMWARE_MBOX,
4566 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4567 }
4568
4569 /* Prevent send BD corruption. */
4570 if (tg3_flag(tp, CLKREQ_BUG)) {
4571 if (tp->link_config.active_speed == SPEED_100 ||
4572 tp->link_config.active_speed == SPEED_10)
4573 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4574 PCI_EXP_LNKCTL_CLKREQ_EN);
4575 else
4576 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4577 PCI_EXP_LNKCTL_CLKREQ_EN);
4578 }
4579
4580 tg3_test_and_report_link_chg(tp, current_link_up);
4581
4582 return 0;
4583 }
4584
4585 struct tg3_fiber_aneginfo {
4586 int state;
4587 #define ANEG_STATE_UNKNOWN 0
4588 #define ANEG_STATE_AN_ENABLE 1
4589 #define ANEG_STATE_RESTART_INIT 2
4590 #define ANEG_STATE_RESTART 3
4591 #define ANEG_STATE_DISABLE_LINK_OK 4
4592 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4593 #define ANEG_STATE_ABILITY_DETECT 6
4594 #define ANEG_STATE_ACK_DETECT_INIT 7
4595 #define ANEG_STATE_ACK_DETECT 8
4596 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4597 #define ANEG_STATE_COMPLETE_ACK 10
4598 #define ANEG_STATE_IDLE_DETECT_INIT 11
4599 #define ANEG_STATE_IDLE_DETECT 12
4600 #define ANEG_STATE_LINK_OK 13
4601 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4602 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4603
4604 u32 flags;
4605 #define MR_AN_ENABLE 0x00000001
4606 #define MR_RESTART_AN 0x00000002
4607 #define MR_AN_COMPLETE 0x00000004
4608 #define MR_PAGE_RX 0x00000008
4609 #define MR_NP_LOADED 0x00000010
4610 #define MR_TOGGLE_TX 0x00000020
4611 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4612 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4613 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4614 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4615 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4616 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4617 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4618 #define MR_TOGGLE_RX 0x00002000
4619 #define MR_NP_RX 0x00004000
4620
4621 #define MR_LINK_OK 0x80000000
4622
4623 unsigned long link_time, cur_time;
4624
4625 u32 ability_match_cfg;
4626 int ability_match_count;
4627
4628 char ability_match, idle_match, ack_match;
4629
4630 u32 txconfig, rxconfig;
4631 #define ANEG_CFG_NP 0x00000080
4632 #define ANEG_CFG_ACK 0x00000040
4633 #define ANEG_CFG_RF2 0x00000020
4634 #define ANEG_CFG_RF1 0x00000010
4635 #define ANEG_CFG_PS2 0x00000001
4636 #define ANEG_CFG_PS1 0x00008000
4637 #define ANEG_CFG_HD 0x00004000
4638 #define ANEG_CFG_FD 0x00002000
4639 #define ANEG_CFG_INVAL 0x00001f06
4640
4641 };
4642 #define ANEG_OK 0
4643 #define ANEG_DONE 1
4644 #define ANEG_TIMER_ENAB 2
4645 #define ANEG_FAILED -1
4646
4647 #define ANEG_STATE_SETTLE_TIME 10000
4648
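/* Illustrative sketch (not part of the driver): decoding a received
 * 1000BASE-X config word with the ANEG_CFG_* masks above. The helper is
 * hypothetical; the state machine below does this inline in its
 * ANEG_STATE_COMPLETE_ACK_INIT case.
 */
static inline bool example_lp_advertises_fd(u32 rxconfig)
{
	/* Full-duplex bit set and no reserved/invalid bits present. */
	return (rxconfig & ANEG_CFG_FD) && !(rxconfig & ANEG_CFG_INVAL);
}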
4649 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4650 struct tg3_fiber_aneginfo *ap)
4651 {
4652 u16 flowctrl;
4653 unsigned long delta;
4654 u32 rx_cfg_reg;
4655 int ret;
4656
4657 if (ap->state == ANEG_STATE_UNKNOWN) {
4658 ap->rxconfig = 0;
4659 ap->link_time = 0;
4660 ap->cur_time = 0;
4661 ap->ability_match_cfg = 0;
4662 ap->ability_match_count = 0;
4663 ap->ability_match = 0;
4664 ap->idle_match = 0;
4665 ap->ack_match = 0;
4666 }
4667 ap->cur_time++;
4668
4669 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4670 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4671
4672 if (rx_cfg_reg != ap->ability_match_cfg) {
4673 ap->ability_match_cfg = rx_cfg_reg;
4674 ap->ability_match = 0;
4675 ap->ability_match_count = 0;
4676 } else {
4677 if (++ap->ability_match_count > 1) {
4678 ap->ability_match = 1;
4679 ap->ability_match_cfg = rx_cfg_reg;
4680 }
4681 }
4682 if (rx_cfg_reg & ANEG_CFG_ACK)
4683 ap->ack_match = 1;
4684 else
4685 ap->ack_match = 0;
4686
4687 ap->idle_match = 0;
4688 } else {
4689 ap->idle_match = 1;
4690 ap->ability_match_cfg = 0;
4691 ap->ability_match_count = 0;
4692 ap->ability_match = 0;
4693 ap->ack_match = 0;
4694
4695 rx_cfg_reg = 0;
4696 }
4697
4698 ap->rxconfig = rx_cfg_reg;
4699 ret = ANEG_OK;
4700
4701 switch (ap->state) {
4702 case ANEG_STATE_UNKNOWN:
4703 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4704 ap->state = ANEG_STATE_AN_ENABLE;
4705
4706 /* fallthru */
4707 case ANEG_STATE_AN_ENABLE:
4708 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4709 if (ap->flags & MR_AN_ENABLE) {
4710 ap->link_time = 0;
4711 ap->cur_time = 0;
4712 ap->ability_match_cfg = 0;
4713 ap->ability_match_count = 0;
4714 ap->ability_match = 0;
4715 ap->idle_match = 0;
4716 ap->ack_match = 0;
4717
4718 ap->state = ANEG_STATE_RESTART_INIT;
4719 } else {
4720 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4721 }
4722 break;
4723
4724 case ANEG_STATE_RESTART_INIT:
4725 ap->link_time = ap->cur_time;
4726 ap->flags &= ~(MR_NP_LOADED);
4727 ap->txconfig = 0;
4728 tw32(MAC_TX_AUTO_NEG, 0);
4729 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4730 tw32_f(MAC_MODE, tp->mac_mode);
4731 udelay(40);
4732
4733 ret = ANEG_TIMER_ENAB;
4734 ap->state = ANEG_STATE_RESTART;
4735
4736 /* fallthru */
4737 case ANEG_STATE_RESTART:
4738 delta = ap->cur_time - ap->link_time;
4739 if (delta > ANEG_STATE_SETTLE_TIME)
4740 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4741 else
4742 ret = ANEG_TIMER_ENAB;
4743 break;
4744
4745 case ANEG_STATE_DISABLE_LINK_OK:
4746 ret = ANEG_DONE;
4747 break;
4748
4749 case ANEG_STATE_ABILITY_DETECT_INIT:
4750 ap->flags &= ~(MR_TOGGLE_TX);
4751 ap->txconfig = ANEG_CFG_FD;
4752 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4753 if (flowctrl & ADVERTISE_1000XPAUSE)
4754 ap->txconfig |= ANEG_CFG_PS1;
4755 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4756 ap->txconfig |= ANEG_CFG_PS2;
4757 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4758 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4759 tw32_f(MAC_MODE, tp->mac_mode);
4760 udelay(40);
4761
4762 ap->state = ANEG_STATE_ABILITY_DETECT;
4763 break;
4764
4765 case ANEG_STATE_ABILITY_DETECT:
4766 if (ap->ability_match != 0 && ap->rxconfig != 0)
4767 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4768 break;
4769
4770 case ANEG_STATE_ACK_DETECT_INIT:
4771 ap->txconfig |= ANEG_CFG_ACK;
4772 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4773 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4774 tw32_f(MAC_MODE, tp->mac_mode);
4775 udelay(40);
4776
4777 ap->state = ANEG_STATE_ACK_DETECT;
4778
4779 /* fallthru */
4780 case ANEG_STATE_ACK_DETECT:
4781 if (ap->ack_match != 0) {
4782 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4783 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4784 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4785 } else {
4786 ap->state = ANEG_STATE_AN_ENABLE;
4787 }
4788 } else if (ap->ability_match != 0 &&
4789 ap->rxconfig == 0) {
4790 ap->state = ANEG_STATE_AN_ENABLE;
4791 }
4792 break;
4793
4794 case ANEG_STATE_COMPLETE_ACK_INIT:
4795 if (ap->rxconfig & ANEG_CFG_INVAL) {
4796 ret = ANEG_FAILED;
4797 break;
4798 }
4799 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4800 MR_LP_ADV_HALF_DUPLEX |
4801 MR_LP_ADV_SYM_PAUSE |
4802 MR_LP_ADV_ASYM_PAUSE |
4803 MR_LP_ADV_REMOTE_FAULT1 |
4804 MR_LP_ADV_REMOTE_FAULT2 |
4805 MR_LP_ADV_NEXT_PAGE |
4806 MR_TOGGLE_RX |
4807 MR_NP_RX);
4808 if (ap->rxconfig & ANEG_CFG_FD)
4809 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4810 if (ap->rxconfig & ANEG_CFG_HD)
4811 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4812 if (ap->rxconfig & ANEG_CFG_PS1)
4813 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4814 if (ap->rxconfig & ANEG_CFG_PS2)
4815 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4816 if (ap->rxconfig & ANEG_CFG_RF1)
4817 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4818 if (ap->rxconfig & ANEG_CFG_RF2)
4819 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4820 if (ap->rxconfig & ANEG_CFG_NP)
4821 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4822
4823 ap->link_time = ap->cur_time;
4824
4825 ap->flags ^= (MR_TOGGLE_TX);
4826 if (ap->rxconfig & 0x0008)
4827 ap->flags |= MR_TOGGLE_RX;
4828 if (ap->rxconfig & ANEG_CFG_NP)
4829 ap->flags |= MR_NP_RX;
4830 ap->flags |= MR_PAGE_RX;
4831
4832 ap->state = ANEG_STATE_COMPLETE_ACK;
4833 ret = ANEG_TIMER_ENAB;
4834 break;
4835
4836 case ANEG_STATE_COMPLETE_ACK:
4837 if (ap->ability_match != 0 &&
4838 ap->rxconfig == 0) {
4839 ap->state = ANEG_STATE_AN_ENABLE;
4840 break;
4841 }
4842 delta = ap->cur_time - ap->link_time;
4843 if (delta > ANEG_STATE_SETTLE_TIME) {
4844 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4845 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4846 } else {
4847 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4848 !(ap->flags & MR_NP_RX)) {
4849 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4850 } else {
4851 ret = ANEG_FAILED;
4852 }
4853 }
4854 }
4855 break;
4856
4857 case ANEG_STATE_IDLE_DETECT_INIT:
4858 ap->link_time = ap->cur_time;
4859 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4860 tw32_f(MAC_MODE, tp->mac_mode);
4861 udelay(40);
4862
4863 ap->state = ANEG_STATE_IDLE_DETECT;
4864 ret = ANEG_TIMER_ENAB;
4865 break;
4866
4867 case ANEG_STATE_IDLE_DETECT:
4868 if (ap->ability_match != 0 &&
4869 ap->rxconfig == 0) {
4870 ap->state = ANEG_STATE_AN_ENABLE;
4871 break;
4872 }
4873 delta = ap->cur_time - ap->link_time;
4874 if (delta > ANEG_STATE_SETTLE_TIME) {
4875 /* XXX another gem from the Broadcom driver :( */
4876 ap->state = ANEG_STATE_LINK_OK;
4877 }
4878 break;
4879
4880 case ANEG_STATE_LINK_OK:
4881 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4882 ret = ANEG_DONE;
4883 break;
4884
4885 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4886 /* ??? unimplemented */
4887 break;
4888
4889 case ANEG_STATE_NEXT_PAGE_WAIT:
4890 /* ??? unimplemented */
4891 break;
4892
4893 default:
4894 ret = ANEG_FAILED;
4895 break;
4896 }
4897
4898 return ret;
4899 }
4900
4901 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4902 {
4903 int res = 0;
4904 struct tg3_fiber_aneginfo aninfo;
4905 int status = ANEG_FAILED;
4906 unsigned int tick;
4907 u32 tmp;
4908
4909 tw32_f(MAC_TX_AUTO_NEG, 0);
4910
4911 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4912 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4913 udelay(40);
4914
4915 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4916 udelay(40);
4917
4918 memset(&aninfo, 0, sizeof(aninfo));
4919 aninfo.flags |= MR_AN_ENABLE;
4920 aninfo.state = ANEG_STATE_UNKNOWN;
4921 aninfo.cur_time = 0;
4922 tick = 0;
4923 while (++tick < 195000) {
4924 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4925 if (status == ANEG_DONE || status == ANEG_FAILED)
4926 break;
4927
4928 udelay(1);
4929 }
4930
4931 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4932 tw32_f(MAC_MODE, tp->mac_mode);
4933 udelay(40);
4934
4935 *txflags = aninfo.txconfig;
4936 *rxflags = aninfo.flags;
4937
4938 if (status == ANEG_DONE &&
4939 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4940 MR_LP_ADV_FULL_DUPLEX)))
4941 res = 1;
4942
4943 return res;
4944 }
4945
4946 static void tg3_init_bcm8002(struct tg3 *tp)
4947 {
4948 u32 mac_status = tr32(MAC_STATUS);
4949 int i;
4950
4951 /* Reset when initializing for the first time or when we have a link. */
4952 if (tg3_flag(tp, INIT_COMPLETE) &&
4953 !(mac_status & MAC_STATUS_PCS_SYNCED))
4954 return;
4955
4956 /* Set PLL lock range. */
4957 tg3_writephy(tp, 0x16, 0x8007);
4958
4959 /* SW reset */
4960 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4961
4962 /* Wait for reset to complete. */
4963 /* XXX schedule_timeout() ... */
4964 for (i = 0; i < 500; i++)
4965 udelay(10);
4966
4967 /* Config mode; select PMA/Ch 1 regs. */
4968 tg3_writephy(tp, 0x10, 0x8411);
4969
4970 /* Enable auto-lock and comdet, select txclk for tx. */
4971 tg3_writephy(tp, 0x11, 0x0a10);
4972
4973 tg3_writephy(tp, 0x18, 0x00a0);
4974 tg3_writephy(tp, 0x16, 0x41ff);
4975
4976 /* Assert and deassert POR. */
4977 tg3_writephy(tp, 0x13, 0x0400);
4978 udelay(40);
4979 tg3_writephy(tp, 0x13, 0x0000);
4980
4981 tg3_writephy(tp, 0x11, 0x0a50);
4982 udelay(40);
4983 tg3_writephy(tp, 0x11, 0x0a10);
4984
4985 /* Wait for signal to stabilize */
4986 /* XXX schedule_timeout() ... */
4987 for (i = 0; i < 15000; i++)
4988 udelay(10);
4989
4990 /* Deselect the channel register so we can read the PHYID
4991 * later.
4992 */
4993 tg3_writephy(tp, 0x10, 0x8011);
4994 }
4995
4996 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4997 {
4998 u16 flowctrl;
4999 u32 sg_dig_ctrl, sg_dig_status;
5000 u32 serdes_cfg, expected_sg_dig_ctrl;
5001 int workaround, port_a;
5002 int current_link_up;
5003
5004 serdes_cfg = 0;
5005 expected_sg_dig_ctrl = 0;
5006 workaround = 0;
5007 port_a = 1;
5008 current_link_up = 0;
5009
5010 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5011 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5012 workaround = 1;
5013 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5014 port_a = 0;
5015
5016 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5017 /* preserve bits 20-23 for voltage regulator */
5018 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5019 }
5020
5021 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5022
5023 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5024 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5025 if (workaround) {
5026 u32 val = serdes_cfg;
5027
5028 if (port_a)
5029 val |= 0xc010000;
5030 else
5031 val |= 0x4010000;
5032 tw32_f(MAC_SERDES_CFG, val);
5033 }
5034
5035 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5036 }
5037 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5038 tg3_setup_flow_control(tp, 0, 0);
5039 current_link_up = 1;
5040 }
5041 goto out;
5042 }
5043
5044 /* Want auto-negotiation. */
5045 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5046
5047 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5048 if (flowctrl & ADVERTISE_1000XPAUSE)
5049 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5050 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5051 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5052
5053 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5054 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5055 tp->serdes_counter &&
5056 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5057 MAC_STATUS_RCVD_CFG)) ==
5058 MAC_STATUS_PCS_SYNCED)) {
5059 tp->serdes_counter--;
5060 current_link_up = 1;
5061 goto out;
5062 }
5063 restart_autoneg:
5064 if (workaround)
5065 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5066 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5067 udelay(5);
5068 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5069
5070 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5071 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5072 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5073 MAC_STATUS_SIGNAL_DET)) {
5074 sg_dig_status = tr32(SG_DIG_STATUS);
5075 mac_status = tr32(MAC_STATUS);
5076
5077 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5078 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5079 u32 local_adv = 0, remote_adv = 0;
5080
5081 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5082 local_adv |= ADVERTISE_1000XPAUSE;
5083 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5084 local_adv |= ADVERTISE_1000XPSE_ASYM;
5085
5086 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5087 remote_adv |= LPA_1000XPAUSE;
5088 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5089 remote_adv |= LPA_1000XPAUSE_ASYM;
5090
5091 tp->link_config.rmt_adv =
5092 mii_adv_to_ethtool_adv_x(remote_adv);
5093
5094 tg3_setup_flow_control(tp, local_adv, remote_adv);
5095 current_link_up = 1;
5096 tp->serdes_counter = 0;
5097 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5098 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5099 if (tp->serdes_counter)
5100 tp->serdes_counter--;
5101 else {
5102 if (workaround) {
5103 u32 val = serdes_cfg;
5104
5105 if (port_a)
5106 val |= 0xc010000;
5107 else
5108 val |= 0x4010000;
5109
5110 tw32_f(MAC_SERDES_CFG, val);
5111 }
5112
5113 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5114 udelay(40);
5115
5116 /* Link parallel detection: the link is up only
5117  * if we have PCS_SYNC and are not receiving
5118  * config code words. */
5119 mac_status = tr32(MAC_STATUS);
5120 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5121 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5122 tg3_setup_flow_control(tp, 0, 0);
5123 current_link_up = 1;
5124 tp->phy_flags |=
5125 TG3_PHYFLG_PARALLEL_DETECT;
5126 tp->serdes_counter =
5127 SERDES_PARALLEL_DET_TIMEOUT;
5128 } else
5129 goto restart_autoneg;
5130 }
5131 }
5132 } else {
5133 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5134 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5135 }
5136
5137 out:
5138 return current_link_up;
5139 }
5140
5141 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5142 {
5143 int current_link_up = 0;
5144
5145 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5146 goto out;
5147
5148 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5149 u32 txflags, rxflags;
5150 int i;
5151
5152 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5153 u32 local_adv = 0, remote_adv = 0;
5154
5155 if (txflags & ANEG_CFG_PS1)
5156 local_adv |= ADVERTISE_1000XPAUSE;
5157 if (txflags & ANEG_CFG_PS2)
5158 local_adv |= ADVERTISE_1000XPSE_ASYM;
5159
5160 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5161 remote_adv |= LPA_1000XPAUSE;
5162 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5163 remote_adv |= LPA_1000XPAUSE_ASYM;
5164
5165 tp->link_config.rmt_adv =
5166 mii_adv_to_ethtool_adv_x(remote_adv);
5167
5168 tg3_setup_flow_control(tp, local_adv, remote_adv);
5169
5170 current_link_up = 1;
5171 }
5172 for (i = 0; i < 30; i++) {
5173 udelay(20);
5174 tw32_f(MAC_STATUS,
5175 (MAC_STATUS_SYNC_CHANGED |
5176 MAC_STATUS_CFG_CHANGED));
5177 udelay(40);
5178 if ((tr32(MAC_STATUS) &
5179 (MAC_STATUS_SYNC_CHANGED |
5180 MAC_STATUS_CFG_CHANGED)) == 0)
5181 break;
5182 }
5183
5184 mac_status = tr32(MAC_STATUS);
5185 if (current_link_up == 0 &&
5186 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5187 !(mac_status & MAC_STATUS_RCVD_CFG))
5188 current_link_up = 1;
5189 } else {
5190 tg3_setup_flow_control(tp, 0, 0);
5191
5192 /* Forcing 1000FD link up. */
5193 current_link_up = 1;
5194
5195 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5196 udelay(40);
5197
5198 tw32_f(MAC_MODE, tp->mac_mode);
5199 udelay(40);
5200 }
5201
5202 out:
5203 return current_link_up;
5204 }
5205
5206 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5207 {
5208 u32 orig_pause_cfg;
5209 u16 orig_active_speed;
5210 u8 orig_active_duplex;
5211 u32 mac_status;
5212 int current_link_up;
5213 int i;
5214
5215 orig_pause_cfg = tp->link_config.active_flowctrl;
5216 orig_active_speed = tp->link_config.active_speed;
5217 orig_active_duplex = tp->link_config.active_duplex;
5218
5219 if (!tg3_flag(tp, HW_AUTONEG) &&
5220 tp->link_up &&
5221 tg3_flag(tp, INIT_COMPLETE)) {
5222 mac_status = tr32(MAC_STATUS);
5223 mac_status &= (MAC_STATUS_PCS_SYNCED |
5224 MAC_STATUS_SIGNAL_DET |
5225 MAC_STATUS_CFG_CHANGED |
5226 MAC_STATUS_RCVD_CFG);
5227 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5228 MAC_STATUS_SIGNAL_DET)) {
5229 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5230 MAC_STATUS_CFG_CHANGED));
5231 return 0;
5232 }
5233 }
5234
5235 tw32_f(MAC_TX_AUTO_NEG, 0);
5236
5237 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5238 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5239 tw32_f(MAC_MODE, tp->mac_mode);
5240 udelay(40);
5241
5242 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5243 tg3_init_bcm8002(tp);
5244
5245 /* Enable link change event even when serdes polling. */
5246 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5247 udelay(40);
5248
5249 current_link_up = 0;
5250 tp->link_config.rmt_adv = 0;
5251 mac_status = tr32(MAC_STATUS);
5252
5253 if (tg3_flag(tp, HW_AUTONEG))
5254 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5255 else
5256 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5257
5258 tp->napi[0].hw_status->status =
5259 (SD_STATUS_UPDATED |
5260 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5261
5262 for (i = 0; i < 100; i++) {
5263 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5264 MAC_STATUS_CFG_CHANGED));
5265 udelay(5);
5266 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5267 MAC_STATUS_CFG_CHANGED |
5268 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5269 break;
5270 }
5271
5272 mac_status = tr32(MAC_STATUS);
5273 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5274 current_link_up = 0;
5275 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5276 tp->serdes_counter == 0) {
5277 tw32_f(MAC_MODE, (tp->mac_mode |
5278 MAC_MODE_SEND_CONFIGS));
5279 udelay(1);
5280 tw32_f(MAC_MODE, tp->mac_mode);
5281 }
5282 }
5283
5284 if (current_link_up == 1) {
5285 tp->link_config.active_speed = SPEED_1000;
5286 tp->link_config.active_duplex = DUPLEX_FULL;
5287 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5288 LED_CTRL_LNKLED_OVERRIDE |
5289 LED_CTRL_1000MBPS_ON));
5290 } else {
5291 tp->link_config.active_speed = SPEED_UNKNOWN;
5292 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5293 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5294 LED_CTRL_LNKLED_OVERRIDE |
5295 LED_CTRL_TRAFFIC_OVERRIDE));
5296 }
5297
5298 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5299 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5300 if (orig_pause_cfg != now_pause_cfg ||
5301 orig_active_speed != tp->link_config.active_speed ||
5302 orig_active_duplex != tp->link_config.active_duplex)
5303 tg3_link_report(tp);
5304 }
5305
5306 return 0;
5307 }
5308
5309 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5310 {
5311 int current_link_up, err = 0;
5312 u32 bmsr, bmcr;
5313 u16 current_speed;
5314 u8 current_duplex;
5315 u32 local_adv, remote_adv;
5316
5317 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5318 tw32_f(MAC_MODE, tp->mac_mode);
5319 udelay(40);
5320
5321 tw32(MAC_EVENT, 0);
5322
5323 tw32_f(MAC_STATUS,
5324 (MAC_STATUS_SYNC_CHANGED |
5325 MAC_STATUS_CFG_CHANGED |
5326 MAC_STATUS_MI_COMPLETION |
5327 MAC_STATUS_LNKSTATE_CHANGED));
5328 udelay(40);
5329
5330 if (force_reset)
5331 tg3_phy_reset(tp);
5332
5333 current_link_up = 0;
5334 current_speed = SPEED_UNKNOWN;
5335 current_duplex = DUPLEX_UNKNOWN;
5336 tp->link_config.rmt_adv = 0;
5337
5338 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5339 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5340 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5341 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5342 bmsr |= BMSR_LSTATUS;
5343 else
5344 bmsr &= ~BMSR_LSTATUS;
5345 }
5346
5347 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5348
5349 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5350 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5351 /* do nothing, just check for link up at the end */
5352 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5353 u32 adv, newadv;
5354
5355 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5356 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5357 ADVERTISE_1000XPAUSE |
5358 ADVERTISE_1000XPSE_ASYM |
5359 ADVERTISE_SLCT);
5360
5361 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5362 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5363
5364 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5365 tg3_writephy(tp, MII_ADVERTISE, newadv);
5366 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5367 tg3_writephy(tp, MII_BMCR, bmcr);
5368
5369 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5370 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5371 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5372
5373 return err;
5374 }
5375 } else {
5376 u32 new_bmcr;
5377
5378 bmcr &= ~BMCR_SPEED1000;
5379 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5380
5381 if (tp->link_config.duplex == DUPLEX_FULL)
5382 new_bmcr |= BMCR_FULLDPLX;
5383
5384 if (new_bmcr != bmcr) {
5385 /* BMCR_SPEED1000 is a reserved bit that needs
5386 * to be set on write.
5387 */
5388 new_bmcr |= BMCR_SPEED1000;
5389
5390 /* Force a linkdown */
5391 if (tp->link_up) {
5392 u32 adv;
5393
5394 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5395 adv &= ~(ADVERTISE_1000XFULL |
5396 ADVERTISE_1000XHALF |
5397 ADVERTISE_SLCT);
5398 tg3_writephy(tp, MII_ADVERTISE, adv);
5399 tg3_writephy(tp, MII_BMCR, bmcr |
5400 BMCR_ANRESTART |
5401 BMCR_ANENABLE);
5402 udelay(10);
5403 tg3_carrier_off(tp);
5404 }
5405 tg3_writephy(tp, MII_BMCR, new_bmcr);
5406 bmcr = new_bmcr;
5407 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5408 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5409 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5410 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5411 bmsr |= BMSR_LSTATUS;
5412 else
5413 bmsr &= ~BMSR_LSTATUS;
5414 }
5415 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5416 }
5417 }
5418
5419 if (bmsr & BMSR_LSTATUS) {
5420 current_speed = SPEED_1000;
5421 current_link_up = 1;
5422 if (bmcr & BMCR_FULLDPLX)
5423 current_duplex = DUPLEX_FULL;
5424 else
5425 current_duplex = DUPLEX_HALF;
5426
5427 local_adv = 0;
5428 remote_adv = 0;
5429
5430 if (bmcr & BMCR_ANENABLE) {
5431 u32 common;
5432
5433 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5434 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5435 common = local_adv & remote_adv;
5436 if (common & (ADVERTISE_1000XHALF |
5437 ADVERTISE_1000XFULL)) {
5438 if (common & ADVERTISE_1000XFULL)
5439 current_duplex = DUPLEX_FULL;
5440 else
5441 current_duplex = DUPLEX_HALF;
5442
5443 tp->link_config.rmt_adv =
5444 mii_adv_to_ethtool_adv_x(remote_adv);
5445 } else if (!tg3_flag(tp, 5780_CLASS)) {
5446 /* Link is up via parallel detect */
5447 } else {
5448 current_link_up = 0;
5449 }
5450 }
5451 }
5452
5453 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5454 tg3_setup_flow_control(tp, local_adv, remote_adv);
5455
5456 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5457 if (tp->link_config.active_duplex == DUPLEX_HALF)
5458 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5459
5460 tw32_f(MAC_MODE, tp->mac_mode);
5461 udelay(40);
5462
5463 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5464
5465 tp->link_config.active_speed = current_speed;
5466 tp->link_config.active_duplex = current_duplex;
5467
5468 tg3_test_and_report_link_chg(tp, current_link_up);
5469 return err;
5470 }
5471
5472 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5473 {
5474 if (tp->serdes_counter) {
5475 /* Give autoneg time to complete. */
5476 tp->serdes_counter--;
5477 return;
5478 }
5479
5480 if (!tp->link_up &&
5481 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5482 u32 bmcr;
5483
5484 tg3_readphy(tp, MII_BMCR, &bmcr);
5485 if (bmcr & BMCR_ANENABLE) {
5486 u32 phy1, phy2;
5487
5488 /* Select shadow register 0x1f */
5489 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5490 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5491
5492 /* Select expansion interrupt status register */
5493 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5494 MII_TG3_DSP_EXP1_INT_STAT);
5495 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5496 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5497
5498 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5499 /* We have signal detect and are not receiving
5500  * config code words, so the link is up by
5501  * parallel detection.
5502  */
5503
5504 bmcr &= ~BMCR_ANENABLE;
5505 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5506 tg3_writephy(tp, MII_BMCR, bmcr);
5507 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5508 }
5509 }
5510 } else if (tp->link_up &&
5511 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5512 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5513 u32 phy2;
5514
5515 /* Select expansion interrupt status register */
5516 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5517 MII_TG3_DSP_EXP1_INT_STAT);
5518 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5519 if (phy2 & 0x20) {
5520 u32 bmcr;
5521
5522 /* Config code words received, turn on autoneg. */
5523 tg3_readphy(tp, MII_BMCR, &bmcr);
5524 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5525
5526 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5527
5528 }
5529 }
5530 }
5531
5532 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5533 {
5534 u32 val;
5535 int err;
5536
5537 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5538 err = tg3_setup_fiber_phy(tp, force_reset);
5539 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5540 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5541 else
5542 err = tg3_setup_copper_phy(tp, force_reset);
5543
5544 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5545 u32 scale;
5546
5547 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5548 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5549 scale = 65;
5550 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5551 scale = 6;
5552 else
5553 scale = 12;
5554
5555 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5556 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5557 tw32(GRC_MISC_CFG, val);
5558 }
5559
5560 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5561 (6 << TX_LENGTHS_IPG_SHIFT);
5562 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5563 tg3_asic_rev(tp) == ASIC_REV_5762)
5564 val |= tr32(MAC_TX_LENGTHS) &
5565 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5566 TX_LENGTHS_CNT_DWN_VAL_MSK);
5567
5568 if (tp->link_config.active_speed == SPEED_1000 &&
5569 tp->link_config.active_duplex == DUPLEX_HALF)
5570 tw32(MAC_TX_LENGTHS, val |
5571 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5572 else
5573 tw32(MAC_TX_LENGTHS, val |
5574 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5575
5576 if (!tg3_flag(tp, 5705_PLUS)) {
5577 if (tp->link_up) {
5578 tw32(HOSTCC_STAT_COAL_TICKS,
5579 tp->coal.stats_block_coalesce_usecs);
5580 } else {
5581 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5582 }
5583 }
5584
5585 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5586 val = tr32(PCIE_PWR_MGMT_THRESH);
5587 if (!tp->link_up)
5588 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5589 tp->pwrmgmt_thresh;
5590 else
5591 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5592 tw32(PCIE_PWR_MGMT_THRESH, val);
5593 }
5594
5595 return err;
5596 }
5597
5598 /* tp->lock must be held */
5599 static u64 tg3_refclk_read(struct tg3 *tp)
5600 {
5601 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5602 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5603 }
5604
5605 /* tp->lock must be held */
5606 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5607 {
5608 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5609 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5610 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5611 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5612 }
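/* Illustrative sketch (not part of the driver): stepping the hardware
 * reference clock by a signed offset using the stop/write/resume
 * sequence above. The helper name is hypothetical; the driver instead
 * accumulates deltas in tp->ptp_adjust (see tg3_ptp_adjtime() below).
 */
static inline void example_refclk_step(struct tg3 *tp, s64 delta_ns)
{
	/* tp->lock must be held, as for the helpers above. */
	tg3_refclk_write(tp, tg3_refclk_read(tp) + delta_ns);
}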
5613
5614 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5615 static inline void tg3_full_unlock(struct tg3 *tp);
5616 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5617 {
5618 struct tg3 *tp = netdev_priv(dev);
5619
5620 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5621 SOF_TIMESTAMPING_RX_SOFTWARE |
5622 SOF_TIMESTAMPING_SOFTWARE |
5623 SOF_TIMESTAMPING_TX_HARDWARE |
5624 SOF_TIMESTAMPING_RX_HARDWARE |
5625 SOF_TIMESTAMPING_RAW_HARDWARE;
5626
5627 if (tp->ptp_clock)
5628 info->phc_index = ptp_clock_index(tp->ptp_clock);
5629 else
5630 info->phc_index = -1;
5631
5632 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5633
5634 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5635 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5636 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5637 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5638 return 0;
5639 }
5640
5641 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5642 {
5643 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5644 bool neg_adj = false;
5645 u32 correction = 0;
5646
5647 if (ppb < 0) {
5648 neg_adj = true;
5649 ppb = -ppb;
5650 }
5651
5652 /* Frequency adjustment is performed using hardware with a 24 bit
5653 * accumulator and a programmable correction value. On each clock, the
5654 * correction value gets added to the accumulator and when it
5655 * overflows, the time counter is incremented/decremented.
5656 *
5657 * So conversion from ppb to correction value is
5658 * ppb * (1 << 24) / 1000000000
5659 */
5660 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5661 TG3_EAV_REF_CLK_CORRECT_MASK;
5662
5663 tg3_full_lock(tp, 0);
5664
5665 if (correction)
5666 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5667 TG3_EAV_REF_CLK_CORRECT_EN |
5668 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5669 else
5670 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5671
5672 tg3_full_unlock(tp);
5673
5674 return 0;
5675 }
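/* Worked example (illustrative, not part of the driver): the
 * ppb-to-correction conversion above as a standalone helper with a
 * sample value. The name is hypothetical.
 */
static inline u32 example_ppb_to_correction(u32 ppb)
{
	/* e.g. ppb = 122070 gives correction ~= 2048, so the 24-bit
	 * accumulator overflows once every 2^24 / 2048 = 8192 clocks.
	 */
	return (u32)div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
	       TG3_EAV_REF_CLK_CORRECT_MASK;
}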
5676
5677 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5678 {
5679 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5680
5681 tg3_full_lock(tp, 0);
5682 tp->ptp_adjust += delta;
5683 tg3_full_unlock(tp);
5684
5685 return 0;
5686 }
5687
5688 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5689 {
5690 u64 ns;
5691 u32 remainder;
5692 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5693
5694 tg3_full_lock(tp, 0);
5695 ns = tg3_refclk_read(tp);
5696 ns += tp->ptp_adjust;
5697 tg3_full_unlock(tp);
5698
5699 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5700 ts->tv_nsec = remainder;
5701
5702 return 0;
5703 }
5704
5705 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5706 const struct timespec *ts)
5707 {
5708 u64 ns;
5709 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5710
5711 ns = timespec_to_ns(ts);
5712
5713 tg3_full_lock(tp, 0);
5714 tg3_refclk_write(tp, ns);
5715 tp->ptp_adjust = 0;
5716 tg3_full_unlock(tp);
5717
5718 return 0;
5719 }
5720
5721 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5722 struct ptp_clock_request *rq, int on)
5723 {
5724 return -EOPNOTSUPP;
5725 }
5726
5727 static const struct ptp_clock_info tg3_ptp_caps = {
5728 .owner = THIS_MODULE,
5729 .name = "tg3 clock",
5730 .max_adj = 250000000,
5731 .n_alarm = 0,
5732 .n_ext_ts = 0,
5733 .n_per_out = 0,
5734 .pps = 0,
5735 .adjfreq = tg3_ptp_adjfreq,
5736 .adjtime = tg3_ptp_adjtime,
5737 .gettime = tg3_ptp_gettime,
5738 .settime = tg3_ptp_settime,
5739 .enable = tg3_ptp_enable,
5740 };
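/* Usage sketch (illustrative, not part of the driver): registering
 * tp->ptp_info with the PTP core; the driver performs the equivalent
 * registration elsewhere in this file. The helper name is hypothetical.
 */
static inline void example_register_phc(struct tg3 *tp)
{
	tp->ptp_clock = ptp_clock_register(&tp->ptp_info, &tp->pdev->dev);
	if (IS_ERR(tp->ptp_clock))
		tp->ptp_clock = NULL;	/* run without a PHC */
}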
5741
5742 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5743 struct skb_shared_hwtstamps *timestamp)
5744 {
5745 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5746 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5747 tp->ptp_adjust);
5748 }
5749
5750 /* tp->lock must be held */
5751 static void tg3_ptp_init(struct tg3 *tp)
5752 {
5753 if (!tg3_flag(tp, PTP_CAPABLE))
5754 return;
5755
5756 /* Initialize the hardware clock to the system time. */
5757 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5758 tp->ptp_adjust = 0;
5759 tp->ptp_info = tg3_ptp_caps;
5760 }
5761
5762 /* tp->lock must be held */
5763 static void tg3_ptp_resume(struct tg3 *tp)
5764 {
5765 if (!tg3_flag(tp, PTP_CAPABLE))
5766 return;
5767
5768 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5769 tp->ptp_adjust = 0;
5770 }
5771
5772 static void tg3_ptp_fini(struct tg3 *tp)
5773 {
5774 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5775 return;
5776
5777 ptp_clock_unregister(tp->ptp_clock);
5778 tp->ptp_clock = NULL;
5779 tp->ptp_adjust = 0;
5780 }
5781
5782 static inline int tg3_irq_sync(struct tg3 *tp)
5783 {
5784 return tp->irq_sync;
5785 }
5786
5787 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5788 {
5789 int i;
5790
5791 dst = (u32 *)((u8 *)dst + off);
5792 for (i = 0; i < len; i += sizeof(u32))
5793 *dst++ = tr32(off + i);
5794 }
5795
5796 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5797 {
5798 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5799 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5800 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5801 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5802 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5803 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5804 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5805 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5806 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5807 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5808 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5809 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5810 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5811 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5812 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5813 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5814 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5815 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5816 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5817
5818 if (tg3_flag(tp, SUPPORT_MSIX))
5819 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5820
5821 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5822 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5823 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5824 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5825 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5826 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5827 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5828 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5829
5830 if (!tg3_flag(tp, 5705_PLUS)) {
5831 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5832 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5833 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5834 }
5835
5836 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5837 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5838 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5839 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5840 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5841
5842 if (tg3_flag(tp, NVRAM))
5843 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5844 }
5845
5846 static void tg3_dump_state(struct tg3 *tp)
5847 {
5848 int i;
5849 u32 *regs;
5850
5851 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5852 if (!regs)
5853 return;
5854
5855 if (tg3_flag(tp, PCI_EXPRESS)) {
5856 /* Read up to but not including private PCI registers */
5857 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5858 regs[i / sizeof(u32)] = tr32(i);
5859 } else
5860 tg3_dump_legacy_regs(tp, regs);
5861
5862 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5863 if (!regs[i + 0] && !regs[i + 1] &&
5864 !regs[i + 2] && !regs[i + 3])
5865 continue;
5866
5867 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5868 i * 4,
5869 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5870 }
5871
5872 kfree(regs);
5873
5874 for (i = 0; i < tp->irq_cnt; i++) {
5875 struct tg3_napi *tnapi = &tp->napi[i];
5876
5877 /* SW status block */
5878 netdev_err(tp->dev,
5879 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5880 i,
5881 tnapi->hw_status->status,
5882 tnapi->hw_status->status_tag,
5883 tnapi->hw_status->rx_jumbo_consumer,
5884 tnapi->hw_status->rx_consumer,
5885 tnapi->hw_status->rx_mini_consumer,
5886 tnapi->hw_status->idx[0].rx_producer,
5887 tnapi->hw_status->idx[0].tx_consumer);
5888
5889 netdev_err(tp->dev,
5890 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5891 i,
5892 tnapi->last_tag, tnapi->last_irq_tag,
5893 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5894 tnapi->rx_rcb_ptr,
5895 tnapi->prodring.rx_std_prod_idx,
5896 tnapi->prodring.rx_std_cons_idx,
5897 tnapi->prodring.rx_jmb_prod_idx,
5898 tnapi->prodring.rx_jmb_cons_idx);
5899 }
5900 }
5901
5902 /* This is called whenever we suspect that the system chipset is re-
5903 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5904 * is bogus tx completions. We try to recover by setting the
5905 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5906 * in the workqueue.
5907 */
5908 static void tg3_tx_recover(struct tg3 *tp)
5909 {
5910 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5911 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5912
5913 netdev_warn(tp->dev,
5914 "The system may be re-ordering memory-mapped I/O "
5915 "cycles to the network device, attempting to recover. "
5916 "Please report the problem to the driver maintainer "
5917 "and include system chipset information.\n");
5918
5919 spin_lock(&tp->lock);
5920 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5921 spin_unlock(&tp->lock);
5922 }
5923
5924 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5925 {
5926 /* Tell compiler to fetch tx indices from memory. */
5927 barrier();
5928 return tnapi->tx_pending -
5929 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5930 }
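/* Worked example (illustrative): assuming the usual 512-entry ring with
 * tx_pending = 511, tx_prod = 5 and tx_cons = 510, the in-flight count
 * is (5 - 510) & 511 = 7, leaving 504 descriptors available; the mask
 * handles tx_prod wrapping past tx_cons.
 */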
5931
5932 /* Tigon3 never reports partial packet sends. So we do not
5933 * need special logic to handle SKBs that have not had all
5934 * of their frags sent yet, like SunGEM does.
5935 */
5936 static void tg3_tx(struct tg3_napi *tnapi)
5937 {
5938 struct tg3 *tp = tnapi->tp;
5939 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5940 u32 sw_idx = tnapi->tx_cons;
5941 struct netdev_queue *txq;
5942 int index = tnapi - tp->napi;
5943 unsigned int pkts_compl = 0, bytes_compl = 0;
5944
5945 if (tg3_flag(tp, ENABLE_TSS))
5946 index--;
5947
5948 txq = netdev_get_tx_queue(tp->dev, index);
5949
5950 while (sw_idx != hw_idx) {
5951 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5952 struct sk_buff *skb = ri->skb;
5953 int i, tx_bug = 0;
5954
5955 if (unlikely(skb == NULL)) {
5956 tg3_tx_recover(tp);
5957 return;
5958 }
5959
5960 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5961 struct skb_shared_hwtstamps timestamp;
5962 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5963 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5964
5965 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5966
5967 skb_tstamp_tx(skb, &timestamp);
5968 }
5969
5970 pci_unmap_single(tp->pdev,
5971 dma_unmap_addr(ri, mapping),
5972 skb_headlen(skb),
5973 PCI_DMA_TODEVICE);
5974
5975 ri->skb = NULL;
5976
5977 while (ri->fragmented) {
5978 ri->fragmented = false;
5979 sw_idx = NEXT_TX(sw_idx);
5980 ri = &tnapi->tx_buffers[sw_idx];
5981 }
5982
5983 sw_idx = NEXT_TX(sw_idx);
5984
5985 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5986 ri = &tnapi->tx_buffers[sw_idx];
5987 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5988 tx_bug = 1;
5989
5990 pci_unmap_page(tp->pdev,
5991 dma_unmap_addr(ri, mapping),
5992 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5993 PCI_DMA_TODEVICE);
5994
5995 while (ri->fragmented) {
5996 ri->fragmented = false;
5997 sw_idx = NEXT_TX(sw_idx);
5998 ri = &tnapi->tx_buffers[sw_idx];
5999 }
6000
6001 sw_idx = NEXT_TX(sw_idx);
6002 }
6003
6004 pkts_compl++;
6005 bytes_compl += skb->len;
6006
6007 dev_kfree_skb(skb);
6008
6009 if (unlikely(tx_bug)) {
6010 tg3_tx_recover(tp);
6011 return;
6012 }
6013 }
6014
6015 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6016
6017 tnapi->tx_cons = sw_idx;
6018
6019 /* Need to make the tx_cons update visible to tg3_start_xmit()
6020 * before checking for netif_queue_stopped(). Without the
6021 * memory barrier, there is a small possibility that tg3_start_xmit()
6022 * will miss it and cause the queue to be stopped forever.
6023 */
6024 smp_mb();
6025
6026 if (unlikely(netif_tx_queue_stopped(txq) &&
6027 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6028 __netif_tx_lock(txq, smp_processor_id());
6029 if (netif_tx_queue_stopped(txq) &&
6030 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6031 netif_tx_wake_queue(txq);
6032 __netif_tx_unlock(txq);
6033 }
6034 }
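/* A minimal sketch of the stop/wake protocol above, assuming the
 * usual pairing with tg3_start_xmit() (layout illustrative only):
 *
 *	tg3_start_xmit()		tg3_tx()
 *	----------------		--------
 *	netif_tx_stop_queue(txq);	tnapi->tx_cons = sw_idx;
 *	smp_mb();			smp_mb();
 *	recheck tg3_tx_avail();		recheck netif_tx_queue_stopped();
 *
 * Whichever side runs second is guaranteed to observe the other
 * side's update, so the queue cannot stay stopped while descriptors
 * are free.
 */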
6035
6036 static void tg3_frag_free(bool is_frag, void *data)
6037 {
6038 if (is_frag)
6039 put_page(virt_to_head_page(data));
6040 else
6041 kfree(data);
6042 }
6043
6044 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6045 {
6046 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6047 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6048
6049 if (!ri->data)
6050 return;
6051
6052 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6053 map_sz, PCI_DMA_FROMDEVICE);
6054 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6055 ri->data = NULL;
6056 }
6057
6058
6059 /* Returns size of skb allocated or < 0 on error.
6060 *
6061 * We only need to fill in the address because the other members
6062 * of the RX descriptor are invariant; see tg3_init_rings.
6063 *
6064 * Note the purposeful asymmetry of CPU vs. chip accesses. For
6065 * posting buffers we only dirty the first cache line of the RX
6066 * descriptor (containing the address), whereas for the RX status
6067 * buffers the CPU only reads the last cache line of the RX descriptor
6068 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6069 */
6070 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6071 u32 opaque_key, u32 dest_idx_unmasked,
6072 unsigned int *frag_size)
6073 {
6074 struct tg3_rx_buffer_desc *desc;
6075 struct ring_info *map;
6076 u8 *data;
6077 dma_addr_t mapping;
6078 int skb_size, data_size, dest_idx;
6079
6080 switch (opaque_key) {
6081 case RXD_OPAQUE_RING_STD:
6082 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6083 desc = &tpr->rx_std[dest_idx];
6084 map = &tpr->rx_std_buffers[dest_idx];
6085 data_size = tp->rx_pkt_map_sz;
6086 break;
6087
6088 case RXD_OPAQUE_RING_JUMBO:
6089 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6090 desc = &tpr->rx_jmb[dest_idx].std;
6091 map = &tpr->rx_jmb_buffers[dest_idx];
6092 data_size = TG3_RX_JMB_MAP_SZ;
6093 break;
6094
6095 default:
6096 return -EINVAL;
6097 }
6098
6099 /* Do not overwrite any of the map or rp information
6100 * until we are sure we can commit to a new buffer.
6101 *
6102 * Callers depend upon this behavior and assume that
6103 * we leave everything unchanged if we fail.
6104 */
6105 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6106 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6107 if (skb_size <= PAGE_SIZE) {
6108 data = netdev_alloc_frag(skb_size);
6109 *frag_size = skb_size;
6110 } else {
6111 data = kmalloc(skb_size, GFP_ATOMIC);
6112 *frag_size = 0;
6113 }
6114 if (!data)
6115 return -ENOMEM;
6116
6117 mapping = pci_map_single(tp->pdev,
6118 data + TG3_RX_OFFSET(tp),
6119 data_size,
6120 PCI_DMA_FROMDEVICE);
6121 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6122 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6123 return -EIO;
6124 }
6125
6126 map->data = data;
6127 dma_unmap_addr_set(map, mapping, mapping);
6128
6129 desc->addr_hi = ((u64)mapping >> 32);
6130 desc->addr_lo = ((u64)mapping & 0xffffffff);
6131
6132 return data_size;
6133 }
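/* Sizing sketch for the allocator above (assumes a 4 KiB PAGE_SIZE
 * and a standard 1500-byte MTU; both are illustrative):
 *
 *	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
 *		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *
 * For the standard ring this lands below PAGE_SIZE, so the cheaper
 * page-fragment allocator is used and *frag_size records the size so
 * that build_skb()/tg3_frag_free() later take the matching free path.
 * Jumbo buffers exceed PAGE_SIZE and fall back to kmalloc(), which
 * is signalled by *frag_size == 0.
 */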
6134
6135 /* We only need to move the address over because the other
6136 * members of the RX descriptor are invariant. See notes above
6137 * tg3_alloc_rx_data for full details.
6138 */
6139 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6140 struct tg3_rx_prodring_set *dpr,
6141 u32 opaque_key, int src_idx,
6142 u32 dest_idx_unmasked)
6143 {
6144 struct tg3 *tp = tnapi->tp;
6145 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6146 struct ring_info *src_map, *dest_map;
6147 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6148 int dest_idx;
6149
6150 switch (opaque_key) {
6151 case RXD_OPAQUE_RING_STD:
6152 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6153 dest_desc = &dpr->rx_std[dest_idx];
6154 dest_map = &dpr->rx_std_buffers[dest_idx];
6155 src_desc = &spr->rx_std[src_idx];
6156 src_map = &spr->rx_std_buffers[src_idx];
6157 break;
6158
6159 case RXD_OPAQUE_RING_JUMBO:
6160 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6161 dest_desc = &dpr->rx_jmb[dest_idx].std;
6162 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6163 src_desc = &spr->rx_jmb[src_idx].std;
6164 src_map = &spr->rx_jmb_buffers[src_idx];
6165 break;
6166
6167 default:
6168 return;
6169 }
6170
6171 dest_map->data = src_map->data;
6172 dma_unmap_addr_set(dest_map, mapping,
6173 dma_unmap_addr(src_map, mapping));
6174 dest_desc->addr_hi = src_desc->addr_hi;
6175 dest_desc->addr_lo = src_desc->addr_lo;
6176
6177 /* Ensure that the update to the skb happens after the physical
6178 * addresses have been transferred to the new BD location.
6179 */
6180 smp_wmb();
6181
6182 src_map->data = NULL;
6183 }
6184
6185 /* The RX ring scheme is composed of multiple rings which post fresh
6186 * buffers to the chip, and one special ring the chip uses to report
6187 * status back to the host.
6188 *
6189 * The special ring reports the status of received packets to the
6190 * host. The chip does not write into the original descriptor the
6191 * RX buffer was obtained from. The chip simply takes the original
6192 * descriptor as provided by the host, updates the status and length
6193 * field, then writes this into the next status ring entry.
6194 *
6195 * Each ring the host uses to post buffers to the chip is described
6196 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6197 * it is first placed into the on-chip RAM. When the packet's length
6198 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
6199 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6200 * whose MAXLEN covers the new packet's length is chosen.
6201 *
6202 * The "separate ring for rx status" scheme may sound odd, but it makes
6203 * sense from a cache coherency perspective. If only the host writes
6204 * to the buffer post rings, and only the chip writes to the rx status
6205 * rings, then cache lines never move beyond shared-modified state.
6206 * If both the host and chip were to write into the same ring, cache line
6207 * eviction could occur since both entities want it in an exclusive state.
6208 */
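/* Illustrative walk of the MAXLEN selection described above (the
 * numbers are examples, not the driver's actual configuration): with
 * a standard-ring MAXLEN of 1536 and a jumbo-ring MAXLEN of 9018, a
 * 4000-byte frame fails the standard BDINFO's MAXLEN test and is
 * matched to the jumbo ring's buffers instead.
 */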
6209 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6210 {
6211 struct tg3 *tp = tnapi->tp;
6212 u32 work_mask, rx_std_posted = 0;
6213 u32 std_prod_idx, jmb_prod_idx;
6214 u32 sw_idx = tnapi->rx_rcb_ptr;
6215 u16 hw_idx;
6216 int received;
6217 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6218
6219 hw_idx = *(tnapi->rx_rcb_prod_idx);
6220 /*
6221 * We need to order the read of hw_idx and the read of
6222 * the opaque cookie.
6223 */
6224 rmb();
6225 work_mask = 0;
6226 received = 0;
6227 std_prod_idx = tpr->rx_std_prod_idx;
6228 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6229 while (sw_idx != hw_idx && budget > 0) {
6230 struct ring_info *ri;
6231 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6232 unsigned int len;
6233 struct sk_buff *skb;
6234 dma_addr_t dma_addr;
6235 u32 opaque_key, desc_idx, *post_ptr;
6236 u8 *data;
6237 u64 tstamp = 0;
6238
6239 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6240 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6241 if (opaque_key == RXD_OPAQUE_RING_STD) {
6242 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6243 dma_addr = dma_unmap_addr(ri, mapping);
6244 data = ri->data;
6245 post_ptr = &std_prod_idx;
6246 rx_std_posted++;
6247 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6248 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6249 dma_addr = dma_unmap_addr(ri, mapping);
6250 data = ri->data;
6251 post_ptr = &jmb_prod_idx;
6252 } else
6253 goto next_pkt_nopost;
6254
6255 work_mask |= opaque_key;
6256
6257 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6258 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6259 drop_it:
6260 tg3_recycle_rx(tnapi, tpr, opaque_key,
6261 desc_idx, *post_ptr);
6262 drop_it_no_recycle:
6263 /* Other statistics kept track of by card. */
6264 tp->rx_dropped++;
6265 goto next_pkt;
6266 }
6267
6268 prefetch(data + TG3_RX_OFFSET(tp));
6269 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6270 ETH_FCS_LEN;
6271
6272 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6273 RXD_FLAG_PTPSTAT_PTPV1 ||
6274 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6275 RXD_FLAG_PTPSTAT_PTPV2) {
6276 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6277 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6278 }
6279
6280 if (len > TG3_RX_COPY_THRESH(tp)) {
6281 int skb_size;
6282 unsigned int frag_size;
6283
6284 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6285 *post_ptr, &frag_size);
6286 if (skb_size < 0)
6287 goto drop_it;
6288
6289 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6290 PCI_DMA_FROMDEVICE);
6291
6292 skb = build_skb(data, frag_size);
6293 if (!skb) {
6294 tg3_frag_free(frag_size != 0, data);
6295 goto drop_it_no_recycle;
6296 }
6297 skb_reserve(skb, TG3_RX_OFFSET(tp));
6298 /* Ensure that the update to the data happens
6299 * after the usage of the old DMA mapping.
6300 */
6301 smp_wmb();
6302
6303 ri->data = NULL;
6304
6305 } else {
6306 tg3_recycle_rx(tnapi, tpr, opaque_key,
6307 desc_idx, *post_ptr);
6308
6309 skb = netdev_alloc_skb(tp->dev,
6310 len + TG3_RAW_IP_ALIGN);
6311 if (skb == NULL)
6312 goto drop_it_no_recycle;
6313
6314 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6315 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6316 memcpy(skb->data,
6317 data + TG3_RX_OFFSET(tp),
6318 len);
6319 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6320 }
6321
6322 skb_put(skb, len);
6323 if (tstamp)
6324 tg3_hwclock_to_timestamp(tp, tstamp,
6325 skb_hwtstamps(skb));
6326
6327 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6328 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6329 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6330 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6331 skb->ip_summed = CHECKSUM_UNNECESSARY;
6332 else
6333 skb_checksum_none_assert(skb);
6334
6335 skb->protocol = eth_type_trans(skb, tp->dev);
6336
6337 if (len > (tp->dev->mtu + ETH_HLEN) &&
6338 skb->protocol != htons(ETH_P_8021Q)) {
6339 dev_kfree_skb(skb);
6340 goto drop_it_no_recycle;
6341 }
6342
6343 if (desc->type_flags & RXD_FLAG_VLAN &&
6344 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6345 __vlan_hwaccel_put_tag(skb,
6346 desc->err_vlan & RXD_VLAN_MASK);
6347
6348 napi_gro_receive(&tnapi->napi, skb);
6349
6350 received++;
6351 budget--;
6352
6353 next_pkt:
6354 (*post_ptr)++;
6355
6356 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6357 tpr->rx_std_prod_idx = std_prod_idx &
6358 tp->rx_std_ring_mask;
6359 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6360 tpr->rx_std_prod_idx);
6361 work_mask &= ~RXD_OPAQUE_RING_STD;
6362 rx_std_posted = 0;
6363 }
6364 next_pkt_nopost:
6365 sw_idx++;
6366 sw_idx &= tp->rx_ret_ring_mask;
6367
6368 /* Refresh hw_idx to see if there is new work */
6369 if (sw_idx == hw_idx) {
6370 hw_idx = *(tnapi->rx_rcb_prod_idx);
6371 rmb();
6372 }
6373 }
6374
6375 /* ACK the status ring. */
6376 tnapi->rx_rcb_ptr = sw_idx;
6377 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6378
6379 /* Refill RX ring(s). */
6380 if (!tg3_flag(tp, ENABLE_RSS)) {
6381 /* Sync BD data before updating mailbox */
6382 wmb();
6383
6384 if (work_mask & RXD_OPAQUE_RING_STD) {
6385 tpr->rx_std_prod_idx = std_prod_idx &
6386 tp->rx_std_ring_mask;
6387 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6388 tpr->rx_std_prod_idx);
6389 }
6390 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6391 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6392 tp->rx_jmb_ring_mask;
6393 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6394 tpr->rx_jmb_prod_idx);
6395 }
6396 mmiowb();
6397 } else if (work_mask) {
6398 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6399 * updated before the producer indices can be updated.
6400 */
6401 smp_wmb();
6402
6403 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6404 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6405
6406 if (tnapi != &tp->napi[1]) {
6407 tp->rx_refill = true;
6408 napi_schedule(&tp->napi[1].napi);
6409 }
6410 }
6411
6412 return received;
6413 }
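/* Copy-threshold behaviour of the receive loop above, with
 * illustrative numbers: assuming TG3_RX_COPY_THRESH(tp) == 256, a
 * 64-byte TCP ACK is memcpy()'d into a fresh skb and its buffer is
 * recycled in place via tg3_recycle_rx(), while a 1514-byte frame
 * hands its whole buffer to the stack through build_skb() and a
 * replacement buffer is posted with tg3_alloc_rx_data().
 */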
6414
6415 static void tg3_poll_link(struct tg3 *tp)
6416 {
6417 /* handle link change and other phy events */
6418 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6419 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6420
6421 if (sblk->status & SD_STATUS_LINK_CHG) {
6422 sblk->status = SD_STATUS_UPDATED |
6423 (sblk->status & ~SD_STATUS_LINK_CHG);
6424 spin_lock(&tp->lock);
6425 if (tg3_flag(tp, USE_PHYLIB)) {
6426 tw32_f(MAC_STATUS,
6427 (MAC_STATUS_SYNC_CHANGED |
6428 MAC_STATUS_CFG_CHANGED |
6429 MAC_STATUS_MI_COMPLETION |
6430 MAC_STATUS_LNKSTATE_CHANGED));
6431 udelay(40);
6432 } else
6433 tg3_setup_phy(tp, 0);
6434 spin_unlock(&tp->lock);
6435 }
6436 }
6437 }
6438
6439 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6440 struct tg3_rx_prodring_set *dpr,
6441 struct tg3_rx_prodring_set *spr)
6442 {
6443 u32 si, di, cpycnt, src_prod_idx;
6444 int i, err = 0;
6445
6446 while (1) {
6447 src_prod_idx = spr->rx_std_prod_idx;
6448
6449 /* Make sure updates to the rx_std_buffers[] entries and the
6450 * standard producer index are seen in the correct order.
6451 */
6452 smp_rmb();
6453
6454 if (spr->rx_std_cons_idx == src_prod_idx)
6455 break;
6456
6457 if (spr->rx_std_cons_idx < src_prod_idx)
6458 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6459 else
6460 cpycnt = tp->rx_std_ring_mask + 1 -
6461 spr->rx_std_cons_idx;
6462
6463 cpycnt = min(cpycnt,
6464 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6465
6466 si = spr->rx_std_cons_idx;
6467 di = dpr->rx_std_prod_idx;
6468
6469 for (i = di; i < di + cpycnt; i++) {
6470 if (dpr->rx_std_buffers[i].data) {
6471 cpycnt = i - di;
6472 err = -ENOSPC;
6473 break;
6474 }
6475 }
6476
6477 if (!cpycnt)
6478 break;
6479
6480 /* Ensure that updates to the rx_std_buffers ring and the
6481 * shadowed hardware producer ring from tg3_recycle_skb() are
6482 * ordered correctly WRT the skb check above.
6483 */
6484 smp_rmb();
6485
6486 memcpy(&dpr->rx_std_buffers[di],
6487 &spr->rx_std_buffers[si],
6488 cpycnt * sizeof(struct ring_info));
6489
6490 for (i = 0; i < cpycnt; i++, di++, si++) {
6491 struct tg3_rx_buffer_desc *sbd, *dbd;
6492 sbd = &spr->rx_std[si];
6493 dbd = &dpr->rx_std[di];
6494 dbd->addr_hi = sbd->addr_hi;
6495 dbd->addr_lo = sbd->addr_lo;
6496 }
6497
6498 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6499 tp->rx_std_ring_mask;
6500 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6501 tp->rx_std_ring_mask;
6502 }
6503
6504 while (1) {
6505 src_prod_idx = spr->rx_jmb_prod_idx;
6506
6507 /* Make sure updates to the rx_jmb_buffers[] entries and
6508 * the jumbo producer index are seen in the correct order.
6509 */
6510 smp_rmb();
6511
6512 if (spr->rx_jmb_cons_idx == src_prod_idx)
6513 break;
6514
6515 if (spr->rx_jmb_cons_idx < src_prod_idx)
6516 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6517 else
6518 cpycnt = tp->rx_jmb_ring_mask + 1 -
6519 spr->rx_jmb_cons_idx;
6520
6521 cpycnt = min(cpycnt,
6522 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6523
6524 si = spr->rx_jmb_cons_idx;
6525 di = dpr->rx_jmb_prod_idx;
6526
6527 for (i = di; i < di + cpycnt; i++) {
6528 if (dpr->rx_jmb_buffers[i].data) {
6529 cpycnt = i - di;
6530 err = -ENOSPC;
6531 break;
6532 }
6533 }
6534
6535 if (!cpycnt)
6536 break;
6537
6538 /* Ensure that updates to the rx_jmb_buffers ring and the
6539 * shadowed hardware producer ring from tg3_recycle_skb() are
6540 * ordered correctly WRT the skb check above.
6541 */
6542 smp_rmb();
6543
6544 memcpy(&dpr->rx_jmb_buffers[di],
6545 &spr->rx_jmb_buffers[si],
6546 cpycnt * sizeof(struct ring_info));
6547
6548 for (i = 0; i < cpycnt; i++, di++, si++) {
6549 struct tg3_rx_buffer_desc *sbd, *dbd;
6550 sbd = &spr->rx_jmb[si].std;
6551 dbd = &dpr->rx_jmb[di].std;
6552 dbd->addr_hi = sbd->addr_hi;
6553 dbd->addr_lo = sbd->addr_lo;
6554 }
6555
6556 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6557 tp->rx_jmb_ring_mask;
6558 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6559 tp->rx_jmb_ring_mask;
6560 }
6561
6562 return err;
6563 }
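/* Index arithmetic example for the transfer loops above (values
 * illustrative): with rx_std_ring_mask == 511, cons == 500 and
 * prod == 10, the producer has wrapped, so the first pass copies
 * cpycnt = 512 - 500 = 12 entries up to the end of the ring and the
 * next loop iteration picks up the 10 wrapped entries.
 */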
6564
6565 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6566 {
6567 struct tg3 *tp = tnapi->tp;
6568
6569 /* run TX completion thread */
6570 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6571 tg3_tx(tnapi);
6572 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6573 return work_done;
6574 }
6575
6576 if (!tnapi->rx_rcb_prod_idx)
6577 return work_done;
6578
6579 /* run RX thread, within the bounds set by NAPI.
6580 * All RX "locking" is done by ensuring outside
6581 * code synchronizes with tg3->napi.poll()
6582 */
6583 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6584 work_done += tg3_rx(tnapi, budget - work_done);
6585
6586 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6587 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6588 int i, err = 0;
6589 u32 std_prod_idx = dpr->rx_std_prod_idx;
6590 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6591
6592 tp->rx_refill = false;
6593 for (i = 1; i <= tp->rxq_cnt; i++)
6594 err |= tg3_rx_prodring_xfer(tp, dpr,
6595 &tp->napi[i].prodring);
6596
6597 wmb();
6598
6599 if (std_prod_idx != dpr->rx_std_prod_idx)
6600 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6601 dpr->rx_std_prod_idx);
6602
6603 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6604 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6605 dpr->rx_jmb_prod_idx);
6606
6607 mmiowb();
6608
6609 if (err)
6610 tw32_f(HOSTCC_MODE, tp->coal_now);
6611 }
6612
6613 return work_done;
6614 }
6615
6616 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6617 {
6618 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6619 schedule_work(&tp->reset_task);
6620 }
6621
6622 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6623 {
6624 cancel_work_sync(&tp->reset_task);
6625 tg3_flag_clear(tp, RESET_TASK_PENDING);
6626 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6627 }
6628
6629 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6630 {
6631 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6632 struct tg3 *tp = tnapi->tp;
6633 int work_done = 0;
6634 struct tg3_hw_status *sblk = tnapi->hw_status;
6635
6636 while (1) {
6637 work_done = tg3_poll_work(tnapi, work_done, budget);
6638
6639 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6640 goto tx_recovery;
6641
6642 if (unlikely(work_done >= budget))
6643 break;
6644
6645 /* tnapi->last_tag is used when reenabling interrupts below
6646 * to tell the hw how much work has been processed,
6647 * so we must read it before checking for more work.
6648 */
6649 tnapi->last_tag = sblk->status_tag;
6650 tnapi->last_irq_tag = tnapi->last_tag;
6651 rmb();
6652
6653 /* check for RX/TX work to do */
6654 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6655 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6656
6657 /* This test here is not race-free, but will reduce
6658 * the number of interrupts by looping again.
6659 */
6660 if (tnapi == &tp->napi[1] && tp->rx_refill)
6661 continue;
6662
6663 napi_complete(napi);
6664 /* Reenable interrupts. */
6665 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6666
6667 /* This test here is synchronized by napi_schedule()
6668 * and napi_complete() to close the race condition.
6669 */
6670 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6671 tw32(HOSTCC_MODE, tp->coalesce_mode |
6672 HOSTCC_MODE_ENABLE |
6673 tnapi->coal_now);
6674 }
6675 mmiowb();
6676 break;
6677 }
6678 }
6679
6680 return work_done;
6681
6682 tx_recovery:
6683 /* work_done is guaranteed to be less than budget. */
6684 napi_complete(napi);
6685 tg3_reset_task_schedule(tp);
6686 return work_done;
6687 }
6688
6689 static void tg3_process_error(struct tg3 *tp)
6690 {
6691 u32 val;
6692 bool real_error = false;
6693
6694 if (tg3_flag(tp, ERROR_PROCESSED))
6695 return;
6696
6697 /* Check Flow Attention register */
6698 val = tr32(HOSTCC_FLOW_ATTN);
6699 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6700 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6701 real_error = true;
6702 }
6703
6704 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6705 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6706 real_error = true;
6707 }
6708
6709 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6710 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6711 real_error = true;
6712 }
6713
6714 if (!real_error)
6715 return;
6716
6717 tg3_dump_state(tp);
6718
6719 tg3_flag_set(tp, ERROR_PROCESSED);
6720 tg3_reset_task_schedule(tp);
6721 }
6722
6723 static int tg3_poll(struct napi_struct *napi, int budget)
6724 {
6725 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6726 struct tg3 *tp = tnapi->tp;
6727 int work_done = 0;
6728 struct tg3_hw_status *sblk = tnapi->hw_status;
6729
6730 while (1) {
6731 if (sblk->status & SD_STATUS_ERROR)
6732 tg3_process_error(tp);
6733
6734 tg3_poll_link(tp);
6735
6736 work_done = tg3_poll_work(tnapi, work_done, budget);
6737
6738 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6739 goto tx_recovery;
6740
6741 if (unlikely(work_done >= budget))
6742 break;
6743
6744 if (tg3_flag(tp, TAGGED_STATUS)) {
6745 /* tnapi->last_tag is used in tg3_int_reenable() below
6746 * to tell the hw how much work has been processed,
6747 * so we must read it before checking for more work.
6748 */
6749 tnapi->last_tag = sblk->status_tag;
6750 tnapi->last_irq_tag = tnapi->last_tag;
6751 rmb();
6752 } else
6753 sblk->status &= ~SD_STATUS_UPDATED;
6754
6755 if (likely(!tg3_has_work(tnapi))) {
6756 napi_complete(napi);
6757 tg3_int_reenable(tnapi);
6758 break;
6759 }
6760 }
6761
6762 return work_done;
6763
6764 tx_recovery:
6765 /* work_done is guaranteed to be less than budget. */
6766 napi_complete(napi);
6767 tg3_reset_task_schedule(tp);
6768 return work_done;
6769 }
6770
6771 static void tg3_napi_disable(struct tg3 *tp)
6772 {
6773 int i;
6774
6775 for (i = tp->irq_cnt - 1; i >= 0; i--)
6776 napi_disable(&tp->napi[i].napi);
6777 }
6778
6779 static void tg3_napi_enable(struct tg3 *tp)
6780 {
6781 int i;
6782
6783 for (i = 0; i < tp->irq_cnt; i++)
6784 napi_enable(&tp->napi[i].napi);
6785 }
6786
6787 static void tg3_napi_init(struct tg3 *tp)
6788 {
6789 int i;
6790
6791 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6792 for (i = 1; i < tp->irq_cnt; i++)
6793 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6794 }
6795
6796 static void tg3_napi_fini(struct tg3 *tp)
6797 {
6798 int i;
6799
6800 for (i = 0; i < tp->irq_cnt; i++)
6801 netif_napi_del(&tp->napi[i].napi);
6802 }
6803
6804 static inline void tg3_netif_stop(struct tg3 *tp)
6805 {
6806 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6807 tg3_napi_disable(tp);
6808 netif_carrier_off(tp->dev);
6809 netif_tx_disable(tp->dev);
6810 }
6811
6812 /* tp->lock must be held */
6813 static inline void tg3_netif_start(struct tg3 *tp)
6814 {
6815 tg3_ptp_resume(tp);
6816
6817 /* NOTE: unconditional netif_tx_wake_all_queues is only
6818 * appropriate so long as all callers are assured to
6819 * have free tx slots (such as after tg3_init_hw)
6820 */
6821 netif_tx_wake_all_queues(tp->dev);
6822
6823 if (tp->link_up)
6824 netif_carrier_on(tp->dev);
6825
6826 tg3_napi_enable(tp);
6827 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6828 tg3_enable_ints(tp);
6829 }
6830
6831 static void tg3_irq_quiesce(struct tg3 *tp)
6832 {
6833 int i;
6834
6835 BUG_ON(tp->irq_sync);
6836
6837 tp->irq_sync = 1;
6838 smp_mb();
6839
6840 for (i = 0; i < tp->irq_cnt; i++)
6841 synchronize_irq(tp->napi[i].irq_vec);
6842 }
6843
6844 /* Fully shut down all tg3 driver activity elsewhere in the system.
6845 * If irq_sync is non-zero, then the IRQ handler must also be
6846 * synchronized with. Most of the time, this is not necessary except when
6847 * shutting down the device.
6848 */
6849 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6850 {
6851 spin_lock_bh(&tp->lock);
6852 if (irq_sync)
6853 tg3_irq_quiesce(tp);
6854 }
6855
6856 static inline void tg3_full_unlock(struct tg3 *tp)
6857 {
6858 spin_unlock_bh(&tp->lock);
6859 }
6860
6861 /* One-shot MSI handler - Chip automatically disables interrupt
6862 * after sending MSI so driver doesn't have to do it.
6863 */
6864 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6865 {
6866 struct tg3_napi *tnapi = dev_id;
6867 struct tg3 *tp = tnapi->tp;
6868
6869 prefetch(tnapi->hw_status);
6870 if (tnapi->rx_rcb)
6871 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6872
6873 if (likely(!tg3_irq_sync(tp)))
6874 napi_schedule(&tnapi->napi);
6875
6876 return IRQ_HANDLED;
6877 }
6878
6879 /* MSI ISR - No need to check for interrupt sharing and no need to
6880 * flush status block and interrupt mailbox. PCI ordering rules
6881 * guarantee that MSI will arrive after the status block.
6882 */
6883 static irqreturn_t tg3_msi(int irq, void *dev_id)
6884 {
6885 struct tg3_napi *tnapi = dev_id;
6886 struct tg3 *tp = tnapi->tp;
6887
6888 prefetch(tnapi->hw_status);
6889 if (tnapi->rx_rcb)
6890 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6891 /*
6892 * Writing any value to intr-mbox-0 clears PCI INTA# and
6893 * chip-internal interrupt pending events.
6894 * Writing non-zero to intr-mbox-0 additionally tells the
6895 * NIC to stop sending us irqs, engaging "in-intr-handler"
6896 * event coalescing.
6897 */
6898 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6899 if (likely(!tg3_irq_sync(tp)))
6900 napi_schedule(&tnapi->napi);
6901
6902 return IRQ_RETVAL(1);
6903 }
6904
6905 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6906 {
6907 struct tg3_napi *tnapi = dev_id;
6908 struct tg3 *tp = tnapi->tp;
6909 struct tg3_hw_status *sblk = tnapi->hw_status;
6910 unsigned int handled = 1;
6911
6912 /* In INTx mode, it is possible for the interrupt to arrive at
6913 * the CPU before the previously posted status block is visible.
6914 * Reading the PCI State register will confirm whether the
6915 * interrupt is ours and will flush the status block.
6916 */
6917 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6918 if (tg3_flag(tp, CHIP_RESETTING) ||
6919 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6920 handled = 0;
6921 goto out;
6922 }
6923 }
6924
6925 /*
6926 * Writing any value to intr-mbox-0 clears PCI INTA# and
6927 * chip-internal interrupt pending events.
6928 * Writing non-zero to intr-mbox-0 additionally tells the
6929 * NIC to stop sending us irqs, engaging "in-intr-handler"
6930 * event coalescing.
6931 *
6932 * Flush the mailbox to de-assert the IRQ immediately to prevent
6933 * spurious interrupts. The flush impacts performance but
6934 * excessive spurious interrupts can be worse in some cases.
6935 */
6936 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6937 if (tg3_irq_sync(tp))
6938 goto out;
6939 sblk->status &= ~SD_STATUS_UPDATED;
6940 if (likely(tg3_has_work(tnapi))) {
6941 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6942 napi_schedule(&tnapi->napi);
6943 } else {
6944 /* No work, shared interrupt perhaps? re-enable
6945 * interrupts, and flush that PCI write
6946 */
6947 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6948 0x00000000);
6949 }
6950 out:
6951 return IRQ_RETVAL(handled);
6952 }
6953
6954 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6955 {
6956 struct tg3_napi *tnapi = dev_id;
6957 struct tg3 *tp = tnapi->tp;
6958 struct tg3_hw_status *sblk = tnapi->hw_status;
6959 unsigned int handled = 1;
6960
6961 /* In INTx mode, it is possible for the interrupt to arrive at
6962 * the CPU before the previously posted status block is visible.
6963 * Reading the PCI State register will confirm whether the
6964 * interrupt is ours and will flush the status block.
6965 */
6966 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6967 if (tg3_flag(tp, CHIP_RESETTING) ||
6968 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6969 handled = 0;
6970 goto out;
6971 }
6972 }
6973
6974 /*
6975 * Writing any value to intr-mbox-0 clears PCI INTA# and
6976 * chip-internal interrupt pending events.
6977 * Writing non-zero to intr-mbox-0 additionally tells the
6978 * NIC to stop sending us irqs, engaging "in-intr-handler"
6979 * event coalescing.
6980 *
6981 * Flush the mailbox to de-assert the IRQ immediately to prevent
6982 * spurious interrupts. The flush impacts performance but
6983 * excessive spurious interrupts can be worse in some cases.
6984 */
6985 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6986
6987 /*
6988 * In a shared interrupt configuration, sometimes other devices'
6989 * interrupts will scream. We record the current status tag here
6990 * so that the above check can report that the screaming interrupts
6991 * are unhandled. Eventually they will be silenced.
6992 */
6993 tnapi->last_irq_tag = sblk->status_tag;
6994
6995 if (tg3_irq_sync(tp))
6996 goto out;
6997
6998 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6999
7000 napi_schedule(&tnapi->napi);
7001
7002 out:
7003 return IRQ_RETVAL(handled);
7004 }
7005
7006 /* ISR for interrupt test */
7007 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7008 {
7009 struct tg3_napi *tnapi = dev_id;
7010 struct tg3 *tp = tnapi->tp;
7011 struct tg3_hw_status *sblk = tnapi->hw_status;
7012
7013 if ((sblk->status & SD_STATUS_UPDATED) ||
7014 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7015 tg3_disable_ints(tp);
7016 return IRQ_RETVAL(1);
7017 }
7018 return IRQ_RETVAL(0);
7019 }
7020
7021 #ifdef CONFIG_NET_POLL_CONTROLLER
7022 static void tg3_poll_controller(struct net_device *dev)
7023 {
7024 int i;
7025 struct tg3 *tp = netdev_priv(dev);
7026
7027 if (tg3_irq_sync(tp))
7028 return;
7029
7030 for (i = 0; i < tp->irq_cnt; i++)
7031 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7032 }
7033 #endif
7034
7035 static void tg3_tx_timeout(struct net_device *dev)
7036 {
7037 struct tg3 *tp = netdev_priv(dev);
7038
7039 if (netif_msg_tx_err(tp)) {
7040 netdev_err(dev, "transmit timed out, resetting\n");
7041 tg3_dump_state(tp);
7042 }
7043
7044 tg3_reset_task_schedule(tp);
7045 }
7046
7047 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7048 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7049 {
7050 u32 base = (u32) mapping & 0xffffffff;
7051
7052 return (base > 0xffffdcc0) && (base + len + 8 < base);
7053 }
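/* Worked example (illustrative): mapping == 0xffffff00, len == 0x200.
 * base == 0xffffff00 > 0xffffdcc0, and the 32-bit sum
 * base + len + 8 == 0x100000108 truncates to 0x108 < base, so the
 * buffer straddles a 4GB boundary and the test returns true. The
 * extra 8 bytes act as a small guard band on top of the length.
 */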
7054
7055 /* Test for DMA addresses > 40-bit */
7056 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7057 int len)
7058 {
7059 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7060 if (tg3_flag(tp, 40BIT_DMA_BUG))
7061 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7062 return 0;
7063 #else
7064 return 0;
7065 #endif
7066 }
7067
7068 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7069 dma_addr_t mapping, u32 len, u32 flags,
7070 u32 mss, u32 vlan)
7071 {
7072 txbd->addr_hi = ((u64) mapping >> 32);
7073 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7074 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7075 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7076 }
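/* BD packing sketch using the tg3.h field names (symbolic only): a
 * 1514-byte final fragment with no TSO and no VLAN tag becomes
 *
 *	txbd->len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *	txbd->vlan_tag  = 0;
 *
 * with the 64-bit DMA address split across addr_hi/addr_lo.
 */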
7077
7078 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7079 dma_addr_t map, u32 len, u32 flags,
7080 u32 mss, u32 vlan)
7081 {
7082 struct tg3 *tp = tnapi->tp;
7083 bool hwbug = false;
7084
7085 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7086 hwbug = true;
7087
7088 if (tg3_4g_overflow_test(map, len))
7089 hwbug = true;
7090
7091 if (tg3_40bit_overflow_test(tp, map, len))
7092 hwbug = true;
7093
7094 if (tp->dma_limit) {
7095 u32 prvidx = *entry;
7096 u32 tmp_flag = flags & ~TXD_FLAG_END;
7097 while (len > tp->dma_limit && *budget) {
7098 u32 frag_len = tp->dma_limit;
7099 len -= tp->dma_limit;
7100
7101 /* Avoid the 8-byte DMA problem */
7102 if (len <= 8) {
7103 len += tp->dma_limit / 2;
7104 frag_len = tp->dma_limit / 2;
7105 }
7106
7107 tnapi->tx_buffers[*entry].fragmented = true;
7108
7109 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7110 frag_len, tmp_flag, mss, vlan);
7111 *budget -= 1;
7112 prvidx = *entry;
7113 *entry = NEXT_TX(*entry);
7114
7115 map += frag_len;
7116 }
7117
7118 if (len) {
7119 if (*budget) {
7120 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7121 len, flags, mss, vlan);
7122 *budget -= 1;
7123 *entry = NEXT_TX(*entry);
7124 } else {
7125 hwbug = true;
7126 tnapi->tx_buffers[prvidx].fragmented = false;
7127 }
7128 }
7129 } else {
7130 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7131 len, flags, mss, vlan);
7132 *entry = NEXT_TX(*entry);
7133 }
7134
7135 return hwbug;
7136 }
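/* Example of the dma_limit splitting above (values illustrative):
 * with tp->dma_limit == 4096, a 9000-byte fragment is emitted as
 * 4096 + 4096 BDs plus an 808-byte tail. Had the tail come out at
 * 8 bytes or less, the last full chunk would have been emitted as
 * dma_limit/2 instead, leaving a tail just over dma_limit/2, so that
 * neither piece trips the short-DMA (<= 8 byte) hardware bug.
 */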
7137
7138 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7139 {
7140 int i;
7141 struct sk_buff *skb;
7142 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7143
7144 skb = txb->skb;
7145 txb->skb = NULL;
7146
7147 pci_unmap_single(tnapi->tp->pdev,
7148 dma_unmap_addr(txb, mapping),
7149 skb_headlen(skb),
7150 PCI_DMA_TODEVICE);
7151
7152 while (txb->fragmented) {
7153 txb->fragmented = false;
7154 entry = NEXT_TX(entry);
7155 txb = &tnapi->tx_buffers[entry];
7156 }
7157
7158 for (i = 0; i <= last; i++) {
7159 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7160
7161 entry = NEXT_TX(entry);
7162 txb = &tnapi->tx_buffers[entry];
7163
7164 pci_unmap_page(tnapi->tp->pdev,
7165 dma_unmap_addr(txb, mapping),
7166 skb_frag_size(frag), PCI_DMA_TODEVICE);
7167
7168 while (txb->fragmented) {
7169 txb->fragmented = false;
7170 entry = NEXT_TX(entry);
7171 txb = &tnapi->tx_buffers[entry];
7172 }
7173 }
7174 }
7175
7176 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7177 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7178 struct sk_buff **pskb,
7179 u32 *entry, u32 *budget,
7180 u32 base_flags, u32 mss, u32 vlan)
7181 {
7182 struct tg3 *tp = tnapi->tp;
7183 struct sk_buff *new_skb, *skb = *pskb;
7184 dma_addr_t new_addr = 0;
7185 int ret = 0;
7186
7187 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7188 new_skb = skb_copy(skb, GFP_ATOMIC);
7189 else {
7190 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7191
7192 new_skb = skb_copy_expand(skb,
7193 skb_headroom(skb) + more_headroom,
7194 skb_tailroom(skb), GFP_ATOMIC);
7195 }
7196
7197 if (!new_skb) {
7198 ret = -1;
7199 } else {
7200 /* New SKB is guaranteed to be linear. */
7201 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7202 PCI_DMA_TODEVICE);
7203 /* Make sure the mapping succeeded */
7204 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7205 dev_kfree_skb(new_skb);
7206 ret = -1;
7207 } else {
7208 u32 save_entry = *entry;
7209
7210 base_flags |= TXD_FLAG_END;
7211
7212 tnapi->tx_buffers[*entry].skb = new_skb;
7213 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7214 mapping, new_addr);
7215
7216 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7217 new_skb->len, base_flags,
7218 mss, vlan)) {
7219 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7220 dev_kfree_skb(new_skb);
7221 ret = -1;
7222 }
7223 }
7224 }
7225
7226 dev_kfree_skb(skb);
7227 *pskb = new_skb;
7228 return ret;
7229 }
7230
7231 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7232
7233 /* Use GSO to work around a rare TSO bug that may be triggered when the
7234 * TSO header is greater than 80 bytes.
7235 */
7236 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7237 {
7238 struct sk_buff *segs, *nskb;
7239 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7240
7241 /* Estimate the number of fragments in the worst case */
7242 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7243 netif_stop_queue(tp->dev);
7244
7245 /* netif_tx_stop_queue() must be done before checking
7246 * the tx index in tg3_tx_avail() below, because in
7247 * tg3_tx(), we update tx index before checking for
7248 * netif_tx_queue_stopped().
7249 */
7250 smp_mb();
7251 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7252 return NETDEV_TX_BUSY;
7253
7254 netif_wake_queue(tp->dev);
7255 }
7256
7257 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7258 if (IS_ERR(segs))
7259 goto tg3_tso_bug_end;
7260
7261 do {
7262 nskb = segs;
7263 segs = segs->next;
7264 nskb->next = NULL;
7265 tg3_start_xmit(nskb, tp->dev);
7266 } while (segs);
7267
7268 tg3_tso_bug_end:
7269 dev_kfree_skb(skb);
7270
7271 return NETDEV_TX_OK;
7272 }
7273
7274 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7275 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7276 */
7277 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7278 {
7279 struct tg3 *tp = netdev_priv(dev);
7280 u32 len, entry, base_flags, mss, vlan = 0;
7281 u32 budget;
7282 int i = -1, would_hit_hwbug;
7283 dma_addr_t mapping;
7284 struct tg3_napi *tnapi;
7285 struct netdev_queue *txq;
7286 unsigned int last;
7287
7288 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7289 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7290 if (tg3_flag(tp, ENABLE_TSS))
7291 tnapi++;
7292
7293 budget = tg3_tx_avail(tnapi);
7294
7295 /* We are running in BH disabled context with netif_tx_lock
7296 * and TX reclaim runs via tp->napi.poll inside of a software
7297 * interrupt. Furthermore, IRQ processing runs lockless so we have
7298 * no IRQ context deadlocks to worry about either. Rejoice!
7299 */
7300 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7301 if (!netif_tx_queue_stopped(txq)) {
7302 netif_tx_stop_queue(txq);
7303
7304 /* This is a hard error, log it. */
7305 netdev_err(dev,
7306 "BUG! Tx Ring full when queue awake!\n");
7307 }
7308 return NETDEV_TX_BUSY;
7309 }
7310
7311 entry = tnapi->tx_prod;
7312 base_flags = 0;
7313 if (skb->ip_summed == CHECKSUM_PARTIAL)
7314 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7315
7316 mss = skb_shinfo(skb)->gso_size;
7317 if (mss) {
7318 struct iphdr *iph;
7319 u32 tcp_opt_len, hdr_len;
7320
7321 if (skb_header_cloned(skb) &&
7322 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7323 goto drop;
7324
7325 iph = ip_hdr(skb);
7326 tcp_opt_len = tcp_optlen(skb);
7327
7328 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7329
7330 if (!skb_is_gso_v6(skb)) {
7331 iph->check = 0;
7332 iph->tot_len = htons(mss + hdr_len);
7333 }
7334
7335 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7336 tg3_flag(tp, TSO_BUG))
7337 return tg3_tso_bug(tp, skb);
7338
7339 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7340 TXD_FLAG_CPU_POST_DMA);
7341
7342 if (tg3_flag(tp, HW_TSO_1) ||
7343 tg3_flag(tp, HW_TSO_2) ||
7344 tg3_flag(tp, HW_TSO_3)) {
7345 tcp_hdr(skb)->check = 0;
7346 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7347 } else
7348 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7349 iph->daddr, 0,
7350 IPPROTO_TCP,
7351 0);
7352
7353 if (tg3_flag(tp, HW_TSO_3)) {
7354 mss |= (hdr_len & 0xc) << 12;
7355 if (hdr_len & 0x10)
7356 base_flags |= 0x00000010;
7357 base_flags |= (hdr_len & 0x3e0) << 5;
7358 } else if (tg3_flag(tp, HW_TSO_2))
7359 mss |= hdr_len << 9;
7360 else if (tg3_flag(tp, HW_TSO_1) ||
7361 tg3_asic_rev(tp) == ASIC_REV_5705) {
7362 if (tcp_opt_len || iph->ihl > 5) {
7363 int tsflags;
7364
7365 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7366 mss |= (tsflags << 11);
7367 }
7368 } else {
7369 if (tcp_opt_len || iph->ihl > 5) {
7370 int tsflags;
7371
7372 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7373 base_flags |= tsflags << 12;
7374 }
7375 }
7376 }
7377
7378 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7379 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7380 base_flags |= TXD_FLAG_JMB_PKT;
7381
7382 if (vlan_tx_tag_present(skb)) {
7383 base_flags |= TXD_FLAG_VLAN;
7384 vlan = vlan_tx_tag_get(skb);
7385 }
7386
7387 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7388 tg3_flag(tp, TX_TSTAMP_EN)) {
7389 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7390 base_flags |= TXD_FLAG_HWTSTAMP;
7391 }
7392
7393 len = skb_headlen(skb);
7394
7395 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7396 if (pci_dma_mapping_error(tp->pdev, mapping))
7397 goto drop;
7398
7399
7400 tnapi->tx_buffers[entry].skb = skb;
7401 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7402
7403 would_hit_hwbug = 0;
7404
7405 if (tg3_flag(tp, 5701_DMA_BUG))
7406 would_hit_hwbug = 1;
7407
7408 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7409 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7410 mss, vlan)) {
7411 would_hit_hwbug = 1;
7412 } else if (skb_shinfo(skb)->nr_frags > 0) {
7413 u32 tmp_mss = mss;
7414
7415 if (!tg3_flag(tp, HW_TSO_1) &&
7416 !tg3_flag(tp, HW_TSO_2) &&
7417 !tg3_flag(tp, HW_TSO_3))
7418 tmp_mss = 0;
7419
7420 /* Now loop through additional data
7421 * fragments, and queue them.
7422 */
7423 last = skb_shinfo(skb)->nr_frags - 1;
7424 for (i = 0; i <= last; i++) {
7425 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7426
7427 len = skb_frag_size(frag);
7428 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7429 len, DMA_TO_DEVICE);
7430
7431 tnapi->tx_buffers[entry].skb = NULL;
7432 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7433 mapping);
7434 if (dma_mapping_error(&tp->pdev->dev, mapping))
7435 goto dma_error;
7436
7437 if (!budget ||
7438 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7439 len, base_flags |
7440 ((i == last) ? TXD_FLAG_END : 0),
7441 tmp_mss, vlan)) {
7442 would_hit_hwbug = 1;
7443 break;
7444 }
7445 }
7446 }
7447
7448 if (would_hit_hwbug) {
7449 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7450
7451 /* If the workaround fails due to memory/mapping
7452 * failure, silently drop this packet.
7453 */
7454 entry = tnapi->tx_prod;
7455 budget = tg3_tx_avail(tnapi);
7456 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7457 base_flags, mss, vlan))
7458 goto drop_nofree;
7459 }
7460
7461 skb_tx_timestamp(skb);
7462 netdev_tx_sent_queue(txq, skb->len);
7463
7464 /* Sync BD data before updating mailbox */
7465 wmb();
7466
7467 /* Packets are ready, update Tx producer idx local and on card. */
7468 tw32_tx_mbox(tnapi->prodmbox, entry);
7469
7470 tnapi->tx_prod = entry;
7471 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7472 netif_tx_stop_queue(txq);
7473
7474 /* netif_tx_stop_queue() must be done before checking
7475 * the tx index in tg3_tx_avail() below, because in
7476 * tg3_tx(), we update tx index before checking for
7477 * netif_tx_queue_stopped().
7478 */
7479 smp_mb();
7480 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7481 netif_tx_wake_queue(txq);
7482 }
7483
7484 mmiowb();
7485 return NETDEV_TX_OK;
7486
7487 dma_error:
7488 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7489 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7490 drop:
7491 dev_kfree_skb(skb);
7492 drop_nofree:
7493 tp->tx_dropped++;
7494 return NETDEV_TX_OK;
7495 }
7496
7497 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7498 {
7499 if (enable) {
7500 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7501 MAC_MODE_PORT_MODE_MASK);
7502
7503 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7504
7505 if (!tg3_flag(tp, 5705_PLUS))
7506 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7507
7508 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7509 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7510 else
7511 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7512 } else {
7513 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7514
7515 if (tg3_flag(tp, 5705_PLUS) ||
7516 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7517 tg3_asic_rev(tp) == ASIC_REV_5700)
7518 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7519 }
7520
7521 tw32(MAC_MODE, tp->mac_mode);
7522 udelay(40);
7523 }
7524
7525 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7526 {
7527 u32 val, bmcr, mac_mode, ptest = 0;
7528
7529 tg3_phy_toggle_apd(tp, false);
7530 tg3_phy_toggle_automdix(tp, 0);
7531
7532 if (extlpbk && tg3_phy_set_extloopbk(tp))
7533 return -EIO;
7534
7535 bmcr = BMCR_FULLDPLX;
7536 switch (speed) {
7537 case SPEED_10:
7538 break;
7539 case SPEED_100:
7540 bmcr |= BMCR_SPEED100;
7541 break;
7542 case SPEED_1000:
7543 default:
7544 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7545 speed = SPEED_100;
7546 bmcr |= BMCR_SPEED100;
7547 } else {
7548 speed = SPEED_1000;
7549 bmcr |= BMCR_SPEED1000;
7550 }
7551 }
7552
7553 if (extlpbk) {
7554 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7555 tg3_readphy(tp, MII_CTRL1000, &val);
7556 val |= CTL1000_AS_MASTER |
7557 CTL1000_ENABLE_MASTER;
7558 tg3_writephy(tp, MII_CTRL1000, val);
7559 } else {
7560 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7561 MII_TG3_FET_PTEST_TRIM_2;
7562 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7563 }
7564 } else
7565 bmcr |= BMCR_LOOPBACK;
7566
7567 tg3_writephy(tp, MII_BMCR, bmcr);
7568
7569 /* The write needs to be flushed for the FETs */
7570 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7571 tg3_readphy(tp, MII_BMCR, &bmcr);
7572
7573 udelay(40);
7574
7575 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7576 tg3_asic_rev(tp) == ASIC_REV_5785) {
7577 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7578 MII_TG3_FET_PTEST_FRC_TX_LINK |
7579 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7580
7581 /* The write needs to be flushed for the AC131 */
7582 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7583 }
7584
7585 /* Reset to prevent losing 1st rx packet intermittently */
7586 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7587 tg3_flag(tp, 5780_CLASS)) {
7588 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7589 udelay(10);
7590 tw32_f(MAC_RX_MODE, tp->rx_mode);
7591 }
7592
7593 mac_mode = tp->mac_mode &
7594 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7595 if (speed == SPEED_1000)
7596 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7597 else
7598 mac_mode |= MAC_MODE_PORT_MODE_MII;
7599
7600 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7601 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7602
7603 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7604 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7605 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7606 mac_mode |= MAC_MODE_LINK_POLARITY;
7607
7608 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7609 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7610 }
7611
7612 tw32(MAC_MODE, mac_mode);
7613 udelay(40);
7614
7615 return 0;
7616 }
7617
7618 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7619 {
7620 struct tg3 *tp = netdev_priv(dev);
7621
7622 if (features & NETIF_F_LOOPBACK) {
7623 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7624 return;
7625
7626 spin_lock_bh(&tp->lock);
7627 tg3_mac_loopback(tp, true);
7628 netif_carrier_on(tp->dev);
7629 spin_unlock_bh(&tp->lock);
7630 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7631 } else {
7632 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7633 return;
7634
7635 spin_lock_bh(&tp->lock);
7636 tg3_mac_loopback(tp, false);
7637 /* Force link status check */
7638 tg3_setup_phy(tp, 1);
7639 spin_unlock_bh(&tp->lock);
7640 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7641 }
7642 }
7643
7644 static netdev_features_t tg3_fix_features(struct net_device *dev,
7645 netdev_features_t features)
7646 {
7647 struct tg3 *tp = netdev_priv(dev);
7648
7649 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7650 features &= ~NETIF_F_ALL_TSO;
7651
7652 return features;
7653 }
7654
7655 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7656 {
7657 netdev_features_t changed = dev->features ^ features;
7658
7659 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7660 tg3_set_loopback(dev, features);
7661
7662 return 0;
7663 }
7664
7665 static void tg3_rx_prodring_free(struct tg3 *tp,
7666 struct tg3_rx_prodring_set *tpr)
7667 {
7668 int i;
7669
7670 if (tpr != &tp->napi[0].prodring) {
7671 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7672 i = (i + 1) & tp->rx_std_ring_mask)
7673 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7674 tp->rx_pkt_map_sz);
7675
7676 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7677 for (i = tpr->rx_jmb_cons_idx;
7678 i != tpr->rx_jmb_prod_idx;
7679 i = (i + 1) & tp->rx_jmb_ring_mask) {
7680 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7681 TG3_RX_JMB_MAP_SZ);
7682 }
7683 }
7684
7685 return;
7686 }
7687
7688 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7689 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7690 tp->rx_pkt_map_sz);
7691
7692 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7693 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7694 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7695 TG3_RX_JMB_MAP_SZ);
7696 }
7697 }
7698
7699 /* Initialize rx rings for packet processing.
7700 *
7701 * The chip has been shut down and the driver detached from
7702 * the networking stack, so no interrupts or new tx packets will
7703 * end up in the driver. tp->{tx,}lock are held and thus
7704 * we may not sleep.
7705 */
7706 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7707 struct tg3_rx_prodring_set *tpr)
7708 {
7709 u32 i, rx_pkt_dma_sz;
7710
7711 tpr->rx_std_cons_idx = 0;
7712 tpr->rx_std_prod_idx = 0;
7713 tpr->rx_jmb_cons_idx = 0;
7714 tpr->rx_jmb_prod_idx = 0;
7715
7716 if (tpr != &tp->napi[0].prodring) {
7717 memset(&tpr->rx_std_buffers[0], 0,
7718 TG3_RX_STD_BUFF_RING_SIZE(tp));
7719 if (tpr->rx_jmb_buffers)
7720 memset(&tpr->rx_jmb_buffers[0], 0,
7721 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7722 goto done;
7723 }
7724
7725 /* Zero out all descriptors. */
7726 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7727
7728 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7729 if (tg3_flag(tp, 5780_CLASS) &&
7730 tp->dev->mtu > ETH_DATA_LEN)
7731 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7732 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7733
7734 /* Initialize invariants of the rings; we only set this
7735 * stuff once. This works because the card does not
7736 * write into the rx buffer posting rings.
7737 */
7738 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7739 struct tg3_rx_buffer_desc *rxd;
7740
7741 rxd = &tpr->rx_std[i];
7742 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7743 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7744 rxd->opaque = (RXD_OPAQUE_RING_STD |
7745 (i << RXD_OPAQUE_INDEX_SHIFT));
7746 }
7747
7748 /* Now allocate fresh SKBs for each rx ring. */
7749 for (i = 0; i < tp->rx_pending; i++) {
7750 unsigned int frag_size;
7751
7752 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7753 &frag_size) < 0) {
7754 netdev_warn(tp->dev,
7755 "Using a smaller RX standard ring. Only "
7756 "%d out of %d buffers were allocated "
7757 "successfully\n", i, tp->rx_pending);
7758 if (i == 0)
7759 goto initfail;
7760 tp->rx_pending = i;
7761 break;
7762 }
7763 }
7764
7765 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7766 goto done;
7767
7768 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7769
7770 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7771 goto done;
7772
7773 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7774 struct tg3_rx_buffer_desc *rxd;
7775
7776 rxd = &tpr->rx_jmb[i].std;
7777 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7778 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7779 RXD_FLAG_JUMBO;
7780 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7781 (i << RXD_OPAQUE_INDEX_SHIFT));
7782 }
7783
7784 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7785 unsigned int frag_size;
7786
7787 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7788 &frag_size) < 0) {
7789 netdev_warn(tp->dev,
7790 "Using a smaller RX jumbo ring. Only %d "
7791 "out of %d buffers were allocated "
7792 "successfully\n", i, tp->rx_jumbo_pending);
7793 if (i == 0)
7794 goto initfail;
7795 tp->rx_jumbo_pending = i;
7796 break;
7797 }
7798 }
7799
7800 done:
7801 return 0;
7802
7803 initfail:
7804 tg3_rx_prodring_free(tp, tpr);
7805 return -ENOMEM;
7806 }
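/* The opaque cookie written above is what tg3_rx() later decodes:
 * for standard-ring entry 5 (encoding shown symbolically, per the
 * RXD_OPAQUE_* definitions in tg3.h),
 *
 *	opaque = RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT)
 *
 * and the receive path recovers the ring with RXD_OPAQUE_RING_MASK
 * and the buffer index with RXD_OPAQUE_INDEX_MASK.
 */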
7807
7808 static void tg3_rx_prodring_fini(struct tg3 *tp,
7809 struct tg3_rx_prodring_set *tpr)
7810 {
7811 kfree(tpr->rx_std_buffers);
7812 tpr->rx_std_buffers = NULL;
7813 kfree(tpr->rx_jmb_buffers);
7814 tpr->rx_jmb_buffers = NULL;
7815 if (tpr->rx_std) {
7816 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7817 tpr->rx_std, tpr->rx_std_mapping);
7818 tpr->rx_std = NULL;
7819 }
7820 if (tpr->rx_jmb) {
7821 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7822 tpr->rx_jmb, tpr->rx_jmb_mapping);
7823 tpr->rx_jmb = NULL;
7824 }
7825 }
7826
7827 static int tg3_rx_prodring_init(struct tg3 *tp,
7828 struct tg3_rx_prodring_set *tpr)
7829 {
7830 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7831 GFP_KERNEL);
7832 if (!tpr->rx_std_buffers)
7833 return -ENOMEM;
7834
7835 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7836 TG3_RX_STD_RING_BYTES(tp),
7837 &tpr->rx_std_mapping,
7838 GFP_KERNEL);
7839 if (!tpr->rx_std)
7840 goto err_out;
7841
7842 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7843 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7844 GFP_KERNEL);
7845 if (!tpr->rx_jmb_buffers)
7846 goto err_out;
7847
7848 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7849 TG3_RX_JMB_RING_BYTES(tp),
7850 &tpr->rx_jmb_mapping,
7851 GFP_KERNEL);
7852 if (!tpr->rx_jmb)
7853 goto err_out;
7854 }
7855
7856 return 0;
7857
7858 err_out:
7859 tg3_rx_prodring_fini(tp, tpr);
7860 return -ENOMEM;
7861 }
7862
7863 /* Free up pending packets in all rx/tx rings.
7864 *
7865 * The chip has been shut down and the driver detached from
7866 * the networking stack, so no interrupts or new tx packets will
7867 * end up in the driver. tp->{tx,}lock is not held and we are not
7868 * in an interrupt context and thus may sleep.
7869 */
7870 static void tg3_free_rings(struct tg3 *tp)
7871 {
7872 int i, j;
7873
7874 for (j = 0; j < tp->irq_cnt; j++) {
7875 struct tg3_napi *tnapi = &tp->napi[j];
7876
7877 tg3_rx_prodring_free(tp, &tnapi->prodring);
7878
7879 if (!tnapi->tx_buffers)
7880 continue;
7881
7882 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7883 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7884
7885 if (!skb)
7886 continue;
7887
7888 tg3_tx_skb_unmap(tnapi, i,
7889 skb_shinfo(skb)->nr_frags - 1);
7890
7891 dev_kfree_skb_any(skb);
7892 }
7893 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7894 }
7895 }
7896
7897 /* Initialize tx/rx rings for packet processing.
7898 *
7899 * The chip has been shut down and the driver detached from
7900 * the networking stack, so no interrupts or new tx packets will
7901 * end up in the driver. tp->{tx,}lock are held and thus
7902 * we may not sleep.
7903 */
7904 static int tg3_init_rings(struct tg3 *tp)
7905 {
7906 int i;
7907
7908 /* Free up all the SKBs. */
7909 tg3_free_rings(tp);
7910
7911 for (i = 0; i < tp->irq_cnt; i++) {
7912 struct tg3_napi *tnapi = &tp->napi[i];
7913
7914 tnapi->last_tag = 0;
7915 tnapi->last_irq_tag = 0;
7916 tnapi->hw_status->status = 0;
7917 tnapi->hw_status->status_tag = 0;
7918 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7919
7920 tnapi->tx_prod = 0;
7921 tnapi->tx_cons = 0;
7922 if (tnapi->tx_ring)
7923 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7924
7925 tnapi->rx_rcb_ptr = 0;
7926 if (tnapi->rx_rcb)
7927 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7928
7929 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7930 tg3_free_rings(tp);
7931 return -ENOMEM;
7932 }
7933 }
7934
7935 return 0;
7936 }
7937
7938 static void tg3_mem_tx_release(struct tg3 *tp)
7939 {
7940 int i;
7941
7942 for (i = 0; i < tp->irq_max; i++) {
7943 struct tg3_napi *tnapi = &tp->napi[i];
7944
7945 if (tnapi->tx_ring) {
7946 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7947 tnapi->tx_ring, tnapi->tx_desc_mapping);
7948 tnapi->tx_ring = NULL;
7949 }
7950
7951 kfree(tnapi->tx_buffers);
7952 tnapi->tx_buffers = NULL;
7953 }
7954 }
7955
7956 static int tg3_mem_tx_acquire(struct tg3 *tp)
7957 {
7958 int i;
7959 struct tg3_napi *tnapi = &tp->napi[0];
7960
7961 /* If multivector TSS is enabled, vector 0 does not handle
7962 * tx interrupts. Don't allocate any resources for it.
7963 */
7964 if (tg3_flag(tp, ENABLE_TSS))
7965 tnapi++;
7966
7967 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7968 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7969 TG3_TX_RING_SIZE, GFP_KERNEL);
7970 if (!tnapi->tx_buffers)
7971 goto err_out;
7972
7973 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7974 TG3_TX_RING_BYTES,
7975 &tnapi->tx_desc_mapping,
7976 GFP_KERNEL);
7977 if (!tnapi->tx_ring)
7978 goto err_out;
7979 }
7980
7981 return 0;
7982
7983 err_out:
7984 tg3_mem_tx_release(tp);
7985 return -ENOMEM;
7986 }
7987
7988 static void tg3_mem_rx_release(struct tg3 *tp)
7989 {
7990 int i;
7991
7992 for (i = 0; i < tp->irq_max; i++) {
7993 struct tg3_napi *tnapi = &tp->napi[i];
7994
7995 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7996
7997 if (!tnapi->rx_rcb)
7998 continue;
7999
8000 dma_free_coherent(&tp->pdev->dev,
8001 TG3_RX_RCB_RING_BYTES(tp),
8002 tnapi->rx_rcb,
8003 tnapi->rx_rcb_mapping);
8004 tnapi->rx_rcb = NULL;
8005 }
8006 }
8007
8008 static int tg3_mem_rx_acquire(struct tg3 *tp)
8009 {
8010 unsigned int i, limit;
8011
8012 limit = tp->rxq_cnt;
8013
8014 /* If RSS is enabled, we need a (dummy) producer ring
8015 * set on vector zero. This is the true hw prodring.
8016 */
8017 if (tg3_flag(tp, ENABLE_RSS))
8018 limit++;
8019
8020 for (i = 0; i < limit; i++) {
8021 struct tg3_napi *tnapi = &tp->napi[i];
8022
8023 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8024 goto err_out;
8025
8026 /* If multivector RSS is enabled, vector 0
8027 * does not handle rx or tx interrupts.
8028 * Don't allocate any resources for it.
8029 */
8030 if (!i && tg3_flag(tp, ENABLE_RSS))
8031 continue;
8032
8033 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8034 TG3_RX_RCB_RING_BYTES(tp),
8035 &tnapi->rx_rcb_mapping,
8036 GFP_KERNEL);
8037 if (!tnapi->rx_rcb)
8038 goto err_out;
8039
8040 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8041 }
8042
8043 return 0;
8044
8045 err_out:
8046 tg3_mem_rx_release(tp);
8047 return -ENOMEM;
8048 }
8049
8050 /*
8051 * Must only be invoked with interrupt sources disabled and
8052 * the hardware shut down.
8053 */
8054 static void tg3_free_consistent(struct tg3 *tp)
8055 {
8056 int i;
8057
8058 for (i = 0; i < tp->irq_cnt; i++) {
8059 struct tg3_napi *tnapi = &tp->napi[i];
8060
8061 if (tnapi->hw_status) {
8062 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8063 tnapi->hw_status,
8064 tnapi->status_mapping);
8065 tnapi->hw_status = NULL;
8066 }
8067 }
8068
8069 tg3_mem_rx_release(tp);
8070 tg3_mem_tx_release(tp);
8071
8072 if (tp->hw_stats) {
8073 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8074 tp->hw_stats, tp->stats_mapping);
8075 tp->hw_stats = NULL;
8076 }
8077 }
8078
8079 /*
8080 * Must only be invoked with interrupt sources disabled and
8081 * the hardware shut down. Can sleep.
8082 */
8083 static int tg3_alloc_consistent(struct tg3 *tp)
8084 {
8085 int i;
8086
8087 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8088 sizeof(struct tg3_hw_stats),
8089 &tp->stats_mapping,
8090 GFP_KERNEL);
8091 if (!tp->hw_stats)
8092 goto err_out;
8093
8094 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8095
8096 for (i = 0; i < tp->irq_cnt; i++) {
8097 struct tg3_napi *tnapi = &tp->napi[i];
8098 struct tg3_hw_status *sblk;
8099
8100 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8101 TG3_HW_STATUS_SIZE,
8102 &tnapi->status_mapping,
8103 GFP_KERNEL);
8104 if (!tnapi->hw_status)
8105 goto err_out;
8106
8107 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8108 sblk = tnapi->hw_status;
8109
8110 if (tg3_flag(tp, ENABLE_RSS)) {
8111 u16 *prodptr = NULL;
8112
8113 /*
8114 * When RSS is enabled, the status block format changes
8115 * slightly. The "rx_jumbo_consumer", "reserved",
8116 * and "rx_mini_consumer" members get mapped to the
8117 * other three rx return ring producer indexes.
8118 */
8119 switch (i) {
8120 case 1:
8121 prodptr = &sblk->idx[0].rx_producer;
8122 break;
8123 case 2:
8124 prodptr = &sblk->rx_jumbo_consumer;
8125 break;
8126 case 3:
8127 prodptr = &sblk->reserved;
8128 break;
8129 case 4:
8130 prodptr = &sblk->rx_mini_consumer;
8131 break;
8132 }
8133 tnapi->rx_rcb_prod_idx = prodptr;
8134 } else {
8135 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8136 }
8137 }
8138
8139 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8140 goto err_out;
8141
8142 return 0;
8143
8144 err_out:
8145 tg3_free_consistent(tp);
8146 return -ENOMEM;
8147 }
8148
8149 #define MAX_WAIT_CNT 1000
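/* With the udelay(100) poll loops below, MAX_WAIT_CNT bounds each
 * block-stop wait at roughly 1000 * 100us = 100ms before giving up.
 */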
8150
8151 /* To stop a block, clear the enable bit and poll until it
8152 * clears. tp->lock is held.
8153 */
8154 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8155 {
8156 unsigned int i;
8157 u32 val;
8158
8159 if (tg3_flag(tp, 5705_PLUS)) {
8160 switch (ofs) {
8161 case RCVLSC_MODE:
8162 case DMAC_MODE:
8163 case MBFREE_MODE:
8164 case BUFMGR_MODE:
8165 case MEMARB_MODE:
8166 /* We can't enable/disable these bits on the
8167 * 5705/5750, so just report success.
8168 */
8169 return 0;
8170
8171 default:
8172 break;
8173 }
8174 }
8175
8176 val = tr32(ofs);
8177 val &= ~enable_bit;
8178 tw32_f(ofs, val);
8179
8180 for (i = 0; i < MAX_WAIT_CNT; i++) {
8181 udelay(100);
8182 val = tr32(ofs);
8183 if ((val & enable_bit) == 0)
8184 break;
8185 }
8186
8187 if (i == MAX_WAIT_CNT && !silent) {
8188 dev_err(&tp->pdev->dev,
8189 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8190 ofs, enable_bit);
8191 return -ENODEV;
8192 }
8193
8194 return 0;
8195 }
8196
8197 /* tp->lock is held. */
8198 static int tg3_abort_hw(struct tg3 *tp, int silent)
8199 {
8200 int i, err;
8201
8202 tg3_disable_ints(tp);
8203
8204 tp->rx_mode &= ~RX_MODE_ENABLE;
8205 tw32_f(MAC_RX_MODE, tp->rx_mode);
8206 udelay(10);
8207
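/* Each tg3_stop_block() call returns 0 or -ENODEV; OR-ing the results
 * below only records that at least one block failed to stop, not which.
 */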
8208 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8209 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8210 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8211 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8212 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8213 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8214
8215 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8216 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8217 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8218 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8219 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8220 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8221 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8222
8223 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8224 tw32_f(MAC_MODE, tp->mac_mode);
8225 udelay(40);
8226
8227 tp->tx_mode &= ~TX_MODE_ENABLE;
8228 tw32_f(MAC_TX_MODE, tp->tx_mode);
8229
8230 for (i = 0; i < MAX_WAIT_CNT; i++) {
8231 udelay(100);
8232 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8233 break;
8234 }
8235 if (i >= MAX_WAIT_CNT) {
8236 dev_err(&tp->pdev->dev,
8237 "%s timed out, TX_MODE_ENABLE will not clear "
8238 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8239 err |= -ENODEV;
8240 }
8241
8242 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8243 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8244 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8245
8246 tw32(FTQ_RESET, 0xffffffff);
8247 tw32(FTQ_RESET, 0x00000000);
8248
8249 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8250 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8251
8252 for (i = 0; i < tp->irq_cnt; i++) {
8253 struct tg3_napi *tnapi = &tp->napi[i];
8254 if (tnapi->hw_status)
8255 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8256 }
8257
8258 return err;
8259 }
8260
8261 /* Save PCI command register before chip reset */
8262 static void tg3_save_pci_state(struct tg3 *tp)
8263 {
8264 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8265 }
8266
8267 /* Restore PCI state after chip reset */
8268 static void tg3_restore_pci_state(struct tg3 *tp)
8269 {
8270 u32 val;
8271
8272 /* Re-enable indirect register accesses. */
8273 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8274 tp->misc_host_ctrl);
8275
8276 /* Set MAX PCI retry to zero. */
8277 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8278 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8279 tg3_flag(tp, PCIX_MODE))
8280 val |= PCISTATE_RETRY_SAME_DMA;
8281 /* Allow reads and writes to the APE register and memory space. */
8282 if (tg3_flag(tp, ENABLE_APE))
8283 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8284 PCISTATE_ALLOW_APE_SHMEM_WR |
8285 PCISTATE_ALLOW_APE_PSPACE_WR;
8286 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8287
8288 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8289
8290 if (!tg3_flag(tp, PCI_EXPRESS)) {
8291 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8292 tp->pci_cacheline_sz);
8293 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8294 tp->pci_lat_timer);
8295 }
8296
8297 /* Make sure PCI-X relaxed ordering bit is clear. */
8298 if (tg3_flag(tp, PCIX_MODE)) {
8299 u16 pcix_cmd;
8300
8301 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8302 &pcix_cmd);
8303 pcix_cmd &= ~PCI_X_CMD_ERO;
8304 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8305 pcix_cmd);
8306 }
8307
8308 if (tg3_flag(tp, 5780_CLASS)) {
8309
8310 /* A chip reset on the 5780 clears the MSI enable bit,
8311 * so we need to restore it.
8312 */
8313 if (tg3_flag(tp, USING_MSI)) {
8314 u16 ctrl;
8315
8316 pci_read_config_word(tp->pdev,
8317 tp->msi_cap + PCI_MSI_FLAGS,
8318 &ctrl);
8319 pci_write_config_word(tp->pdev,
8320 tp->msi_cap + PCI_MSI_FLAGS,
8321 ctrl | PCI_MSI_FLAGS_ENABLE);
8322 val = tr32(MSGINT_MODE);
8323 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8324 }
8325 }
8326 }
8327
8328 /* tp->lock is held. */
8329 static int tg3_chip_reset(struct tg3 *tp)
8330 {
8331 u32 val;
8332 void (*write_op)(struct tg3 *, u32, u32);
8333 int i, err;
8334
8335 tg3_nvram_lock(tp);
8336
8337 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8338
8339 /* No matching tg3_nvram_unlock() after this because
8340 * chip reset below will undo the nvram lock.
8341 */
8342 tp->nvram_lock_cnt = 0;
8343
8344 /* GRC_MISC_CFG core clock reset will clear the memory
8345 * enable bit in PCI register 4 and the MSI enable bit
8346 * on some chips, so we save relevant registers here.
8347 */
8348 tg3_save_pci_state(tp);
8349
8350 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8351 tg3_flag(tp, 5755_PLUS))
8352 tw32(GRC_FASTBOOT_PC, 0);
8353
8354 /*
8355 * We must avoid the readl() that normally takes place.
8356 * It locks up machines, causes machine checks, and other
8357 * fun things. So, temporarily disable the 5701
8358 * hardware workaround while we do the reset.
8359 */
8360 write_op = tp->write32;
8361 if (write_op == tg3_write_flush_reg32)
8362 tp->write32 = tg3_write32;
8363
8364 /* Prevent the irq handler from reading or writing PCI registers
8365 * during chip reset when the memory enable bit in the PCI command
8366 * register may be cleared. The chip does not generate interrupts
8367 * at this time, but the irq handler may still be called due to irq
8368 * sharing or irqpoll.
8369 */
8370 tg3_flag_set(tp, CHIP_RESETTING);
8371 for (i = 0; i < tp->irq_cnt; i++) {
8372 struct tg3_napi *tnapi = &tp->napi[i];
8373 if (tnapi->hw_status) {
8374 tnapi->hw_status->status = 0;
8375 tnapi->hw_status->status_tag = 0;
8376 }
8377 tnapi->last_tag = 0;
8378 tnapi->last_irq_tag = 0;
8379 }
8380 smp_mb();
8381
8382 for (i = 0; i < tp->irq_cnt; i++)
8383 synchronize_irq(tp->napi[i].irq_vec);
8384
8385 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8386 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8387 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8388 }
8389
8390 /* do the reset */
8391 val = GRC_MISC_CFG_CORECLK_RESET;
8392
8393 if (tg3_flag(tp, PCI_EXPRESS)) {
8394 /* Force PCIe 1.0a mode */
8395 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8396 !tg3_flag(tp, 57765_PLUS) &&
8397 tr32(TG3_PCIE_PHY_TSTCTL) ==
8398 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8399 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8400
8401 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8402 tw32(GRC_MISC_CFG, (1 << 29));
8403 val |= (1 << 29);
8404 }
8405 }
8406
8407 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8408 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8409 tw32(GRC_VCPU_EXT_CTRL,
8410 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8411 }
8412
8413 /* Manage gphy power for all CPMU-absent PCIe devices. */
8414 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8415 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8416
8417 tw32(GRC_MISC_CFG, val);
8418
8419 /* restore 5701 hardware bug workaround write method */
8420 tp->write32 = write_op;
8421
8422 /* Unfortunately, we have to delay before the PCI read back.
8423 * Some 575X chips will not even respond to a PCI cfg access
8424 * when the reset command is given to the chip.
8425 *
8426 * How do these hardware designers expect things to work
8427 * properly if the PCI write is posted for a long period
8428 * of time? It is always necessary to have some method by
8429 * which a register read-back can occur to push out the
8430 * write that does the reset.
8431 *
8432 * For most tg3 variants the trick below has worked.
8433 * Ho hum...
8434 */
8435 udelay(120);
8436
8437 /* Flush PCI posted writes. The normal MMIO registers
8438 * are inaccessible at this time so this is the only
8439 * way to do this reliably (actually, this is no longer
8440 * the case, see above). I tried to use indirect
8441 * register read/write but this upset some 5701 variants.
8442 */
8443 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8444
8445 udelay(120);
8446
8447 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8448 u16 val16;
8449
8450 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8451 int j;
8452 u32 cfg_val;
8453
8454 /* Wait for link training to complete. */
8455 for (j = 0; j < 5000; j++)
8456 udelay(100);
8457
8458 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8459 pci_write_config_dword(tp->pdev, 0xc4,
8460 cfg_val | (1 << 15));
8461 }
8462
8463 /* Clear the "no snoop" and "relaxed ordering" bits. */
8464 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8465 /*
8466 * Older PCIe devices only support the 128-byte
8467 * MPS setting. Enforce the restriction.
8468 */
8469 if (!tg3_flag(tp, CPMU_PRESENT))
8470 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8471 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8472
8473 /* Clear error status */
8474 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8475 PCI_EXP_DEVSTA_CED |
8476 PCI_EXP_DEVSTA_NFED |
8477 PCI_EXP_DEVSTA_FED |
8478 PCI_EXP_DEVSTA_URD);
8479 }
8480
8481 tg3_restore_pci_state(tp);
8482
8483 tg3_flag_clear(tp, CHIP_RESETTING);
8484 tg3_flag_clear(tp, ERROR_PROCESSED);
8485
8486 val = 0;
8487 if (tg3_flag(tp, 5780_CLASS))
8488 val = tr32(MEMARB_MODE);
8489 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8490
8491 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8492 tg3_stop_fw(tp);
8493 tw32(0x5000, 0x400);
8494 }
8495
8496 if (tg3_flag(tp, IS_SSB_CORE)) {
8497 /*
8498 * BCM4785: In order to avoid repercussions from using the
8499 * potentially defective internal ROM, stop the Rx RISC CPU,
8500 * which is not needed for normal operation.
8501 */
8502 tg3_stop_fw(tp);
8503 tg3_halt_cpu(tp, RX_CPU_BASE);
8504 }
8505
8506 tw32(GRC_MODE, tp->grc_mode);
8507
8508 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8509 val = tr32(0xc4);
8510
8511 tw32(0xc4, val | (1 << 15));
8512 }
8513
8514 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8515 tg3_asic_rev(tp) == ASIC_REV_5705) {
8516 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8517 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8518 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8519 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8520 }
8521
8522 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8523 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8524 val = tp->mac_mode;
8525 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8526 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8527 val = tp->mac_mode;
8528 } else
8529 val = 0;
8530
8531 tw32_f(MAC_MODE, val);
8532 udelay(40);
8533
8534 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8535
8536 err = tg3_poll_fw(tp);
8537 if (err)
8538 return err;
8539
8540 tg3_mdio_start(tp);
8541
8542 if (tg3_flag(tp, PCI_EXPRESS) &&
8543 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8544 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8545 !tg3_flag(tp, 57765_PLUS)) {
8546 val = tr32(0x7c00);
8547
8548 tw32(0x7c00, val | (1 << 25));
8549 }
8550
8551 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8552 val = tr32(TG3_CPMU_CLCK_ORIDE);
8553 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8554 }
8555
8556 /* Reprobe ASF enable state. */
8557 tg3_flag_clear(tp, ENABLE_ASF);
8558 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8559 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8560 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8561 u32 nic_cfg;
8562
8563 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8564 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8565 tg3_flag_set(tp, ENABLE_ASF);
8566 tp->last_event_jiffies = jiffies;
8567 if (tg3_flag(tp, 5750_PLUS))
8568 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8569 }
8570 }
8571
8572 return 0;
8573 }
8574
8575 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8576 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8577
8578 /* tp->lock is held. */
8579 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8580 {
8581 int err;
8582
8583 tg3_stop_fw(tp);
8584
8585 tg3_write_sig_pre_reset(tp, kind);
8586
8587 tg3_abort_hw(tp, silent);
8588 err = tg3_chip_reset(tp);
8589
8590 __tg3_set_mac_addr(tp, 0);
8591
8592 tg3_write_sig_legacy(tp, kind);
8593 tg3_write_sig_post_reset(tp, kind);
8594
8595 if (tp->hw_stats) {
8596 /* Save the stats across chip resets... */
8597 tg3_get_nstats(tp, &tp->net_stats_prev);
8598 tg3_get_estats(tp, &tp->estats_prev);
8599
8600 /* And make sure the next sample is new data */
8601 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8602 }
8603
8604 if (err)
8605 return err;
8606
8607 return 0;
8608 }
8609
8610 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8611 {
8612 struct tg3 *tp = netdev_priv(dev);
8613 struct sockaddr *addr = p;
8614 int err = 0, skip_mac_1 = 0;
8615
8616 if (!is_valid_ether_addr(addr->sa_data))
8617 return -EADDRNOTAVAIL;
8618
8619 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8620
8621 if (!netif_running(dev))
8622 return 0;
8623
8624 if (tg3_flag(tp, ENABLE_ASF)) {
8625 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8626
8627 addr0_high = tr32(MAC_ADDR_0_HIGH);
8628 addr0_low = tr32(MAC_ADDR_0_LOW);
8629 addr1_high = tr32(MAC_ADDR_1_HIGH);
8630 addr1_low = tr32(MAC_ADDR_1_LOW);
8631
8632 /* Skip MAC addr 1 if ASF is using it. */
8633 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8634 !(addr1_high == 0 && addr1_low == 0))
8635 skip_mac_1 = 1;
8636 }
8637 spin_lock_bh(&tp->lock);
8638 __tg3_set_mac_addr(tp, skip_mac_1);
8639 spin_unlock_bh(&tp->lock);
8640
8641 return err;
8642 }
8643
8644 /* tp->lock is held. */
8645 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8646 dma_addr_t mapping, u32 maxlen_flags,
8647 u32 nic_addr)
8648 {
8649 tg3_write_mem(tp,
8650 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8651 ((u64) mapping >> 32));
8652 tg3_write_mem(tp,
8653 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8654 ((u64) mapping & 0xffffffff));
8655 tg3_write_mem(tp,
8656 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8657 maxlen_flags);
8658
8659 if (!tg3_flag(tp, 5705_PLUS))
8660 tg3_write_mem(tp,
8661 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8662 nic_addr);
8663 }
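/* A sketch of the ring control block ("BDINFO") that the helper above
 * fills in (offsets are the TG3_BDINFO_* constants from tg3.h):
 *
 * bdinfo_addr + HOST_ADDR high/low halves of the ring's DMA address
 * bdinfo_addr + MAXLEN_FLAGS (ring size << 16) | attribute flags
 * bdinfo_addr + NIC_ADDR ring location in NIC SRAM (skipped on 5705_PLUS)
 */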
8664
8665
8666 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8667 {
8668 int i = 0;
8669
8670 if (!tg3_flag(tp, ENABLE_TSS)) {
8671 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8672 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8673 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8674 } else {
8675 tw32(HOSTCC_TXCOL_TICKS, 0);
8676 tw32(HOSTCC_TXMAX_FRAMES, 0);
8677 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8678
8679 for (; i < tp->txq_cnt; i++) {
8680 u32 reg;
8681
8682 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8683 tw32(reg, ec->tx_coalesce_usecs);
8684 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8685 tw32(reg, ec->tx_max_coalesced_frames);
8686 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8687 tw32(reg, ec->tx_max_coalesced_frames_irq);
8688 }
8689 }
8690
8691 for (; i < tp->irq_max - 1; i++) {
8692 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8693 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8694 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8695 }
8696 }
8697
8698 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8699 {
8700 int i = 0;
8701 u32 limit = tp->rxq_cnt;
8702
8703 if (!tg3_flag(tp, ENABLE_RSS)) {
8704 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8705 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8706 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8707 limit--;
8708 } else {
8709 tw32(HOSTCC_RXCOL_TICKS, 0);
8710 tw32(HOSTCC_RXMAX_FRAMES, 0);
8711 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8712 }
8713
8714 for (; i < limit; i++) {
8715 u32 reg;
8716
8717 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8718 tw32(reg, ec->rx_coalesce_usecs);
8719 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8720 tw32(reg, ec->rx_max_coalesced_frames);
8721 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8722 tw32(reg, ec->rx_max_coalesced_frames_irq);
8723 }
8724
8725 for (; i < tp->irq_max - 1; i++) {
8726 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8727 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8728 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8729 }
8730 }
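/* In both helpers above, the per-vector coalescing registers are laid
 * out at a fixed 0x18-byte stride from the *_VEC1 base, hence the
 * i * 0x18 address arithmetic.
 */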
8731
8732 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8733 {
8734 tg3_coal_tx_init(tp, ec);
8735 tg3_coal_rx_init(tp, ec);
8736
8737 if (!tg3_flag(tp, 5705_PLUS)) {
8738 u32 val = ec->stats_block_coalesce_usecs;
8739
8740 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8741 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8742
8743 if (!tp->link_up)
8744 val = 0;
8745
8746 tw32(HOSTCC_STAT_COAL_TICKS, val);
8747 }
8748 }
8749
8750 /* tp->lock is held. */
8751 static void tg3_rings_reset(struct tg3 *tp)
8752 {
8753 int i;
8754 u32 stblk, txrcb, rxrcb, limit;
8755 struct tg3_napi *tnapi = &tp->napi[0];
8756
8757 /* Disable all transmit rings but the first. */
8758 if (!tg3_flag(tp, 5705_PLUS))
8759 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8760 else if (tg3_flag(tp, 5717_PLUS))
8761 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8762 else if (tg3_flag(tp, 57765_CLASS) ||
8763 tg3_asic_rev(tp) == ASIC_REV_5762)
8764 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8765 else
8766 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8767
8768 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8769 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8770 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8771 BDINFO_FLAGS_DISABLED);
8772
8773
8774 /* Disable all receive return rings but the first. */
8775 if (tg3_flag(tp, 5717_PLUS))
8776 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8777 else if (!tg3_flag(tp, 5705_PLUS))
8778 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8779 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8780 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8781 tg3_flag(tp, 57765_CLASS))
8782 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8783 else
8784 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8785
8786 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8787 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8788 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8789 BDINFO_FLAGS_DISABLED);
8790
8791 /* Disable interrupts */
8792 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8793 tp->napi[0].chk_msi_cnt = 0;
8794 tp->napi[0].last_rx_cons = 0;
8795 tp->napi[0].last_tx_cons = 0;
8796
8797 /* Zero mailbox registers. */
8798 if (tg3_flag(tp, SUPPORT_MSIX)) {
8799 for (i = 1; i < tp->irq_max; i++) {
8800 tp->napi[i].tx_prod = 0;
8801 tp->napi[i].tx_cons = 0;
8802 if (tg3_flag(tp, ENABLE_TSS))
8803 tw32_mailbox(tp->napi[i].prodmbox, 0);
8804 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8805 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8806 tp->napi[i].chk_msi_cnt = 0;
8807 tp->napi[i].last_rx_cons = 0;
8808 tp->napi[i].last_tx_cons = 0;
8809 }
8810 if (!tg3_flag(tp, ENABLE_TSS))
8811 tw32_mailbox(tp->napi[0].prodmbox, 0);
8812 } else {
8813 tp->napi[0].tx_prod = 0;
8814 tp->napi[0].tx_cons = 0;
8815 tw32_mailbox(tp->napi[0].prodmbox, 0);
8816 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8817 }
8818
8819 /* Make sure the NIC-based send BD rings are disabled. */
8820 if (!tg3_flag(tp, 5705_PLUS)) {
8821 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8822 for (i = 0; i < 16; i++)
8823 tw32_tx_mbox(mbox + i * 8, 0);
8824 }
8825
8826 txrcb = NIC_SRAM_SEND_RCB;
8827 rxrcb = NIC_SRAM_RCV_RET_RCB;
8828
8829 /* Clear the status block in RAM. */
8830 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8831
8832 /* Set status block DMA address */
8833 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8834 ((u64) tnapi->status_mapping >> 32));
8835 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8836 ((u64) tnapi->status_mapping & 0xffffffff));
8837
8838 if (tnapi->tx_ring) {
8839 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8840 (TG3_TX_RING_SIZE <<
8841 BDINFO_FLAGS_MAXLEN_SHIFT),
8842 NIC_SRAM_TX_BUFFER_DESC);
8843 txrcb += TG3_BDINFO_SIZE;
8844 }
8845
8846 if (tnapi->rx_rcb) {
8847 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8848 (tp->rx_ret_ring_mask + 1) <<
8849 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8850 rxrcb += TG3_BDINFO_SIZE;
8851 }
8852
8853 stblk = HOSTCC_STATBLCK_RING1;
8854
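/* The per-vector status block address registers form consecutive
 * 64-bit high/low pairs, hence the stblk += 8 stride below.
 */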
8855 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8856 u64 mapping = (u64)tnapi->status_mapping;
8857 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8858 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8859
8860 /* Clear the status block in RAM. */
8861 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8862
8863 if (tnapi->tx_ring) {
8864 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8865 (TG3_TX_RING_SIZE <<
8866 BDINFO_FLAGS_MAXLEN_SHIFT),
8867 NIC_SRAM_TX_BUFFER_DESC);
8868 txrcb += TG3_BDINFO_SIZE;
8869 }
8870
8871 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8872 ((tp->rx_ret_ring_mask + 1) <<
8873 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8874
8875 stblk += 8;
8876 rxrcb += TG3_BDINFO_SIZE;
8877 }
8878 }
8879
8880 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8881 {
8882 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8883
8884 if (!tg3_flag(tp, 5750_PLUS) ||
8885 tg3_flag(tp, 5780_CLASS) ||
8886 tg3_asic_rev(tp) == ASIC_REV_5750 ||
8887 tg3_asic_rev(tp) == ASIC_REV_5752 ||
8888 tg3_flag(tp, 57765_PLUS))
8889 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8890 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8891 tg3_asic_rev(tp) == ASIC_REV_5787)
8892 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8893 else
8894 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8895
8896 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8897 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8898
8899 val = min(nic_rep_thresh, host_rep_thresh);
8900 tw32(RCVBDI_STD_THRESH, val);
8901
8902 if (tg3_flag(tp, 57765_PLUS))
8903 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8904
8905 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8906 return;
8907
8908 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8909
8910 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8911
8912 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8913 tw32(RCVBDI_JUMBO_THRESH, val);
8914
8915 if (tg3_flag(tp, 57765_PLUS))
8916 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8917 }
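/* Worked example of the threshold math above: with rx_pending == 200
 * the host replenish threshold is max(200 / 8, 1) == 25, and the value
 * actually programmed is the smaller of that and the NIC-side limit
 * min(bdcache_maxcnt / 2, rx_std_max_post).
 */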
8918
8919 static inline u32 calc_crc(unsigned char *buf, int len)
8920 {
8921 u32 reg;
8922 u32 tmp;
8923 int j, k;
8924
8925 reg = 0xffffffff;
8926
8927 for (j = 0; j < len; j++) {
8928 reg ^= buf[j];
8929
8930 for (k = 0; k < 8; k++) {
8931 tmp = reg & 0x01;
8932
8933 reg >>= 1;
8934
8935 if (tmp)
8936 reg ^= 0xedb88320;
8937 }
8938 }
8939
8940 return ~reg;
8941 }
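/* calc_crc() is the standard reflected CRC-32 used by Ethernet
 * (polynomial 0xedb88320). A minimal sketch of how the multicast
 * filter below consumes it: the low 7 bits of the inverted CRC pick
 * one of 128 hash bits spread across MAC_HASH_REG_0..3. E.g. if
 * ~crc & 0x7f == 0x45, the bit lands in register (0x45 & 0x60) >> 5
 * == 2, bit position 0x45 & 0x1f == 5. See __tg3_set_rx_mode().
 */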
8942
8943 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8944 {
8945 /* accept or reject all multicast frames */
8946 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8947 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8948 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8949 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8950 }
8951
8952 static void __tg3_set_rx_mode(struct net_device *dev)
8953 {
8954 struct tg3 *tp = netdev_priv(dev);
8955 u32 rx_mode;
8956
8957 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8958 RX_MODE_KEEP_VLAN_TAG);
8959
8960 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8961 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8962 * flag clear.
8963 */
8964 if (!tg3_flag(tp, ENABLE_ASF))
8965 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8966 #endif
8967
8968 if (dev->flags & IFF_PROMISC) {
8969 /* Promiscuous mode. */
8970 rx_mode |= RX_MODE_PROMISC;
8971 } else if (dev->flags & IFF_ALLMULTI) {
8972 /* Accept all multicast. */
8973 tg3_set_multi(tp, 1);
8974 } else if (netdev_mc_empty(dev)) {
8975 /* Reject all multicast. */
8976 tg3_set_multi(tp, 0);
8977 } else {
8978 /* Accept one or more multicast(s). */
8979 struct netdev_hw_addr *ha;
8980 u32 mc_filter[4] = { 0, };
8981 u32 regidx;
8982 u32 bit;
8983 u32 crc;
8984
8985 netdev_for_each_mc_addr(ha, dev) {
8986 crc = calc_crc(ha->addr, ETH_ALEN);
8987 bit = ~crc & 0x7f;
8988 regidx = (bit & 0x60) >> 5;
8989 bit &= 0x1f;
8990 mc_filter[regidx] |= (1 << bit);
8991 }
8992
8993 tw32(MAC_HASH_REG_0, mc_filter[0]);
8994 tw32(MAC_HASH_REG_1, mc_filter[1]);
8995 tw32(MAC_HASH_REG_2, mc_filter[2]);
8996 tw32(MAC_HASH_REG_3, mc_filter[3]);
8997 }
8998
8999 if (rx_mode != tp->rx_mode) {
9000 tp->rx_mode = rx_mode;
9001 tw32_f(MAC_RX_MODE, rx_mode);
9002 udelay(10);
9003 }
9004 }
9005
9006 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9007 {
9008 int i;
9009
9010 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9011 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9012 }
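/* ethtool_rxfh_indir_default(i, qcnt) evaluates to i % qcnt, so the
 * default table simply round-robins flows across the rx queues.
 */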
9013
9014 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9015 {
9016 int i;
9017
9018 if (!tg3_flag(tp, SUPPORT_MSIX))
9019 return;
9020
9021 if (tp->rxq_cnt == 1) {
9022 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9023 return;
9024 }
9025
9026 /* Validate the table against the current rx queue count */
9027 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9028 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9029 break;
9030 }
9031
9032 if (i != TG3_RSS_INDIR_TBL_SIZE)
9033 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9034 }
9035
9036 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9037 {
9038 int i = 0;
9039 u32 reg = MAC_RSS_INDIR_TBL_0;
9040
9041 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9042 u32 val = tp->rss_ind_tbl[i];
9043 i++;
9044 for (; i % 8; i++) {
9045 val <<= 4;
9046 val |= tp->rss_ind_tbl[i];
9047 }
9048 tw32(reg, val);
9049 reg += 4;
9050 }
9051 }
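/* Each 32-bit MAC_RSS_INDIR_TBL_* register written above packs eight
 * 4-bit table entries, first entry in the most significant nibble.
 * E.g. a table starting 0,1,2,3,0,1,2,3 is written as 0x01230123.
 */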
9052
9053 /* tp->lock is held. */
9054 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9055 {
9056 u32 val, rdmac_mode;
9057 int i, err, limit;
9058 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9059
9060 tg3_disable_ints(tp);
9061
9062 tg3_stop_fw(tp);
9063
9064 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9065
9066 if (tg3_flag(tp, INIT_COMPLETE))
9067 tg3_abort_hw(tp, 1);
9068
9069 /* Enable MAC control of LPI */
9070 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9071 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9072 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9073 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9074 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9075
9076 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9077
9078 tw32_f(TG3_CPMU_EEE_CTRL,
9079 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9080
9081 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9082 TG3_CPMU_EEEMD_LPI_IN_TX |
9083 TG3_CPMU_EEEMD_LPI_IN_RX |
9084 TG3_CPMU_EEEMD_EEE_ENABLE;
9085
9086 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9087 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9088
9089 if (tg3_flag(tp, ENABLE_APE))
9090 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9091
9092 tw32_f(TG3_CPMU_EEE_MODE, val);
9093
9094 tw32_f(TG3_CPMU_EEE_DBTMR1,
9095 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9096 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9097
9098 tw32_f(TG3_CPMU_EEE_DBTMR2,
9099 TG3_CPMU_DBTMR2_APE_TX_2047US |
9100 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9101 }
9102
9103 if (reset_phy)
9104 tg3_phy_reset(tp);
9105
9106 err = tg3_chip_reset(tp);
9107 if (err)
9108 return err;
9109
9110 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9111
9112 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9113 val = tr32(TG3_CPMU_CTRL);
9114 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9115 tw32(TG3_CPMU_CTRL, val);
9116
9117 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9118 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9119 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9120 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9121
9122 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9123 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9124 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9125 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9126
9127 val = tr32(TG3_CPMU_HST_ACC);
9128 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9129 val |= CPMU_HST_ACC_MACCLK_6_25;
9130 tw32(TG3_CPMU_HST_ACC, val);
9131 }
9132
9133 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9134 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9135 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9136 PCIE_PWR_MGMT_L1_THRESH_4MS;
9137 tw32(PCIE_PWR_MGMT_THRESH, val);
9138
9139 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9140 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9141
9142 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9143
9144 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9145 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9146 }
9147
9148 if (tg3_flag(tp, L1PLLPD_EN)) {
9149 u32 grc_mode = tr32(GRC_MODE);
9150
9151 /* Access the lower 1K of PL PCIE block registers. */
9152 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9153 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9154
9155 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9156 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9157 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9158
9159 tw32(GRC_MODE, grc_mode);
9160 }
9161
9162 if (tg3_flag(tp, 57765_CLASS)) {
9163 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9164 u32 grc_mode = tr32(GRC_MODE);
9165
9166 /* Access the lower 1K of PL PCIE block registers. */
9167 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9168 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9169
9170 val = tr32(TG3_PCIE_TLDLPL_PORT +
9171 TG3_PCIE_PL_LO_PHYCTL5);
9172 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9173 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9174
9175 tw32(GRC_MODE, grc_mode);
9176 }
9177
9178 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9179 u32 grc_mode;
9180
9181 /* Fix transmit hangs */
9182 val = tr32(TG3_CPMU_PADRNG_CTL);
9183 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9184 tw32(TG3_CPMU_PADRNG_CTL, val);
9185
9186 grc_mode = tr32(GRC_MODE);
9187
9188 /* Access the lower 1K of DL PCIE block registers. */
9189 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9190 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9191
9192 val = tr32(TG3_PCIE_TLDLPL_PORT +
9193 TG3_PCIE_DL_LO_FTSMAX);
9194 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9195 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9196 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9197
9198 tw32(GRC_MODE, grc_mode);
9199 }
9200
9201 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9202 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9203 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9204 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9205 }
9206
9207 /* This works around an issue with Athlon chipsets on
9208 * B3 tigon3 silicon. This bit has no effect on any
9209 * other revision. But do not set this on PCI Express
9210 * chips and don't even touch the clocks if the CPMU is present.
9211 */
9212 if (!tg3_flag(tp, CPMU_PRESENT)) {
9213 if (!tg3_flag(tp, PCI_EXPRESS))
9214 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9215 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9216 }
9217
9218 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9219 tg3_flag(tp, PCIX_MODE)) {
9220 val = tr32(TG3PCI_PCISTATE);
9221 val |= PCISTATE_RETRY_SAME_DMA;
9222 tw32(TG3PCI_PCISTATE, val);
9223 }
9224
9225 if (tg3_flag(tp, ENABLE_APE)) {
9226 /* Allow reads and writes to the
9227 * APE register and memory space.
9228 */
9229 val = tr32(TG3PCI_PCISTATE);
9230 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9231 PCISTATE_ALLOW_APE_SHMEM_WR |
9232 PCISTATE_ALLOW_APE_PSPACE_WR;
9233 tw32(TG3PCI_PCISTATE, val);
9234 }
9235
9236 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9237 /* Enable some hw fixes. */
9238 val = tr32(TG3PCI_MSI_DATA);
9239 val |= (1 << 26) | (1 << 28) | (1 << 29);
9240 tw32(TG3PCI_MSI_DATA, val);
9241 }
9242
9243 /* Descriptor ring init may make accesses to the
9244 * NIC SRAM area to set up the TX descriptors, so we
9245 * can only do this after the hardware has been
9246 * successfully reset.
9247 */
9248 err = tg3_init_rings(tp);
9249 if (err)
9250 return err;
9251
9252 if (tg3_flag(tp, 57765_PLUS)) {
9253 val = tr32(TG3PCI_DMA_RW_CTRL) &
9254 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9255 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9256 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9257 if (!tg3_flag(tp, 57765_CLASS) &&
9258 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9259 tg3_asic_rev(tp) != ASIC_REV_5762)
9260 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9261 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9262 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9263 tg3_asic_rev(tp) != ASIC_REV_5761) {
9264 /* This value is determined during the probe-time DMA
9265 * engine test, tg3_test_dma.
9266 */
9267 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9268 }
9269
9270 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9271 GRC_MODE_4X_NIC_SEND_RINGS |
9272 GRC_MODE_NO_TX_PHDR_CSUM |
9273 GRC_MODE_NO_RX_PHDR_CSUM);
9274 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9275
9276 /* Pseudo-header checksum is done by hardware logic and not
9277 * the offload processors, so make the chip do the pseudo-
9278 * header checksums on receive. For transmit it is more
9279 * convenient to do the pseudo-header checksum in software
9280 * as Linux does that on transmit for us in all cases.
9281 */
9282 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9283
9284 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9285 if (tp->rxptpctl)
9286 tw32(TG3_RX_PTP_CTL,
9287 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9288
9289 if (tg3_flag(tp, PTP_CAPABLE))
9290 val |= GRC_MODE_TIME_SYNC_ENABLE;
9291
9292 tw32(GRC_MODE, tp->grc_mode | val);
9293
9294 /* Set up the timer prescaler register. The clock is always 66 MHz. */
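/* The value 65 programmed below yields a ~1us tick from the 66 MHz
 * clock, assuming the usual divide-by-(N + 1) prescaler behaviour.
 */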
9295 val = tr32(GRC_MISC_CFG);
9296 val &= ~0xff;
9297 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9298 tw32(GRC_MISC_CFG, val);
9299
9300 /* Initialize MBUF/DESC pool. */
9301 if (tg3_flag(tp, 5750_PLUS)) {
9302 /* Do nothing. */
9303 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9304 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9305 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9306 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9307 else
9308 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9309 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9310 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9311 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9312 int fw_len;
9313
9314 fw_len = tp->fw_len;
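/* Round the firmware image length up to the next 128-byte boundary;
 * the MBUF pool is placed immediately after it in SRAM.
 */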
9315 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9316 tw32(BUFMGR_MB_POOL_ADDR,
9317 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9318 tw32(BUFMGR_MB_POOL_SIZE,
9319 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9320 }
9321
9322 if (tp->dev->mtu <= ETH_DATA_LEN) {
9323 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9324 tp->bufmgr_config.mbuf_read_dma_low_water);
9325 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9326 tp->bufmgr_config.mbuf_mac_rx_low_water);
9327 tw32(BUFMGR_MB_HIGH_WATER,
9328 tp->bufmgr_config.mbuf_high_water);
9329 } else {
9330 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9331 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9332 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9333 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9334 tw32(BUFMGR_MB_HIGH_WATER,
9335 tp->bufmgr_config.mbuf_high_water_jumbo);
9336 }
9337 tw32(BUFMGR_DMA_LOW_WATER,
9338 tp->bufmgr_config.dma_low_water);
9339 tw32(BUFMGR_DMA_HIGH_WATER,
9340 tp->bufmgr_config.dma_high_water);
9341
9342 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9343 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9344 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9345 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9346 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9347 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9348 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9349 tw32(BUFMGR_MODE, val);
9350 for (i = 0; i < 2000; i++) {
9351 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9352 break;
9353 udelay(10);
9354 }
9355 if (i >= 2000) {
9356 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9357 return -ENODEV;
9358 }
9359
9360 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9361 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9362
9363 tg3_setup_rxbd_thresholds(tp);
9364
9365 /* Initialize TG3_BDINFOs at:
9366 * RCVDBDI_STD_BD: standard eth size rx ring
9367 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9368 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9369 *
9370 * like so:
9371 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9372 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9373 * ring attribute flags
9374 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9375 *
9376 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9377 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9378 *
9379 * The size of each ring is fixed in the firmware, but the location is
9380 * configurable.
9381 */
9382 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9383 ((u64) tpr->rx_std_mapping >> 32));
9384 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9385 ((u64) tpr->rx_std_mapping & 0xffffffff));
9386 if (!tg3_flag(tp, 5717_PLUS))
9387 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9388 NIC_SRAM_RX_BUFFER_DESC);
9389
9390 /* Disable the mini ring */
9391 if (!tg3_flag(tp, 5705_PLUS))
9392 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9393 BDINFO_FLAGS_DISABLED);
9394
9395 /* Program the jumbo buffer descriptor ring control
9396 * blocks on those devices that have them.
9397 */
9398 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9399 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9400
9401 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9402 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9403 ((u64) tpr->rx_jmb_mapping >> 32));
9404 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9405 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9406 val = TG3_RX_JMB_RING_SIZE(tp) <<
9407 BDINFO_FLAGS_MAXLEN_SHIFT;
9408 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9409 val | BDINFO_FLAGS_USE_EXT_RECV);
9410 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9411 tg3_flag(tp, 57765_CLASS) ||
9412 tg3_asic_rev(tp) == ASIC_REV_5762)
9413 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9414 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9415 } else {
9416 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9417 BDINFO_FLAGS_DISABLED);
9418 }
9419
9420 if (tg3_flag(tp, 57765_PLUS)) {
9421 val = TG3_RX_STD_RING_SIZE(tp);
9422 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9423 val |= (TG3_RX_STD_DMA_SZ << 2);
9424 } else
9425 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9426 } else
9427 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9428
9429 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9430
9431 tpr->rx_std_prod_idx = tp->rx_pending;
9432 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9433
9434 tpr->rx_jmb_prod_idx =
9435 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9436 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9437
9438 tg3_rings_reset(tp);
9439
9440 /* Initialize MAC address and backoff seed. */
9441 __tg3_set_mac_addr(tp, 0);
9442
9443 /* MTU + Ethernet header + FCS + optional VLAN tag */
9444 tw32(MAC_RX_MTU_SIZE,
9445 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
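/* e.g. a standard 1500-byte MTU programs 1500 + 14 + 4 + 4 = 1522 */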
9446
9447 /* The slot time is changed by tg3_setup_phy if we
9448 * run at gigabit with half duplex.
9449 */
9450 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9451 (6 << TX_LENGTHS_IPG_SHIFT) |
9452 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9453
9454 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9455 tg3_asic_rev(tp) == ASIC_REV_5762)
9456 val |= tr32(MAC_TX_LENGTHS) &
9457 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9458 TX_LENGTHS_CNT_DWN_VAL_MSK);
9459
9460 tw32(MAC_TX_LENGTHS, val);
9461
9462 /* Receive rules. */
9463 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9464 tw32(RCVLPC_CONFIG, 0x0181);
9465
9466 /* Calculate the RDMAC_MODE setting early; we need it to determine
9467 * the RCVLPC_STATE_ENABLE mask.
9468 */
9469 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9470 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9471 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9472 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9473 RDMAC_MODE_LNGREAD_ENAB);
9474
9475 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9476 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9477
9478 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9479 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9480 tg3_asic_rev(tp) == ASIC_REV_57780)
9481 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9482 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9483 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9484
9485 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9486 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9487 if (tg3_flag(tp, TSO_CAPABLE) &&
9488 tg3_asic_rev(tp) == ASIC_REV_5705) {
9489 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9490 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9491 !tg3_flag(tp, IS_5788)) {
9492 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9493 }
9494 }
9495
9496 if (tg3_flag(tp, PCI_EXPRESS))
9497 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9498
9499 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9500 tp->dma_limit = 0;
9501 if (tp->dev->mtu <= ETH_DATA_LEN) {
9502 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9503 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9504 }
9505 }
9506
9507 if (tg3_flag(tp, HW_TSO_1) ||
9508 tg3_flag(tp, HW_TSO_2) ||
9509 tg3_flag(tp, HW_TSO_3))
9510 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9511
9512 if (tg3_flag(tp, 57765_PLUS) ||
9513 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9514 tg3_asic_rev(tp) == ASIC_REV_57780)
9515 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9516
9517 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9518 tg3_asic_rev(tp) == ASIC_REV_5762)
9519 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9520
9521 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9522 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9523 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9524 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9525 tg3_flag(tp, 57765_PLUS)) {
9526 u32 tgtreg;
9527
9528 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9529 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9530 else
9531 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9532
9533 val = tr32(tgtreg);
9534 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9535 tg3_asic_rev(tp) == ASIC_REV_5762) {
9536 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9537 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9538 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9539 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9540 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9541 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9542 }
9543 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9544 }
9545
9546 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9547 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9548 tg3_asic_rev(tp) == ASIC_REV_5762) {
9549 u32 tgtreg;
9550
9551 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9552 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9553 else
9554 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9555
9556 val = tr32(tgtreg);
9557 tw32(tgtreg, val |
9558 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9559 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9560 }
9561
9562 /* Receive/send statistics. */
9563 if (tg3_flag(tp, 5750_PLUS)) {
9564 val = tr32(RCVLPC_STATS_ENABLE);
9565 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9566 tw32(RCVLPC_STATS_ENABLE, val);
9567 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9568 tg3_flag(tp, TSO_CAPABLE)) {
9569 val = tr32(RCVLPC_STATS_ENABLE);
9570 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9571 tw32(RCVLPC_STATS_ENABLE, val);
9572 } else {
9573 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9574 }
9575 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9576 tw32(SNDDATAI_STATSENAB, 0xffffff);
9577 tw32(SNDDATAI_STATSCTRL,
9578 (SNDDATAI_SCTRL_ENABLE |
9579 SNDDATAI_SCTRL_FASTUPD));
9580
9581 /* Set up the host coalescing engine. */
9582 tw32(HOSTCC_MODE, 0);
9583 for (i = 0; i < 2000; i++) {
9584 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9585 break;
9586 udelay(10);
9587 }
9588
9589 __tg3_set_coalesce(tp, &tp->coal);
9590
9591 if (!tg3_flag(tp, 5705_PLUS)) {
9592 /* Status/statistics block address. See tg3_timer,
9593 * the tg3_periodic_fetch_stats call there, and
9594 * tg3_get_stats to see how this works for 5705/5750 chips.
9595 */
9596 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9597 ((u64) tp->stats_mapping >> 32));
9598 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9599 ((u64) tp->stats_mapping & 0xffffffff));
9600 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9601
9602 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9603
9604 /* Clear statistics and status block memory areas */
9605 for (i = NIC_SRAM_STATS_BLK;
9606 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9607 i += sizeof(u32)) {
9608 tg3_write_mem(tp, i, 0);
9609 udelay(40);
9610 }
9611 }
9612
9613 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9614
9615 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9616 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9617 if (!tg3_flag(tp, 5705_PLUS))
9618 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9619
9620 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9621 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9622 /* reset to prevent intermittently losing the first rx packet */
9623 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9624 udelay(10);
9625 }
9626
9627 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9628 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9629 MAC_MODE_FHDE_ENABLE;
9630 if (tg3_flag(tp, ENABLE_APE))
9631 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9632 if (!tg3_flag(tp, 5705_PLUS) &&
9633 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9634 tg3_asic_rev(tp) != ASIC_REV_5700)
9635 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9636 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9637 udelay(40);
9638
9639 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9640 * If TG3_FLAG_IS_NIC is zero, we should read the
9641 * register to preserve the GPIO settings for LOMs. The GPIOs,
9642 * whether used as inputs or outputs, are set by boot code after
9643 * reset.
9644 */
9645 if (!tg3_flag(tp, IS_NIC)) {
9646 u32 gpio_mask;
9647
9648 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9649 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9650 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9651
9652 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9653 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9654 GRC_LCLCTRL_GPIO_OUTPUT3;
9655
9656 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9657 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9658
9659 tp->grc_local_ctrl &= ~gpio_mask;
9660 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9661
9662 /* GPIO1 must be driven high for EEPROM write protect */
9663 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9664 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9665 GRC_LCLCTRL_GPIO_OUTPUT1);
9666 }
9667 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9668 udelay(100);
9669
9670 if (tg3_flag(tp, USING_MSIX)) {
9671 val = tr32(MSGINT_MODE);
9672 val |= MSGINT_MODE_ENABLE;
9673 if (tp->irq_cnt > 1)
9674 val |= MSGINT_MODE_MULTIVEC_EN;
9675 if (!tg3_flag(tp, 1SHOT_MSI))
9676 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9677 tw32(MSGINT_MODE, val);
9678 }
9679
9680 if (!tg3_flag(tp, 5705_PLUS)) {
9681 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9682 udelay(40);
9683 }
9684
9685 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9686 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9687 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9688 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9689 WDMAC_MODE_LNGREAD_ENAB);
9690
9691 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9692 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9693 if (tg3_flag(tp, TSO_CAPABLE) &&
9694 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9695 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9696 /* nothing */
9697 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9698 !tg3_flag(tp, IS_5788)) {
9699 val |= WDMAC_MODE_RX_ACCEL;
9700 }
9701 }
9702
9703 /* Enable host coalescing bug fix */
9704 if (tg3_flag(tp, 5755_PLUS))
9705 val |= WDMAC_MODE_STATUS_TAG_FIX;
9706
9707 if (tg3_asic_rev(tp) == ASIC_REV_5785)
9708 val |= WDMAC_MODE_BURST_ALL_DATA;
9709
9710 tw32_f(WDMAC_MODE, val);
9711 udelay(40);
9712
9713 if (tg3_flag(tp, PCIX_MODE)) {
9714 u16 pcix_cmd;
9715
9716 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9717 &pcix_cmd);
9718 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9719 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9720 pcix_cmd |= PCI_X_CMD_READ_2K;
9721 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9722 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9723 pcix_cmd |= PCI_X_CMD_READ_2K;
9724 }
9725 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9726 pcix_cmd);
9727 }
9728
9729 tw32_f(RDMAC_MODE, rdmac_mode);
9730 udelay(40);
9731
9732 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9733 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9734 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9735 break;
9736 }
9737 if (i < TG3_NUM_RDMA_CHANNELS) {
9738 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9739 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9740 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9741 tg3_flag_set(tp, 5719_RDMA_BUG);
9742 }
9743 }
9744
9745 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9746 if (!tg3_flag(tp, 5705_PLUS))
9747 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9748
9749 if (tg3_asic_rev(tp) == ASIC_REV_5761)
9750 tw32(SNDDATAC_MODE,
9751 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9752 else
9753 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9754
9755 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9756 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9757 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9758 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9759 val |= RCVDBDI_MODE_LRG_RING_SZ;
9760 tw32(RCVDBDI_MODE, val);
9761 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9762 if (tg3_flag(tp, HW_TSO_1) ||
9763 tg3_flag(tp, HW_TSO_2) ||
9764 tg3_flag(tp, HW_TSO_3))
9765 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9766 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9767 if (tg3_flag(tp, ENABLE_TSS))
9768 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9769 tw32(SNDBDI_MODE, val);
9770 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9771
9772 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9773 err = tg3_load_5701_a0_firmware_fix(tp);
9774 if (err)
9775 return err;
9776 }
9777
9778 if (tg3_flag(tp, TSO_CAPABLE)) {
9779 err = tg3_load_tso_firmware(tp);
9780 if (err)
9781 return err;
9782 }
9783
9784 tp->tx_mode = TX_MODE_ENABLE;
9785
9786 if (tg3_flag(tp, 5755_PLUS) ||
9787 tg3_asic_rev(tp) == ASIC_REV_5906)
9788 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9789
9790 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9791 tg3_asic_rev(tp) == ASIC_REV_5762) {
9792 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9793 tp->tx_mode &= ~val;
9794 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9795 }
9796
9797 tw32_f(MAC_TX_MODE, tp->tx_mode);
9798 udelay(100);
9799
9800 if (tg3_flag(tp, ENABLE_RSS)) {
9801 tg3_rss_write_indir_tbl(tp);
9802
 9803 		/* Set up the "secret" hash key. */
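		/* The ten 32-bit writes below supply the 40-byte RSS hash key
		 * (Toeplitz-style). The constants appear to be a fixed,
		 * driver-chosen key rather than anything derived at runtime.
		 */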
9804 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9805 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9806 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9807 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9808 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9809 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9810 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9811 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9812 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9813 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9814 }
9815
9816 tp->rx_mode = RX_MODE_ENABLE;
9817 if (tg3_flag(tp, 5755_PLUS))
9818 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9819
9820 if (tg3_flag(tp, ENABLE_RSS))
9821 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9822 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9823 RX_MODE_RSS_IPV6_HASH_EN |
9824 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9825 RX_MODE_RSS_IPV4_HASH_EN |
9826 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9827
9828 tw32_f(MAC_RX_MODE, tp->rx_mode);
9829 udelay(10);
9830
9831 tw32(MAC_LED_CTRL, tp->led_ctrl);
9832
9833 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9834 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9835 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9836 udelay(10);
9837 }
9838 tw32_f(MAC_RX_MODE, tp->rx_mode);
9839 udelay(10);
9840
9841 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9842 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9843 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
 9844 			/* Set drive transmission level to 1.2V, but only
 9845 			 * if the signal pre-emphasis bit is not set. */
9846 val = tr32(MAC_SERDES_CFG);
9847 val &= 0xfffff000;
9848 val |= 0x880;
9849 tw32(MAC_SERDES_CFG, val);
9850 }
9851 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
9852 tw32(MAC_SERDES_CFG, 0x616000);
9853 }
9854
 9855 	/* Prevent the chip from dropping frames when flow control
 9856 	 * is enabled.
9857 */
9858 if (tg3_flag(tp, 57765_CLASS))
9859 val = 1;
9860 else
9861 val = 2;
9862 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9863
9864 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
9865 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9866 /* Use hardware link auto-negotiation */
9867 tg3_flag_set(tp, HW_AUTONEG);
9868 }
9869
9870 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9871 tg3_asic_rev(tp) == ASIC_REV_5714) {
9872 u32 tmp;
9873
9874 tmp = tr32(SERDES_RX_CTRL);
9875 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9876 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9877 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9878 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9879 }
9880
9881 if (!tg3_flag(tp, USE_PHYLIB)) {
9882 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9883 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9884
9885 err = tg3_setup_phy(tp, 0);
9886 if (err)
9887 return err;
9888
9889 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9890 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9891 u32 tmp;
9892
9893 /* Clear CRC stats. */
9894 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9895 tg3_writephy(tp, MII_TG3_TEST1,
9896 tmp | MII_TG3_TEST1_CRC_EN);
9897 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9898 }
9899 }
9900 }
9901
9902 __tg3_set_rx_mode(tp->dev);
9903
9904 /* Initialize receive rules. */
9905 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9906 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9907 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9908 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9909
9910 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9911 limit = 8;
9912 else
9913 limit = 16;
9914 if (tg3_flag(tp, ENABLE_ASF))
9915 limit -= 4;
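	/* Each case below deliberately falls through to the next, so that
	 * every receive rule slot from (limit - 1) down to 4 gets cleared.
	 * Clearing slots 3 and 2 is intentionally commented out, and slots
	 * 0 and 1 hold the two rules programmed above.
	 */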
9916 switch (limit) {
9917 case 16:
9918 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9919 case 15:
9920 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9921 case 14:
9922 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9923 case 13:
9924 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9925 case 12:
9926 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9927 case 11:
9928 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9929 case 10:
9930 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9931 case 9:
9932 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9933 case 8:
9934 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9935 case 7:
9936 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9937 case 6:
9938 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9939 case 5:
9940 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9941 case 4:
9942 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9943 case 3:
9944 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9945 case 2:
9946 case 1:
9947
9948 default:
9949 break;
9950 }
9951
9952 if (tg3_flag(tp, ENABLE_APE))
9953 /* Write our heartbeat update interval to APE. */
9954 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9955 APE_HOST_HEARTBEAT_INT_DISABLE);
9956
9957 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9958
9959 return 0;
9960 }
9961
9962 /* Called at device open time to get the chip ready for
9963 * packet processing. Invoked with tp->lock held.
9964 */
9965 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9966 {
9967 tg3_switch_clocks(tp);
9968
9969 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9970
9971 return tg3_reset_hw(tp, reset_phy);
9972 }
9973
9974 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9975 {
9976 int i;
9977
9978 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9979 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9980
9981 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9982 off += len;
9983
9984 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9985 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9986 memset(ocir, 0, TG3_OCIR_LEN);
9987 }
9988 }
9989
9990 /* sysfs attributes for hwmon */
9991 static ssize_t tg3_show_temp(struct device *dev,
9992 struct device_attribute *devattr, char *buf)
9993 {
9994 struct pci_dev *pdev = to_pci_dev(dev);
9995 struct net_device *netdev = pci_get_drvdata(pdev);
9996 struct tg3 *tp = netdev_priv(netdev);
9997 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9998 u32 temperature;
9999
10000 spin_lock_bh(&tp->lock);
10001 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10002 sizeof(temperature));
10003 spin_unlock_bh(&tp->lock);
 	/* The hwmon sysfs ABI reports temperatures in millidegrees Celsius,
 	 * so scale the value read from the APE scratchpad accordingly.
 	 */
 10004 	return sprintf(buf, "%u\n", temperature * 1000);
10005 }
10006
10007
10008 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10009 TG3_TEMP_SENSOR_OFFSET);
10010 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10011 TG3_TEMP_CAUTION_OFFSET);
10012 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10013 TG3_TEMP_MAX_OFFSET);
10014
10015 static struct attribute *tg3_attributes[] = {
10016 &sensor_dev_attr_temp1_input.dev_attr.attr,
10017 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10018 &sensor_dev_attr_temp1_max.dev_attr.attr,
10019 NULL
10020 };
10021
10022 static const struct attribute_group tg3_group = {
10023 .attrs = tg3_attributes,
10024 };
10025
10026 static void tg3_hwmon_close(struct tg3 *tp)
10027 {
10028 if (tp->hwmon_dev) {
10029 hwmon_device_unregister(tp->hwmon_dev);
10030 tp->hwmon_dev = NULL;
10031 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10032 }
10033 }
10034
10035 static void tg3_hwmon_open(struct tg3 *tp)
10036 {
10037 int i, err;
10038 u32 size = 0;
10039 struct pci_dev *pdev = tp->pdev;
10040 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10041
10042 tg3_sd_scan_scratchpad(tp, ocirs);
10043
10044 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10045 if (!ocirs[i].src_data_length)
10046 continue;
10047
10048 size += ocirs[i].src_hdr_length;
10049 size += ocirs[i].src_data_length;
10050 }
10051
10052 if (!size)
10053 return;
10054
10055 /* Register hwmon sysfs hooks */
10056 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10057 if (err) {
10058 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10059 return;
10060 }
10061
10062 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10063 if (IS_ERR(tp->hwmon_dev)) {
10064 tp->hwmon_dev = NULL;
10065 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10066 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10067 }
10068 }
10069
10070
10071 #define TG3_STAT_ADD32(PSTAT, REG) \
10072 do { u32 __val = tr32(REG); \
10073 (PSTAT)->low += __val; \
10074 if ((PSTAT)->low < __val) \
10075 (PSTAT)->high += 1; \
10076 } while (0)
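/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit (high/low)
 * software accumulator. Unsigned wraparound of ->low is detected by
 * comparing it against the value just added: e.g. with low = 0xffffff00
 * and __val = 0x200, low becomes 0x100, which is less than __val, so a
 * carry is propagated into ->high. This only makes sense if the hardware
 * counters are clear-on-read (or otherwise reset between fetches), which
 * the once-per-second accumulation below implies.
 */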
10077
10078 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10079 {
10080 struct tg3_hw_stats *sp = tp->hw_stats;
10081
10082 if (!tp->link_up)
10083 return;
10084
10085 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10086 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10087 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10088 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10089 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10090 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10091 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10092 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10093 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10094 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10095 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10096 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10097 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10098 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10099 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10100 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10101 u32 val;
10102
10103 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10104 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10105 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10106 tg3_flag_clear(tp, 5719_RDMA_BUG);
10107 }
10108
10109 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10110 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10111 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10112 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10113 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10114 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10115 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10116 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10117 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10118 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10119 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10120 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10121 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10122 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10123
10124 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10125 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10126 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10127 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10128 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10129 } else {
10130 u32 val = tr32(HOSTCC_FLOW_ATTN);
10131 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10132 if (val) {
10133 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10134 sp->rx_discards.low += val;
10135 if (sp->rx_discards.low < val)
10136 sp->rx_discards.high += 1;
10137 }
10138 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10139 }
10140 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10141 }
10142
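/* Heuristic for working around lost MSIs (hedged summary of the code
 * below): if a vector has work pending but neither its RX nor its TX
 * consumer index has moved since the previous timer tick, assume the MSI
 * was missed and invoke the handler by hand. chk_msi_cnt gives each
 * vector one extra tick of grace before the handler is forced.
 */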
10143 static void tg3_chk_missed_msi(struct tg3 *tp)
10144 {
10145 u32 i;
10146
10147 for (i = 0; i < tp->irq_cnt; i++) {
10148 struct tg3_napi *tnapi = &tp->napi[i];
10149
10150 if (tg3_has_work(tnapi)) {
10151 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10152 tnapi->last_tx_cons == tnapi->tx_cons) {
10153 if (tnapi->chk_msi_cnt < 1) {
10154 tnapi->chk_msi_cnt++;
10155 return;
10156 }
10157 tg3_msi(0, tnapi);
10158 }
10159 }
10160 tnapi->chk_msi_cnt = 0;
10161 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10162 tnapi->last_tx_cons = tnapi->tx_cons;
10163 }
10164 }
10165
10166 static void tg3_timer(unsigned long __opaque)
10167 {
10168 struct tg3 *tp = (struct tg3 *) __opaque;
10169
10170 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10171 goto restart_timer;
10172
10173 spin_lock(&tp->lock);
10174
10175 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10176 tg3_flag(tp, 57765_CLASS))
10177 tg3_chk_missed_msi(tp);
10178
10179 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10180 /* BCM4785: Flush posted writes from GbE to host memory. */
10181 tr32(HOSTCC_MODE);
10182 }
10183
10184 if (!tg3_flag(tp, TAGGED_STATUS)) {
 10185 		/* All of this garbage is because, when using non-tagged
 10186 		 * IRQ status, the mailbox/status_block protocol the chip
 10187 		 * uses with the CPU is race prone.
10188 */
10189 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10190 tw32(GRC_LOCAL_CTRL,
10191 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10192 } else {
10193 tw32(HOSTCC_MODE, tp->coalesce_mode |
10194 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10195 }
10196
10197 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10198 spin_unlock(&tp->lock);
10199 tg3_reset_task_schedule(tp);
10200 goto restart_timer;
10201 }
10202 }
10203
10204 /* This part only runs once per second. */
10205 if (!--tp->timer_counter) {
10206 if (tg3_flag(tp, 5705_PLUS))
10207 tg3_periodic_fetch_stats(tp);
10208
10209 if (tp->setlpicnt && !--tp->setlpicnt)
10210 tg3_phy_eee_enable(tp);
10211
10212 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10213 u32 mac_stat;
10214 int phy_event;
10215
10216 mac_stat = tr32(MAC_STATUS);
10217
10218 phy_event = 0;
10219 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10220 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10221 phy_event = 1;
10222 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10223 phy_event = 1;
10224
10225 if (phy_event)
10226 tg3_setup_phy(tp, 0);
10227 } else if (tg3_flag(tp, POLL_SERDES)) {
10228 u32 mac_stat = tr32(MAC_STATUS);
10229 int need_setup = 0;
10230
10231 if (tp->link_up &&
10232 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10233 need_setup = 1;
10234 }
10235 if (!tp->link_up &&
10236 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10237 MAC_STATUS_SIGNAL_DET))) {
10238 need_setup = 1;
10239 }
10240 if (need_setup) {
10241 if (!tp->serdes_counter) {
10242 tw32_f(MAC_MODE,
10243 (tp->mac_mode &
10244 ~MAC_MODE_PORT_MODE_MASK));
10245 udelay(40);
10246 tw32_f(MAC_MODE, tp->mac_mode);
10247 udelay(40);
10248 }
10249 tg3_setup_phy(tp, 0);
10250 }
10251 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10252 tg3_flag(tp, 5780_CLASS)) {
10253 tg3_serdes_parallel_detect(tp);
10254 }
10255
10256 tp->timer_counter = tp->timer_multiplier;
10257 }
10258
10259 /* Heartbeat is only sent once every 2 seconds.
10260 *
10261 * The heartbeat is to tell the ASF firmware that the host
10262 * driver is still alive. In the event that the OS crashes,
10263 * ASF needs to reset the hardware to free up the FIFO space
10264 * that may be filled with rx packets destined for the host.
10265 * If the FIFO is full, ASF will no longer function properly.
10266 *
10267 * Unintended resets have been reported on real time kernels
10268 * where the timer doesn't run on time. Netpoll will also have
 10269 	 * the same problem.
10270 *
10271 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10272 * to check the ring condition when the heartbeat is expiring
10273 * before doing the reset. This will prevent most unintended
10274 * resets.
10275 */
10276 if (!--tp->asf_counter) {
10277 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10278 tg3_wait_for_event_ack(tp);
10279
10280 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10281 FWCMD_NICDRV_ALIVE3);
10282 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10283 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10284 TG3_FW_UPDATE_TIMEOUT_SEC);
10285
10286 tg3_generate_fw_event(tp);
10287 }
10288 tp->asf_counter = tp->asf_multiplier;
10289 }
10290
10291 spin_unlock(&tp->lock);
10292
10293 restart_timer:
10294 tp->timer.expires = jiffies + tp->timer_offset;
10295 add_timer(&tp->timer);
10296 }
10297
10298 static void tg3_timer_init(struct tg3 *tp)
10299 {
10300 if (tg3_flag(tp, TAGGED_STATUS) &&
10301 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10302 !tg3_flag(tp, 57765_CLASS))
10303 tp->timer_offset = HZ;
10304 else
10305 tp->timer_offset = HZ / 10;
10306
10307 BUG_ON(tp->timer_offset > HZ);
10308
10309 tp->timer_multiplier = (HZ / tp->timer_offset);
10310 tp->asf_multiplier = (HZ / tp->timer_offset) *
10311 TG3_FW_UPDATE_FREQ_SEC;
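	/* With the values above, timer_counter expires roughly once per
	 * second whether the timer ticks at HZ or HZ/10, and asf_counter
	 * expires every TG3_FW_UPDATE_FREQ_SEC seconds (two, per the
	 * heartbeat comment in tg3_timer()).
	 */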
10312
10313 init_timer(&tp->timer);
10314 tp->timer.data = (unsigned long) tp;
10315 tp->timer.function = tg3_timer;
10316 }
10317
10318 static void tg3_timer_start(struct tg3 *tp)
10319 {
10320 tp->asf_counter = tp->asf_multiplier;
10321 tp->timer_counter = tp->timer_multiplier;
10322
10323 tp->timer.expires = jiffies + tp->timer_offset;
10324 add_timer(&tp->timer);
10325 }
10326
10327 static void tg3_timer_stop(struct tg3 *tp)
10328 {
10329 del_timer_sync(&tp->timer);
10330 }
10331
10332 /* Restart hardware after configuration changes, self-test, etc.
10333 * Invoked with tp->lock held.
10334 */
10335 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10336 __releases(tp->lock)
10337 __acquires(tp->lock)
10338 {
10339 int err;
10340
10341 err = tg3_init_hw(tp, reset_phy);
10342 if (err) {
10343 netdev_err(tp->dev,
10344 "Failed to re-initialize device, aborting\n");
10345 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10346 tg3_full_unlock(tp);
10347 tg3_timer_stop(tp);
10348 tp->irq_sync = 0;
10349 tg3_napi_enable(tp);
10350 dev_close(tp->dev);
10351 tg3_full_lock(tp, 0);
10352 }
10353 return err;
10354 }
10355
10356 static void tg3_reset_task(struct work_struct *work)
10357 {
10358 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10359 int err;
10360
10361 tg3_full_lock(tp, 0);
10362
10363 if (!netif_running(tp->dev)) {
10364 tg3_flag_clear(tp, RESET_TASK_PENDING);
10365 tg3_full_unlock(tp);
10366 return;
10367 }
10368
10369 tg3_full_unlock(tp);
10370
10371 tg3_phy_stop(tp);
10372
10373 tg3_netif_stop(tp);
10374
10375 tg3_full_lock(tp, 1);
10376
10377 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10378 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10379 tp->write32_rx_mbox = tg3_write_flush_reg32;
10380 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10381 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10382 }
10383
10384 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10385 err = tg3_init_hw(tp, 1);
10386 if (err)
10387 goto out;
10388
10389 tg3_netif_start(tp);
10390
10391 out:
10392 tg3_full_unlock(tp);
10393
10394 if (!err)
10395 tg3_phy_start(tp);
10396
10397 tg3_flag_clear(tp, RESET_TASK_PENDING);
10398 }
10399
10400 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10401 {
10402 irq_handler_t fn;
10403 unsigned long flags;
10404 char *name;
10405 struct tg3_napi *tnapi = &tp->napi[irq_num];
10406
10407 if (tp->irq_cnt == 1)
10408 name = tp->dev->name;
10409 else {
10410 name = &tnapi->irq_lbl[0];
10411 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10412 name[IFNAMSIZ-1] = 0;
10413 }
10414
10415 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10416 fn = tg3_msi;
10417 if (tg3_flag(tp, 1SHOT_MSI))
10418 fn = tg3_msi_1shot;
10419 flags = 0;
10420 } else {
10421 fn = tg3_interrupt;
10422 if (tg3_flag(tp, TAGGED_STATUS))
10423 fn = tg3_interrupt_tagged;
10424 flags = IRQF_SHARED;
10425 }
10426
10427 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10428 }
10429
10430 static int tg3_test_interrupt(struct tg3 *tp)
10431 {
10432 struct tg3_napi *tnapi = &tp->napi[0];
10433 struct net_device *dev = tp->dev;
10434 int err, i, intr_ok = 0;
10435 u32 val;
10436
10437 if (!netif_running(dev))
10438 return -ENODEV;
10439
10440 tg3_disable_ints(tp);
10441
10442 free_irq(tnapi->irq_vec, tnapi);
10443
10444 /*
 10445 	 * Turn off MSI one shot mode. Otherwise this test has no
 10446 	 * way to observe whether the interrupt was delivered.
10447 */
10448 if (tg3_flag(tp, 57765_PLUS)) {
10449 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10450 tw32(MSGINT_MODE, val);
10451 }
10452
10453 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10454 IRQF_SHARED, dev->name, tnapi);
10455 if (err)
10456 return err;
10457
10458 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10459 tg3_enable_ints(tp);
10460
10461 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10462 tnapi->coal_now);
10463
10464 for (i = 0; i < 5; i++) {
10465 u32 int_mbox, misc_host_ctrl;
10466
10467 int_mbox = tr32_mailbox(tnapi->int_mbox);
10468 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10469
10470 if ((int_mbox != 0) ||
10471 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10472 intr_ok = 1;
10473 break;
10474 }
10475
10476 if (tg3_flag(tp, 57765_PLUS) &&
10477 tnapi->hw_status->status_tag != tnapi->last_tag)
10478 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10479
10480 msleep(10);
10481 }
10482
10483 tg3_disable_ints(tp);
10484
10485 free_irq(tnapi->irq_vec, tnapi);
10486
10487 err = tg3_request_irq(tp, 0);
10488
10489 if (err)
10490 return err;
10491
10492 if (intr_ok) {
10493 /* Reenable MSI one shot mode. */
10494 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10495 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10496 tw32(MSGINT_MODE, val);
10497 }
10498 return 0;
10499 }
10500
10501 return -EIO;
10502 }
10503
 10504 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
 10505  * INTx mode is successfully restored.
10506 */
10507 static int tg3_test_msi(struct tg3 *tp)
10508 {
10509 int err;
10510 u16 pci_cmd;
10511
10512 if (!tg3_flag(tp, USING_MSI))
10513 return 0;
10514
10515 /* Turn off SERR reporting in case MSI terminates with Master
10516 * Abort.
10517 */
10518 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10519 pci_write_config_word(tp->pdev, PCI_COMMAND,
10520 pci_cmd & ~PCI_COMMAND_SERR);
10521
10522 err = tg3_test_interrupt(tp);
10523
10524 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10525
10526 if (!err)
10527 return 0;
10528
10529 /* other failures */
10530 if (err != -EIO)
10531 return err;
10532
10533 /* MSI test failed, go back to INTx mode */
10534 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10535 "to INTx mode. Please report this failure to the PCI "
10536 "maintainer and include system chipset information\n");
10537
10538 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10539
10540 pci_disable_msi(tp->pdev);
10541
10542 tg3_flag_clear(tp, USING_MSI);
10543 tp->napi[0].irq_vec = tp->pdev->irq;
10544
10545 err = tg3_request_irq(tp, 0);
10546 if (err)
10547 return err;
10548
10549 /* Need to reset the chip because the MSI cycle may have terminated
10550 * with Master Abort.
10551 */
10552 tg3_full_lock(tp, 1);
10553
10554 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10555 err = tg3_init_hw(tp, 1);
10556
10557 tg3_full_unlock(tp);
10558
10559 if (err)
10560 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10561
10562 return err;
10563 }
10564
10565 static int tg3_request_firmware(struct tg3 *tp)
10566 {
10567 const __be32 *fw_data;
10568
10569 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10570 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10571 tp->fw_needed);
10572 return -ENOENT;
10573 }
10574
10575 fw_data = (void *)tp->fw->data;
10576
10577 /* Firmware blob starts with version numbers, followed by
10578 * start address and _full_ length including BSS sections
 10579 	 * (which must be longer than the actual data, of course).
10580 */
10581
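	/* Sketch of the blob layout implied by the comment above and the
	 * code below (each field a big-endian 32-bit word):
	 *   fw_data[0]   firmware version
	 *   fw_data[1]   load/start address
	 *   fw_data[2]   full image length, including BSS
	 *   fw_data[3+]  text/data payload (tp->fw->size - 12 bytes)
	 */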
10582 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10583 if (tp->fw_len < (tp->fw->size - 12)) {
10584 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10585 tp->fw_len, tp->fw_needed);
10586 release_firmware(tp->fw);
10587 tp->fw = NULL;
10588 return -EINVAL;
10589 }
10590
10591 /* We no longer need firmware; we have it. */
10592 tp->fw_needed = NULL;
10593 return 0;
10594 }
10595
10596 static u32 tg3_irq_count(struct tg3 *tp)
10597 {
10598 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10599
10600 if (irq_cnt > 1) {
10601 /* We want as many rx rings enabled as there are cpus.
10602 * In multiqueue MSI-X mode, the first MSI-X vector
10603 * only deals with link interrupts, etc, so we add
10604 * one to the number of vectors we are requesting.
10605 */
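		/* e.g. rxq_cnt = 4, txq_cnt = 1 on a system with
		 * irq_max >= 5 yields irq_cnt = 5: one vector for
		 * link/misc plus four for RX.
		 */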
10606 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10607 }
10608
10609 return irq_cnt;
10610 }
10611
10612 static bool tg3_enable_msix(struct tg3 *tp)
10613 {
10614 int i, rc;
10615 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10616
10617 tp->txq_cnt = tp->txq_req;
10618 tp->rxq_cnt = tp->rxq_req;
10619 if (!tp->rxq_cnt)
10620 tp->rxq_cnt = netif_get_num_default_rss_queues();
10621 if (tp->rxq_cnt > tp->rxq_max)
10622 tp->rxq_cnt = tp->rxq_max;
10623
10624 /* Disable multiple TX rings by default. Simple round-robin hardware
10625 * scheduling of the TX rings can cause starvation of rings with
10626 * small packets when other rings have TSO or jumbo packets.
10627 */
10628 if (!tp->txq_req)
10629 tp->txq_cnt = 1;
10630
10631 tp->irq_cnt = tg3_irq_count(tp);
10632
10633 for (i = 0; i < tp->irq_max; i++) {
10634 msix_ent[i].entry = i;
10635 msix_ent[i].vector = 0;
10636 }
10637
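	/* Note on the legacy pci_enable_msix() API: it returns 0 on
	 * success, a negative errno on failure, and a positive count when
	 * fewer vectors are available than requested; in that last case
	 * the code below retries with the count the PCI core reported.
	 */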
10638 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10639 if (rc < 0) {
10640 return false;
10641 } else if (rc != 0) {
10642 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10643 return false;
10644 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10645 tp->irq_cnt, rc);
10646 tp->irq_cnt = rc;
10647 tp->rxq_cnt = max(rc - 1, 1);
10648 if (tp->txq_cnt)
10649 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10650 }
10651
10652 for (i = 0; i < tp->irq_max; i++)
10653 tp->napi[i].irq_vec = msix_ent[i].vector;
10654
10655 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10656 pci_disable_msix(tp->pdev);
10657 return false;
10658 }
10659
10660 if (tp->irq_cnt == 1)
10661 return true;
10662
10663 tg3_flag_set(tp, ENABLE_RSS);
10664
10665 if (tp->txq_cnt > 1)
10666 tg3_flag_set(tp, ENABLE_TSS);
10667
10668 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10669
10670 return true;
10671 }
10672
10673 static void tg3_ints_init(struct tg3 *tp)
10674 {
10675 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10676 !tg3_flag(tp, TAGGED_STATUS)) {
10677 /* All MSI supporting chips should support tagged
10678 * status. Assert that this is the case.
10679 */
10680 netdev_warn(tp->dev,
10681 "MSI without TAGGED_STATUS? Not using MSI\n");
10682 goto defcfg;
10683 }
10684
10685 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10686 tg3_flag_set(tp, USING_MSIX);
10687 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10688 tg3_flag_set(tp, USING_MSI);
10689
10690 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10691 u32 msi_mode = tr32(MSGINT_MODE);
10692 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10693 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10694 if (!tg3_flag(tp, 1SHOT_MSI))
10695 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10696 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10697 }
10698 defcfg:
10699 if (!tg3_flag(tp, USING_MSIX)) {
10700 tp->irq_cnt = 1;
10701 tp->napi[0].irq_vec = tp->pdev->irq;
10702 }
10703
10704 if (tp->irq_cnt == 1) {
10705 tp->txq_cnt = 1;
10706 tp->rxq_cnt = 1;
10707 netif_set_real_num_tx_queues(tp->dev, 1);
10708 netif_set_real_num_rx_queues(tp->dev, 1);
10709 }
10710 }
10711
10712 static void tg3_ints_fini(struct tg3 *tp)
10713 {
10714 if (tg3_flag(tp, USING_MSIX))
10715 pci_disable_msix(tp->pdev);
10716 else if (tg3_flag(tp, USING_MSI))
10717 pci_disable_msi(tp->pdev);
10718 tg3_flag_clear(tp, USING_MSI);
10719 tg3_flag_clear(tp, USING_MSIX);
10720 tg3_flag_clear(tp, ENABLE_RSS);
10721 tg3_flag_clear(tp, ENABLE_TSS);
10722 }
10723
10724 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10725 bool init)
10726 {
10727 struct net_device *dev = tp->dev;
10728 int i, err;
10729
10730 /*
 10731 	 * Set up interrupts first so we know how
10732 * many NAPI resources to allocate
10733 */
10734 tg3_ints_init(tp);
10735
10736 tg3_rss_check_indir_tbl(tp);
10737
10738 /* The placement of this call is tied
10739 * to the setup and use of Host TX descriptors.
10740 */
10741 err = tg3_alloc_consistent(tp);
10742 if (err)
10743 goto err_out1;
10744
10745 tg3_napi_init(tp);
10746
10747 tg3_napi_enable(tp);
10748
10749 for (i = 0; i < tp->irq_cnt; i++) {
10750 struct tg3_napi *tnapi = &tp->napi[i];
10751 err = tg3_request_irq(tp, i);
10752 if (err) {
10753 for (i--; i >= 0; i--) {
10754 tnapi = &tp->napi[i];
10755 free_irq(tnapi->irq_vec, tnapi);
10756 }
10757 goto err_out2;
10758 }
10759 }
10760
10761 tg3_full_lock(tp, 0);
10762
10763 err = tg3_init_hw(tp, reset_phy);
10764 if (err) {
10765 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10766 tg3_free_rings(tp);
10767 }
10768
10769 tg3_full_unlock(tp);
10770
10771 if (err)
10772 goto err_out3;
10773
10774 if (test_irq && tg3_flag(tp, USING_MSI)) {
10775 err = tg3_test_msi(tp);
10776
10777 if (err) {
10778 tg3_full_lock(tp, 0);
10779 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10780 tg3_free_rings(tp);
10781 tg3_full_unlock(tp);
10782
10783 goto err_out2;
10784 }
10785
10786 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10787 u32 val = tr32(PCIE_TRANSACTION_CFG);
10788
10789 tw32(PCIE_TRANSACTION_CFG,
10790 val | PCIE_TRANS_CFG_1SHOT_MSI);
10791 }
10792 }
10793
10794 tg3_phy_start(tp);
10795
10796 tg3_hwmon_open(tp);
10797
10798 tg3_full_lock(tp, 0);
10799
10800 tg3_timer_start(tp);
10801 tg3_flag_set(tp, INIT_COMPLETE);
10802 tg3_enable_ints(tp);
10803
10804 if (init)
10805 tg3_ptp_init(tp);
10806 else
10807 tg3_ptp_resume(tp);
10808
10809
10810 tg3_full_unlock(tp);
10811
10812 netif_tx_start_all_queues(dev);
10813
10814 /*
 10815 	 * Reset the loopback feature if it was turned on while the device was
 10816 	 * down, to make sure that it's installed properly now.
10817 */
10818 if (dev->features & NETIF_F_LOOPBACK)
10819 tg3_set_loopback(dev, dev->features);
10820
10821 return 0;
10822
10823 err_out3:
10824 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10825 struct tg3_napi *tnapi = &tp->napi[i];
10826 free_irq(tnapi->irq_vec, tnapi);
10827 }
10828
10829 err_out2:
10830 tg3_napi_disable(tp);
10831 tg3_napi_fini(tp);
10832 tg3_free_consistent(tp);
10833
10834 err_out1:
10835 tg3_ints_fini(tp);
10836
10837 return err;
10838 }
10839
10840 static void tg3_stop(struct tg3 *tp)
10841 {
10842 int i;
10843
10844 tg3_reset_task_cancel(tp);
10845 tg3_netif_stop(tp);
10846
10847 tg3_timer_stop(tp);
10848
10849 tg3_hwmon_close(tp);
10850
10851 tg3_phy_stop(tp);
10852
10853 tg3_full_lock(tp, 1);
10854
10855 tg3_disable_ints(tp);
10856
10857 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10858 tg3_free_rings(tp);
10859 tg3_flag_clear(tp, INIT_COMPLETE);
10860
10861 tg3_full_unlock(tp);
10862
10863 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10864 struct tg3_napi *tnapi = &tp->napi[i];
10865 free_irq(tnapi->irq_vec, tnapi);
10866 }
10867
10868 tg3_ints_fini(tp);
10869
10870 tg3_napi_fini(tp);
10871
10872 tg3_free_consistent(tp);
10873 }
10874
10875 static int tg3_open(struct net_device *dev)
10876 {
10877 struct tg3 *tp = netdev_priv(dev);
10878 int err;
10879
10880 if (tp->fw_needed) {
10881 err = tg3_request_firmware(tp);
10882 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10883 if (err)
10884 return err;
10885 } else if (err) {
10886 netdev_warn(tp->dev, "TSO capability disabled\n");
10887 tg3_flag_clear(tp, TSO_CAPABLE);
10888 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10889 netdev_notice(tp->dev, "TSO capability restored\n");
10890 tg3_flag_set(tp, TSO_CAPABLE);
10891 }
10892 }
10893
10894 tg3_carrier_off(tp);
10895
10896 err = tg3_power_up(tp);
10897 if (err)
10898 return err;
10899
10900 tg3_full_lock(tp, 0);
10901
10902 tg3_disable_ints(tp);
10903 tg3_flag_clear(tp, INIT_COMPLETE);
10904
10905 tg3_full_unlock(tp);
10906
10907 err = tg3_start(tp, true, true, true);
10908 if (err) {
10909 tg3_frob_aux_power(tp, false);
10910 pci_set_power_state(tp->pdev, PCI_D3hot);
10911 }
10912
10913 if (tg3_flag(tp, PTP_CAPABLE)) {
10914 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10915 &tp->pdev->dev);
10916 if (IS_ERR(tp->ptp_clock))
10917 tp->ptp_clock = NULL;
10918 }
10919
10920 return err;
10921 }
10922
10923 static int tg3_close(struct net_device *dev)
10924 {
10925 struct tg3 *tp = netdev_priv(dev);
10926
10927 tg3_ptp_fini(tp);
10928
10929 tg3_stop(tp);
10930
10931 /* Clear stats across close / open calls */
10932 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10933 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10934
10935 tg3_power_down(tp);
10936
10937 tg3_carrier_off(tp);
10938
10939 return 0;
10940 }
10941
10942 static inline u64 get_stat64(tg3_stat64_t *val)
10943 {
10944 return ((u64)val->high << 32) | ((u64)val->low);
10945 }
10946
10947 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10948 {
10949 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10950
10951 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10952 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
10953 tg3_asic_rev(tp) == ASIC_REV_5701)) {
10954 u32 val;
10955
10956 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10957 tg3_writephy(tp, MII_TG3_TEST1,
10958 val | MII_TG3_TEST1_CRC_EN);
10959 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10960 } else
10961 val = 0;
10962
10963 tp->phy_crc_errors += val;
10964
10965 return tp->phy_crc_errors;
10966 }
10967
10968 return get_stat64(&hw_stats->rx_fcs_errors);
10969 }
10970
10971 #define ESTAT_ADD(member) \
10972 estats->member = old_estats->member + \
10973 get_stat64(&hw_stats->member)
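
/* ESTAT_ADD reports each counter as a saved snapshot plus the live value
 * accumulated in the hardware statistics block since the device was last
 * started. tp->estats_prev holds totals carried across chip resets and is
 * zeroed on close (see the memset in tg3_close()).
 */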
10974
10975 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10976 {
10977 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10978 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10979
10980 ESTAT_ADD(rx_octets);
10981 ESTAT_ADD(rx_fragments);
10982 ESTAT_ADD(rx_ucast_packets);
10983 ESTAT_ADD(rx_mcast_packets);
10984 ESTAT_ADD(rx_bcast_packets);
10985 ESTAT_ADD(rx_fcs_errors);
10986 ESTAT_ADD(rx_align_errors);
10987 ESTAT_ADD(rx_xon_pause_rcvd);
10988 ESTAT_ADD(rx_xoff_pause_rcvd);
10989 ESTAT_ADD(rx_mac_ctrl_rcvd);
10990 ESTAT_ADD(rx_xoff_entered);
10991 ESTAT_ADD(rx_frame_too_long_errors);
10992 ESTAT_ADD(rx_jabbers);
10993 ESTAT_ADD(rx_undersize_packets);
10994 ESTAT_ADD(rx_in_length_errors);
10995 ESTAT_ADD(rx_out_length_errors);
10996 ESTAT_ADD(rx_64_or_less_octet_packets);
10997 ESTAT_ADD(rx_65_to_127_octet_packets);
10998 ESTAT_ADD(rx_128_to_255_octet_packets);
10999 ESTAT_ADD(rx_256_to_511_octet_packets);
11000 ESTAT_ADD(rx_512_to_1023_octet_packets);
11001 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11002 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11003 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11004 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11005 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11006
11007 ESTAT_ADD(tx_octets);
11008 ESTAT_ADD(tx_collisions);
11009 ESTAT_ADD(tx_xon_sent);
11010 ESTAT_ADD(tx_xoff_sent);
11011 ESTAT_ADD(tx_flow_control);
11012 ESTAT_ADD(tx_mac_errors);
11013 ESTAT_ADD(tx_single_collisions);
11014 ESTAT_ADD(tx_mult_collisions);
11015 ESTAT_ADD(tx_deferred);
11016 ESTAT_ADD(tx_excessive_collisions);
11017 ESTAT_ADD(tx_late_collisions);
11018 ESTAT_ADD(tx_collide_2times);
11019 ESTAT_ADD(tx_collide_3times);
11020 ESTAT_ADD(tx_collide_4times);
11021 ESTAT_ADD(tx_collide_5times);
11022 ESTAT_ADD(tx_collide_6times);
11023 ESTAT_ADD(tx_collide_7times);
11024 ESTAT_ADD(tx_collide_8times);
11025 ESTAT_ADD(tx_collide_9times);
11026 ESTAT_ADD(tx_collide_10times);
11027 ESTAT_ADD(tx_collide_11times);
11028 ESTAT_ADD(tx_collide_12times);
11029 ESTAT_ADD(tx_collide_13times);
11030 ESTAT_ADD(tx_collide_14times);
11031 ESTAT_ADD(tx_collide_15times);
11032 ESTAT_ADD(tx_ucast_packets);
11033 ESTAT_ADD(tx_mcast_packets);
11034 ESTAT_ADD(tx_bcast_packets);
11035 ESTAT_ADD(tx_carrier_sense_errors);
11036 ESTAT_ADD(tx_discards);
11037 ESTAT_ADD(tx_errors);
11038
11039 ESTAT_ADD(dma_writeq_full);
11040 ESTAT_ADD(dma_write_prioq_full);
11041 ESTAT_ADD(rxbds_empty);
11042 ESTAT_ADD(rx_discards);
11043 ESTAT_ADD(rx_errors);
11044 ESTAT_ADD(rx_threshold_hit);
11045
11046 ESTAT_ADD(dma_readq_full);
11047 ESTAT_ADD(dma_read_prioq_full);
11048 ESTAT_ADD(tx_comp_queue_full);
11049
11050 ESTAT_ADD(ring_set_send_prod_index);
11051 ESTAT_ADD(ring_status_update);
11052 ESTAT_ADD(nic_irqs);
11053 ESTAT_ADD(nic_avoided_irqs);
11054 ESTAT_ADD(nic_tx_threshold_hit);
11055
11056 ESTAT_ADD(mbuf_lwm_thresh_hit);
11057 }
11058
11059 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11060 {
11061 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11062 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11063
11064 stats->rx_packets = old_stats->rx_packets +
11065 get_stat64(&hw_stats->rx_ucast_packets) +
11066 get_stat64(&hw_stats->rx_mcast_packets) +
11067 get_stat64(&hw_stats->rx_bcast_packets);
11068
11069 stats->tx_packets = old_stats->tx_packets +
11070 get_stat64(&hw_stats->tx_ucast_packets) +
11071 get_stat64(&hw_stats->tx_mcast_packets) +
11072 get_stat64(&hw_stats->tx_bcast_packets);
11073
11074 stats->rx_bytes = old_stats->rx_bytes +
11075 get_stat64(&hw_stats->rx_octets);
11076 stats->tx_bytes = old_stats->tx_bytes +
11077 get_stat64(&hw_stats->tx_octets);
11078
11079 stats->rx_errors = old_stats->rx_errors +
11080 get_stat64(&hw_stats->rx_errors);
11081 stats->tx_errors = old_stats->tx_errors +
11082 get_stat64(&hw_stats->tx_errors) +
11083 get_stat64(&hw_stats->tx_mac_errors) +
11084 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11085 get_stat64(&hw_stats->tx_discards);
11086
11087 stats->multicast = old_stats->multicast +
11088 get_stat64(&hw_stats->rx_mcast_packets);
11089 stats->collisions = old_stats->collisions +
11090 get_stat64(&hw_stats->tx_collisions);
11091
11092 stats->rx_length_errors = old_stats->rx_length_errors +
11093 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11094 get_stat64(&hw_stats->rx_undersize_packets);
11095
11096 stats->rx_over_errors = old_stats->rx_over_errors +
11097 get_stat64(&hw_stats->rxbds_empty);
11098 stats->rx_frame_errors = old_stats->rx_frame_errors +
11099 get_stat64(&hw_stats->rx_align_errors);
11100 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11101 get_stat64(&hw_stats->tx_discards);
11102 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11103 get_stat64(&hw_stats->tx_carrier_sense_errors);
11104
11105 stats->rx_crc_errors = old_stats->rx_crc_errors +
11106 tg3_calc_crc_errors(tp);
11107
11108 stats->rx_missed_errors = old_stats->rx_missed_errors +
11109 get_stat64(&hw_stats->rx_discards);
11110
11111 stats->rx_dropped = tp->rx_dropped;
11112 stats->tx_dropped = tp->tx_dropped;
11113 }
11114
11115 static int tg3_get_regs_len(struct net_device *dev)
11116 {
11117 return TG3_REG_BLK_SIZE;
11118 }
11119
11120 static void tg3_get_regs(struct net_device *dev,
11121 struct ethtool_regs *regs, void *_p)
11122 {
11123 struct tg3 *tp = netdev_priv(dev);
11124
11125 regs->version = 0;
11126
11127 memset(_p, 0, TG3_REG_BLK_SIZE);
11128
11129 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11130 return;
11131
11132 tg3_full_lock(tp, 0);
11133
11134 tg3_dump_legacy_regs(tp, (u32 *)_p);
11135
11136 tg3_full_unlock(tp);
11137 }
11138
11139 static int tg3_get_eeprom_len(struct net_device *dev)
11140 {
11141 struct tg3 *tp = netdev_priv(dev);
11142
11143 return tp->nvram_size;
11144 }
11145
11146 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11147 {
11148 struct tg3 *tp = netdev_priv(dev);
11149 int ret;
11150 u8 *pd;
11151 u32 i, offset, len, b_offset, b_count;
11152 __be32 val;
11153
11154 if (tg3_flag(tp, NO_NVRAM))
11155 return -EINVAL;
11156
11157 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11158 return -EAGAIN;
11159
11160 offset = eeprom->offset;
11161 len = eeprom->len;
11162 eeprom->len = 0;
11163
11164 eeprom->magic = TG3_EEPROM_MAGIC;
11165
11166 if (offset & 3) {
11167 /* adjustments to start on required 4 byte boundary */
11168 b_offset = offset & 3;
11169 b_count = 4 - b_offset;
11170 if (b_count > len) {
11171 /* i.e. offset=1 len=2 */
11172 b_count = len;
11173 }
11174 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11175 if (ret)
11176 return ret;
11177 memcpy(data, ((char *)&val) + b_offset, b_count);
11178 len -= b_count;
11179 offset += b_count;
11180 eeprom->len += b_count;
11181 }
11182
11183 /* read bytes up to the last 4 byte boundary */
11184 pd = &data[eeprom->len];
11185 for (i = 0; i < (len - (len & 3)); i += 4) {
11186 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11187 if (ret) {
11188 eeprom->len += i;
11189 return ret;
11190 }
11191 memcpy(pd + i, &val, 4);
11192 }
11193 eeprom->len += i;
11194
11195 if (len & 3) {
11196 /* read last bytes not ending on 4 byte boundary */
11197 pd = &data[eeprom->len];
11198 b_count = len & 3;
11199 b_offset = offset + len - b_count;
11200 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11201 if (ret)
11202 return ret;
11203 memcpy(pd, &val, b_count);
11204 eeprom->len += b_count;
11205 }
11206 return 0;
11207 }
11208
11209 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11210 {
11211 struct tg3 *tp = netdev_priv(dev);
11212 int ret;
11213 u32 offset, len, b_offset, odd_len;
11214 u8 *buf;
11215 __be32 start, end;
11216
11217 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11218 return -EAGAIN;
11219
11220 if (tg3_flag(tp, NO_NVRAM) ||
11221 eeprom->magic != TG3_EEPROM_MAGIC)
11222 return -EINVAL;
11223
11224 offset = eeprom->offset;
11225 len = eeprom->len;
11226
11227 if ((b_offset = (offset & 3))) {
11228 /* adjustments to start on required 4 byte boundary */
11229 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11230 if (ret)
11231 return ret;
11232 len += b_offset;
11233 offset &= ~3;
11234 if (len < 4)
11235 len = 4;
11236 }
11237
11238 odd_len = 0;
11239 if (len & 3) {
11240 /* adjustments to end on required 4 byte boundary */
11241 odd_len = 1;
11242 len = (len + 3) & ~3;
11243 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11244 if (ret)
11245 return ret;
11246 }
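	/* Worked example: offset = 5, len = 6. b_offset = 1, so the word at
	 * 4 is read into 'start', offset becomes 4 and len 7; then
	 * len & 3 = 3, so len is rounded up to 8 and the word at
	 * offset + len - 4 = 8 is read into 'end'. The 8-byte bounce buffer
	 * is seeded with 'start' and 'end', the caller's 6 bytes are copied
	 * in at b_offset = 1, and one aligned 8-byte block is written back
	 * at offset 4.
	 */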
11247
11248 buf = data;
11249 if (b_offset || odd_len) {
11250 buf = kmalloc(len, GFP_KERNEL);
11251 if (!buf)
11252 return -ENOMEM;
11253 if (b_offset)
11254 memcpy(buf, &start, 4);
11255 if (odd_len)
11256 memcpy(buf+len-4, &end, 4);
11257 memcpy(buf + b_offset, data, eeprom->len);
11258 }
11259
11260 ret = tg3_nvram_write_block(tp, offset, len, buf);
11261
11262 if (buf != data)
11263 kfree(buf);
11264
11265 return ret;
11266 }
11267
11268 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11269 {
11270 struct tg3 *tp = netdev_priv(dev);
11271
11272 if (tg3_flag(tp, USE_PHYLIB)) {
11273 struct phy_device *phydev;
11274 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11275 return -EAGAIN;
11276 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11277 return phy_ethtool_gset(phydev, cmd);
11278 }
11279
11280 cmd->supported = (SUPPORTED_Autoneg);
11281
11282 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11283 cmd->supported |= (SUPPORTED_1000baseT_Half |
11284 SUPPORTED_1000baseT_Full);
11285
11286 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11287 cmd->supported |= (SUPPORTED_100baseT_Half |
11288 SUPPORTED_100baseT_Full |
11289 SUPPORTED_10baseT_Half |
11290 SUPPORTED_10baseT_Full |
11291 SUPPORTED_TP);
11292 cmd->port = PORT_TP;
11293 } else {
11294 cmd->supported |= SUPPORTED_FIBRE;
11295 cmd->port = PORT_FIBRE;
11296 }
11297
11298 cmd->advertising = tp->link_config.advertising;
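	/* The mapping below follows the IEEE 802.3 pause advertisement
	 * encoding: RX+TX pause -> Pause; RX-only -> Pause | Asym_Pause;
	 * TX-only -> Asym_Pause.
	 */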
11299 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11300 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11301 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11302 cmd->advertising |= ADVERTISED_Pause;
11303 } else {
11304 cmd->advertising |= ADVERTISED_Pause |
11305 ADVERTISED_Asym_Pause;
11306 }
11307 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11308 cmd->advertising |= ADVERTISED_Asym_Pause;
11309 }
11310 }
11311 if (netif_running(dev) && tp->link_up) {
11312 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11313 cmd->duplex = tp->link_config.active_duplex;
11314 cmd->lp_advertising = tp->link_config.rmt_adv;
11315 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11316 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11317 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11318 else
11319 cmd->eth_tp_mdix = ETH_TP_MDI;
11320 }
11321 } else {
11322 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11323 cmd->duplex = DUPLEX_UNKNOWN;
11324 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11325 }
11326 cmd->phy_address = tp->phy_addr;
11327 cmd->transceiver = XCVR_INTERNAL;
11328 cmd->autoneg = tp->link_config.autoneg;
11329 cmd->maxtxpkt = 0;
11330 cmd->maxrxpkt = 0;
11331 return 0;
11332 }
11333
11334 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11335 {
11336 struct tg3 *tp = netdev_priv(dev);
11337 u32 speed = ethtool_cmd_speed(cmd);
11338
11339 if (tg3_flag(tp, USE_PHYLIB)) {
11340 struct phy_device *phydev;
11341 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11342 return -EAGAIN;
11343 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11344 return phy_ethtool_sset(phydev, cmd);
11345 }
11346
11347 if (cmd->autoneg != AUTONEG_ENABLE &&
11348 cmd->autoneg != AUTONEG_DISABLE)
11349 return -EINVAL;
11350
11351 if (cmd->autoneg == AUTONEG_DISABLE &&
11352 cmd->duplex != DUPLEX_FULL &&
11353 cmd->duplex != DUPLEX_HALF)
11354 return -EINVAL;
11355
11356 if (cmd->autoneg == AUTONEG_ENABLE) {
11357 u32 mask = ADVERTISED_Autoneg |
11358 ADVERTISED_Pause |
11359 ADVERTISED_Asym_Pause;
11360
11361 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11362 mask |= ADVERTISED_1000baseT_Half |
11363 ADVERTISED_1000baseT_Full;
11364
11365 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11366 mask |= ADVERTISED_100baseT_Half |
11367 ADVERTISED_100baseT_Full |
11368 ADVERTISED_10baseT_Half |
11369 ADVERTISED_10baseT_Full |
11370 ADVERTISED_TP;
11371 else
11372 mask |= ADVERTISED_FIBRE;
11373
11374 if (cmd->advertising & ~mask)
11375 return -EINVAL;
11376
11377 mask &= (ADVERTISED_1000baseT_Half |
11378 ADVERTISED_1000baseT_Full |
11379 ADVERTISED_100baseT_Half |
11380 ADVERTISED_100baseT_Full |
11381 ADVERTISED_10baseT_Half |
11382 ADVERTISED_10baseT_Full);
11383
11384 cmd->advertising &= mask;
11385 } else {
11386 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11387 if (speed != SPEED_1000)
11388 return -EINVAL;
11389
11390 if (cmd->duplex != DUPLEX_FULL)
11391 return -EINVAL;
11392 } else {
11393 if (speed != SPEED_100 &&
11394 speed != SPEED_10)
11395 return -EINVAL;
11396 }
11397 }
11398
11399 tg3_full_lock(tp, 0);
11400
11401 tp->link_config.autoneg = cmd->autoneg;
11402 if (cmd->autoneg == AUTONEG_ENABLE) {
11403 tp->link_config.advertising = (cmd->advertising |
11404 ADVERTISED_Autoneg);
11405 tp->link_config.speed = SPEED_UNKNOWN;
11406 tp->link_config.duplex = DUPLEX_UNKNOWN;
11407 } else {
11408 tp->link_config.advertising = 0;
11409 tp->link_config.speed = speed;
11410 tp->link_config.duplex = cmd->duplex;
11411 }
11412
11413 if (netif_running(dev))
11414 tg3_setup_phy(tp, 1);
11415
11416 tg3_full_unlock(tp);
11417
11418 return 0;
11419 }
11420
11421 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11422 {
11423 struct tg3 *tp = netdev_priv(dev);
11424
11425 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11426 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11427 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11428 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11429 }
11430
11431 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11432 {
11433 struct tg3 *tp = netdev_priv(dev);
11434
11435 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11436 wol->supported = WAKE_MAGIC;
11437 else
11438 wol->supported = 0;
11439 wol->wolopts = 0;
11440 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11441 wol->wolopts = WAKE_MAGIC;
11442 memset(&wol->sopass, 0, sizeof(wol->sopass));
11443 }
11444
11445 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11446 {
11447 struct tg3 *tp = netdev_priv(dev);
11448 struct device *dp = &tp->pdev->dev;
11449
11450 if (wol->wolopts & ~WAKE_MAGIC)
11451 return -EINVAL;
11452 if ((wol->wolopts & WAKE_MAGIC) &&
11453 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11454 return -EINVAL;
11455
11456 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11457
11458 spin_lock_bh(&tp->lock);
11459 if (device_may_wakeup(dp))
11460 tg3_flag_set(tp, WOL_ENABLE);
11461 else
11462 tg3_flag_clear(tp, WOL_ENABLE);
11463 spin_unlock_bh(&tp->lock);
11464
11465 return 0;
11466 }
11467
11468 static u32 tg3_get_msglevel(struct net_device *dev)
11469 {
11470 struct tg3 *tp = netdev_priv(dev);
11471 return tp->msg_enable;
11472 }
11473
11474 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11475 {
11476 struct tg3 *tp = netdev_priv(dev);
11477 tp->msg_enable = value;
11478 }
11479
11480 static int tg3_nway_reset(struct net_device *dev)
11481 {
11482 struct tg3 *tp = netdev_priv(dev);
11483 int r;
11484
11485 if (!netif_running(dev))
11486 return -EAGAIN;
11487
11488 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11489 return -EINVAL;
11490
11491 if (tg3_flag(tp, USE_PHYLIB)) {
11492 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11493 return -EAGAIN;
11494 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11495 } else {
11496 u32 bmcr;
11497
11498 spin_lock_bh(&tp->lock);
11499 r = -EINVAL;
11500 tg3_readphy(tp, MII_BMCR, &bmcr);
11501 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11502 ((bmcr & BMCR_ANENABLE) ||
11503 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11504 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11505 BMCR_ANENABLE);
11506 r = 0;
11507 }
11508 spin_unlock_bh(&tp->lock);
11509 }
11510
11511 return r;
11512 }
11513
11514 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11515 {
11516 struct tg3 *tp = netdev_priv(dev);
11517
11518 ering->rx_max_pending = tp->rx_std_ring_mask;
11519 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11520 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11521 else
11522 ering->rx_jumbo_max_pending = 0;
11523
11524 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11525
11526 ering->rx_pending = tp->rx_pending;
11527 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11528 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11529 else
11530 ering->rx_jumbo_pending = 0;
11531
11532 ering->tx_pending = tp->napi[0].tx_pending;
11533 }
11534
11535 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11536 {
11537 struct tg3 *tp = netdev_priv(dev);
11538 int i, irq_sync = 0, err = 0;
11539
11540 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11541 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11542 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11543 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11544 (tg3_flag(tp, TSO_BUG) &&
11545 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11546 return -EINVAL;
11547
11548 if (netif_running(dev)) {
11549 tg3_phy_stop(tp);
11550 tg3_netif_stop(tp);
11551 irq_sync = 1;
11552 }
11553
11554 tg3_full_lock(tp, irq_sync);
11555
11556 tp->rx_pending = ering->rx_pending;
11557
11558 if (tg3_flag(tp, MAX_RXPEND_64) &&
11559 tp->rx_pending > 63)
11560 tp->rx_pending = 63;
11561 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11562
11563 for (i = 0; i < tp->irq_max; i++)
11564 tp->napi[i].tx_pending = ering->tx_pending;
11565
11566 if (netif_running(dev)) {
11567 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11568 err = tg3_restart_hw(tp, 1);
11569 if (!err)
11570 tg3_netif_start(tp);
11571 }
11572
11573 tg3_full_unlock(tp);
11574
11575 if (irq_sync && !err)
11576 tg3_phy_start(tp);
11577
11578 return err;
11579 }
11580
11581 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11582 {
11583 struct tg3 *tp = netdev_priv(dev);
11584
11585 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11586
11587 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11588 epause->rx_pause = 1;
11589 else
11590 epause->rx_pause = 0;
11591
11592 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11593 epause->tx_pause = 1;
11594 else
11595 epause->tx_pause = 0;
11596 }
11597
11598 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11599 {
11600 struct tg3 *tp = netdev_priv(dev);
11601 int err = 0;
11602
11603 if (tg3_flag(tp, USE_PHYLIB)) {
11604 u32 newadv;
11605 struct phy_device *phydev;
11606
11607 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11608
11609 if (!(phydev->supported & SUPPORTED_Pause) ||
11610 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11611 (epause->rx_pause != epause->tx_pause)))
11612 return -EINVAL;
11613
11614 tp->link_config.flowctrl = 0;
11615 if (epause->rx_pause) {
11616 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11617
11618 if (epause->tx_pause) {
11619 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11620 newadv = ADVERTISED_Pause;
11621 } else
11622 newadv = ADVERTISED_Pause |
11623 ADVERTISED_Asym_Pause;
11624 } else if (epause->tx_pause) {
11625 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11626 newadv = ADVERTISED_Asym_Pause;
11627 } else
11628 newadv = 0;
11629
11630 if (epause->autoneg)
11631 tg3_flag_set(tp, PAUSE_AUTONEG);
11632 else
11633 tg3_flag_clear(tp, PAUSE_AUTONEG);
11634
11635 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11636 u32 oldadv = phydev->advertising &
11637 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11638 if (oldadv != newadv) {
11639 phydev->advertising &=
11640 ~(ADVERTISED_Pause |
11641 ADVERTISED_Asym_Pause);
11642 phydev->advertising |= newadv;
11643 if (phydev->autoneg) {
11644 /*
11645 * Always renegotiate the link to
11646 * inform our link partner of our
11647 * flow control settings, even if the
11648 * flow control is forced. Let
11649 * tg3_adjust_link() do the final
11650 * flow control setup.
11651 */
11652 return phy_start_aneg(phydev);
11653 }
11654 }
11655
11656 if (!epause->autoneg)
11657 tg3_setup_flow_control(tp, 0, 0);
11658 } else {
11659 tp->link_config.advertising &=
11660 ~(ADVERTISED_Pause |
11661 ADVERTISED_Asym_Pause);
11662 tp->link_config.advertising |= newadv;
11663 }
11664 } else {
11665 int irq_sync = 0;
11666
11667 if (netif_running(dev)) {
11668 tg3_netif_stop(tp);
11669 irq_sync = 1;
11670 }
11671
11672 tg3_full_lock(tp, irq_sync);
11673
11674 if (epause->autoneg)
11675 tg3_flag_set(tp, PAUSE_AUTONEG);
11676 else
11677 tg3_flag_clear(tp, PAUSE_AUTONEG);
11678 if (epause->rx_pause)
11679 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11680 else
11681 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11682 if (epause->tx_pause)
11683 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11684 else
11685 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11686
11687 if (netif_running(dev)) {
11688 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11689 err = tg3_restart_hw(tp, 1);
11690 if (!err)
11691 tg3_netif_start(tp);
11692 }
11693
11694 tg3_full_unlock(tp);
11695 }
11696
11697 return err;
11698 }
11699
11700 static int tg3_get_sset_count(struct net_device *dev, int sset)
11701 {
11702 switch (sset) {
11703 case ETH_SS_TEST:
11704 return TG3_NUM_TEST;
11705 case ETH_SS_STATS:
11706 return TG3_NUM_STATS;
11707 default:
11708 return -EOPNOTSUPP;
11709 }
11710 }
11711
11712 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11713 u32 *rules __always_unused)
11714 {
11715 struct tg3 *tp = netdev_priv(dev);
11716
11717 if (!tg3_flag(tp, SUPPORT_MSIX))
11718 return -EOPNOTSUPP;
11719
11720 switch (info->cmd) {
11721 case ETHTOOL_GRXRINGS:
11722 if (netif_running(tp->dev))
11723 info->data = tp->rxq_cnt;
11724 else {
11725 info->data = num_online_cpus();
11726 if (info->data > TG3_RSS_MAX_NUM_QS)
11727 info->data = TG3_RSS_MAX_NUM_QS;
11728 }
11729
11730 /* The first interrupt vector only
11731 * handles link interrupts.
11732 */
11733 info->data -= 1;
11734 return 0;
11735
11736 default:
11737 return -EOPNOTSUPP;
11738 }
11739 }
11740
11741 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11742 {
11743 u32 size = 0;
11744 struct tg3 *tp = netdev_priv(dev);
11745
11746 if (tg3_flag(tp, SUPPORT_MSIX))
11747 size = TG3_RSS_INDIR_TBL_SIZE;
11748
11749 return size;
11750 }
11751
11752 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11753 {
11754 struct tg3 *tp = netdev_priv(dev);
11755 int i;
11756
11757 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11758 indir[i] = tp->rss_ind_tbl[i];
11759
11760 return 0;
11761 }
11762
11763 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11764 {
11765 struct tg3 *tp = netdev_priv(dev);
11766 size_t i;
11767
11768 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11769 tp->rss_ind_tbl[i] = indir[i];
11770
11771 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11772 return 0;
11773
11774 /* It is legal to write the indirection
11775 * table while the device is running.
11776 */
11777 tg3_full_lock(tp, 0);
11778 tg3_rss_write_indir_tbl(tp);
11779 tg3_full_unlock(tp);
11780
11781 return 0;
11782 }
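/* Each indirection table entry names the rx queue a hash bucket maps
 * to, so a caller wanting an even spread fills the table round-robin.
 * A minimal sketch (hypothetical helper, not part of the driver):
 *
 *	static void fill_indir_equal(u32 *indir, unsigned int tbl_sz,
 *				     unsigned int nqueues)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < tbl_sz; i++)
 *			indir[i] = i % nqueues;	// queue ids 0..nqueues-1
 *	}
 *
 * With tbl_sz == TG3_RSS_INDIR_TBL_SIZE this yields the same layout
 * that "ethtool -X <dev> equal <nqueues>" requests.
 */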
11783
11784 static void tg3_get_channels(struct net_device *dev,
11785 struct ethtool_channels *channel)
11786 {
11787 struct tg3 *tp = netdev_priv(dev);
11788 u32 deflt_qs = netif_get_num_default_rss_queues();
11789
11790 channel->max_rx = tp->rxq_max;
11791 channel->max_tx = tp->txq_max;
11792
11793 if (netif_running(dev)) {
11794 channel->rx_count = tp->rxq_cnt;
11795 channel->tx_count = tp->txq_cnt;
11796 } else {
11797 if (tp->rxq_req)
11798 channel->rx_count = tp->rxq_req;
11799 else
11800 channel->rx_count = min(deflt_qs, tp->rxq_max);
11801
11802 if (tp->txq_req)
11803 channel->tx_count = tp->txq_req;
11804 else
11805 channel->tx_count = min(deflt_qs, tp->txq_max);
11806 }
11807 }
11808
11809 static int tg3_set_channels(struct net_device *dev,
11810 struct ethtool_channels *channel)
11811 {
11812 struct tg3 *tp = netdev_priv(dev);
11813
11814 if (!tg3_flag(tp, SUPPORT_MSIX))
11815 return -EOPNOTSUPP;
11816
11817 if (channel->rx_count > tp->rxq_max ||
11818 channel->tx_count > tp->txq_max)
11819 return -EINVAL;
11820
11821 tp->rxq_req = channel->rx_count;
11822 tp->txq_req = channel->tx_count;
11823
11824 if (!netif_running(dev))
11825 return 0;
11826
11827 tg3_stop(tp);
11828
11829 tg3_carrier_off(tp);
11830
11831 tg3_start(tp, true, false, false);
11832
11833 return 0;
11834 }
11835
11836 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11837 {
11838 switch (stringset) {
11839 case ETH_SS_STATS:
11840 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11841 break;
11842 case ETH_SS_TEST:
11843 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11844 break;
11845 default:
11846 WARN_ON(1); /* unknown string set; should never happen */
11847 break;
11848 }
11849 }
11850
11851 static int tg3_set_phys_id(struct net_device *dev,
11852 enum ethtool_phys_id_state state)
11853 {
11854 struct tg3 *tp = netdev_priv(dev);
11855
11856 if (!netif_running(tp->dev))
11857 return -EAGAIN;
11858
11859 switch (state) {
11860 case ETHTOOL_ID_ACTIVE:
11861 return 1; /* cycle on/off once per second */
11862
11863 case ETHTOOL_ID_ON:
11864 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11865 LED_CTRL_1000MBPS_ON |
11866 LED_CTRL_100MBPS_ON |
11867 LED_CTRL_10MBPS_ON |
11868 LED_CTRL_TRAFFIC_OVERRIDE |
11869 LED_CTRL_TRAFFIC_BLINK |
11870 LED_CTRL_TRAFFIC_LED);
11871 break;
11872
11873 case ETHTOOL_ID_OFF:
11874 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11875 LED_CTRL_TRAFFIC_OVERRIDE);
11876 break;
11877
11878 case ETHTOOL_ID_INACTIVE:
11879 tw32(MAC_LED_CTRL, tp->led_ctrl);
11880 break;
11881 }
11882
11883 return 0;
11884 }
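/* Userspace reaches this blink state machine with the legacy
 * ETHTOOL_PHYS_ID request, i.e. "ethtool -p eth0 5". A minimal sketch,
 * assuming interface "eth0" (illustrative only, not part of the driver):
 *
 *	void blink_led(int fd)	// fd: any AF_INET datagram socket
 *	{
 *		struct ethtool_value id = {
 *			.cmd = ETHTOOL_PHYS_ID,
 *			.data = 5,	// seconds to blink; 0 = until interrupted
 *		};
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&id;
 *		ioctl(fd, SIOCETHTOOL, &ifr);
 *	}
 *
 * Returning 1 from ETHTOOL_ID_ACTIVE asks the ethtool core to alternate
 * the ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks once per second.
 */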
11885
11886 static void tg3_get_ethtool_stats(struct net_device *dev,
11887 struct ethtool_stats *estats, u64 *tmp_stats)
11888 {
11889 struct tg3 *tp = netdev_priv(dev);
11890
11891 if (tp->hw_stats)
11892 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11893 else
11894 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11895 }
11896
11897 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11898 {
11899 int i;
11900 __be32 *buf;
11901 u32 offset = 0, len = 0;
11902 u32 magic, val;
11903
11904 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11905 return NULL;
11906
11907 if (magic == TG3_EEPROM_MAGIC) {
11908 for (offset = TG3_NVM_DIR_START;
11909 offset < TG3_NVM_DIR_END;
11910 offset += TG3_NVM_DIRENT_SIZE) {
11911 if (tg3_nvram_read(tp, offset, &val))
11912 return NULL;
11913
11914 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11915 TG3_NVM_DIRTYPE_EXTVPD)
11916 break;
11917 }
11918
11919 if (offset != TG3_NVM_DIR_END) {
11920 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11921 if (tg3_nvram_read(tp, offset + 4, &offset))
11922 return NULL;
11923
11924 offset = tg3_nvram_logical_addr(tp, offset);
11925 }
11926 }
11927
11928 if (!offset || !len) {
11929 offset = TG3_NVM_VPD_OFF;
11930 len = TG3_NVM_VPD_LEN;
11931 }
11932
11933 buf = kmalloc(len, GFP_KERNEL);
11934 if (buf == NULL)
11935 return NULL;
11936
11937 if (magic == TG3_EEPROM_MAGIC) {
11938 for (i = 0; i < len; i += 4) {
11939 /* The data is in little-endian format in NVRAM.
11940 * Use the big-endian read routines to preserve
11941 * the byte order as it exists in NVRAM.
11942 */
11943 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11944 goto error;
11945 }
11946 } else {
11947 u8 *ptr;
11948 ssize_t cnt;
11949 unsigned int pos = 0;
11950
11951 ptr = (u8 *)&buf[0];
11952 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11953 cnt = pci_read_vpd(tp->pdev, pos,
11954 len - pos, ptr);
11955 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11956 cnt = 0;
11957 else if (cnt < 0)
11958 goto error;
11959 }
11960 if (pos != len)
11961 goto error;
11962 }
11963
11964 *vpdlen = len;
11965
11966 return buf;
11967
11968 error:
11969 kfree(buf);
11970 return NULL;
11971 }
11972
11973 #define NVRAM_TEST_SIZE 0x100
11974 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11975 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11976 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11977 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11978 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11979 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11980 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11981 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11982
11983 static int tg3_test_nvram(struct tg3 *tp)
11984 {
11985 u32 csum, magic, len;
11986 __be32 *buf;
11987 int i, j, k, err = 0, size;
11988
11989 if (tg3_flag(tp, NO_NVRAM))
11990 return 0;
11991
11992 if (tg3_nvram_read(tp, 0, &magic) != 0)
11993 return -EIO;
11994
11995 if (magic == TG3_EEPROM_MAGIC)
11996 size = NVRAM_TEST_SIZE;
11997 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11998 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11999 TG3_EEPROM_SB_FORMAT_1) {
12000 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12001 case TG3_EEPROM_SB_REVISION_0:
12002 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12003 break;
12004 case TG3_EEPROM_SB_REVISION_2:
12005 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12006 break;
12007 case TG3_EEPROM_SB_REVISION_3:
12008 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12009 break;
12010 case TG3_EEPROM_SB_REVISION_4:
12011 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12012 break;
12013 case TG3_EEPROM_SB_REVISION_5:
12014 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12015 break;
12016 case TG3_EEPROM_SB_REVISION_6:
12017 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12018 break;
12019 default:
12020 return -EIO;
12021 }
12022 } else
12023 return 0;
12024 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12025 size = NVRAM_SELFBOOT_HW_SIZE;
12026 else
12027 return -EIO;
12028
12029 buf = kmalloc(size, GFP_KERNEL);
12030 if (buf == NULL)
12031 return -ENOMEM;
12032
12033 err = -EIO;
12034 for (i = 0, j = 0; i < size; i += 4, j++) {
12035 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12036 if (err)
12037 break;
12038 }
12039 if (i < size)
12040 goto out;
12041
12042 /* Selfboot format */
12043 magic = be32_to_cpu(buf[0]);
12044 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12045 TG3_EEPROM_MAGIC_FW) {
12046 u8 *buf8 = (u8 *) buf, csum8 = 0;
12047
12048 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12049 TG3_EEPROM_SB_REVISION_2) {
12050 /* For rev 2, the csum doesn't include the 4-byte MBA word. */
12051 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12052 csum8 += buf8[i];
12053 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12054 csum8 += buf8[i];
12055 } else {
12056 for (i = 0; i < size; i++)
12057 csum8 += buf8[i];
12058 }
12059
12060 if (csum8 == 0) {
12061 err = 0;
12062 goto out;
12063 }
12064
12065 err = -EIO;
12066 goto out;
12067 }
12068
12069 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12070 TG3_EEPROM_MAGIC_HW) {
12071 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12072 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12073 u8 *buf8 = (u8 *) buf;
12074
12075 /* Separate the parity bits and the data bytes. */
12076 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12077 if ((i == 0) || (i == 8)) {
12078 int l;
12079 u8 msk;
12080
12081 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12082 parity[k++] = buf8[i] & msk;
12083 i++;
12084 } else if (i == 16) {
12085 int l;
12086 u8 msk;
12087
12088 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12089 parity[k++] = buf8[i] & msk;
12090 i++;
12091
12092 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12093 parity[k++] = buf8[i] & msk;
12094 i++;
12095 }
12096 data[j++] = buf8[i];
12097 }
12098
12099 err = -EIO;
12100 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12101 u8 hw8 = hweight8(data[i]);
12102
12103 if ((hw8 & 0x1) && parity[i])
12104 goto out;
12105 else if (!(hw8 & 0x1) && !parity[i])
12106 goto out;
12107 }
12108 err = 0;
12109 goto out;
12110 }
12111
12112 err = -EIO;
12113
12114 /* Bootstrap checksum at offset 0x10 */
12115 csum = calc_crc((unsigned char *) buf, 0x10);
12116 if (csum != le32_to_cpu(buf[0x10/4]))
12117 goto out;
12118
12119 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12120 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12121 if (csum != le32_to_cpu(buf[0xfc/4]))
12122 goto out;
12123
12124 kfree(buf);
12125
12126 buf = tg3_vpd_readblock(tp, &len);
12127 if (!buf)
12128 return -ENOMEM;
12129
12130 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12131 if (i > 0) {
12132 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12133 if (j < 0)
12134 goto out;
12135
12136 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12137 goto out;
12138
12139 i += PCI_VPD_LRDT_TAG_SIZE;
12140 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12141 PCI_VPD_RO_KEYWORD_CHKSUM);
12142 if (j > 0) {
12143 u8 csum8 = 0;
12144
12145 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12146
12147 for (i = 0; i <= j; i++)
12148 csum8 += ((u8 *)buf)[i];
12149
12150 if (csum8)
12151 goto out;
12152 }
12153 }
12154
12155 err = 0;
12156
12157 out:
12158 kfree(buf);
12159 return err;
12160 }
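/* The selfboot formats above use a plain additive checksum: an image
 * is valid when all of its bytes (for rev 2, skipping the 4-byte MBA
 * word) sum to zero mod 256. A host-side sketch of the same check
 * (hypothetical helper, not part of the driver):
 *
 *	static int selfboot_csum_ok(const u8 *img, size_t len)
 *	{
 *		u8 sum = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++)
 *			sum += img[i];
 *		return sum == 0;	// stored csum byte balances the sum
 *	}
 */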
12161
12162 #define TG3_SERDES_TIMEOUT_SEC 2
12163 #define TG3_COPPER_TIMEOUT_SEC 6
12164
12165 static int tg3_test_link(struct tg3 *tp)
12166 {
12167 int i, max;
12168
12169 if (!netif_running(tp->dev))
12170 return -ENODEV;
12171
12172 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12173 max = TG3_SERDES_TIMEOUT_SEC;
12174 else
12175 max = TG3_COPPER_TIMEOUT_SEC;
12176
12177 for (i = 0; i < max; i++) {
12178 if (tp->link_up)
12179 return 0;
12180
12181 if (msleep_interruptible(1000))
12182 break;
12183 }
12184
12185 return -EIO;
12186 }
12187
12188 /* Only test the commonly used registers */
12189 static int tg3_test_registers(struct tg3 *tp)
12190 {
12191 int i, is_5705, is_5750;
12192 u32 offset, read_mask, write_mask, val, save_val, read_val;
12193 static struct {
12194 u16 offset;
12195 u16 flags;
12196 #define TG3_FL_5705 0x1
12197 #define TG3_FL_NOT_5705 0x2
12198 #define TG3_FL_NOT_5788 0x4
12199 #define TG3_FL_NOT_5750 0x8
12200 u32 read_mask;
12201 u32 write_mask;
12202 } reg_tbl[] = {
12203 /* MAC Control Registers */
12204 { MAC_MODE, TG3_FL_NOT_5705,
12205 0x00000000, 0x00ef6f8c },
12206 { MAC_MODE, TG3_FL_5705,
12207 0x00000000, 0x01ef6b8c },
12208 { MAC_STATUS, TG3_FL_NOT_5705,
12209 0x03800107, 0x00000000 },
12210 { MAC_STATUS, TG3_FL_5705,
12211 0x03800100, 0x00000000 },
12212 { MAC_ADDR_0_HIGH, 0x0000,
12213 0x00000000, 0x0000ffff },
12214 { MAC_ADDR_0_LOW, 0x0000,
12215 0x00000000, 0xffffffff },
12216 { MAC_RX_MTU_SIZE, 0x0000,
12217 0x00000000, 0x0000ffff },
12218 { MAC_TX_MODE, 0x0000,
12219 0x00000000, 0x00000070 },
12220 { MAC_TX_LENGTHS, 0x0000,
12221 0x00000000, 0x00003fff },
12222 { MAC_RX_MODE, TG3_FL_NOT_5705,
12223 0x00000000, 0x000007fc },
12224 { MAC_RX_MODE, TG3_FL_5705,
12225 0x00000000, 0x000007dc },
12226 { MAC_HASH_REG_0, 0x0000,
12227 0x00000000, 0xffffffff },
12228 { MAC_HASH_REG_1, 0x0000,
12229 0x00000000, 0xffffffff },
12230 { MAC_HASH_REG_2, 0x0000,
12231 0x00000000, 0xffffffff },
12232 { MAC_HASH_REG_3, 0x0000,
12233 0x00000000, 0xffffffff },
12234
12235 /* Receive Data and Receive BD Initiator Control Registers. */
12236 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12237 0x00000000, 0xffffffff },
12238 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12239 0x00000000, 0xffffffff },
12240 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12241 0x00000000, 0x00000003 },
12242 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12243 0x00000000, 0xffffffff },
12244 { RCVDBDI_STD_BD+0, 0x0000,
12245 0x00000000, 0xffffffff },
12246 { RCVDBDI_STD_BD+4, 0x0000,
12247 0x00000000, 0xffffffff },
12248 { RCVDBDI_STD_BD+8, 0x0000,
12249 0x00000000, 0xffff0002 },
12250 { RCVDBDI_STD_BD+0xc, 0x0000,
12251 0x00000000, 0xffffffff },
12252
12253 /* Receive BD Initiator Control Registers. */
12254 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12255 0x00000000, 0xffffffff },
12256 { RCVBDI_STD_THRESH, TG3_FL_5705,
12257 0x00000000, 0x000003ff },
12258 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12259 0x00000000, 0xffffffff },
12260
12261 /* Host Coalescing Control Registers. */
12262 { HOSTCC_MODE, TG3_FL_NOT_5705,
12263 0x00000000, 0x00000004 },
12264 { HOSTCC_MODE, TG3_FL_5705,
12265 0x00000000, 0x000000f6 },
12266 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12267 0x00000000, 0xffffffff },
12268 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12269 0x00000000, 0x000003ff },
12270 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12271 0x00000000, 0xffffffff },
12272 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12273 0x00000000, 0x000003ff },
12274 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12275 0x00000000, 0xffffffff },
12276 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12277 0x00000000, 0x000000ff },
12278 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12279 0x00000000, 0xffffffff },
12280 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12281 0x00000000, 0x000000ff },
12282 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12283 0x00000000, 0xffffffff },
12284 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12285 0x00000000, 0xffffffff },
12286 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12287 0x00000000, 0xffffffff },
12288 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12289 0x00000000, 0x000000ff },
12290 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12291 0x00000000, 0xffffffff },
12292 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12293 0x00000000, 0x000000ff },
12294 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12295 0x00000000, 0xffffffff },
12296 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12297 0x00000000, 0xffffffff },
12298 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12299 0x00000000, 0xffffffff },
12300 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12301 0x00000000, 0xffffffff },
12302 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12303 0x00000000, 0xffffffff },
12304 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12305 0xffffffff, 0x00000000 },
12306 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12307 0xffffffff, 0x00000000 },
12308
12309 /* Buffer Manager Control Registers. */
12310 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12311 0x00000000, 0x007fff80 },
12312 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12313 0x00000000, 0x007fffff },
12314 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12315 0x00000000, 0x0000003f },
12316 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12317 0x00000000, 0x000001ff },
12318 { BUFMGR_MB_HIGH_WATER, 0x0000,
12319 0x00000000, 0x000001ff },
12320 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12321 0xffffffff, 0x00000000 },
12322 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12323 0xffffffff, 0x00000000 },
12324
12325 /* Mailbox Registers */
12326 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12327 0x00000000, 0x000001ff },
12328 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12329 0x00000000, 0x000001ff },
12330 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12331 0x00000000, 0x000007ff },
12332 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12333 0x00000000, 0x000001ff },
12334
12335 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12336 };
12337
12338 is_5705 = is_5750 = 0;
12339 if (tg3_flag(tp, 5705_PLUS)) {
12340 is_5705 = 1;
12341 if (tg3_flag(tp, 5750_PLUS))
12342 is_5750 = 1;
12343 }
12344
12345 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12346 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12347 continue;
12348
12349 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12350 continue;
12351
12352 if (tg3_flag(tp, IS_5788) &&
12353 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12354 continue;
12355
12356 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12357 continue;
12358
12359 offset = (u32) reg_tbl[i].offset;
12360 read_mask = reg_tbl[i].read_mask;
12361 write_mask = reg_tbl[i].write_mask;
12362
12363 /* Save the original register content */
12364 save_val = tr32(offset);
12365
12366 /* Determine the read-only value. */
12367 read_val = save_val & read_mask;
12368
12369 /* Write zero to the register, then make sure the read-only bits
12370 * are not changed and the read/write bits are all zeros.
12371 */
12372 tw32(offset, 0);
12373
12374 val = tr32(offset);
12375
12376 /* Test the read-only and read/write bits. */
12377 if (((val & read_mask) != read_val) || (val & write_mask))
12378 goto out;
12379
12380 /* Write ones to all the bits defined by read_mask and write_mask, then
12381 * make sure the read-only bits are not changed and the
12382 * read/write bits are all ones.
12383 */
12384 tw32(offset, read_mask | write_mask);
12385
12386 val = tr32(offset);
12387
12388 /* Test the read-only bits. */
12389 if ((val & read_mask) != read_val)
12390 goto out;
12391
12392 /* Test the read/write bits. */
12393 if ((val & write_mask) != write_mask)
12394 goto out;
12395
12396 tw32(offset, save_val);
12397 }
12398
12399 return 0;
12400
12401 out:
12402 if (netif_msg_hw(tp))
12403 netdev_err(tp->dev,
12404 "Register test failed at offset %x\n", offset);
12405 tw32(offset, save_val);
12406 return -EIO;
12407 }
12408
12409 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12410 {
12411 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12412 int i;
12413 u32 j;
12414
12415 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12416 for (j = 0; j < len; j += 4) {
12417 u32 val;
12418
12419 tg3_write_mem(tp, offset + j, test_pattern[i]);
12420 tg3_read_mem(tp, offset + j, &val);
12421 if (val != test_pattern[i])
12422 return -EIO;
12423 }
12424 }
12425 return 0;
12426 }
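/* The three patterns cover complementary failure modes: all-zeros and
 * all-ones catch bits stuck low or high, while the mixed 0xaa55a55a
 * pattern catches neighboring bits shorted together. The per-word
 * probe reduces to this sketch (illustrative only):
 *
 *	static int word_test(volatile u32 *addr, u32 pattern)
 *	{
 *		*addr = pattern;	// write, then read back and compare
 *		return (*addr == pattern) ? 0 : -EIO;
 *	}
 */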
12427
12428 static int tg3_test_memory(struct tg3 *tp)
12429 {
12430 static struct mem_entry {
12431 u32 offset;
12432 u32 len;
12433 } mem_tbl_570x[] = {
12434 { 0x00000000, 0x00b50},
12435 { 0x00002000, 0x1c000},
12436 { 0xffffffff, 0x00000}
12437 }, mem_tbl_5705[] = {
12438 { 0x00000100, 0x0000c},
12439 { 0x00000200, 0x00008},
12440 { 0x00004000, 0x00800},
12441 { 0x00006000, 0x01000},
12442 { 0x00008000, 0x02000},
12443 { 0x00010000, 0x0e000},
12444 { 0xffffffff, 0x00000}
12445 }, mem_tbl_5755[] = {
12446 { 0x00000200, 0x00008},
12447 { 0x00004000, 0x00800},
12448 { 0x00006000, 0x00800},
12449 { 0x00008000, 0x02000},
12450 { 0x00010000, 0x0c000},
12451 { 0xffffffff, 0x00000}
12452 }, mem_tbl_5906[] = {
12453 { 0x00000200, 0x00008},
12454 { 0x00004000, 0x00400},
12455 { 0x00006000, 0x00400},
12456 { 0x00008000, 0x01000},
12457 { 0x00010000, 0x01000},
12458 { 0xffffffff, 0x00000}
12459 }, mem_tbl_5717[] = {
12460 { 0x00000200, 0x00008},
12461 { 0x00010000, 0x0a000},
12462 { 0x00020000, 0x13c00},
12463 { 0xffffffff, 0x00000}
12464 }, mem_tbl_57765[] = {
12465 { 0x00000200, 0x00008},
12466 { 0x00004000, 0x00800},
12467 { 0x00006000, 0x09800},
12468 { 0x00010000, 0x0a000},
12469 { 0xffffffff, 0x00000}
12470 };
12471 struct mem_entry *mem_tbl;
12472 int err = 0;
12473 int i;
12474
12475 if (tg3_flag(tp, 5717_PLUS))
12476 mem_tbl = mem_tbl_5717;
12477 else if (tg3_flag(tp, 57765_CLASS) ||
12478 tg3_asic_rev(tp) == ASIC_REV_5762)
12479 mem_tbl = mem_tbl_57765;
12480 else if (tg3_flag(tp, 5755_PLUS))
12481 mem_tbl = mem_tbl_5755;
12482 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12483 mem_tbl = mem_tbl_5906;
12484 else if (tg3_flag(tp, 5705_PLUS))
12485 mem_tbl = mem_tbl_5705;
12486 else
12487 mem_tbl = mem_tbl_570x;
12488
12489 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12490 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12491 if (err)
12492 break;
12493 }
12494
12495 return err;
12496 }
12497
12498 #define TG3_TSO_MSS 500
12499
12500 #define TG3_TSO_IP_HDR_LEN 20
12501 #define TG3_TSO_TCP_HDR_LEN 20
12502 #define TG3_TSO_TCP_OPT_LEN 12
12503
12504 static const u8 tg3_tso_header[] = {
12505 0x08, 0x00,
12506 0x45, 0x00, 0x00, 0x00,
12507 0x00, 0x00, 0x40, 0x00,
12508 0x40, 0x06, 0x00, 0x00,
12509 0x0a, 0x00, 0x00, 0x01,
12510 0x0a, 0x00, 0x00, 0x02,
12511 0x0d, 0x00, 0xe0, 0x00,
12512 0x00, 0x00, 0x01, 0x00,
12513 0x00, 0x00, 0x02, 0x00,
12514 0x80, 0x10, 0x10, 0x00,
12515 0x14, 0x09, 0x00, 0x00,
12516 0x01, 0x01, 0x08, 0x0a,
12517 0x11, 0x11, 0x11, 0x11,
12518 0x11, 0x11, 0x11, 0x11,
12519 };
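/* Layout of tg3_tso_header, copied into the test frame at offset
 * ETH_ALEN * 2 (i.e. immediately after the two MAC addresses):
 *
 *	bytes  0-1	EtherType 0x0800 (IPv4)
 *	bytes  2-21	IPv4 header: 10.0.0.1 -> 10.0.0.2, protocol TCP,
 *			DF set, tot_len left zero (patched at runtime)
 *	bytes 22-53	TCP header: data offset 8 (32 bytes), ACK set,
 *			12 option bytes (NOP, NOP, timestamp)
 */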
12520
12521 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12522 {
12523 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12524 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12525 u32 budget;
12526 struct sk_buff *skb;
12527 u8 *tx_data, *rx_data;
12528 dma_addr_t map;
12529 int num_pkts, tx_len, rx_len, i, err;
12530 struct tg3_rx_buffer_desc *desc;
12531 struct tg3_napi *tnapi, *rnapi;
12532 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12533
12534 tnapi = &tp->napi[0];
12535 rnapi = &tp->napi[0];
12536 if (tp->irq_cnt > 1) {
12537 if (tg3_flag(tp, ENABLE_RSS))
12538 rnapi = &tp->napi[1];
12539 if (tg3_flag(tp, ENABLE_TSS))
12540 tnapi = &tp->napi[1];
12541 }
12542 coal_now = tnapi->coal_now | rnapi->coal_now;
12543
12544 err = -EIO;
12545
12546 tx_len = pktsz;
12547 skb = netdev_alloc_skb(tp->dev, tx_len);
12548 if (!skb)
12549 return -ENOMEM;
12550
12551 tx_data = skb_put(skb, tx_len);
12552 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
12553 memset(tx_data + ETH_ALEN, 0x0, 8);
12554
12555 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12556
12557 if (tso_loopback) {
12558 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12559
12560 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12561 TG3_TSO_TCP_OPT_LEN;
12562
12563 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12564 sizeof(tg3_tso_header));
12565 mss = TG3_TSO_MSS;
12566
12567 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12568 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12569
12570 /* Set the total length field in the IP header */
12571 iph->tot_len = htons((u16)(mss + hdr_len));
12572
12573 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12574 TXD_FLAG_CPU_POST_DMA);
12575
12576 if (tg3_flag(tp, HW_TSO_1) ||
12577 tg3_flag(tp, HW_TSO_2) ||
12578 tg3_flag(tp, HW_TSO_3)) {
12579 struct tcphdr *th;
12580 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12581 th = (struct tcphdr *)&tx_data[val];
12582 th->check = 0;
12583 } else
12584 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12585
12586 if (tg3_flag(tp, HW_TSO_3)) {
12587 mss |= (hdr_len & 0xc) << 12;
12588 if (hdr_len & 0x10)
12589 base_flags |= 0x00000010;
12590 base_flags |= (hdr_len & 0x3e0) << 5;
12591 } else if (tg3_flag(tp, HW_TSO_2))
12592 mss |= hdr_len << 9;
12593 else if (tg3_flag(tp, HW_TSO_1) ||
12594 tg3_asic_rev(tp) == ASIC_REV_5705) {
12595 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12596 } else {
12597 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12598 }
12599
12600 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12601 } else {
12602 num_pkts = 1;
12603 data_off = ETH_HLEN;
12604
12605 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12606 tx_len > VLAN_ETH_FRAME_LEN)
12607 base_flags |= TXD_FLAG_JMB_PKT;
12608 }
12609
12610 for (i = data_off; i < tx_len; i++)
12611 tx_data[i] = (u8) (i & 0xff);
12612
12613 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12614 if (pci_dma_mapping_error(tp->pdev, map)) {
12615 dev_kfree_skb(skb);
12616 return -EIO;
12617 }
12618
12619 val = tnapi->tx_prod;
12620 tnapi->tx_buffers[val].skb = skb;
12621 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12622
12623 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12624 rnapi->coal_now);
12625
12626 udelay(10);
12627
12628 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12629
12630 budget = tg3_tx_avail(tnapi);
12631 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12632 base_flags | TXD_FLAG_END, mss, 0)) {
12633 tnapi->tx_buffers[val].skb = NULL;
12634 dev_kfree_skb(skb);
12635 return -EIO;
12636 }
12637
12638 tnapi->tx_prod++;
12639
12640 /* Sync BD data before updating mailbox */
12641 wmb();
12642
12643 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12644 tr32_mailbox(tnapi->prodmbox);
12645
12646 udelay(10);
12647
12648 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12649 for (i = 0; i < 35; i++) {
12650 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12651 coal_now);
12652
12653 udelay(10);
12654
12655 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12656 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12657 if ((tx_idx == tnapi->tx_prod) &&
12658 (rx_idx == (rx_start_idx + num_pkts)))
12659 break;
12660 }
12661
12662 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12663 dev_kfree_skb(skb);
12664
12665 if (tx_idx != tnapi->tx_prod)
12666 goto out;
12667
12668 if (rx_idx != rx_start_idx + num_pkts)
12669 goto out;
12670
12671 val = data_off;
12672 while (rx_idx != rx_start_idx) {
12673 desc = &rnapi->rx_rcb[rx_start_idx++];
12674 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12675 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12676
12677 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12678 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12679 goto out;
12680
12681 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12682 - ETH_FCS_LEN;
12683
12684 if (!tso_loopback) {
12685 if (rx_len != tx_len)
12686 goto out;
12687
12688 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12689 if (opaque_key != RXD_OPAQUE_RING_STD)
12690 goto out;
12691 } else {
12692 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12693 goto out;
12694 }
12695 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12696 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12697 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12698 goto out;
12699 }
12700
12701 if (opaque_key == RXD_OPAQUE_RING_STD) {
12702 rx_data = tpr->rx_std_buffers[desc_idx].data;
12703 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12704 mapping);
12705 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12706 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12707 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12708 mapping);
12709 } else
12710 goto out;
12711
12712 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12713 PCI_DMA_FROMDEVICE);
12714
12715 rx_data += TG3_RX_OFFSET(tp);
12716 for (i = data_off; i < rx_len; i++, val++) {
12717 if (*(rx_data + i) != (u8) (val & 0xff))
12718 goto out;
12719 }
12720 }
12721
12722 err = 0;
12723
12724 /* tg3_free_rings will unmap and free the rx_data */
12725 out:
12726 return err;
12727 }
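/* Worked example of the TSO case above: with pktsz == ETH_FRAME_LEN
 * (1514), the TCP payload is 1514 - ETH_ALEN * 2 - 54 = 1448 bytes, so
 * num_pkts = DIV_ROUND_UP(1448, TG3_TSO_MSS) = 3 and the test passes
 * only if the rx producer index advances by exactly three segments.
 */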
12728
12729 #define TG3_STD_LOOPBACK_FAILED 1
12730 #define TG3_JMB_LOOPBACK_FAILED 2
12731 #define TG3_TSO_LOOPBACK_FAILED 4
12732 #define TG3_LOOPBACK_FAILED \
12733 (TG3_STD_LOOPBACK_FAILED | \
12734 TG3_JMB_LOOPBACK_FAILED | \
12735 TG3_TSO_LOOPBACK_FAILED)
12736
12737 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12738 {
12739 int err = -EIO;
12740 u32 eee_cap;
12741 u32 jmb_pkt_sz = 9000;
12742
12743 if (tp->dma_limit)
12744 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12745
12746 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12747 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12748
12749 if (!netif_running(tp->dev)) {
12750 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12751 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12752 if (do_extlpbk)
12753 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12754 goto done;
12755 }
12756
12757 err = tg3_reset_hw(tp, 1);
12758 if (err) {
12759 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12760 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12761 if (do_extlpbk)
12762 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12763 goto done;
12764 }
12765
12766 if (tg3_flag(tp, ENABLE_RSS)) {
12767 int i;
12768
12769 /* Reroute all rx packets to the 1st queue */
12770 for (i = MAC_RSS_INDIR_TBL_0;
12771 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12772 tw32(i, 0x0);
12773 }
12774
12775 /* HW erratum - MAC loopback fails in some cases on 5780.
12776 * Normal traffic and PHY loopback are not affected by
12777 * this erratum. Also, the MAC loopback test is deprecated for
12778 * all newer ASIC revisions.
12779 */
12780 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12781 !tg3_flag(tp, CPMU_PRESENT)) {
12782 tg3_mac_loopback(tp, true);
12783
12784 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12785 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12786
12787 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12788 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12789 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12790
12791 tg3_mac_loopback(tp, false);
12792 }
12793
12794 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12795 !tg3_flag(tp, USE_PHYLIB)) {
12796 int i;
12797
12798 tg3_phy_lpbk_set(tp, 0, false);
12799
12800 /* Wait for link */
12801 for (i = 0; i < 100; i++) {
12802 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12803 break;
12804 mdelay(1);
12805 }
12806
12807 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12808 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12809 if (tg3_flag(tp, TSO_CAPABLE) &&
12810 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12811 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12812 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12813 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12814 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12815
12816 if (do_extlpbk) {
12817 tg3_phy_lpbk_set(tp, 0, true);
12818
12819 /* All link indications report up, but the hardware
12820 * isn't really ready for about 20 msec. Double it
12821 * to be sure.
12822 */
12823 mdelay(40);
12824
12825 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12826 data[TG3_EXT_LOOPB_TEST] |=
12827 TG3_STD_LOOPBACK_FAILED;
12828 if (tg3_flag(tp, TSO_CAPABLE) &&
12829 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12830 data[TG3_EXT_LOOPB_TEST] |=
12831 TG3_TSO_LOOPBACK_FAILED;
12832 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12833 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12834 data[TG3_EXT_LOOPB_TEST] |=
12835 TG3_JMB_LOOPBACK_FAILED;
12836 }
12837
12838 /* Re-enable gphy autopowerdown. */
12839 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12840 tg3_phy_toggle_apd(tp, true);
12841 }
12842
12843 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12844 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12845
12846 done:
12847 tp->phy_flags |= eee_cap;
12848
12849 return err;
12850 }
12851
12852 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12853 u64 *data)
12854 {
12855 struct tg3 *tp = netdev_priv(dev);
12856 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12857
12858 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12859 tg3_power_up(tp)) {
12860 etest->flags |= ETH_TEST_FL_FAILED;
12861 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12862 return;
12863 }
12864
12865 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12866
12867 if (tg3_test_nvram(tp) != 0) {
12868 etest->flags |= ETH_TEST_FL_FAILED;
12869 data[TG3_NVRAM_TEST] = 1;
12870 }
12871 if (!doextlpbk && tg3_test_link(tp)) {
12872 etest->flags |= ETH_TEST_FL_FAILED;
12873 data[TG3_LINK_TEST] = 1;
12874 }
12875 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12876 int err, err2 = 0, irq_sync = 0;
12877
12878 if (netif_running(dev)) {
12879 tg3_phy_stop(tp);
12880 tg3_netif_stop(tp);
12881 irq_sync = 1;
12882 }
12883
12884 tg3_full_lock(tp, irq_sync);
12885 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12886 err = tg3_nvram_lock(tp);
12887 tg3_halt_cpu(tp, RX_CPU_BASE);
12888 if (!tg3_flag(tp, 5705_PLUS))
12889 tg3_halt_cpu(tp, TX_CPU_BASE);
12890 if (!err)
12891 tg3_nvram_unlock(tp);
12892
12893 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12894 tg3_phy_reset(tp);
12895
12896 if (tg3_test_registers(tp) != 0) {
12897 etest->flags |= ETH_TEST_FL_FAILED;
12898 data[TG3_REGISTER_TEST] = 1;
12899 }
12900
12901 if (tg3_test_memory(tp) != 0) {
12902 etest->flags |= ETH_TEST_FL_FAILED;
12903 data[TG3_MEMORY_TEST] = 1;
12904 }
12905
12906 if (doextlpbk)
12907 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12908
12909 if (tg3_test_loopback(tp, data, doextlpbk))
12910 etest->flags |= ETH_TEST_FL_FAILED;
12911
12912 tg3_full_unlock(tp);
12913
12914 if (tg3_test_interrupt(tp) != 0) {
12915 etest->flags |= ETH_TEST_FL_FAILED;
12916 data[TG3_INTERRUPT_TEST] = 1;
12917 }
12918
12919 tg3_full_lock(tp, 0);
12920
12921 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12922 if (netif_running(dev)) {
12923 tg3_flag_set(tp, INIT_COMPLETE);
12924 err2 = tg3_restart_hw(tp, 1);
12925 if (!err2)
12926 tg3_netif_start(tp);
12927 }
12928
12929 tg3_full_unlock(tp);
12930
12931 if (irq_sync && !err2)
12932 tg3_phy_start(tp);
12933 }
12934 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12935 tg3_power_down(tp);
12936
12937 }
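/* A minimal userspace sketch of invoking the self-test above, assuming
 * an interface named "eth0" (illustrative only, not part of the
 * driver). Production code should size the result array by querying
 * ETHTOOL_GSSET_INFO instead of hard-coding a count.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *
 *	int main(void)
 *	{
 *		enum { NTEST = 8 };	// assumed upper bound on TG3_NUM_TEST
 *		struct ethtool_test *test;
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0), i;
 *
 *		test = calloc(1, sizeof(*test) + NTEST * sizeof(__u64));
 *		test->cmd = ETHTOOL_TEST;
 *		test->flags = ETH_TEST_FL_OFFLINE;	// run the full set
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)test;
 *		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
 *			for (i = 0; i < NTEST && i < (int)test->len; i++)
 *				printf("test %d: %llu\n", i,
 *				       (unsigned long long)test->data[i]);
 *		return 0;
 *	}
 */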
12938
12939 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12940 struct ifreq *ifr, int cmd)
12941 {
12942 struct tg3 *tp = netdev_priv(dev);
12943 struct hwtstamp_config stmpconf;
12944
12945 if (!tg3_flag(tp, PTP_CAPABLE))
12946 return -EINVAL;
12947
12948 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12949 return -EFAULT;
12950
12951 if (stmpconf.flags)
12952 return -EINVAL;
12953
12954 switch (stmpconf.tx_type) {
12955 case HWTSTAMP_TX_ON:
12956 tg3_flag_set(tp, TX_TSTAMP_EN);
12957 break;
12958 case HWTSTAMP_TX_OFF:
12959 tg3_flag_clear(tp, TX_TSTAMP_EN);
12960 break;
12961 default:
12962 return -ERANGE;
12963 }
12964
12965 switch (stmpconf.rx_filter) {
12966 case HWTSTAMP_FILTER_NONE:
12967 tp->rxptpctl = 0;
12968 break;
12969 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12970 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12971 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12972 break;
12973 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12974 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12975 TG3_RX_PTP_CTL_SYNC_EVNT;
12976 break;
12977 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12978 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12979 TG3_RX_PTP_CTL_DELAY_REQ;
12980 break;
12981 case HWTSTAMP_FILTER_PTP_V2_EVENT:
12982 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12983 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12984 break;
12985 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12986 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12987 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12988 break;
12989 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12990 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12991 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12992 break;
12993 case HWTSTAMP_FILTER_PTP_V2_SYNC:
12994 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12995 TG3_RX_PTP_CTL_SYNC_EVNT;
12996 break;
12997 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12998 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12999 TG3_RX_PTP_CTL_SYNC_EVNT;
13000 break;
13001 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13002 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13003 TG3_RX_PTP_CTL_SYNC_EVNT;
13004 break;
13005 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13006 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13007 TG3_RX_PTP_CTL_DELAY_REQ;
13008 break;
13009 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13010 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13011 TG3_RX_PTP_CTL_DELAY_REQ;
13012 break;
13013 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13014 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13015 TG3_RX_PTP_CTL_DELAY_REQ;
13016 break;
13017 default:
13018 return -ERANGE;
13019 }
13020
13021 if (netif_running(dev) && tp->rxptpctl)
13022 tw32(TG3_RX_PTP_CTL,
13023 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13024
13025 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13026 -EFAULT : 0;
13027 }
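/* A minimal userspace sketch of the SIOCSHWTSTAMP request handled
 * above, assuming an interface named "eth0" (illustrative only, not
 * part of the driver):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *
 *	int enable_ptp_stamping(int fd)	// fd: any AF_INET datagram socket
 *	{
 *		struct hwtstamp_config cfg;
 *		struct ifreq ifr;
 *
 *		memset(&cfg, 0, sizeof(cfg));	// flags must stay zero
 *		cfg.tx_type = HWTSTAMP_TX_ON;
 *		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&cfg;
 *		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *	}
 *
 * Note the handler rejects nonzero cfg.flags with -EINVAL and unknown
 * tx_type or rx_filter values with -ERANGE.
 */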
13028
13029 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13030 {
13031 struct mii_ioctl_data *data = if_mii(ifr);
13032 struct tg3 *tp = netdev_priv(dev);
13033 int err;
13034
13035 if (tg3_flag(tp, USE_PHYLIB)) {
13036 struct phy_device *phydev;
13037 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13038 return -EAGAIN;
13039 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13040 return phy_mii_ioctl(phydev, ifr, cmd);
13041 }
13042
13043 switch (cmd) {
13044 case SIOCGMIIPHY:
13045 data->phy_id = tp->phy_addr;
13046
13047 /* fallthru */
13048 case SIOCGMIIREG: {
13049 u32 mii_regval;
13050
13051 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13052 break; /* We have no PHY */
13053
13054 if (!netif_running(dev))
13055 return -EAGAIN;
13056
13057 spin_lock_bh(&tp->lock);
13058 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13059 data->reg_num & 0x1f, &mii_regval);
13060 spin_unlock_bh(&tp->lock);
13061
13062 data->val_out = mii_regval;
13063
13064 return err;
13065 }
13066
13067 case SIOCSMIIREG:
13068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13069 break; /* We have no PHY */
13070
13071 if (!netif_running(dev))
13072 return -EAGAIN;
13073
13074 spin_lock_bh(&tp->lock);
13075 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13076 data->reg_num & 0x1f, data->val_in);
13077 spin_unlock_bh(&tp->lock);
13078
13079 return err;
13080
13081 case SIOCSHWTSTAMP:
13082 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13083
13084 default:
13085 /* do nothing */
13086 break;
13087 }
13088 return -EOPNOTSUPP;
13089 }
13090
13091 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13092 {
13093 struct tg3 *tp = netdev_priv(dev);
13094
13095 memcpy(ec, &tp->coal, sizeof(*ec));
13096 return 0;
13097 }
13098
13099 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13100 {
13101 struct tg3 *tp = netdev_priv(dev);
13102 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13103 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13104
13105 if (!tg3_flag(tp, 5705_PLUS)) {
13106 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13107 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13108 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13109 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13110 }
13111
13112 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13113 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13114 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13115 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13116 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13117 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13118 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13119 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13120 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13121 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13122 return -EINVAL;
13123
13124 /* No rx interrupts will be generated if both are zero */
13125 if ((ec->rx_coalesce_usecs == 0) &&
13126 (ec->rx_max_coalesced_frames == 0))
13127 return -EINVAL;
13128
13129 /* No tx interrupts will be generated if both are zero */
13130 if ((ec->tx_coalesce_usecs == 0) &&
13131 (ec->tx_max_coalesced_frames == 0))
13132 return -EINVAL;
13133
13134 /* Only copy relevant parameters, ignore all others. */
13135 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13136 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13137 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13138 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13139 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13140 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13141 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13142 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13143 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13144
13145 if (netif_running(dev)) {
13146 tg3_full_lock(tp, 0);
13147 __tg3_set_coalesce(tp, &tp->coal);
13148 tg3_full_unlock(tp);
13149 }
13150 return 0;
13151 }
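/* A minimal userspace sketch of the ETHTOOL_SCOALESCE path above,
 * roughly "ethtool -C eth0 rx-usecs 50 rx-frames 32"; the interface
 * name and values are assumptions for illustration:
 *
 *	int set_rx_coalesce(int fd)	// fd: any AF_INET datagram socket
 *	{
 *		struct ethtool_coalesce ec;
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&ec;
 *
 *		memset(&ec, 0, sizeof(ec));
 *		ec.cmd = ETHTOOL_GCOALESCE;	// fetch current settings
 *		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
 *			return -1;
 *
 *		ec.cmd = ETHTOOL_SCOALESCE;	// change only two fields
 *		ec.rx_coalesce_usecs = 50;
 *		ec.rx_max_coalesced_frames = 32;
 *		return ioctl(fd, SIOCETHTOOL, &ifr);
 *	}
 *
 * Reading before writing matters: the handler above rejects a request
 * whose rx (or tx) usecs and max-frames values are both zero.
 */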
13152
13153 static const struct ethtool_ops tg3_ethtool_ops = {
13154 .get_settings = tg3_get_settings,
13155 .set_settings = tg3_set_settings,
13156 .get_drvinfo = tg3_get_drvinfo,
13157 .get_regs_len = tg3_get_regs_len,
13158 .get_regs = tg3_get_regs,
13159 .get_wol = tg3_get_wol,
13160 .set_wol = tg3_set_wol,
13161 .get_msglevel = tg3_get_msglevel,
13162 .set_msglevel = tg3_set_msglevel,
13163 .nway_reset = tg3_nway_reset,
13164 .get_link = ethtool_op_get_link,
13165 .get_eeprom_len = tg3_get_eeprom_len,
13166 .get_eeprom = tg3_get_eeprom,
13167 .set_eeprom = tg3_set_eeprom,
13168 .get_ringparam = tg3_get_ringparam,
13169 .set_ringparam = tg3_set_ringparam,
13170 .get_pauseparam = tg3_get_pauseparam,
13171 .set_pauseparam = tg3_set_pauseparam,
13172 .self_test = tg3_self_test,
13173 .get_strings = tg3_get_strings,
13174 .set_phys_id = tg3_set_phys_id,
13175 .get_ethtool_stats = tg3_get_ethtool_stats,
13176 .get_coalesce = tg3_get_coalesce,
13177 .set_coalesce = tg3_set_coalesce,
13178 .get_sset_count = tg3_get_sset_count,
13179 .get_rxnfc = tg3_get_rxnfc,
13180 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13181 .get_rxfh_indir = tg3_get_rxfh_indir,
13182 .set_rxfh_indir = tg3_set_rxfh_indir,
13183 .get_channels = tg3_get_channels,
13184 .set_channels = tg3_set_channels,
13185 .get_ts_info = tg3_get_ts_info,
13186 };
13187
13188 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13189 struct rtnl_link_stats64 *stats)
13190 {
13191 struct tg3 *tp = netdev_priv(dev);
13192
13193 spin_lock_bh(&tp->lock);
13194 if (!tp->hw_stats) {
13195 spin_unlock_bh(&tp->lock);
13196 return &tp->net_stats_prev;
13197 }
13198
13199 tg3_get_nstats(tp, stats);
13200 spin_unlock_bh(&tp->lock);
13201
13202 return stats;
13203 }
13204
13205 static void tg3_set_rx_mode(struct net_device *dev)
13206 {
13207 struct tg3 *tp = netdev_priv(dev);
13208
13209 if (!netif_running(dev))
13210 return;
13211
13212 tg3_full_lock(tp, 0);
13213 __tg3_set_rx_mode(dev);
13214 tg3_full_unlock(tp);
13215 }
13216
13217 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13218 int new_mtu)
13219 {
13220 dev->mtu = new_mtu;
13221
13222 if (new_mtu > ETH_DATA_LEN) {
13223 if (tg3_flag(tp, 5780_CLASS)) {
13224 netdev_update_features(dev);
13225 tg3_flag_clear(tp, TSO_CAPABLE);
13226 } else {
13227 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13228 }
13229 } else {
13230 if (tg3_flag(tp, 5780_CLASS)) {
13231 tg3_flag_set(tp, TSO_CAPABLE);
13232 netdev_update_features(dev);
13233 }
13234 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13235 }
13236 }
13237
13238 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13239 {
13240 struct tg3 *tp = netdev_priv(dev);
13241 int err, reset_phy = 0;
13242
13243 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13244 return -EINVAL;
13245
13246 if (!netif_running(dev)) {
13247 /* The new MTU will simply be picked up when the
13248 * device is next brought up.
13249 */
13250 tg3_set_mtu(dev, tp, new_mtu);
13251 return 0;
13252 }
13253
13254 tg3_phy_stop(tp);
13255
13256 tg3_netif_stop(tp);
13257
13258 tg3_full_lock(tp, 1);
13259
13260 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13261
13262 tg3_set_mtu(dev, tp, new_mtu);
13263
13264 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13265 * breaks all requests to 256 bytes.
13266 */
13267 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13268 reset_phy = 1;
13269
13270 err = tg3_restart_hw(tp, reset_phy);
13271
13272 if (!err)
13273 tg3_netif_start(tp);
13274
13275 tg3_full_unlock(tp);
13276
13277 if (!err)
13278 tg3_phy_start(tp);
13279
13280 return err;
13281 }
13282
13283 static const struct net_device_ops tg3_netdev_ops = {
13284 .ndo_open = tg3_open,
13285 .ndo_stop = tg3_close,
13286 .ndo_start_xmit = tg3_start_xmit,
13287 .ndo_get_stats64 = tg3_get_stats64,
13288 .ndo_validate_addr = eth_validate_addr,
13289 .ndo_set_rx_mode = tg3_set_rx_mode,
13290 .ndo_set_mac_address = tg3_set_mac_addr,
13291 .ndo_do_ioctl = tg3_ioctl,
13292 .ndo_tx_timeout = tg3_tx_timeout,
13293 .ndo_change_mtu = tg3_change_mtu,
13294 .ndo_fix_features = tg3_fix_features,
13295 .ndo_set_features = tg3_set_features,
13296 #ifdef CONFIG_NET_POLL_CONTROLLER
13297 .ndo_poll_controller = tg3_poll_controller,
13298 #endif
13299 };
13300
13301 static void tg3_get_eeprom_size(struct tg3 *tp)
13302 {
13303 u32 cursize, val, magic;
13304
13305 tp->nvram_size = EEPROM_CHIP_SIZE;
13306
13307 if (tg3_nvram_read(tp, 0, &magic) != 0)
13308 return;
13309
13310 if ((magic != TG3_EEPROM_MAGIC) &&
13311 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13312 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13313 return;
13314
13315 /*
13316 * Size the chip by reading offsets at increasing powers of two.
13317 * When we encounter our validation signature, we know the addressing
13318 * has wrapped around, and thus have our chip size.
13319 */
13320 cursize = 0x10;
13321
13322 while (cursize < tp->nvram_size) {
13323 if (tg3_nvram_read(tp, cursize, &val) != 0)
13324 return;
13325
13326 if (val == magic)
13327 break;
13328
13329 cursize <<= 1;
13330 }
13331
13332 tp->nvram_size = cursize;
13333 }
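/* Example: a 128 KB part that ignores address bits above its size
 * aliases offset 0x20000 back to offset 0, so the loop above sees the
 * signature word again at cursize == 0x20000 and stops there. A generic
 * sketch of the same wrap probe (hypothetical helper, not part of the
 * driver; nvram_read_word() is assumed):
 *
 *	static u32 probe_wrap_size(u32 magic, u32 start, u32 limit)
 *	{
 *		u32 off, val;
 *
 *		for (off = start; off < limit; off <<= 1) {
 *			if (nvram_read_word(off, &val))
 *				return 0;	// read error
 *			if (val == magic)	// address wrapped to 0
 *				break;
 *		}
 *		return off;
 *	}
 */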
13334
13335 static void tg3_get_nvram_size(struct tg3 *tp)
13336 {
13337 u32 val;
13338
13339 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13340 return;
13341
13342 /* Selfboot format */
13343 if (val != TG3_EEPROM_MAGIC) {
13344 tg3_get_eeprom_size(tp);
13345 return;
13346 }
13347
13348 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13349 if (val != 0) {
13350 /* This is confusing. We want to operate on the
13351 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13352 * call will read from NVRAM and byteswap the data
13353 * according to the byteswapping settings for all
13354 * other register accesses. This ensures the data we
13355 * want will always reside in the lower 16-bits.
13356 * However, the data in NVRAM is in LE format, which
13357 * means the data from the NVRAM read will always be
13358 * opposite the endianness of the CPU. The 16-bit
13359 * byteswap then brings the data to CPU endianness.
13360 */
13361 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13362 return;
13363 }
13364 }
13365 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13366 }
13367
13368 static void tg3_get_nvram_info(struct tg3 *tp)
13369 {
13370 u32 nvcfg1;
13371
13372 nvcfg1 = tr32(NVRAM_CFG1);
13373 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13374 tg3_flag_set(tp, FLASH);
13375 } else {
13376 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13377 tw32(NVRAM_CFG1, nvcfg1);
13378 }
13379
13380 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13381 tg3_flag(tp, 5780_CLASS)) {
13382 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13383 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13384 tp->nvram_jedecnum = JEDEC_ATMEL;
13385 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13386 tg3_flag_set(tp, NVRAM_BUFFERED);
13387 break;
13388 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13389 tp->nvram_jedecnum = JEDEC_ATMEL;
13390 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13391 break;
13392 case FLASH_VENDOR_ATMEL_EEPROM:
13393 tp->nvram_jedecnum = JEDEC_ATMEL;
13394 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13395 tg3_flag_set(tp, NVRAM_BUFFERED);
13396 break;
13397 case FLASH_VENDOR_ST:
13398 tp->nvram_jedecnum = JEDEC_ST;
13399 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13400 tg3_flag_set(tp, NVRAM_BUFFERED);
13401 break;
13402 case FLASH_VENDOR_SAIFUN:
13403 tp->nvram_jedecnum = JEDEC_SAIFUN;
13404 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13405 break;
13406 case FLASH_VENDOR_SST_SMALL:
13407 case FLASH_VENDOR_SST_LARGE:
13408 tp->nvram_jedecnum = JEDEC_SST;
13409 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13410 break;
13411 }
13412 } else {
13413 tp->nvram_jedecnum = JEDEC_ATMEL;
13414 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13415 tg3_flag_set(tp, NVRAM_BUFFERED);
13416 }
13417 }
13418
13419 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13420 {
13421 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13422 case FLASH_5752PAGE_SIZE_256:
13423 tp->nvram_pagesize = 256;
13424 break;
13425 case FLASH_5752PAGE_SIZE_512:
13426 tp->nvram_pagesize = 512;
13427 break;
13428 case FLASH_5752PAGE_SIZE_1K:
13429 tp->nvram_pagesize = 1024;
13430 break;
13431 case FLASH_5752PAGE_SIZE_2K:
13432 tp->nvram_pagesize = 2048;
13433 break;
13434 case FLASH_5752PAGE_SIZE_4K:
13435 tp->nvram_pagesize = 4096;
13436 break;
13437 case FLASH_5752PAGE_SIZE_264:
13438 tp->nvram_pagesize = 264;
13439 break;
13440 case FLASH_5752PAGE_SIZE_528:
13441 tp->nvram_pagesize = 528;
13442 break;
13443 }
13444 }
13445
13446 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13447 {
13448 u32 nvcfg1;
13449
13450 nvcfg1 = tr32(NVRAM_CFG1);
13451
13452 /* NVRAM protection for TPM */
13453 if (nvcfg1 & (1 << 27))
13454 tg3_flag_set(tp, PROTECTED_NVRAM);
13455
13456 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13457 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13458 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13459 tp->nvram_jedecnum = JEDEC_ATMEL;
13460 tg3_flag_set(tp, NVRAM_BUFFERED);
13461 break;
13462 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13463 tp->nvram_jedecnum = JEDEC_ATMEL;
13464 tg3_flag_set(tp, NVRAM_BUFFERED);
13465 tg3_flag_set(tp, FLASH);
13466 break;
13467 case FLASH_5752VENDOR_ST_M45PE10:
13468 case FLASH_5752VENDOR_ST_M45PE20:
13469 case FLASH_5752VENDOR_ST_M45PE40:
13470 tp->nvram_jedecnum = JEDEC_ST;
13471 tg3_flag_set(tp, NVRAM_BUFFERED);
13472 tg3_flag_set(tp, FLASH);
13473 break;
13474 }
13475
13476 if (tg3_flag(tp, FLASH)) {
13477 tg3_nvram_get_pagesize(tp, nvcfg1);
13478 } else {
13479 /* For eeprom, set pagesize to maximum eeprom size */
13480 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13481
13482 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13483 tw32(NVRAM_CFG1, nvcfg1);
13484 }
13485 }
13486
13487 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13488 {
13489 u32 nvcfg1, protect = 0;
13490
13491 nvcfg1 = tr32(NVRAM_CFG1);
13492
13493 /* NVRAM protection for TPM */
13494 if (nvcfg1 & (1 << 27)) {
13495 tg3_flag_set(tp, PROTECTED_NVRAM);
13496 protect = 1;
13497 }
13498
13499 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13500 switch (nvcfg1) {
13501 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13502 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13503 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13504 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13505 tp->nvram_jedecnum = JEDEC_ATMEL;
13506 tg3_flag_set(tp, NVRAM_BUFFERED);
13507 tg3_flag_set(tp, FLASH);
13508 tp->nvram_pagesize = 264;
13509 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13510 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13511 tp->nvram_size = (protect ? 0x3e200 :
13512 TG3_NVRAM_SIZE_512KB);
13513 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13514 tp->nvram_size = (protect ? 0x1f200 :
13515 TG3_NVRAM_SIZE_256KB);
13516 else
13517 tp->nvram_size = (protect ? 0x1f200 :
13518 TG3_NVRAM_SIZE_128KB);
13519 break;
13520 case FLASH_5752VENDOR_ST_M45PE10:
13521 case FLASH_5752VENDOR_ST_M45PE20:
13522 case FLASH_5752VENDOR_ST_M45PE40:
13523 tp->nvram_jedecnum = JEDEC_ST;
13524 tg3_flag_set(tp, NVRAM_BUFFERED);
13525 tg3_flag_set(tp, FLASH);
13526 tp->nvram_pagesize = 256;
13527 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13528 tp->nvram_size = (protect ?
13529 TG3_NVRAM_SIZE_64KB :
13530 TG3_NVRAM_SIZE_128KB);
13531 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13532 tp->nvram_size = (protect ?
13533 TG3_NVRAM_SIZE_64KB :
13534 TG3_NVRAM_SIZE_256KB);
13535 else
13536 tp->nvram_size = (protect ?
13537 TG3_NVRAM_SIZE_128KB :
13538 TG3_NVRAM_SIZE_512KB);
13539 break;
13540 }
13541 }
13542
13543 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13544 {
13545 u32 nvcfg1;
13546
13547 nvcfg1 = tr32(NVRAM_CFG1);
13548
13549 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13550 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13551 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13552 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13553 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13554 tp->nvram_jedecnum = JEDEC_ATMEL;
13555 tg3_flag_set(tp, NVRAM_BUFFERED);
13556 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13557
13558 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13559 tw32(NVRAM_CFG1, nvcfg1);
13560 break;
13561 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13562 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13563 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13564 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13565 tp->nvram_jedecnum = JEDEC_ATMEL;
13566 tg3_flag_set(tp, NVRAM_BUFFERED);
13567 tg3_flag_set(tp, FLASH);
13568 tp->nvram_pagesize = 264;
13569 break;
13570 case FLASH_5752VENDOR_ST_M45PE10:
13571 case FLASH_5752VENDOR_ST_M45PE20:
13572 case FLASH_5752VENDOR_ST_M45PE40:
13573 tp->nvram_jedecnum = JEDEC_ST;
13574 tg3_flag_set(tp, NVRAM_BUFFERED);
13575 tg3_flag_set(tp, FLASH);
13576 tp->nvram_pagesize = 256;
13577 break;
13578 }
13579 }
13580
13581 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13582 {
13583 u32 nvcfg1, protect = 0;
13584
13585 nvcfg1 = tr32(NVRAM_CFG1);
13586
13587 /* NVRAM protection for TPM */
13588 if (nvcfg1 & (1 << 27)) {
13589 tg3_flag_set(tp, PROTECTED_NVRAM);
13590 protect = 1;
13591 }
13592
13593 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13594 switch (nvcfg1) {
13595 case FLASH_5761VENDOR_ATMEL_ADB021D:
13596 case FLASH_5761VENDOR_ATMEL_ADB041D:
13597 case FLASH_5761VENDOR_ATMEL_ADB081D:
13598 case FLASH_5761VENDOR_ATMEL_ADB161D:
13599 case FLASH_5761VENDOR_ATMEL_MDB021D:
13600 case FLASH_5761VENDOR_ATMEL_MDB041D:
13601 case FLASH_5761VENDOR_ATMEL_MDB081D:
13602 case FLASH_5761VENDOR_ATMEL_MDB161D:
13603 tp->nvram_jedecnum = JEDEC_ATMEL;
13604 tg3_flag_set(tp, NVRAM_BUFFERED);
13605 tg3_flag_set(tp, FLASH);
13606 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13607 tp->nvram_pagesize = 256;
13608 break;
13609 case FLASH_5761VENDOR_ST_A_M45PE20:
13610 case FLASH_5761VENDOR_ST_A_M45PE40:
13611 case FLASH_5761VENDOR_ST_A_M45PE80:
13612 case FLASH_5761VENDOR_ST_A_M45PE16:
13613 case FLASH_5761VENDOR_ST_M_M45PE20:
13614 case FLASH_5761VENDOR_ST_M_M45PE40:
13615 case FLASH_5761VENDOR_ST_M_M45PE80:
13616 case FLASH_5761VENDOR_ST_M_M45PE16:
13617 tp->nvram_jedecnum = JEDEC_ST;
13618 tg3_flag_set(tp, NVRAM_BUFFERED);
13619 tg3_flag_set(tp, FLASH);
13620 tp->nvram_pagesize = 256;
13621 break;
13622 }
13623
13624 if (protect) {
13625 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13626 } else {
13627 switch (nvcfg1) {
13628 case FLASH_5761VENDOR_ATMEL_ADB161D:
13629 case FLASH_5761VENDOR_ATMEL_MDB161D:
13630 case FLASH_5761VENDOR_ST_A_M45PE16:
13631 case FLASH_5761VENDOR_ST_M_M45PE16:
13632 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13633 break;
13634 case FLASH_5761VENDOR_ATMEL_ADB081D:
13635 case FLASH_5761VENDOR_ATMEL_MDB081D:
13636 case FLASH_5761VENDOR_ST_A_M45PE80:
13637 case FLASH_5761VENDOR_ST_M_M45PE80:
13638 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13639 break;
13640 case FLASH_5761VENDOR_ATMEL_ADB041D:
13641 case FLASH_5761VENDOR_ATMEL_MDB041D:
13642 case FLASH_5761VENDOR_ST_A_M45PE40:
13643 case FLASH_5761VENDOR_ST_M_M45PE40:
13644 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13645 break;
13646 case FLASH_5761VENDOR_ATMEL_ADB021D:
13647 case FLASH_5761VENDOR_ATMEL_MDB021D:
13648 case FLASH_5761VENDOR_ST_A_M45PE20:
13649 case FLASH_5761VENDOR_ST_M_M45PE20:
13650 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13651 break;
13652 }
13653 }
13654 }
13655
13656 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13657 {
13658 tp->nvram_jedecnum = JEDEC_ATMEL;
13659 tg3_flag_set(tp, NVRAM_BUFFERED);
13660 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13661 }
13662
13663 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13664 {
13665 u32 nvcfg1;
13666
13667 nvcfg1 = tr32(NVRAM_CFG1);
13668
13669 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13670 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13671 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13672 tp->nvram_jedecnum = JEDEC_ATMEL;
13673 tg3_flag_set(tp, NVRAM_BUFFERED);
13674 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13675
13676 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13677 tw32(NVRAM_CFG1, nvcfg1);
13678 return;
13679 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13680 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13681 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13682 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13683 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13684 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13685 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13686 tp->nvram_jedecnum = JEDEC_ATMEL;
13687 tg3_flag_set(tp, NVRAM_BUFFERED);
13688 tg3_flag_set(tp, FLASH);
13689
13690 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13691 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13692 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13693 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13694 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13695 break;
13696 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13697 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13698 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13699 break;
13700 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13701 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13702 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13703 break;
13704 }
13705 break;
13706 case FLASH_5752VENDOR_ST_M45PE10:
13707 case FLASH_5752VENDOR_ST_M45PE20:
13708 case FLASH_5752VENDOR_ST_M45PE40:
13709 tp->nvram_jedecnum = JEDEC_ST;
13710 tg3_flag_set(tp, NVRAM_BUFFERED);
13711 tg3_flag_set(tp, FLASH);
13712
13713 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13714 case FLASH_5752VENDOR_ST_M45PE10:
13715 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13716 break;
13717 case FLASH_5752VENDOR_ST_M45PE20:
13718 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13719 break;
13720 case FLASH_5752VENDOR_ST_M45PE40:
13721 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13722 break;
13723 }
13724 break;
13725 default:
13726 tg3_flag_set(tp, NO_NVRAM);
13727 return;
13728 }
13729
13730 tg3_nvram_get_pagesize(tp, nvcfg1);
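	/* Only Atmel DataFlash parts with 264- or 528-byte pages need the
	 * page/offset address translation; flag everything else as
	 * flat-addressed.
	 */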
13731 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13732 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13733 }
13734
13735
13736 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13737 {
13738 u32 nvcfg1;
13739
13740 nvcfg1 = tr32(NVRAM_CFG1);
13741
13742 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13743 case FLASH_5717VENDOR_ATMEL_EEPROM:
13744 case FLASH_5717VENDOR_MICRO_EEPROM:
13745 tp->nvram_jedecnum = JEDEC_ATMEL;
13746 tg3_flag_set(tp, NVRAM_BUFFERED);
13747 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13748
13749 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13750 tw32(NVRAM_CFG1, nvcfg1);
13751 return;
13752 case FLASH_5717VENDOR_ATMEL_MDB011D:
13753 case FLASH_5717VENDOR_ATMEL_ADB011B:
13754 case FLASH_5717VENDOR_ATMEL_ADB011D:
13755 case FLASH_5717VENDOR_ATMEL_MDB021D:
13756 case FLASH_5717VENDOR_ATMEL_ADB021B:
13757 case FLASH_5717VENDOR_ATMEL_ADB021D:
13758 case FLASH_5717VENDOR_ATMEL_45USPT:
13759 tp->nvram_jedecnum = JEDEC_ATMEL;
13760 tg3_flag_set(tp, NVRAM_BUFFERED);
13761 tg3_flag_set(tp, FLASH);
13762
13763 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13764 case FLASH_5717VENDOR_ATMEL_MDB021D:
13765 /* Detect size with tg3_nvram_get_size() */
13766 break;
13767 case FLASH_5717VENDOR_ATMEL_ADB021B:
13768 case FLASH_5717VENDOR_ATMEL_ADB021D:
13769 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13770 break;
13771 default:
13772 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13773 break;
13774 }
13775 break;
13776 case FLASH_5717VENDOR_ST_M_M25PE10:
13777 case FLASH_5717VENDOR_ST_A_M25PE10:
13778 case FLASH_5717VENDOR_ST_M_M45PE10:
13779 case FLASH_5717VENDOR_ST_A_M45PE10:
13780 case FLASH_5717VENDOR_ST_M_M25PE20:
13781 case FLASH_5717VENDOR_ST_A_M25PE20:
13782 case FLASH_5717VENDOR_ST_M_M45PE20:
13783 case FLASH_5717VENDOR_ST_A_M45PE20:
13784 case FLASH_5717VENDOR_ST_25USPT:
13785 case FLASH_5717VENDOR_ST_45USPT:
13786 tp->nvram_jedecnum = JEDEC_ST;
13787 tg3_flag_set(tp, NVRAM_BUFFERED);
13788 tg3_flag_set(tp, FLASH);
13789
13790 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13791 case FLASH_5717VENDOR_ST_M_M25PE20:
13792 case FLASH_5717VENDOR_ST_M_M45PE20:
13793 /* Detect size with tg3_nvram_get_size() */
13794 break;
13795 case FLASH_5717VENDOR_ST_A_M25PE20:
13796 case FLASH_5717VENDOR_ST_A_M45PE20:
13797 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13798 break;
13799 default:
13800 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13801 break;
13802 }
13803 break;
13804 default:
13805 tg3_flag_set(tp, NO_NVRAM);
13806 return;
13807 }
13808
13809 tg3_nvram_get_pagesize(tp, nvcfg1);
13810 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13811 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13812 }
13813
13814 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13815 {
13816 u32 nvcfg1, nvmpinstrp;
13817
13818 nvcfg1 = tr32(NVRAM_CFG1);
13819 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13820
13821 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13822 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13823 tg3_flag_set(tp, NO_NVRAM);
13824 return;
13825 }
13826
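		/* The 5762 shares the 5720 decode table below, so fold its
		 * EEPROM strap values into their 5720 equivalents first.
		 */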
13827 switch (nvmpinstrp) {
13828 case FLASH_5762_EEPROM_HD:
13829 nvmpinstrp = FLASH_5720_EEPROM_HD;
13830 break;
13831 case FLASH_5762_EEPROM_LD:
13832 nvmpinstrp = FLASH_5720_EEPROM_LD;
13833 break;
13834 }
13835 }
13836
13837 switch (nvmpinstrp) {
13838 case FLASH_5720_EEPROM_HD:
13839 case FLASH_5720_EEPROM_LD:
13840 tp->nvram_jedecnum = JEDEC_ATMEL;
13841 tg3_flag_set(tp, NVRAM_BUFFERED);
13842
13843 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13844 tw32(NVRAM_CFG1, nvcfg1);
13845 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13846 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13847 else
13848 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13849 return;
13850 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13851 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13852 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13853 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13854 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13855 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13856 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13857 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13858 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13859 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13860 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13861 case FLASH_5720VENDOR_ATMEL_45USPT:
13862 tp->nvram_jedecnum = JEDEC_ATMEL;
13863 tg3_flag_set(tp, NVRAM_BUFFERED);
13864 tg3_flag_set(tp, FLASH);
13865
13866 switch (nvmpinstrp) {
13867 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13868 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13869 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13870 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13871 break;
13872 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13873 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13874 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13875 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13876 break;
13877 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13878 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13879 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13880 break;
13881 default:
13882 if (tg3_asic_rev(tp) != ASIC_REV_5762)
13883 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13884 break;
13885 }
13886 break;
13887 case FLASH_5720VENDOR_M_ST_M25PE10:
13888 case FLASH_5720VENDOR_M_ST_M45PE10:
13889 case FLASH_5720VENDOR_A_ST_M25PE10:
13890 case FLASH_5720VENDOR_A_ST_M45PE10:
13891 case FLASH_5720VENDOR_M_ST_M25PE20:
13892 case FLASH_5720VENDOR_M_ST_M45PE20:
13893 case FLASH_5720VENDOR_A_ST_M25PE20:
13894 case FLASH_5720VENDOR_A_ST_M45PE20:
13895 case FLASH_5720VENDOR_M_ST_M25PE40:
13896 case FLASH_5720VENDOR_M_ST_M45PE40:
13897 case FLASH_5720VENDOR_A_ST_M25PE40:
13898 case FLASH_5720VENDOR_A_ST_M45PE40:
13899 case FLASH_5720VENDOR_M_ST_M25PE80:
13900 case FLASH_5720VENDOR_M_ST_M45PE80:
13901 case FLASH_5720VENDOR_A_ST_M25PE80:
13902 case FLASH_5720VENDOR_A_ST_M45PE80:
13903 case FLASH_5720VENDOR_ST_25USPT:
13904 case FLASH_5720VENDOR_ST_45USPT:
13905 tp->nvram_jedecnum = JEDEC_ST;
13906 tg3_flag_set(tp, NVRAM_BUFFERED);
13907 tg3_flag_set(tp, FLASH);
13908
13909 switch (nvmpinstrp) {
13910 case FLASH_5720VENDOR_M_ST_M25PE20:
13911 case FLASH_5720VENDOR_M_ST_M45PE20:
13912 case FLASH_5720VENDOR_A_ST_M25PE20:
13913 case FLASH_5720VENDOR_A_ST_M45PE20:
13914 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13915 break;
13916 case FLASH_5720VENDOR_M_ST_M25PE40:
13917 case FLASH_5720VENDOR_M_ST_M45PE40:
13918 case FLASH_5720VENDOR_A_ST_M25PE40:
13919 case FLASH_5720VENDOR_A_ST_M45PE40:
13920 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13921 break;
13922 case FLASH_5720VENDOR_M_ST_M25PE80:
13923 case FLASH_5720VENDOR_M_ST_M45PE80:
13924 case FLASH_5720VENDOR_A_ST_M25PE80:
13925 case FLASH_5720VENDOR_A_ST_M45PE80:
13926 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13927 break;
13928 default:
13929 if (tg3_asic_rev(tp) != ASIC_REV_5762)
13930 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13931 break;
13932 }
13933 break;
13934 default:
13935 tg3_flag_set(tp, NO_NVRAM);
13936 return;
13937 }
13938
13939 tg3_nvram_get_pagesize(tp, nvcfg1);
13940 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13941 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13942
13943 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13944 u32 val;
13945
13946 if (tg3_nvram_read(tp, 0, &val))
13947 return;
13948
13949 if (val != TG3_EEPROM_MAGIC &&
13950 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13951 tg3_flag_set(tp, NO_NVRAM);
13952 }
13953 }
13954
13955 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13956 static void tg3_nvram_init(struct tg3 *tp)
13957 {
13958 if (tg3_flag(tp, IS_SSB_CORE)) {
13959 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
13960 tg3_flag_clear(tp, NVRAM);
13961 tg3_flag_clear(tp, NVRAM_BUFFERED);
13962 tg3_flag_set(tp, NO_NVRAM);
13963 return;
13964 }
13965
13966 tw32_f(GRC_EEPROM_ADDR,
13967 (EEPROM_ADDR_FSM_RESET |
13968 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13969 EEPROM_ADDR_CLKPERD_SHIFT)));
13970
13971 msleep(1);
13972
13973 /* Enable seeprom accesses. */
13974 tw32_f(GRC_LOCAL_CTRL,
13975 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13976 udelay(100);
13977
13978 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
13979 tg3_asic_rev(tp) != ASIC_REV_5701) {
13980 tg3_flag_set(tp, NVRAM);
13981
13982 if (tg3_nvram_lock(tp)) {
13983 netdev_warn(tp->dev,
13984 "Cannot get nvram lock, %s failed\n",
13985 __func__);
13986 return;
13987 }
13988 tg3_enable_nvram_access(tp);
13989
13990 tp->nvram_size = 0;
13991
13992 if (tg3_asic_rev(tp) == ASIC_REV_5752)
13993 tg3_get_5752_nvram_info(tp);
13994 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
13995 tg3_get_5755_nvram_info(tp);
13996 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
13997 tg3_asic_rev(tp) == ASIC_REV_5784 ||
13998 tg3_asic_rev(tp) == ASIC_REV_5785)
13999 tg3_get_5787_nvram_info(tp);
14000 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14001 tg3_get_5761_nvram_info(tp);
14002 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14003 tg3_get_5906_nvram_info(tp);
14004 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14005 tg3_flag(tp, 57765_CLASS))
14006 tg3_get_57780_nvram_info(tp);
14007 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14008 tg3_asic_rev(tp) == ASIC_REV_5719)
14009 tg3_get_5717_nvram_info(tp);
14010 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14011 tg3_asic_rev(tp) == ASIC_REV_5762)
14012 tg3_get_5720_nvram_info(tp);
14013 else
14014 tg3_get_nvram_info(tp);
14015
14016 if (tp->nvram_size == 0)
14017 tg3_get_nvram_size(tp);
14018
14019 tg3_disable_nvram_access(tp);
14020 tg3_nvram_unlock(tp);
14021
14022 } else {
14023 tg3_flag_clear(tp, NVRAM);
14024 tg3_flag_clear(tp, NVRAM_BUFFERED);
14025
14026 tg3_get_eeprom_size(tp);
14027 }
14028 }
14029
14030 struct subsys_tbl_ent {
14031 u16 subsys_vendor, subsys_devid;
14032 u32 phy_id;
14033 };
14034
14035 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14036 /* Broadcom boards. */
14037 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14038 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14039 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14040 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14041 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14042 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14043 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14044 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14045 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14046 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14047 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14048 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14049 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14050 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14051 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14052 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14053 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14054 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14055 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14056 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14057 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14058 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14059
14060 /* 3com boards. */
14061 { TG3PCI_SUBVENDOR_ID_3COM,
14062 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14063 { TG3PCI_SUBVENDOR_ID_3COM,
14064 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14065 { TG3PCI_SUBVENDOR_ID_3COM,
14066 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14067 { TG3PCI_SUBVENDOR_ID_3COM,
14068 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14069 { TG3PCI_SUBVENDOR_ID_3COM,
14070 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14071
14072 /* DELL boards. */
14073 { TG3PCI_SUBVENDOR_ID_DELL,
14074 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14075 { TG3PCI_SUBVENDOR_ID_DELL,
14076 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14077 { TG3PCI_SUBVENDOR_ID_DELL,
14078 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14079 { TG3PCI_SUBVENDOR_ID_DELL,
14080 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14081
14082 /* Compaq boards. */
14083 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14084 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14085 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14086 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14087 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14088 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14089 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14090 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14091 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14092 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14093
14094 /* IBM boards. */
14095 { TG3PCI_SUBVENDOR_ID_IBM,
14096 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14097 };
14098
14099 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14100 {
14101 int i;
14102
14103 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14104 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14105 tp->pdev->subsystem_vendor) &&
14106 (subsys_id_to_phy_id[i].subsys_devid ==
14107 tp->pdev->subsystem_device))
14108 return &subsys_id_to_phy_id[i];
14109 }
14110 return NULL;
14111 }
14112
14113 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14114 {
14115 u32 val;
14116
14117 tp->phy_id = TG3_PHY_ID_INVALID;
14118 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14119
14120 /* Assume an onboard, WOL-capable device by default. */
14121 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14122 tg3_flag_set(tp, WOL_CAP);
14123
14124 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14125 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14126 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14127 tg3_flag_set(tp, IS_NIC);
14128 }
14129 val = tr32(VCPU_CFGSHDW);
14130 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14131 tg3_flag_set(tp, ASPM_WORKAROUND);
14132 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14133 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14134 tg3_flag_set(tp, WOL_ENABLE);
14135 device_set_wakeup_enable(&tp->pdev->dev, true);
14136 }
14137 goto done;
14138 }
14139
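	/* Only trust the NIC SRAM config block if the bootcode left its
	 * signature behind.
	 */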
14140 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14141 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14142 u32 nic_cfg, led_cfg;
14143 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14144 int eeprom_phy_serdes = 0;
14145
14146 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14147 tp->nic_sram_data_cfg = nic_cfg;
14148
14149 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14150 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14151 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14152 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14153 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14154 (ver > 0) && (ver < 0x100))
14155 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14156
14157 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14158 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14159
14160 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14161 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14162 eeprom_phy_serdes = 1;
14163
14164 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14165 if (nic_phy_id != 0) {
14166 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14167 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14168
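			/* Repack the two NIC SRAM PHY ID words into the
			 * driver's internal phy_id layout; this mirrors the
			 * MII_PHYSID1/2 merge done in tg3_phy_probe().
			 */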
14169 eeprom_phy_id = (id1 >> 16) << 10;
14170 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14171 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14172 } else
14173 eeprom_phy_id = 0;
14174
14175 tp->phy_id = eeprom_phy_id;
14176 if (eeprom_phy_serdes) {
14177 if (!tg3_flag(tp, 5705_PLUS))
14178 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14179 else
14180 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14181 }
14182
14183 if (tg3_flag(tp, 5750_PLUS))
14184 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14185 SHASTA_EXT_LED_MODE_MASK);
14186 else
14187 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14188
14189 switch (led_cfg) {
14190 default:
14191 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14192 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14193 break;
14194
14195 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14196 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14197 break;
14198
14199 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14200 tp->led_ctrl = LED_CTRL_MODE_MAC;
14201
14202 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14203 * read from some older 5700/5701 bootcode.
14204 */
14205 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14206 tg3_asic_rev(tp) == ASIC_REV_5701)
14207 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14208
14209 break;
14210
14211 case SHASTA_EXT_LED_SHARED:
14212 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14213 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14214 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14215 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14216 LED_CTRL_MODE_PHY_2);
14217 break;
14218
14219 case SHASTA_EXT_LED_MAC:
14220 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14221 break;
14222
14223 case SHASTA_EXT_LED_COMBO:
14224 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14225 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14226 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14227 LED_CTRL_MODE_PHY_2);
14228 break;
14229
14230 }
14231
14232 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14233 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14234 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14235 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14236
14237 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14238 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14239
14240 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14241 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14242 if ((tp->pdev->subsystem_vendor ==
14243 PCI_VENDOR_ID_ARIMA) &&
14244 (tp->pdev->subsystem_device == 0x205a ||
14245 tp->pdev->subsystem_device == 0x2063))
14246 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14247 } else {
14248 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14249 tg3_flag_set(tp, IS_NIC);
14250 }
14251
14252 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14253 tg3_flag_set(tp, ENABLE_ASF);
14254 if (tg3_flag(tp, 5750_PLUS))
14255 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14256 }
14257
14258 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14259 tg3_flag(tp, 5750_PLUS))
14260 tg3_flag_set(tp, ENABLE_APE);
14261
14262 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14263 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14264 tg3_flag_clear(tp, WOL_CAP);
14265
14266 if (tg3_flag(tp, WOL_CAP) &&
14267 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14268 tg3_flag_set(tp, WOL_ENABLE);
14269 device_set_wakeup_enable(&tp->pdev->dev, true);
14270 }
14271
14272 if (cfg2 & (1 << 17))
14273 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14274
14275 /* serdes signal pre-emphasis in register 0x590 is
14276 * set by the bootcode if bit 18 is set. */
14277 if (cfg2 & (1 << 18))
14278 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14279
14280 if ((tg3_flag(tp, 57765_PLUS) ||
14281 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14282 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14283 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14284 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14285
14286 if (tg3_flag(tp, PCI_EXPRESS) &&
14287 tg3_asic_rev(tp) != ASIC_REV_5785 &&
14288 !tg3_flag(tp, 57765_PLUS)) {
14289 u32 cfg3;
14290
14291 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14292 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14293 tg3_flag_set(tp, ASPM_WORKAROUND);
14294 }
14295
14296 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14297 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14298 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14299 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14300 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14301 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14302 }
14303 done:
14304 if (tg3_flag(tp, WOL_CAP))
14305 device_set_wakeup_enable(&tp->pdev->dev,
14306 tg3_flag(tp, WOL_ENABLE));
14307 else
14308 device_set_wakeup_capable(&tp->pdev->dev, false);
14309 }
14310
14311 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14312 {
14313 int i, err;
14314 u32 val2, off = offset * 8;
14315
14316 err = tg3_nvram_lock(tp);
14317 if (err)
14318 return err;
14319
14320 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14321 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14322 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14323 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14324 udelay(10);
14325
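	/* Poll for completion: up to 100 iterations of 10 us, i.e. the
	 * same ~1 ms bound used in tg3_issue_otp_command() below.
	 */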
14326 for (i = 0; i < 100; i++) {
14327 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14328 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14329 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14330 break;
14331 }
14332 udelay(10);
14333 }
14334
14335 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14336
14337 tg3_nvram_unlock(tp);
14338 if (val2 & APE_OTP_STATUS_CMD_DONE)
14339 return 0;
14340
14341 return -EBUSY;
14342 }
14343
14344 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14345 {
14346 int i;
14347 u32 val;
14348
14349 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14350 tw32(OTP_CTRL, cmd);
14351
14352 /* Wait for up to 1 ms for command to execute. */
14353 for (i = 0; i < 100; i++) {
14354 val = tr32(OTP_STATUS);
14355 if (val & OTP_STATUS_CMD_DONE)
14356 break;
14357 udelay(10);
14358 }
14359
14360 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14361 }
14362
14363 /* Read the gphy configuration from the OTP region of the chip. The gphy
14364 * configuration is a 32-bit value that straddles the alignment boundary.
14365 * We do two 32-bit reads and then shift and merge the results.
14366 */
14367 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14368 {
14369 u32 bhalf_otp, thalf_otp;
14370
14371 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14372
14373 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14374 return 0;
14375
14376 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14377
14378 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14379 return 0;
14380
14381 thalf_otp = tr32(OTP_READ_DATA);
14382
14383 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14384
14385 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14386 return 0;
14387
14388 bhalf_otp = tr32(OTP_READ_DATA);
14389
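	/* Merge the two halves: bits 31:16 come from the low half of the
	 * first word and bits 15:0 from the high half of the second. As a
	 * purely illustrative example, thalf_otp = 0xAAAA1234 and
	 * bhalf_otp = 0x5678BBBB would yield 0x12345678.
	 */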
14390 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14391 }
14392
14393 static void tg3_phy_init_link_config(struct tg3 *tp)
14394 {
14395 u32 adv = ADVERTISED_Autoneg;
14396
14397 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14398 adv |= ADVERTISED_1000baseT_Half |
14399 ADVERTISED_1000baseT_Full;
14400
14401 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14402 adv |= ADVERTISED_100baseT_Half |
14403 ADVERTISED_100baseT_Full |
14404 ADVERTISED_10baseT_Half |
14405 ADVERTISED_10baseT_Full |
14406 ADVERTISED_TP;
14407 else
14408 adv |= ADVERTISED_FIBRE;
14409
14410 tp->link_config.advertising = adv;
14411 tp->link_config.speed = SPEED_UNKNOWN;
14412 tp->link_config.duplex = DUPLEX_UNKNOWN;
14413 tp->link_config.autoneg = AUTONEG_ENABLE;
14414 tp->link_config.active_speed = SPEED_UNKNOWN;
14415 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14416
14417 tp->old_link = -1;
14418 }
14419
14420 static int tg3_phy_probe(struct tg3 *tp)
14421 {
14422 u32 hw_phy_id_1, hw_phy_id_2;
14423 u32 hw_phy_id, hw_phy_id_masked;
14424 int err;
14425
14426 /* flow control autonegotiation is default behavior */
14427 tg3_flag_set(tp, PAUSE_AUTONEG);
14428 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14429
14430 if (tg3_flag(tp, ENABLE_APE)) {
14431 switch (tp->pci_fn) {
14432 case 0:
14433 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14434 break;
14435 case 1:
14436 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14437 break;
14438 case 2:
14439 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14440 break;
14441 case 3:
14442 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14443 break;
14444 }
14445 }
14446
14447 if (tg3_flag(tp, USE_PHYLIB))
14448 return tg3_phy_init(tp);
14449
14450 /* Reading the PHY ID register can conflict with ASF
14451 * firmware access to the PHY hardware.
14452 */
14453 err = 0;
14454 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14455 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14456 } else {
14457 /* Now read the physical PHY_ID from the chip and verify
14458 * that it is sane. If it doesn't look good, we fall back
14459 * first to the hard-coded table-based PHY_ID and, failing
14460 * that, to the value found in the eeprom area.
14461 */
14462 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14463 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14464
14465 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14466 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14467 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14468
14469 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14470 }
14471
14472 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14473 tp->phy_id = hw_phy_id;
14474 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14475 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14476 else
14477 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14478 } else {
14479 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14480 /* Do nothing, phy ID already set up in
14481 * tg3_get_eeprom_hw_cfg().
14482 */
14483 } else {
14484 struct subsys_tbl_ent *p;
14485
14486 /* No eeprom signature? Try the hardcoded
14487 * subsys device table.
14488 */
14489 p = tg3_lookup_by_subsys(tp);
14490 if (p) {
14491 tp->phy_id = p->phy_id;
14492 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14493 /* So far we have seen the IDs 0xbc050cd0,
14494 * 0xbc050f80 and 0xbc050c30 on devices
14495 * connected to a BCM4785, and there are
14496 * probably more. For now, just assume that
14497 * the phy is supported when it is connected
14498 * to an SSB core.
14499 */
14500 return -ENODEV;
14501 }
14502
14503 if (!tp->phy_id ||
14504 tp->phy_id == TG3_PHY_ID_BCM8002)
14505 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14506 }
14507 }
14508
14509 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14510 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14511 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14512 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14513 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14514 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14515 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14516 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14517 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14518
14519 tg3_phy_init_link_config(tp);
14520
14521 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14522 !tg3_flag(tp, ENABLE_APE) &&
14523 !tg3_flag(tp, ENABLE_ASF)) {
14524 u32 bmsr, dummy;
14525
14526 tg3_readphy(tp, MII_BMSR, &bmsr);
14527 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14528 (bmsr & BMSR_LSTATUS))
14529 goto skip_phy_reset;
14530
14531 err = tg3_phy_reset(tp);
14532 if (err)
14533 return err;
14534
14535 tg3_phy_set_wirespeed(tp);
14536
14537 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14538 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14539 tp->link_config.flowctrl);
14540
14541 tg3_writephy(tp, MII_BMCR,
14542 BMCR_ANENABLE | BMCR_ANRESTART);
14543 }
14544 }
14545
14546 skip_phy_reset:
14547 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14548 err = tg3_init_5401phy_dsp(tp);
14549 if (err)
14550 return err;
14551
14552 err = tg3_init_5401phy_dsp(tp);
14553 }
14554
14555 return err;
14556 }
14557
14558 static void tg3_read_vpd(struct tg3 *tp)
14559 {
14560 u8 *vpd_data;
14561 unsigned int block_end, rosize, len;
14562 u32 vpdlen;
14563 int j, i = 0;
14564
14565 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14566 if (!vpd_data)
14567 goto out_no_vpd;
14568
14569 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14570 if (i < 0)
14571 goto out_not_found;
14572
14573 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14574 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14575 i += PCI_VPD_LRDT_TAG_SIZE;
14576
14577 if (block_end > vpdlen)
14578 goto out_not_found;
14579
14580 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14581 PCI_VPD_RO_KEYWORD_MFR_ID);
14582 if (j > 0) {
14583 len = pci_vpd_info_field_size(&vpd_data[j]);
14584
14585 j += PCI_VPD_INFO_FLD_HDR_SIZE;
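		/* "1028" is Dell's PCI vendor ID in ASCII; the VENDOR0
		 * field is only treated as a firmware version on Dell
		 * boards.
		 */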
14586 if (j + len > block_end || len != 4 ||
14587 memcmp(&vpd_data[j], "1028", 4))
14588 goto partno;
14589
14590 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14591 PCI_VPD_RO_KEYWORD_VENDOR0);
14592 if (j < 0)
14593 goto partno;
14594
14595 len = pci_vpd_info_field_size(&vpd_data[j]);
14596
14597 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14598 if (j + len > block_end)
14599 goto partno;
14600
14601 memcpy(tp->fw_ver, &vpd_data[j], len);
14602 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14603 }
14604
14605 partno:
14606 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14607 PCI_VPD_RO_KEYWORD_PARTNO);
14608 if (i < 0)
14609 goto out_not_found;
14610
14611 len = pci_vpd_info_field_size(&vpd_data[i]);
14612
14613 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14614 if (len > TG3_BPN_SIZE ||
14615 (len + i) > vpdlen)
14616 goto out_not_found;
14617
14618 memcpy(tp->board_part_number, &vpd_data[i], len);
14619
14620 out_not_found:
14621 kfree(vpd_data);
14622 if (tp->board_part_number[0])
14623 return;
14624
14625 out_no_vpd:
14626 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14627 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14628 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14629 strcpy(tp->board_part_number, "BCM5717");
14630 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14631 strcpy(tp->board_part_number, "BCM5718");
14632 else
14633 goto nomatch;
14634 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14635 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14636 strcpy(tp->board_part_number, "BCM57780");
14637 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14638 strcpy(tp->board_part_number, "BCM57760");
14639 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14640 strcpy(tp->board_part_number, "BCM57790");
14641 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14642 strcpy(tp->board_part_number, "BCM57788");
14643 else
14644 goto nomatch;
14645 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14646 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14647 strcpy(tp->board_part_number, "BCM57761");
14648 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14649 strcpy(tp->board_part_number, "BCM57765");
14650 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14651 strcpy(tp->board_part_number, "BCM57781");
14652 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14653 strcpy(tp->board_part_number, "BCM57785");
14654 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14655 strcpy(tp->board_part_number, "BCM57791");
14656 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14657 strcpy(tp->board_part_number, "BCM57795");
14658 else
14659 goto nomatch;
14660 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14661 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14662 strcpy(tp->board_part_number, "BCM57762");
14663 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14664 strcpy(tp->board_part_number, "BCM57766");
14665 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14666 strcpy(tp->board_part_number, "BCM57782");
14667 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14668 strcpy(tp->board_part_number, "BCM57786");
14669 else
14670 goto nomatch;
14671 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14672 strcpy(tp->board_part_number, "BCM95906");
14673 } else {
14674 nomatch:
14675 strcpy(tp->board_part_number, "none");
14676 }
14677 }
14678
14679 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14680 {
14681 u32 val;
14682
14683 if (tg3_nvram_read(tp, offset, &val) ||
14684 (val & 0xfc000000) != 0x0c000000 ||
14685 tg3_nvram_read(tp, offset + 4, &val) ||
14686 val != 0)
14687 return 0;
14688
14689 return 1;
14690 }
14691
14692 static void tg3_read_bc_ver(struct tg3 *tp)
14693 {
14694 u32 val, offset, start, ver_offset;
14695 int i, dst_off;
14696 bool newver = false;
14697
14698 if (tg3_nvram_read(tp, 0xc, &offset) ||
14699 tg3_nvram_read(tp, 0x4, &start))
14700 return;
14701
14702 offset = tg3_nvram_logical_addr(tp, offset);
14703
14704 if (tg3_nvram_read(tp, offset, &val))
14705 return;
14706
14707 if ((val & 0xfc000000) == 0x0c000000) {
14708 if (tg3_nvram_read(tp, offset + 4, &val))
14709 return;
14710
14711 if (val == 0)
14712 newver = true;
14713 }
14714
14715 dst_off = strlen(tp->fw_ver);
14716
14717 if (newver) {
14718 if (TG3_VER_SIZE - dst_off < 16 ||
14719 tg3_nvram_read(tp, offset + 8, &ver_offset))
14720 return;
14721
14722 offset = offset + ver_offset - start;
14723 for (i = 0; i < 16; i += 4) {
14724 __be32 v;
14725 if (tg3_nvram_read_be32(tp, offset + i, &v))
14726 return;
14727
14728 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14729 }
14730 } else {
14731 u32 major, minor;
14732
14733 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14734 return;
14735
14736 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14737 TG3_NVM_BCVER_MAJSFT;
14738 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
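		/* e.g. a major of 3 and a minor of 1 formats as "v3.01" */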
14739 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14740 "v%d.%02d", major, minor);
14741 }
14742 }
14743
14744 static void tg3_read_hwsb_ver(struct tg3 *tp)
14745 {
14746 u32 val, major, minor;
14747
14748 /* Use native endian representation */
14749 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14750 return;
14751
14752 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14753 TG3_NVM_HWSB_CFG1_MAJSFT;
14754 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14755 TG3_NVM_HWSB_CFG1_MINSFT;
14756
14757 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14758 }
14759
14760 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14761 {
14762 u32 offset, major, minor, build;
14763
14764 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14765
14766 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14767 return;
14768
14769 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14770 case TG3_EEPROM_SB_REVISION_0:
14771 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14772 break;
14773 case TG3_EEPROM_SB_REVISION_2:
14774 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14775 break;
14776 case TG3_EEPROM_SB_REVISION_3:
14777 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14778 break;
14779 case TG3_EEPROM_SB_REVISION_4:
14780 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14781 break;
14782 case TG3_EEPROM_SB_REVISION_5:
14783 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14784 break;
14785 case TG3_EEPROM_SB_REVISION_6:
14786 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14787 break;
14788 default:
14789 return;
14790 }
14791
14792 if (tg3_nvram_read(tp, offset, &val))
14793 return;
14794
14795 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14796 TG3_EEPROM_SB_EDH_BLD_SHFT;
14797 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14798 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14799 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14800
14801 if (minor > 99 || build > 26)
14802 return;
14803
14804 offset = strlen(tp->fw_ver);
14805 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14806 " v%d.%02d", major, minor);
14807
14808 if (build > 0) {
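		/* Builds 1 through 26 map to the suffix letters 'a'..'z';
		 * the build > 26 check above rejects anything unmappable.
		 */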
14809 offset = strlen(tp->fw_ver);
14810 if (offset < TG3_VER_SIZE - 1)
14811 tp->fw_ver[offset] = 'a' + build - 1;
14812 }
14813 }
14814
14815 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14816 {
14817 u32 val, offset, start;
14818 int i, vlen;
14819
14820 for (offset = TG3_NVM_DIR_START;
14821 offset < TG3_NVM_DIR_END;
14822 offset += TG3_NVM_DIRENT_SIZE) {
14823 if (tg3_nvram_read(tp, offset, &val))
14824 return;
14825
14826 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14827 break;
14828 }
14829
14830 if (offset == TG3_NVM_DIR_END)
14831 return;
14832
14833 if (!tg3_flag(tp, 5705_PLUS))
14834 start = 0x08000000;
14835 else if (tg3_nvram_read(tp, offset - 4, &start))
14836 return;
14837
14838 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14839 !tg3_fw_img_is_valid(tp, offset) ||
14840 tg3_nvram_read(tp, offset + 8, &val))
14841 return;
14842
14843 offset += val - start;
14844
14845 vlen = strlen(tp->fw_ver);
14846
14847 tp->fw_ver[vlen++] = ',';
14848 tp->fw_ver[vlen++] = ' ';
14849
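	/* Append up to four NVRAM words (16 bytes) of version text,
	 * truncating the final word rather than overrunning TG3_VER_SIZE.
	 */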
14850 for (i = 0; i < 4; i++) {
14851 __be32 v;
14852 if (tg3_nvram_read_be32(tp, offset, &v))
14853 return;
14854
14855 offset += sizeof(v);
14856
14857 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14858 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14859 break;
14860 }
14861
14862 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14863 vlen += sizeof(v);
14864 }
14865 }
14866
14867 static void tg3_probe_ncsi(struct tg3 *tp)
14868 {
14869 u32 apedata;
14870
14871 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14872 if (apedata != APE_SEG_SIG_MAGIC)
14873 return;
14874
14875 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14876 if (!(apedata & APE_FW_STATUS_READY))
14877 return;
14878
14879 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14880 tg3_flag_set(tp, APE_HAS_NCSI);
14881 }
14882
14883 static void tg3_read_dash_ver(struct tg3 *tp)
14884 {
14885 int vlen;
14886 u32 apedata;
14887 char *fwtype;
14888
14889 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14890
14891 if (tg3_flag(tp, APE_HAS_NCSI))
14892 fwtype = "NCSI";
14893 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14894 fwtype = "SMASH";
14895 else
14896 fwtype = "DASH";
14897
14898 vlen = strlen(tp->fw_ver);
14899
14900 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14901 fwtype,
14902 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14903 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14904 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14905 (apedata & APE_FW_VERSION_BLDMSK));
14906 }
14907
14908 static void tg3_read_otp_ver(struct tg3 *tp)
14909 {
14910 u32 val, val2;
14911
14912 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14913 return;
14914
14915 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14916 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14917 TG3_OTP_MAGIC0_VALID(val)) {
14918 u64 val64 = (u64) val << 32 | val2;
14919 u32 ver = 0;
14920 int i, vlen;
14921
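		/* Scan the version bytes from the low end, remembering the
		 * most recent non-zero byte and stopping at the first zero.
		 */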
14922 for (i = 0; i < 7; i++) {
14923 if ((val64 & 0xff) == 0)
14924 break;
14925 ver = val64 & 0xff;
14926 val64 >>= 8;
14927 }
14928 vlen = strlen(tp->fw_ver);
14929 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14930 }
14931 }
14932
14933 static void tg3_read_fw_ver(struct tg3 *tp)
14934 {
14935 u32 val;
14936 bool vpd_vers = false;
14937
14938 if (tp->fw_ver[0] != 0)
14939 vpd_vers = true;
14940
14941 if (tg3_flag(tp, NO_NVRAM)) {
14942 strcat(tp->fw_ver, "sb");
14943 tg3_read_otp_ver(tp);
14944 return;
14945 }
14946
14947 if (tg3_nvram_read(tp, 0, &val))
14948 return;
14949
14950 if (val == TG3_EEPROM_MAGIC)
14951 tg3_read_bc_ver(tp);
14952 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14953 tg3_read_sb_ver(tp, val);
14954 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14955 tg3_read_hwsb_ver(tp);
14956
14957 if (tg3_flag(tp, ENABLE_ASF)) {
14958 if (tg3_flag(tp, ENABLE_APE)) {
14959 tg3_probe_ncsi(tp);
14960 if (!vpd_vers)
14961 tg3_read_dash_ver(tp);
14962 } else if (!vpd_vers) {
14963 tg3_read_mgmtfw_ver(tp);
14964 }
14965 }
14966
14967 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14968 }
14969
14970 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14971 {
14972 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14973 return TG3_RX_RET_MAX_SIZE_5717;
14974 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14975 return TG3_RX_RET_MAX_SIZE_5700;
14976 else
14977 return TG3_RX_RET_MAX_SIZE_5705;
14978 }
14979
14980 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14981 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14982 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14983 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14984 { },
14985 };
14986
14987 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14988 {
14989 struct pci_dev *peer;
14990 unsigned int func, devnr = tp->pdev->devfn & ~7;
14991
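	/* The low three bits of devfn are the function number; scan all
	 * eight functions in this slot for the other port.
	 */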
14992 for (func = 0; func < 8; func++) {
14993 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14994 if (peer && peer != tp->pdev)
14995 break;
14996 pci_dev_put(peer);
14997 }
14998 /* 5704 can be configured in single-port mode; set peer to
14999 * tp->pdev in that case.
15000 */
15001 if (!peer) {
15002 peer = tp->pdev;
15003 return peer;
15004 }
15005
15006 /*
15007 * We don't need to keep the refcount elevated; there's no way
15008 * to remove one half of this device without removing the other.
15009 */
15010 pci_dev_put(peer);
15011
15012 return peer;
15013 }
15014
15015 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15016 {
15017 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15018 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15019 u32 reg;
15020
15021 /* All devices that use the alternate
15022 * ASIC REV location have a CPMU.
15023 */
15024 tg3_flag_set(tp, CPMU_PRESENT);
15025
15026 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15027 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15028 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15029 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15030 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15031 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15032 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15034 reg = TG3PCI_GEN2_PRODID_ASICREV;
15035 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15042 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15043 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15044 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15045 reg = TG3PCI_GEN15_PRODID_ASICREV;
15046 else
15047 reg = TG3PCI_PRODID_ASICREV;
15048
15049 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15050 }
15051
15052 /* Wrong chip ID in 5752 A0. This code can be removed later
15053 * as A0 is not in production.
15054 */
15055 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15056 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15057
15058 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15059 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15060
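	/* The *_PLUS flags set below nest: each newer-generation flag
	 * implies all of the older ones (5717_PLUS -> 57765_PLUS ->
	 * 5755_PLUS -> 5750_PLUS -> 5705_PLUS), so later code can test
	 * the broadest flag that applies.
	 */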
15061 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15062 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15063 tg3_asic_rev(tp) == ASIC_REV_5720)
15064 tg3_flag_set(tp, 5717_PLUS);
15065
15066 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15067 tg3_asic_rev(tp) == ASIC_REV_57766)
15068 tg3_flag_set(tp, 57765_CLASS);
15069
15070 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15071 tg3_asic_rev(tp) == ASIC_REV_5762)
15072 tg3_flag_set(tp, 57765_PLUS);
15073
15074 /* Intentionally exclude ASIC_REV_5906 */
15075 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15076 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15077 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15078 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15079 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15080 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15081 tg3_flag(tp, 57765_PLUS))
15082 tg3_flag_set(tp, 5755_PLUS);
15083
15084 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15085 tg3_asic_rev(tp) == ASIC_REV_5714)
15086 tg3_flag_set(tp, 5780_CLASS);
15087
15088 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15089 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15090 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15091 tg3_flag(tp, 5755_PLUS) ||
15092 tg3_flag(tp, 5780_CLASS))
15093 tg3_flag_set(tp, 5750_PLUS);
15094
15095 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15096 tg3_flag(tp, 5750_PLUS))
15097 tg3_flag_set(tp, 5705_PLUS);
15098 }
15099
15100 static bool tg3_10_100_only_device(struct tg3 *tp,
15101 const struct pci_device_id *ent)
15102 {
15103 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15104
15105 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15106 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15107 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15108 return true;
15109
15110 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15111 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15112 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15113 return true;
15114 } else {
15115 return true;
15116 }
15117 }
15118
15119 return false;
15120 }
15121
15122 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15123 {
15124 u32 misc_ctrl_reg;
15125 u32 pci_state_reg, grc_misc_cfg;
15126 u32 val;
15127 u16 pci_cmd;
15128 int err;
15129
15130 /* Force memory write invalidate off. If we leave it on,
15131 * then on 5700_BX chips we have to enable a workaround.
15132 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15133 * to match the cacheline size. The Broadcom driver has this
15134 * workaround but turns MWI off all the time, so the workaround
15135 * never takes effect. This seems to suggest that it is insufficient.
15136 */
15137 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15138 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15139 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15140
15141 /* Important! -- Make sure register accesses are byteswapped
15142 * correctly. Also, for those chips that require it, make
15143 * sure that indirect register accesses are enabled before
15144 * the first operation.
15145 */
15146 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15147 &misc_ctrl_reg);
15148 tp->misc_host_ctrl |= (misc_ctrl_reg &
15149 MISC_HOST_CTRL_CHIPREV);
15150 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15151 tp->misc_host_ctrl);
15152
15153 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15154
15155 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15156 * we need to disable memory and use config. cycles
15157 * only to access all registers. The 5702/03 chips
15158 * can mistakenly decode the special cycles from the
15159 * ICH chipsets as memory write cycles, causing corruption
15160 * of register and memory space. Only certain ICH bridges
15161 * will drive special cycles with non-zero data during the
15162 * address phase which can fall within the 5703's address
15163 * range. This is not an ICH bug as the PCI spec allows
15164 * non-zero address during special cycles. However, only
15165 * these ICH bridges are known to drive non-zero addresses
15166 * during special cycles.
15167 *
15168 * Since special cycles do not cross PCI bridges, we only
15169 * enable this workaround if the 5703 is on the secondary
15170 * bus of these ICH bridges.
15171 */
15172 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15173 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15174 static struct tg3_dev_id {
15175 u32 vendor;
15176 u32 device;
15177 u32 rev;
15178 } ich_chipsets[] = {
15179 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15180 PCI_ANY_ID },
15181 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15182 PCI_ANY_ID },
15183 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15184 0xa },
15185 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15186 PCI_ANY_ID },
15187 { },
15188 };
15189 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15190 struct pci_dev *bridge = NULL;
15191
15192 while (pci_id->vendor != 0) {
15193 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15194 bridge);
15195 if (!bridge) {
15196 pci_id++;
15197 continue;
15198 }
15199 if (pci_id->rev != PCI_ANY_ID) {
15200 if (bridge->revision > pci_id->rev)
15201 continue;
15202 }
15203 if (bridge->subordinate &&
15204 (bridge->subordinate->number ==
15205 tp->pdev->bus->number)) {
15206 tg3_flag_set(tp, ICH_WORKAROUND);
15207 pci_dev_put(bridge);
15208 break;
15209 }
15210 }
15211 }
15212
15213 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15214 static struct tg3_dev_id {
15215 u32 vendor;
15216 u32 device;
15217 } bridge_chipsets[] = {
15218 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15219 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15220 { },
15221 };
15222 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15223 struct pci_dev *bridge = NULL;
15224
15225 while (pci_id->vendor != 0) {
15226 bridge = pci_get_device(pci_id->vendor,
15227 pci_id->device,
15228 bridge);
15229 if (!bridge) {
15230 pci_id++;
15231 continue;
15232 }
15233 if (bridge->subordinate &&
15234 (bridge->subordinate->number <=
15235 tp->pdev->bus->number) &&
15236 (bridge->subordinate->busn_res.end >=
15237 tp->pdev->bus->number)) {
15238 tg3_flag_set(tp, 5701_DMA_BUG);
15239 pci_dev_put(bridge);
15240 break;
15241 }
15242 }
15243 }
15244
15245 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15246 * DMA addresses > 40-bit. This bridge may have other additional
15247 * 57xx devices behind it in some 4-port NIC designs for example.
15248 * Any tg3 device found behind the bridge will also need the 40-bit
15249 * DMA workaround.
15250 */
15251 if (tg3_flag(tp, 5780_CLASS)) {
15252 tg3_flag_set(tp, 40BIT_DMA_BUG);
15253 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15254 } else {
15255 struct pci_dev *bridge = NULL;
15256
15257 do {
15258 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15259 PCI_DEVICE_ID_SERVERWORKS_EPB,
15260 bridge);
15261 if (bridge && bridge->subordinate &&
15262 (bridge->subordinate->number <=
15263 tp->pdev->bus->number) &&
15264 (bridge->subordinate->busn_res.end >=
15265 tp->pdev->bus->number)) {
15266 tg3_flag_set(tp, 40BIT_DMA_BUG);
15267 pci_dev_put(bridge);
15268 break;
15269 }
15270 } while (bridge);
15271 }
15272
15273 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15274 tg3_asic_rev(tp) == ASIC_REV_5714)
15275 tp->pdev_peer = tg3_find_peer(tp);
15276
15277 /* Determine TSO capabilities */
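	/* Newest first: HW_TSO_3 on 57765_PLUS parts, HW_TSO_2 on other
	 * 5755_PLUS parts and the 5906, HW_TSO_1 on remaining 5750_PLUS
	 * parts; the 5719 A0 gets none of these due to a hardware bug.
	 * Older chips fall back to firmware TSO (FW_TSO) and must load
	 * the TG3TSO/TG3TSO5 firmware images requested below.
	 */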
15278 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15279 ; /* Do nothing. HW bug. */
15280 else if (tg3_flag(tp, 57765_PLUS))
15281 tg3_flag_set(tp, HW_TSO_3);
15282 else if (tg3_flag(tp, 5755_PLUS) ||
15283 tg3_asic_rev(tp) == ASIC_REV_5906)
15284 tg3_flag_set(tp, HW_TSO_2);
15285 else if (tg3_flag(tp, 5750_PLUS)) {
15286 tg3_flag_set(tp, HW_TSO_1);
15287 tg3_flag_set(tp, TSO_BUG);
15288 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15289 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15290 tg3_flag_clear(tp, TSO_BUG);
15291 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15292 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15293 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15294 tg3_flag_set(tp, FW_TSO);
15295 tg3_flag_set(tp, TSO_BUG);
15296 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15297 tp->fw_needed = FIRMWARE_TG3TSO5;
15298 else
15299 tp->fw_needed = FIRMWARE_TG3TSO;
15300 }
15301
15302 /* Selectively allow TSO based on operating conditions */
15303 if (tg3_flag(tp, HW_TSO_1) ||
15304 tg3_flag(tp, HW_TSO_2) ||
15305 tg3_flag(tp, HW_TSO_3) ||
15306 tg3_flag(tp, FW_TSO)) {
15307 /* For firmware TSO, assume ASF is disabled.
15308 * We'll disable TSO later if we discover ASF
15309 * is enabled in tg3_get_eeprom_hw_cfg().
15310 */
15311 tg3_flag_set(tp, TSO_CAPABLE);
15312 } else {
15313 tg3_flag_clear(tp, TSO_CAPABLE);
15314 tg3_flag_clear(tp, TSO_BUG);
15315 tp->fw_needed = NULL;
15316 }
15317
15318 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15319 tp->fw_needed = FIRMWARE_TG3;
15320
15321 tp->irq_max = 1;
15322
15323 if (tg3_flag(tp, 5750_PLUS)) {
15324 tg3_flag_set(tp, SUPPORT_MSI);
15325 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15326 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15327 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15328 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15329 tp->pdev_peer == tp->pdev))
15330 tg3_flag_clear(tp, SUPPORT_MSI);
15331
15332 if (tg3_flag(tp, 5755_PLUS) ||
15333 tg3_asic_rev(tp) == ASIC_REV_5906) {
15334 tg3_flag_set(tp, 1SHOT_MSI);
15335 }
15336
15337 if (tg3_flag(tp, 57765_PLUS)) {
15338 tg3_flag_set(tp, SUPPORT_MSIX);
15339 tp->irq_max = TG3_IRQ_MAX_VECS;
15340 }
15341 }
15342
15343 tp->txq_max = 1;
15344 tp->rxq_max = 1;
15345 if (tp->irq_max > 1) {
15346 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15347 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15348
15349 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15350 tg3_asic_rev(tp) == ASIC_REV_5720)
15351 tp->txq_max = tp->irq_max - 1;
15352 }
15353
15354 if (tg3_flag(tp, 5755_PLUS) ||
15355 tg3_asic_rev(tp) == ASIC_REV_5906)
15356 tg3_flag_set(tp, SHORT_DMA_BUG);
15357
15358 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15359 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15360
15361 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15362 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15363 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15364 tg3_asic_rev(tp) == ASIC_REV_5762)
15365 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15366
15367 if (tg3_flag(tp, 57765_PLUS) &&
15368 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15369 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15370
15371 if (!tg3_flag(tp, 5705_PLUS) ||
15372 tg3_flag(tp, 5780_CLASS) ||
15373 tg3_flag(tp, USE_JUMBO_BDFLAG))
15374 tg3_flag_set(tp, JUMBO_CAPABLE);
15375
15376 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15377 &pci_state_reg);
15378
15379 if (pci_is_pcie(tp->pdev)) {
15380 u16 lnkctl;
15381
15382 tg3_flag_set(tp, PCI_EXPRESS);
15383
15384 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15385 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15386 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15387 tg3_flag_clear(tp, HW_TSO_2);
15388 tg3_flag_clear(tp, TSO_CAPABLE);
15389 }
15390 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15391 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15392 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15393 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15394 tg3_flag_set(tp, CLKREQ_BUG);
15395 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15396 tg3_flag_set(tp, L1PLLPD_EN);
15397 }
15398 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15399 /* BCM5785 devices are effectively PCIe devices, and should
15400 * follow PCIe codepaths, but do not have a PCIe capabilities
15401 * section.
15402 */
15403 tg3_flag_set(tp, PCI_EXPRESS);
15404 } else if (!tg3_flag(tp, 5705_PLUS) ||
15405 tg3_flag(tp, 5780_CLASS)) {
15406 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15407 if (!tp->pcix_cap) {
15408 dev_err(&tp->pdev->dev,
15409 "Cannot find PCI-X capability, aborting\n");
15410 return -EIO;
15411 }
15412
15413 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15414 tg3_flag_set(tp, PCIX_MODE);
15415 }
15416
15417 /* If we have an AMD 762 or VIA K8T800 chipset, write
15418 * reordering to the mailbox registers done by the host
15419 * controller can cause major trouble. We read back from
15420 * every mailbox register write to force the writes to be
15421 * posted to the chip in order.
15422 */
15423 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15424 !tg3_flag(tp, PCI_EXPRESS))
15425 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15426
15427 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15428 &tp->pci_cacheline_sz);
15429 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15430 &tp->pci_lat_timer);
15431 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15432 tp->pci_lat_timer < 64) {
15433 tp->pci_lat_timer = 64;
15434 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15435 tp->pci_lat_timer);
15436 }
15437
15438 /* Important! -- It is critical that the PCI-X hw workaround
15439 * situation is decided before the first MMIO register access.
15440 */
15441 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15442 /* 5700 BX chips need to have their TX producer index
15443 * mailboxes written twice to work around a bug.
15444 */
15445 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15446
15447 /* If we are in PCI-X mode, enable register write workaround.
15448 *
15449 * The workaround is to use indirect register accesses
15450 * for all chip writes not to mailbox registers.
15451 */
15452 if (tg3_flag(tp, PCIX_MODE)) {
15453 u32 pm_reg;
15454
15455 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15456
15457 /* The chip can have its power management PCI config
15458 * space registers clobbered due to this bug.
15459 * So explicitly force the chip into D0 here.
15460 */
15461 pci_read_config_dword(tp->pdev,
15462 tp->pm_cap + PCI_PM_CTRL,
15463 &pm_reg);
15464 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15465 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15466 pci_write_config_dword(tp->pdev,
15467 tp->pm_cap + PCI_PM_CTRL,
15468 pm_reg);
15469
15470 /* Also, force SERR#/PERR# in PCI command. */
15471 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15472 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15473 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15474 }
15475 }
15476
15477 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15478 tg3_flag_set(tp, PCI_HIGH_SPEED);
15479 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15480 tg3_flag_set(tp, PCI_32BIT);
15481
15482 /* Chip-specific fixup from Broadcom driver */
15483 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15484 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15485 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15486 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15487 }
15488
15489 /* Default fast path register access methods */
15490 tp->read32 = tg3_read32;
15491 tp->write32 = tg3_write32;
15492 tp->read32_mbox = tg3_read32;
15493 tp->write32_mbox = tg3_write32;
15494 tp->write32_tx_mbox = tg3_write32;
15495 tp->write32_rx_mbox = tg3_write32;
15496
15497 /* Various workaround register access methods */
15498 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15499 tp->write32 = tg3_write_indirect_reg32;
15500 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15501 (tg3_flag(tp, PCI_EXPRESS) &&
15502 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15503 /*
15504 		 * Back-to-back register writes can cause problems on these
15505 		 * chips; the workaround is to read back all register writes
15506 		 * except those to mailbox registers.
15507 *
15508 * See tg3_write_indirect_reg32().
15509 */
15510 tp->write32 = tg3_write_flush_reg32;
15511 }
15512
15513 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15514 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15515 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15516 tp->write32_rx_mbox = tg3_write_flush_reg32;
15517 }
15518
15519 if (tg3_flag(tp, ICH_WORKAROUND)) {
15520 tp->read32 = tg3_read_indirect_reg32;
15521 tp->write32 = tg3_write_indirect_reg32;
15522 tp->read32_mbox = tg3_read_indirect_mbox;
15523 tp->write32_mbox = tg3_write_indirect_mbox;
15524 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15525 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15526
15527 iounmap(tp->regs);
15528 tp->regs = NULL;
15529
15530 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15531 pci_cmd &= ~PCI_COMMAND_MEMORY;
15532 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15533 }
15534 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15535 tp->read32_mbox = tg3_read32_mbox_5906;
15536 tp->write32_mbox = tg3_write32_mbox_5906;
15537 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15538 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15539 }
15540
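	/* If register writes have to take the indirect path, SRAM accesses
	 * must likewise go through the PCI config space memory window
	 * rather than direct MMIO.
	 */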
15541 if (tp->write32 == tg3_write_indirect_reg32 ||
15542 (tg3_flag(tp, PCIX_MODE) &&
15543 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15544 tg3_asic_rev(tp) == ASIC_REV_5701)))
15545 tg3_flag_set(tp, SRAM_USE_CONFIG);
15546
15547 /* The memory arbiter has to be enabled in order for SRAM accesses
15548 * to succeed. Normally on powerup the tg3 chip firmware will make
15549 * sure it is enabled, but other entities such as system netboot
15550 * code might disable it.
15551 */
15552 val = tr32(MEMARB_MODE);
15553 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15554
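	/* Work out which PCI function this port is. The devfn value is
	 * correct for most chips, but dual-MAC PCI-X devices and the
	 * 5717/5719/5720 report the function through chip registers instead.
	 */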
15555 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15556 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15557 tg3_flag(tp, 5780_CLASS)) {
15558 if (tg3_flag(tp, PCIX_MODE)) {
15559 pci_read_config_dword(tp->pdev,
15560 tp->pcix_cap + PCI_X_STATUS,
15561 &val);
15562 tp->pci_fn = val & 0x7;
15563 }
15564 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15565 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15566 tg3_asic_rev(tp) == ASIC_REV_5720) {
15567 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15568 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15569 val = tr32(TG3_CPMU_STATUS);
15570
15571 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15572 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15573 else
15574 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15575 TG3_CPMU_STATUS_FSHFT_5719;
15576 }
15577
15578 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15579 tp->write32_tx_mbox = tg3_write_flush_reg32;
15580 tp->write32_rx_mbox = tg3_write_flush_reg32;
15581 }
15582
15583 /* Get eeprom hw config before calling tg3_set_power_state().
15584 * In particular, the TG3_FLAG_IS_NIC flag must be
15585 * determined before calling tg3_set_power_state() so that
15586 * we know whether or not to switch out of Vaux power.
15587 * When the flag is set, it means that GPIO1 is used for eeprom
15588 * write protect and also implies that it is a LOM where GPIOs
15589 * are not used to switch power.
15590 */
15591 tg3_get_eeprom_hw_cfg(tp);
15592
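	/* Firmware-based TSO runs on the same internal CPU as the ASF
	 * management firmware, so the two cannot coexist; drop TSO support
	 * and the firmware request when ASF is active.
	 */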
15593 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15594 tg3_flag_clear(tp, TSO_CAPABLE);
15595 tg3_flag_clear(tp, TSO_BUG);
15596 tp->fw_needed = NULL;
15597 }
15598
15599 if (tg3_flag(tp, ENABLE_APE)) {
15600 /* Allow reads and writes to the
15601 * APE register and memory space.
15602 */
15603 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15604 PCISTATE_ALLOW_APE_SHMEM_WR |
15605 PCISTATE_ALLOW_APE_PSPACE_WR;
15606 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15607 pci_state_reg);
15608
15609 tg3_ape_lock_init(tp);
15610 }
15611
15612 /* Set up tp->grc_local_ctrl before calling
15613 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15614 * will bring 5700's external PHY out of reset.
15615 * It is also used as eeprom write protect on LOMs.
15616 */
15617 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15618 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15619 tg3_flag(tp, EEPROM_WRITE_PROT))
15620 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15621 GRC_LCLCTRL_GPIO_OUTPUT1);
15622 /* Unused GPIO3 must be driven as output on 5752 because there
15623 * are no pull-up resistors on unused GPIO pins.
15624 */
15625 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15626 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15627
15628 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15629 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15630 tg3_flag(tp, 57765_CLASS))
15631 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15632
15633 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15634 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15635 /* Turn off the debug UART. */
15636 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15637 if (tg3_flag(tp, IS_NIC))
15638 /* Keep VMain power. */
15639 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15640 GRC_LCLCTRL_GPIO_OUTPUT0;
15641 }
15642
15643 if (tg3_asic_rev(tp) == ASIC_REV_5762)
15644 tp->grc_local_ctrl |=
15645 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15646
15647 /* Switch out of Vaux if it is a NIC */
15648 tg3_pwrsrc_switch_to_vmain(tp);
15649
15650 /* Derive initial jumbo mode from MTU assigned in
15651 * ether_setup() via the alloc_etherdev() call
15652 */
15653 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15654 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15655
15656 /* Determine WakeOnLan speed to use. */
15657 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15658 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15659 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15660 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15661 tg3_flag_clear(tp, WOL_SPEED_100MB);
15662 } else {
15663 tg3_flag_set(tp, WOL_SPEED_100MB);
15664 }
15665
15666 if (tg3_asic_rev(tp) == ASIC_REV_5906)
15667 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15668
15669 /* A few boards don't want Ethernet@WireSpeed phy feature */
15670 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15671 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15672 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15673 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15674 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15675 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15676 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15677
15678 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15679 tg3_chip_rev(tp) == CHIPREV_5704_AX)
15680 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15681 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15682 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15683
15684 if (tg3_flag(tp, 5705_PLUS) &&
15685 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15686 tg3_asic_rev(tp) != ASIC_REV_5785 &&
15687 tg3_asic_rev(tp) != ASIC_REV_57780 &&
15688 !tg3_flag(tp, 57765_PLUS)) {
15689 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15690 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15691 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15692 tg3_asic_rev(tp) == ASIC_REV_5761) {
15693 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15694 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15695 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15696 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15697 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15698 } else
15699 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15700 }
15701
15702 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15703 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15704 tp->phy_otp = tg3_read_otp_phycfg(tp);
15705 if (tp->phy_otp == 0)
15706 tp->phy_otp = TG3_OTP_DEFAULT;
15707 }
15708
15709 if (tg3_flag(tp, CPMU_PRESENT))
15710 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15711 else
15712 tp->mi_mode = MAC_MI_MODE_BASE;
15713
15714 tp->coalesce_mode = 0;
15715 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15716 tg3_chip_rev(tp) != CHIPREV_5700_BX)
15717 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15718
15719 /* Set these bits to enable statistics workaround. */
15720 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15721 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15722 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15723 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15724 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15725 }
15726
15727 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15728 tg3_asic_rev(tp) == ASIC_REV_57780)
15729 tg3_flag_set(tp, USE_PHYLIB);
15730
15731 err = tg3_mdio_init(tp);
15732 if (err)
15733 return err;
15734
15735 /* Initialize data/descriptor byte/word swapping. */
15736 val = tr32(GRC_MODE);
15737 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15738 tg3_asic_rev(tp) == ASIC_REV_5762)
15739 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15740 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15741 GRC_MODE_B2HRX_ENABLE |
15742 GRC_MODE_HTX2B_ENABLE |
15743 GRC_MODE_HOST_STACKUP);
15744 else
15745 val &= GRC_MODE_HOST_STACKUP;
15746
15747 tw32(GRC_MODE, val | tp->grc_mode);
15748
15749 tg3_switch_clocks(tp);
15750
15751 /* Clear this out for sanity. */
15752 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15753
15754 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15755 &pci_state_reg);
15756 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15757 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15758 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15759 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15760 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15761 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15762 void __iomem *sram_base;
15763
15764 /* Write some dummy words into the SRAM status block
15765 			 * area and see whether they read back correctly. If the return
15766 * value is bad, force enable the PCIX workaround.
15767 */
15768 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15769
15770 writel(0x00000000, sram_base);
15771 writel(0x00000000, sram_base + 4);
15772 writel(0xffffffff, sram_base + 4);
15773 if (readl(sram_base) != 0x00000000)
15774 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15775 }
15776 }
15777
15778 udelay(50);
15779 tg3_nvram_init(tp);
15780
15781 grc_misc_cfg = tr32(GRC_MISC_CFG);
15782 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15783
15784 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15785 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15786 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15787 tg3_flag_set(tp, IS_5788);
15788
15789 if (!tg3_flag(tp, IS_5788) &&
15790 tg3_asic_rev(tp) != ASIC_REV_5700)
15791 tg3_flag_set(tp, TAGGED_STATUS);
15792 if (tg3_flag(tp, TAGGED_STATUS)) {
15793 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15794 HOSTCC_MODE_CLRTICK_TXBD);
15795
15796 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15797 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15798 tp->misc_host_ctrl);
15799 }
15800
15801 /* Preserve the APE MAC_MODE bits */
15802 if (tg3_flag(tp, ENABLE_APE))
15803 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15804 else
15805 tp->mac_mode = 0;
15806
15807 if (tg3_10_100_only_device(tp, ent))
15808 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15809
15810 err = tg3_phy_probe(tp);
15811 if (err) {
15812 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15813 /* ... but do not return immediately ... */
15814 tg3_mdio_fini(tp);
15815 }
15816
15817 tg3_read_vpd(tp);
15818 tg3_read_fw_ver(tp);
15819
15820 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15821 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15822 } else {
15823 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15824 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15825 else
15826 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15827 }
15828
15829 /* 5700 {AX,BX} chips have a broken status block link
15830 * change bit implementation, so we must use the
15831 * status register in those cases.
15832 */
15833 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15834 tg3_flag_set(tp, USE_LINKCHG_REG);
15835 else
15836 tg3_flag_clear(tp, USE_LINKCHG_REG);
15837
15838 	/* The led_ctrl is set during tg3_phy_probe; here we might
15839 * have to force the link status polling mechanism based
15840 * upon subsystem IDs.
15841 */
15842 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15843 tg3_asic_rev(tp) == ASIC_REV_5701 &&
15844 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15845 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15846 tg3_flag_set(tp, USE_LINKCHG_REG);
15847 }
15848
15849 /* For all SERDES we poll the MAC status register. */
15850 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15851 tg3_flag_set(tp, POLL_SERDES);
15852 else
15853 tg3_flag_clear(tp, POLL_SERDES);
15854
15855 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15856 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15857 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
15858 tg3_flag(tp, PCIX_MODE)) {
15859 tp->rx_offset = NET_SKB_PAD;
15860 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15861 tp->rx_copy_thresh = ~(u16)0;
15862 #endif
15863 }
15864
15865 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15866 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15867 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15868
15869 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15870
15871 /* Increment the rx prod index on the rx std ring by at most
15872 	 * 8 for these chips to work around hw errata.
15873 */
15874 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15875 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15876 tg3_asic_rev(tp) == ASIC_REV_5755)
15877 tp->rx_std_max_post = 8;
15878
15879 if (tg3_flag(tp, ASPM_WORKAROUND))
15880 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15881 PCIE_PWR_MGMT_L1_THRESH_MSK;
15882
15883 return err;
15884 }
15885
15886 #ifdef CONFIG_SPARC
15887 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15888 {
15889 struct net_device *dev = tp->dev;
15890 struct pci_dev *pdev = tp->pdev;
15891 struct device_node *dp = pci_device_to_OF_node(pdev);
15892 const unsigned char *addr;
15893 int len;
15894
15895 addr = of_get_property(dp, "local-mac-address", &len);
15896 if (addr && len == 6) {
15897 memcpy(dev->dev_addr, addr, 6);
15898 return 0;
15899 }
15900 return -ENODEV;
15901 }
15902
15903 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15904 {
15905 struct net_device *dev = tp->dev;
15906
15907 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15908 return 0;
15909 }
15910 #endif
15911
15912 static int tg3_get_device_address(struct tg3 *tp)
15913 {
15914 struct net_device *dev = tp->dev;
15915 u32 hi, lo, mac_offset;
15916 int addr_ok = 0;
15917 int err;
15918
15919 #ifdef CONFIG_SPARC
15920 if (!tg3_get_macaddr_sparc(tp))
15921 return 0;
15922 #endif
15923
15924 if (tg3_flag(tp, IS_SSB_CORE)) {
15925 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
15926 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
15927 return 0;
15928 }
15929
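	/* NVRAM offset of the MAC address. 0x7c is the default; the second
	 * port of dual-MAC devices, the upper functions of 5717-class
	 * devices, and the 5906 each keep it elsewhere.
	 */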
15930 mac_offset = 0x7c;
15931 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15932 tg3_flag(tp, 5780_CLASS)) {
15933 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15934 mac_offset = 0xcc;
15935 if (tg3_nvram_lock(tp))
15936 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15937 else
15938 tg3_nvram_unlock(tp);
15939 } else if (tg3_flag(tp, 5717_PLUS)) {
15940 if (tp->pci_fn & 1)
15941 mac_offset = 0xcc;
15942 if (tp->pci_fn > 1)
15943 mac_offset += 0x18c;
15944 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15945 mac_offset = 0x10;
15946
15947 /* First try to get it from MAC address mailbox. */
15948 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
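	/* The upper 16 bits hold the ASCII signature "HK" (0x484b) when the
	 * bootcode has deposited a MAC address in the mailbox.
	 */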
15949 if ((hi >> 16) == 0x484b) {
15950 dev->dev_addr[0] = (hi >> 8) & 0xff;
15951 dev->dev_addr[1] = (hi >> 0) & 0xff;
15952
15953 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15954 dev->dev_addr[2] = (lo >> 24) & 0xff;
15955 dev->dev_addr[3] = (lo >> 16) & 0xff;
15956 dev->dev_addr[4] = (lo >> 8) & 0xff;
15957 dev->dev_addr[5] = (lo >> 0) & 0xff;
15958
15959 /* Some old bootcode may report a 0 MAC address in SRAM */
15960 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15961 }
15962 if (!addr_ok) {
15963 /* Next, try NVRAM. */
15964 if (!tg3_flag(tp, NO_NVRAM) &&
15965 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15966 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15967 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15968 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15969 }
15970 /* Finally just fetch it out of the MAC control regs. */
15971 else {
15972 hi = tr32(MAC_ADDR_0_HIGH);
15973 lo = tr32(MAC_ADDR_0_LOW);
15974
15975 dev->dev_addr[5] = lo & 0xff;
15976 dev->dev_addr[4] = (lo >> 8) & 0xff;
15977 dev->dev_addr[3] = (lo >> 16) & 0xff;
15978 dev->dev_addr[2] = (lo >> 24) & 0xff;
15979 dev->dev_addr[1] = hi & 0xff;
15980 dev->dev_addr[0] = (hi >> 8) & 0xff;
15981 }
15982 }
15983
15984 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15985 #ifdef CONFIG_SPARC
15986 if (!tg3_get_default_macaddr_sparc(tp))
15987 return 0;
15988 #endif
15989 return -EINVAL;
15990 }
15991 return 0;
15992 }
15993
15994 #define BOUNDARY_SINGLE_CACHELINE 1
15995 #define BOUNDARY_MULTI_CACHELINE 2
15996
15997 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15998 {
15999 int cacheline_size;
16000 u8 byte;
16001 int goal;
16002
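	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of zero
	 * means it was never programmed, so assume the worst-case 1024 bytes.
	 */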
16003 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16004 if (byte == 0)
16005 cacheline_size = 1024;
16006 else
16007 cacheline_size = (int) byte * 4;
16008
16009 /* On 5703 and later chips, the boundary bits have no
16010 * effect.
16011 */
16012 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16013 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16014 !tg3_flag(tp, PCI_EXPRESS))
16015 goto out;
16016
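	/* Choose a DMA boundary goal suited to the PCI host bridge behavior
	 * typical of the architecture we are built for.
	 */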
16017 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16018 goal = BOUNDARY_MULTI_CACHELINE;
16019 #else
16020 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16021 goal = BOUNDARY_SINGLE_CACHELINE;
16022 #else
16023 goal = 0;
16024 #endif
16025 #endif
16026
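	/* On 57765+ parts the only control is a single bit that disables
	 * cache-line alignment; set it only when no boundary goal applies.
	 */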
16027 if (tg3_flag(tp, 57765_PLUS)) {
16028 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16029 goto out;
16030 }
16031
16032 if (!goal)
16033 goto out;
16034
16035 /* PCI controllers on most RISC systems tend to disconnect
16036 * when a device tries to burst across a cache-line boundary.
16037 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16038 *
16039 * Unfortunately, for PCI-E there are only limited
16040 * write-side controls for this, and thus for reads
16041 * we will still get the disconnects. We'll also waste
16042 * these PCI cycles for both read and write for chips
16043 * other than 5700 and 5701 which do not implement the
16044 * boundary bits.
16045 */
16046 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16047 switch (cacheline_size) {
16048 case 16:
16049 case 32:
16050 case 64:
16051 case 128:
16052 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16053 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16054 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16055 } else {
16056 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16057 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16058 }
16059 break;
16060
16061 case 256:
16062 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16063 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16064 break;
16065
16066 default:
16067 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16068 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16069 break;
16070 }
16071 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16072 switch (cacheline_size) {
16073 case 16:
16074 case 32:
16075 case 64:
16076 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16077 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16078 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16079 break;
16080 }
16081 /* fallthrough */
16082 case 128:
16083 default:
16084 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16085 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16086 break;
16087 }
16088 } else {
16089 switch (cacheline_size) {
16090 case 16:
16091 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16092 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16093 DMA_RWCTRL_WRITE_BNDRY_16);
16094 break;
16095 }
16096 /* fallthrough */
16097 case 32:
16098 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16099 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16100 DMA_RWCTRL_WRITE_BNDRY_32);
16101 break;
16102 }
16103 /* fallthrough */
16104 case 64:
16105 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16106 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16107 DMA_RWCTRL_WRITE_BNDRY_64);
16108 break;
16109 }
16110 /* fallthrough */
16111 case 128:
16112 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16113 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16114 DMA_RWCTRL_WRITE_BNDRY_128);
16115 break;
16116 }
16117 /* fallthrough */
16118 case 256:
16119 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16120 DMA_RWCTRL_WRITE_BNDRY_256);
16121 break;
16122 case 512:
16123 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16124 DMA_RWCTRL_WRITE_BNDRY_512);
16125 break;
16126 case 1024:
16127 default:
16128 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16129 DMA_RWCTRL_WRITE_BNDRY_1024);
16130 break;
16131 }
16132 }
16133
16134 out:
16135 return val;
16136 }
16137
16138 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16139 int size, int to_device)
16140 {
16141 struct tg3_internal_buffer_desc test_desc;
16142 u32 sram_dma_descs;
16143 int i, ret;
16144
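	/* Stage a single internal buffer descriptor in NIC SRAM, hand it to
	 * the read or write DMA engine, then poll the matching completion
	 * FIFO for the descriptor to come back.
	 */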
16145 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16146
16147 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16148 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16149 tw32(RDMAC_STATUS, 0);
16150 tw32(WDMAC_STATUS, 0);
16151
16152 tw32(BUFMGR_MODE, 0);
16153 tw32(FTQ_RESET, 0);
16154
16155 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16156 test_desc.addr_lo = buf_dma & 0xffffffff;
16157 test_desc.nic_mbuf = 0x00002100;
16158 test_desc.len = size;
16159
16160 /*
16161 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16162 	 * the *second* time the tg3 driver was loaded after an
16163 * initial scan.
16164 *
16165 * Broadcom tells me:
16166 * ...the DMA engine is connected to the GRC block and a DMA
16167 * reset may affect the GRC block in some unpredictable way...
16168 * The behavior of resets to individual blocks has not been tested.
16169 *
16170 * Broadcom noted the GRC reset will also reset all sub-components.
16171 */
16172 if (to_device) {
16173 test_desc.cqid_sqid = (13 << 8) | 2;
16174
16175 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16176 udelay(40);
16177 } else {
16178 test_desc.cqid_sqid = (16 << 8) | 7;
16179
16180 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16181 udelay(40);
16182 }
16183 test_desc.flags = 0x00000005;
16184
16185 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16186 u32 val;
16187
16188 val = *(((u32 *)&test_desc) + i);
16189 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16190 sram_dma_descs + (i * sizeof(u32)));
16191 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16192 }
16193 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16194
16195 if (to_device)
16196 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16197 else
16198 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16199
16200 ret = -ENODEV;
16201 for (i = 0; i < 40; i++) {
16202 u32 val;
16203
16204 if (to_device)
16205 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16206 else
16207 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16208 if ((val & 0xffff) == sram_dma_descs) {
16209 ret = 0;
16210 break;
16211 }
16212
16213 udelay(100);
16214 }
16215
16216 return ret;
16217 }
16218
16219 #define TEST_BUFFER_SIZE 0x2000
16220
16221 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16222 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16223 { },
16224 };
16225
16226 static int tg3_test_dma(struct tg3 *tp)
16227 {
16228 dma_addr_t buf_dma;
16229 u32 *buf, saved_dma_rwctrl;
16230 int ret = 0;
16231
16232 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16233 &buf_dma, GFP_KERNEL);
16234 if (!buf) {
16235 ret = -ENOMEM;
16236 goto out_nofree;
16237 }
16238
16239 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16240 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16241
16242 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16243
16244 if (tg3_flag(tp, 57765_PLUS))
16245 goto out;
16246
16247 if (tg3_flag(tp, PCI_EXPRESS)) {
16248 /* DMA read watermark not used on PCIE */
16249 tp->dma_rwctrl |= 0x00180000;
16250 } else if (!tg3_flag(tp, PCIX_MODE)) {
16251 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16252 tg3_asic_rev(tp) == ASIC_REV_5750)
16253 tp->dma_rwctrl |= 0x003f0000;
16254 else
16255 tp->dma_rwctrl |= 0x003f000f;
16256 } else {
16257 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16258 tg3_asic_rev(tp) == ASIC_REV_5704) {
16259 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16260 u32 read_water = 0x7;
16261
16262 /* If the 5704 is behind the EPB bridge, we can
16263 * do the less restrictive ONE_DMA workaround for
16264 * better performance.
16265 */
16266 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16267 tg3_asic_rev(tp) == ASIC_REV_5704)
16268 tp->dma_rwctrl |= 0x8000;
16269 else if (ccval == 0x6 || ccval == 0x7)
16270 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16271
16272 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16273 read_water = 4;
16274 /* Set bit 23 to enable PCIX hw bug fix */
16275 tp->dma_rwctrl |=
16276 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16277 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16278 (1 << 23);
16279 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16280 /* 5780 always in PCIX mode */
16281 tp->dma_rwctrl |= 0x00144000;
16282 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16283 /* 5714 always in PCIX mode */
16284 tp->dma_rwctrl |= 0x00148000;
16285 } else {
16286 tp->dma_rwctrl |= 0x001b000f;
16287 }
16288 }
16289 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16290 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16291
16292 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16293 tg3_asic_rev(tp) == ASIC_REV_5704)
16294 tp->dma_rwctrl &= 0xfffffff0;
16295
16296 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16297 tg3_asic_rev(tp) == ASIC_REV_5701) {
16298 /* Remove this if it causes problems for some boards. */
16299 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16300
16301 /* On 5700/5701 chips, we need to set this bit.
16302 * Otherwise the chip will issue cacheline transactions
16303 * to streamable DMA memory with not all the byte
16304 * enables turned on. This is an error on several
16305 * RISC PCI controllers, in particular sparc64.
16306 *
16307 * On 5703/5704 chips, this bit has been reassigned
16308 * a different meaning. In particular, it is used
16309 * on those chips to enable a PCI-X workaround.
16310 */
16311 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16312 }
16313
16314 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16315
16316 #if 0
16317 /* Unneeded, already done by tg3_get_invariants. */
16318 tg3_switch_clocks(tp);
16319 #endif
16320
16321 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16322 tg3_asic_rev(tp) != ASIC_REV_5701)
16323 goto out;
16324
16325 /* It is best to perform DMA test with maximum write burst size
16326 * to expose the 5700/5701 write DMA bug.
16327 */
16328 saved_dma_rwctrl = tp->dma_rwctrl;
16329 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16330 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16331
16332 while (1) {
16333 u32 *p = buf, i;
16334
16335 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16336 p[i] = i;
16337
16338 /* Send the buffer to the chip. */
16339 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16340 if (ret) {
16341 dev_err(&tp->pdev->dev,
16342 "%s: Buffer write failed. err = %d\n",
16343 __func__, ret);
16344 break;
16345 }
16346
16347 #if 0
16348 /* validate data reached card RAM correctly. */
16349 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16350 u32 val;
16351 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16352 if (le32_to_cpu(val) != p[i]) {
16353 dev_err(&tp->pdev->dev,
16354 "%s: Buffer corrupted on device! "
16355 "(%d != %d)\n", __func__, val, i);
16356 /* ret = -ENODEV here? */
16357 }
16358 p[i] = 0;
16359 }
16360 #endif
16361 /* Now read it back. */
16362 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16363 if (ret) {
16364 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16365 "err = %d\n", __func__, ret);
16366 break;
16367 }
16368
16369 /* Verify it. */
16370 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16371 if (p[i] == i)
16372 continue;
16373
16374 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16375 DMA_RWCTRL_WRITE_BNDRY_16) {
16376 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16377 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16378 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16379 break;
16380 } else {
16381 dev_err(&tp->pdev->dev,
16382 "%s: Buffer corrupted on read back! "
16383 "(%d != %d)\n", __func__, p[i], i);
16384 ret = -ENODEV;
16385 goto out;
16386 }
16387 }
16388
16389 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16390 /* Success. */
16391 ret = 0;
16392 break;
16393 }
16394 }
16395 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16396 DMA_RWCTRL_WRITE_BNDRY_16) {
16397 		/* DMA test passed without adjusting the DMA boundary;
16398 		 * now look for chipsets that are known to expose the
16399 * DMA bug without failing the test.
16400 */
16401 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16402 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16403 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16404 } else {
16405 /* Safe to use the calculated DMA boundary. */
16406 tp->dma_rwctrl = saved_dma_rwctrl;
16407 }
16408
16409 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16410 }
16411
16412 out:
16413 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16414 out_nofree:
16415 return ret;
16416 }
16417
16418 static void tg3_init_bufmgr_config(struct tg3 *tp)
16419 {
16420 if (tg3_flag(tp, 57765_PLUS)) {
16421 tp->bufmgr_config.mbuf_read_dma_low_water =
16422 DEFAULT_MB_RDMA_LOW_WATER_5705;
16423 tp->bufmgr_config.mbuf_mac_rx_low_water =
16424 DEFAULT_MB_MACRX_LOW_WATER_57765;
16425 tp->bufmgr_config.mbuf_high_water =
16426 DEFAULT_MB_HIGH_WATER_57765;
16427
16428 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16429 DEFAULT_MB_RDMA_LOW_WATER_5705;
16430 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16431 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16432 tp->bufmgr_config.mbuf_high_water_jumbo =
16433 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16434 } else if (tg3_flag(tp, 5705_PLUS)) {
16435 tp->bufmgr_config.mbuf_read_dma_low_water =
16436 DEFAULT_MB_RDMA_LOW_WATER_5705;
16437 tp->bufmgr_config.mbuf_mac_rx_low_water =
16438 DEFAULT_MB_MACRX_LOW_WATER_5705;
16439 tp->bufmgr_config.mbuf_high_water =
16440 DEFAULT_MB_HIGH_WATER_5705;
16441 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16442 tp->bufmgr_config.mbuf_mac_rx_low_water =
16443 DEFAULT_MB_MACRX_LOW_WATER_5906;
16444 tp->bufmgr_config.mbuf_high_water =
16445 DEFAULT_MB_HIGH_WATER_5906;
16446 }
16447
16448 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16449 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16450 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16451 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16452 tp->bufmgr_config.mbuf_high_water_jumbo =
16453 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16454 } else {
16455 tp->bufmgr_config.mbuf_read_dma_low_water =
16456 DEFAULT_MB_RDMA_LOW_WATER;
16457 tp->bufmgr_config.mbuf_mac_rx_low_water =
16458 DEFAULT_MB_MACRX_LOW_WATER;
16459 tp->bufmgr_config.mbuf_high_water =
16460 DEFAULT_MB_HIGH_WATER;
16461
16462 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16463 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16464 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16465 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16466 tp->bufmgr_config.mbuf_high_water_jumbo =
16467 DEFAULT_MB_HIGH_WATER_JUMBO;
16468 }
16469
16470 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16471 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16472 }
16473
16474 static char *tg3_phy_string(struct tg3 *tp)
16475 {
16476 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16477 case TG3_PHY_ID_BCM5400: return "5400";
16478 case TG3_PHY_ID_BCM5401: return "5401";
16479 case TG3_PHY_ID_BCM5411: return "5411";
16480 case TG3_PHY_ID_BCM5701: return "5701";
16481 case TG3_PHY_ID_BCM5703: return "5703";
16482 case TG3_PHY_ID_BCM5704: return "5704";
16483 case TG3_PHY_ID_BCM5705: return "5705";
16484 case TG3_PHY_ID_BCM5750: return "5750";
16485 case TG3_PHY_ID_BCM5752: return "5752";
16486 case TG3_PHY_ID_BCM5714: return "5714";
16487 case TG3_PHY_ID_BCM5780: return "5780";
16488 case TG3_PHY_ID_BCM5755: return "5755";
16489 case TG3_PHY_ID_BCM5787: return "5787";
16490 case TG3_PHY_ID_BCM5784: return "5784";
16491 case TG3_PHY_ID_BCM5756: return "5722/5756";
16492 case TG3_PHY_ID_BCM5906: return "5906";
16493 case TG3_PHY_ID_BCM5761: return "5761";
16494 case TG3_PHY_ID_BCM5718C: return "5718C";
16495 case TG3_PHY_ID_BCM5718S: return "5718S";
16496 case TG3_PHY_ID_BCM57765: return "57765";
16497 case TG3_PHY_ID_BCM5719C: return "5719C";
16498 case TG3_PHY_ID_BCM5720C: return "5720C";
16499 case TG3_PHY_ID_BCM5762: return "5762C";
16500 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16501 case 0: return "serdes";
16502 default: return "unknown";
16503 }
16504 }
16505
16506 static char *tg3_bus_string(struct tg3 *tp, char *str)
16507 {
16508 if (tg3_flag(tp, PCI_EXPRESS)) {
16509 strcpy(str, "PCI Express");
16510 return str;
16511 } else if (tg3_flag(tp, PCIX_MODE)) {
16512 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16513
16514 strcpy(str, "PCIX:");
16515
16516 if ((clock_ctrl == 7) ||
16517 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16518 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16519 strcat(str, "133MHz");
16520 else if (clock_ctrl == 0)
16521 strcat(str, "33MHz");
16522 else if (clock_ctrl == 2)
16523 strcat(str, "50MHz");
16524 else if (clock_ctrl == 4)
16525 strcat(str, "66MHz");
16526 else if (clock_ctrl == 6)
16527 strcat(str, "100MHz");
16528 } else {
16529 strcpy(str, "PCI:");
16530 if (tg3_flag(tp, PCI_HIGH_SPEED))
16531 strcat(str, "66MHz");
16532 else
16533 strcat(str, "33MHz");
16534 }
16535 if (tg3_flag(tp, PCI_32BIT))
16536 strcat(str, ":32-bit");
16537 else
16538 strcat(str, ":64-bit");
16539 return str;
16540 }
16541
16542 static void tg3_init_coal(struct tg3 *tp)
16543 {
16544 struct ethtool_coalesce *ec = &tp->coal;
16545
16546 memset(ec, 0, sizeof(*ec));
16547 ec->cmd = ETHTOOL_GCOALESCE;
16548 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16549 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16550 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16551 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16552 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16553 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16554 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16555 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16556 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16557
16558 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16559 HOSTCC_MODE_CLRTICK_TXBD)) {
16560 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16561 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16562 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16563 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16564 }
16565
16566 if (tg3_flag(tp, 5705_PLUS)) {
16567 ec->rx_coalesce_usecs_irq = 0;
16568 ec->tx_coalesce_usecs_irq = 0;
16569 ec->stats_block_coalesce_usecs = 0;
16570 }
16571 }
16572
16573 static int tg3_init_one(struct pci_dev *pdev,
16574 const struct pci_device_id *ent)
16575 {
16576 struct net_device *dev;
16577 struct tg3 *tp;
16578 int i, err, pm_cap;
16579 u32 sndmbx, rcvmbx, intmbx;
16580 char str[40];
16581 u64 dma_mask, persist_dma_mask;
16582 netdev_features_t features = 0;
16583
16584 printk_once(KERN_INFO "%s\n", version);
16585
16586 err = pci_enable_device(pdev);
16587 if (err) {
16588 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16589 return err;
16590 }
16591
16592 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16593 if (err) {
16594 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16595 goto err_out_disable_pdev;
16596 }
16597
16598 pci_set_master(pdev);
16599
16600 /* Find power-management capability. */
16601 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16602 if (pm_cap == 0) {
16603 dev_err(&pdev->dev,
16604 "Cannot find Power Management capability, aborting\n");
16605 err = -EIO;
16606 goto err_out_free_res;
16607 }
16608
16609 err = pci_set_power_state(pdev, PCI_D0);
16610 if (err) {
16611 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16612 goto err_out_free_res;
16613 }
16614
16615 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16616 if (!dev) {
16617 err = -ENOMEM;
16618 goto err_out_power_down;
16619 }
16620
16621 SET_NETDEV_DEV(dev, &pdev->dev);
16622
16623 tp = netdev_priv(dev);
16624 tp->pdev = pdev;
16625 tp->dev = dev;
16626 tp->pm_cap = pm_cap;
16627 tp->rx_mode = TG3_DEF_RX_MODE;
16628 tp->tx_mode = TG3_DEF_TX_MODE;
16629 tp->irq_sync = 1;
16630
16631 if (tg3_debug > 0)
16632 tp->msg_enable = tg3_debug;
16633 else
16634 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16635
16636 if (pdev_is_ssb_gige_core(pdev)) {
16637 tg3_flag_set(tp, IS_SSB_CORE);
16638 if (ssb_gige_must_flush_posted_writes(pdev))
16639 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16640 if (ssb_gige_one_dma_at_once(pdev))
16641 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16642 if (ssb_gige_have_roboswitch(pdev))
16643 tg3_flag_set(tp, ROBOSWITCH);
16644 if (ssb_gige_is_rgmii(pdev))
16645 tg3_flag_set(tp, RGMII_MODE);
16646 }
16647
16648 /* The word/byte swap controls here control register access byte
16649 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16650 * setting below.
16651 */
16652 tp->misc_host_ctrl =
16653 MISC_HOST_CTRL_MASK_PCI_INT |
16654 MISC_HOST_CTRL_WORD_SWAP |
16655 MISC_HOST_CTRL_INDIR_ACCESS |
16656 MISC_HOST_CTRL_PCISTATE_RW;
16657
16658 /* The NONFRM (non-frame) byte/word swap controls take effect
16659 * on descriptor entries, anything which isn't packet data.
16660 *
16661 * The StrongARM chips on the board (one for tx, one for rx)
16662 * are running in big-endian mode.
16663 */
16664 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16665 GRC_MODE_WSWAP_NONFRM_DATA);
16666 #ifdef __BIG_ENDIAN
16667 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16668 #endif
16669 spin_lock_init(&tp->lock);
16670 spin_lock_init(&tp->indirect_lock);
16671 INIT_WORK(&tp->reset_task, tg3_reset_task);
16672
16673 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16674 if (!tp->regs) {
16675 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16676 err = -ENOMEM;
16677 goto err_out_free_dev;
16678 }
16679
16680 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16681 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16682 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16683 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16684 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16685 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16686 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16687 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16688 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16689 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16690 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16691 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16692 tg3_flag_set(tp, ENABLE_APE);
16693 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16694 if (!tp->aperegs) {
16695 dev_err(&pdev->dev,
16696 "Cannot map APE registers, aborting\n");
16697 err = -ENOMEM;
16698 goto err_out_iounmap;
16699 }
16700 }
16701
16702 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16703 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16704
16705 dev->ethtool_ops = &tg3_ethtool_ops;
16706 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16707 dev->netdev_ops = &tg3_netdev_ops;
16708 dev->irq = pdev->irq;
16709
16710 err = tg3_get_invariants(tp, ent);
16711 if (err) {
16712 dev_err(&pdev->dev,
16713 "Problem fetching invariants of chip, aborting\n");
16714 goto err_out_apeunmap;
16715 }
16716
16717 /* The EPB bridge inside 5714, 5715, and 5780 and any
16718 * device behind the EPB cannot support DMA addresses > 40-bit.
16719 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16720 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16721 * do DMA address check in tg3_start_xmit().
16722 */
16723 if (tg3_flag(tp, IS_5788))
16724 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16725 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16726 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16727 #ifdef CONFIG_HIGHMEM
16728 dma_mask = DMA_BIT_MASK(64);
16729 #endif
16730 } else
16731 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16732
16733 /* Configure DMA attributes. */
16734 if (dma_mask > DMA_BIT_MASK(32)) {
16735 err = pci_set_dma_mask(pdev, dma_mask);
16736 if (!err) {
16737 features |= NETIF_F_HIGHDMA;
16738 err = pci_set_consistent_dma_mask(pdev,
16739 persist_dma_mask);
16740 if (err < 0) {
16741 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16742 "DMA for consistent allocations\n");
16743 goto err_out_apeunmap;
16744 }
16745 }
16746 }
16747 if (err || dma_mask == DMA_BIT_MASK(32)) {
16748 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16749 if (err) {
16750 dev_err(&pdev->dev,
16751 "No usable DMA configuration, aborting\n");
16752 goto err_out_apeunmap;
16753 }
16754 }
16755
16756 tg3_init_bufmgr_config(tp);
16757
16758 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16759
16760 /* 5700 B0 chips do not support checksumming correctly due
16761 * to hardware bugs.
16762 */
16763 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16764 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16765
16766 if (tg3_flag(tp, 5755_PLUS))
16767 features |= NETIF_F_IPV6_CSUM;
16768 }
16769
16770 /* TSO is on by default on chips that support hardware TSO.
16771 * Firmware TSO on older chips gives lower performance, so it
16772 * is off by default, but can be enabled using ethtool.
16773 */
16774 if ((tg3_flag(tp, HW_TSO_1) ||
16775 tg3_flag(tp, HW_TSO_2) ||
16776 tg3_flag(tp, HW_TSO_3)) &&
16777 (features & NETIF_F_IP_CSUM))
16778 features |= NETIF_F_TSO;
16779 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16780 if (features & NETIF_F_IPV6_CSUM)
16781 features |= NETIF_F_TSO6;
16782 if (tg3_flag(tp, HW_TSO_3) ||
16783 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16784 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16785 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16786 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16787 tg3_asic_rev(tp) == ASIC_REV_57780)
16788 features |= NETIF_F_TSO_ECN;
16789 }
16790
16791 dev->features |= features;
16792 dev->vlan_features |= features;
16793
16794 /*
16795 * Add loopback capability only for a subset of devices that support
16796 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16797 * loopback for the remaining devices.
16798 */
16799 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16800 !tg3_flag(tp, CPMU_PRESENT))
16801 /* Add the loopback capability */
16802 features |= NETIF_F_LOOPBACK;
16803
16804 dev->hw_features |= features;
16805
16806 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16807 !tg3_flag(tp, TSO_CAPABLE) &&
16808 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16809 tg3_flag_set(tp, MAX_RXPEND_64);
16810 tp->rx_pending = 63;
16811 }
16812
16813 err = tg3_get_device_address(tp);
16814 if (err) {
16815 dev_err(&pdev->dev,
16816 "Could not obtain valid ethernet address, aborting\n");
16817 goto err_out_apeunmap;
16818 }
16819
16820 /*
16821 	 * Reset the chip in case a UNDI or EFI driver did not shut it down.
16822 	 * The DMA self test will enable WDMAC and we'll see (spurious)
16823 * pending DMA on the PCI bus at that point.
16824 */
16825 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16826 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16827 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16828 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16829 }
16830
16831 err = tg3_test_dma(tp);
16832 if (err) {
16833 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16834 goto err_out_apeunmap;
16835 }
16836
16837 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16838 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16839 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16840 for (i = 0; i < tp->irq_max; i++) {
16841 struct tg3_napi *tnapi = &tp->napi[i];
16842
16843 tnapi->tp = tp;
16844 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16845
16846 tnapi->int_mbox = intmbx;
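		/* Interrupt mailboxes for the first five vectors are spaced
		 * 8 bytes apart; the rest are packed at 4-byte spacing.
		 */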
16847 if (i <= 4)
16848 intmbx += 0x8;
16849 else
16850 intmbx += 0x4;
16851
16852 tnapi->consmbox = rcvmbx;
16853 tnapi->prodmbox = sndmbx;
16854
16855 if (i)
16856 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16857 else
16858 tnapi->coal_now = HOSTCC_MODE_NOW;
16859
16860 if (!tg3_flag(tp, SUPPORT_MSIX))
16861 break;
16862
16863 /*
16864 * If we support MSIX, we'll be using RSS. If we're using
16865 * RSS, the first vector only handles link interrupts and the
16866 * remaining vectors handle rx and tx interrupts. Reuse the
16867 		 * mailbox values for the next iteration. The values we set up
16868 * above are still useful for the single vectored mode.
16869 */
16870 if (!i)
16871 continue;
16872
16873 rcvmbx += 0x8;
16874
16875 if (sndmbx & 0x4)
16876 sndmbx -= 0x4;
16877 else
16878 sndmbx += 0xc;
16879 }
16880
16881 tg3_init_coal(tp);
16882
16883 pci_set_drvdata(pdev, dev);
16884
16885 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16886 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16887 tg3_asic_rev(tp) == ASIC_REV_5762)
16888 tg3_flag_set(tp, PTP_CAPABLE);
16889
16890 if (tg3_flag(tp, 5717_PLUS)) {
16891 /* Resume a low-power mode */
16892 tg3_frob_aux_power(tp, false);
16893 }
16894
16895 tg3_timer_init(tp);
16896
16897 tg3_carrier_off(tp);
16898
16899 err = register_netdev(dev);
16900 if (err) {
16901 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16902 goto err_out_apeunmap;
16903 }
16904
16905 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16906 tp->board_part_number,
16907 tg3_chip_rev_id(tp),
16908 tg3_bus_string(tp, str),
16909 dev->dev_addr);
16910
16911 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16912 struct phy_device *phydev;
16913 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16914 netdev_info(dev,
16915 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16916 phydev->drv->name, dev_name(&phydev->dev));
16917 } else {
16918 char *ethtype;
16919
16920 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16921 ethtype = "10/100Base-TX";
16922 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16923 ethtype = "1000Base-SX";
16924 else
16925 ethtype = "10/100/1000Base-T";
16926
16927 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16928 "(WireSpeed[%d], EEE[%d])\n",
16929 tg3_phy_string(tp), ethtype,
16930 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16931 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16932 }
16933
16934 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16935 (dev->features & NETIF_F_RXCSUM) != 0,
16936 tg3_flag(tp, USE_LINKCHG_REG) != 0,
16937 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16938 tg3_flag(tp, ENABLE_ASF) != 0,
16939 tg3_flag(tp, TSO_CAPABLE) != 0);
16940 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16941 tp->dma_rwctrl,
16942 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16943 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16944
16945 pci_save_state(pdev);
16946
16947 return 0;
16948
16949 err_out_apeunmap:
16950 if (tp->aperegs) {
16951 iounmap(tp->aperegs);
16952 tp->aperegs = NULL;
16953 }
16954
16955 err_out_iounmap:
16956 if (tp->regs) {
16957 iounmap(tp->regs);
16958 tp->regs = NULL;
16959 }
16960
16961 err_out_free_dev:
16962 free_netdev(dev);
16963
16964 err_out_power_down:
16965 pci_set_power_state(pdev, PCI_D3hot);
16966
16967 err_out_free_res:
16968 pci_release_regions(pdev);
16969
16970 err_out_disable_pdev:
16971 pci_disable_device(pdev);
16972 pci_set_drvdata(pdev, NULL);
16973 return err;
16974 }
16975
16976 static void tg3_remove_one(struct pci_dev *pdev)
16977 {
16978 struct net_device *dev = pci_get_drvdata(pdev);
16979
16980 if (dev) {
16981 struct tg3 *tp = netdev_priv(dev);
16982
16983 release_firmware(tp->fw);
16984
16985 tg3_reset_task_cancel(tp);
16986
16987 if (tg3_flag(tp, USE_PHYLIB)) {
16988 tg3_phy_fini(tp);
16989 tg3_mdio_fini(tp);
16990 }
16991
16992 unregister_netdev(dev);
16993 if (tp->aperegs) {
16994 iounmap(tp->aperegs);
16995 tp->aperegs = NULL;
16996 }
16997 if (tp->regs) {
16998 iounmap(tp->regs);
16999 tp->regs = NULL;
17000 }
17001 free_netdev(dev);
17002 pci_release_regions(pdev);
17003 pci_disable_device(pdev);
17004 pci_set_drvdata(pdev, NULL);
17005 }
17006 }
17007
17008 #ifdef CONFIG_PM_SLEEP
17009 static int tg3_suspend(struct device *device)
17010 {
17011 struct pci_dev *pdev = to_pci_dev(device);
17012 struct net_device *dev = pci_get_drvdata(pdev);
17013 struct tg3 *tp = netdev_priv(dev);
17014 int err;
17015
17016 if (!netif_running(dev))
17017 return 0;
17018
17019 tg3_reset_task_cancel(tp);
17020 tg3_phy_stop(tp);
17021 tg3_netif_stop(tp);
17022
17023 tg3_timer_stop(tp);
17024
17025 tg3_full_lock(tp, 1);
17026 tg3_disable_ints(tp);
17027 tg3_full_unlock(tp);
17028
17029 netif_device_detach(dev);
17030
17031 tg3_full_lock(tp, 0);
17032 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17033 tg3_flag_clear(tp, INIT_COMPLETE);
17034 tg3_full_unlock(tp);
17035
17036 err = tg3_power_down_prepare(tp);
17037 if (err) {
17038 int err2;
17039
17040 tg3_full_lock(tp, 0);
17041
17042 tg3_flag_set(tp, INIT_COMPLETE);
17043 err2 = tg3_restart_hw(tp, 1);
17044 if (err2)
17045 goto out;
17046
17047 tg3_timer_start(tp);
17048
17049 netif_device_attach(dev);
17050 tg3_netif_start(tp);
17051
17052 out:
17053 tg3_full_unlock(tp);
17054
17055 if (!err2)
17056 tg3_phy_start(tp);
17057 }
17058
17059 return err;
17060 }
17061
17062 static int tg3_resume(struct device *device)
17063 {
17064 struct pci_dev *pdev = to_pci_dev(device);
17065 struct net_device *dev = pci_get_drvdata(pdev);
17066 struct tg3 *tp = netdev_priv(dev);
17067 int err;
17068
17069 if (!netif_running(dev))
17070 return 0;
17071
17072 netif_device_attach(dev);
17073
17074 tg3_full_lock(tp, 0);
17075
17076 tg3_flag_set(tp, INIT_COMPLETE);
17077 err = tg3_restart_hw(tp, 1);
17078 if (err)
17079 goto out;
17080
17081 tg3_timer_start(tp);
17082
17083 tg3_netif_start(tp);
17084
17085 out:
17086 tg3_full_unlock(tp);
17087
17088 if (!err)
17089 tg3_phy_start(tp);
17090
17091 return err;
17092 }
17093
17094 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17095 #define TG3_PM_OPS (&tg3_pm_ops)
17096
17097 #else
17098
17099 #define TG3_PM_OPS NULL
17100
17101 #endif /* CONFIG_PM_SLEEP */
17102
17103 /**
17104 * tg3_io_error_detected - called when PCI error is detected
17105 * @pdev: Pointer to PCI device
17106 * @state: The current pci connection state
17107 *
17108 * This function is called after a PCI bus error affecting
17109 * this device has been detected.
17110 */
17111 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17112 pci_channel_state_t state)
17113 {
17114 struct net_device *netdev = pci_get_drvdata(pdev);
17115 struct tg3 *tp = netdev_priv(netdev);
17116 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17117
17118 netdev_info(netdev, "PCI I/O error detected\n");
17119
17120 rtnl_lock();
17121
17122 if (!netif_running(netdev))
17123 goto done;
17124
17125 tg3_phy_stop(tp);
17126
17127 tg3_netif_stop(tp);
17128
17129 tg3_timer_stop(tp);
17130
17131 /* Want to make sure that the reset task doesn't run */
17132 tg3_reset_task_cancel(tp);
17133
17134 netif_device_detach(netdev);
17135
17136 /* Clean up software state, even if MMIO is blocked */
17137 tg3_full_lock(tp, 0);
17138 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17139 tg3_full_unlock(tp);
17140
17141 done:
17142 if (state == pci_channel_io_perm_failure)
17143 err = PCI_ERS_RESULT_DISCONNECT;
17144 else
17145 pci_disable_device(pdev);
17146
17147 rtnl_unlock();
17148
17149 return err;
17150 }
17151
17152 /**
17153 * tg3_io_slot_reset - called after the pci bus has been reset.
17154 * @pdev: Pointer to PCI device
17155 *
17156 * Restart the card from scratch, as if from a cold-boot.
17157  * At this point, the card has experienced a hard reset,
17158 * followed by fixups by BIOS, and has its config space
17159 * set up identically to what it was at cold boot.
17160 */
17161 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17162 {
17163 struct net_device *netdev = pci_get_drvdata(pdev);
17164 struct tg3 *tp = netdev_priv(netdev);
17165 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17166 int err;
17167
17168 rtnl_lock();
17169
17170 if (pci_enable_device(pdev)) {
17171 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17172 goto done;
17173 }
17174
17175 pci_set_master(pdev);
17176 pci_restore_state(pdev);
17177 pci_save_state(pdev);
17178
17179 if (!netif_running(netdev)) {
17180 rc = PCI_ERS_RESULT_RECOVERED;
17181 goto done;
17182 }
17183
17184 err = tg3_power_up(tp);
17185 if (err)
17186 goto done;
17187
17188 rc = PCI_ERS_RESULT_RECOVERED;
17189
17190 done:
17191 rtnl_unlock();
17192
17193 return rc;
17194 }
17195
17196 /**
17197 * tg3_io_resume - called when traffic can start flowing again.
17198 * @pdev: Pointer to PCI device
17199 *
17200 * This callback is called when the error recovery driver tells
17201  * us that it's OK to resume normal operation.
17202 */
17203 static void tg3_io_resume(struct pci_dev *pdev)
17204 {
17205 struct net_device *netdev = pci_get_drvdata(pdev);
17206 struct tg3 *tp = netdev_priv(netdev);
17207 int err;
17208
17209 rtnl_lock();
17210
17211 if (!netif_running(netdev))
17212 goto done;
17213
17214 tg3_full_lock(tp, 0);
17215 tg3_flag_set(tp, INIT_COMPLETE);
17216 err = tg3_restart_hw(tp, 1);
17217 if (err) {
17218 tg3_full_unlock(tp);
17219 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17220 goto done;
17221 }
17222
17223 netif_device_attach(netdev);
17224
17225 tg3_timer_start(tp);
17226
17227 tg3_netif_start(tp);
17228
17229 tg3_full_unlock(tp);
17230
17231 tg3_phy_start(tp);
17232
17233 done:
17234 rtnl_unlock();
17235 }
17236
17237 static const struct pci_error_handlers tg3_err_handler = {
17238 .error_detected = tg3_io_error_detected,
17239 .slot_reset = tg3_io_slot_reset,
17240 .resume = tg3_io_resume
17241 };
17242
17243 static struct pci_driver tg3_driver = {
17244 .name = DRV_MODULE_NAME,
17245 .id_table = tg3_pci_tbl,
17246 .probe = tg3_init_one,
17247 .remove = tg3_remove_one,
17248 .err_handler = &tg3_err_handler,
17249 .driver.pm = TG3_PM_OPS,
17250 };
17251
17252 static int __init tg3_init(void)
17253 {
17254 return pci_register_driver(&tg3_driver);
17255 }
17256
17257 static void __exit tg3_cleanup(void)
17258 {
17259 pci_unregister_driver(&tg3_driver);
17260 }
17261
17262 module_init(tg3_init);
17263 module_exit(tg3_cleanup);