tg3: Refactor the 2nd type of cpu pause
drivers/net/ethernet/broadcom/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
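/* Example (illustrative): tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, tp->tg3_flags), i.e. an atomic
 * test_bit() on the device's flag bitmap.
 */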

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
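/* NEXT_TX relies on TG3_TX_RING_SIZE being a power of two, so the
 * AND mask implements the ring wrap-around without a modulo.
 */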

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

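/* Write a register and immediately read it back so the posted PCI
 * write is flushed before the caller proceeds.
 */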
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

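/* Indirect mailbox access goes through the same PCI config window;
 * mailbox registers sit at offset 0x5600 within it. A couple of
 * hot-path producer/consumer mailboxes have dedicated config-space
 * aliases and are handled up front.
 */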
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
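/* Example (illustrative): tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40)
 * writes the register, waits 40 usec, reads it back to flush the
 * posted write, then waits again so the full delay is honoured.
 */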
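/* Indirect NIC SRAM accessors. On the 5906, the statistics block
 * region of SRAM is not accessible and is silently skipped.
 */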
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
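		/* fall through */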
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
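		/* fall through */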
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

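/* Poll until the APE has no event pending, reacquiring the MEM lock on
 * each iteration. Returns 0 with TG3_APE_LOCK_MEM held, or -EBUSY if
 * the APE is still busy after timeout_us.
 */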
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

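/* Returns 0 once the pending APE event has been serviced, nonzero if
 * it is still pending after timeout_us.
 */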
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

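/* Mask chip interrupts: set the PCI interrupt mask bit and write 1 to
 * every interrupt mailbox so the chip holds off further interrupts.
 */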
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

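/* Adjust the core clock via TG3PCI_CLOCK_CTRL; chips with a CPMU or in
 * the 5780 class manage their own clocks, so this is a no-op there.
 */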
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

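/* Read a PHY register over the MDIO interface, pausing MI auto-polling
 * around the access. Returns 0 on success or -EBUSY if the interface
 * stays busy for PHY_BUSY_LOOPS polls.
 */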
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

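/* Translate the driver's flow-control flags into the 1000BASE-X pause
 * advertisement bits (IEEE 802.3 annex 28B).
 */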
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

1890 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1891 {
1892 u8 cap = 0;
1893
1894 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1895 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1896 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1897 if (lcladv & ADVERTISE_1000XPAUSE)
1898 cap = FLOW_CTRL_RX;
1899 if (rmtadv & ADVERTISE_1000XPAUSE)
1900 cap = FLOW_CTRL_TX;
1901 }
1902
1903 return cap;
1904 }
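/* Resolution sketch (informational, not from the original sources):
 * the logic above implements the usual 1000BASE-X pause matrix.
 * With P = ADVERTISE_1000XPAUSE and A = ADVERTISE_1000XPSE_ASYM:
 *
 *   local P,  remote P           -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *   local P|A, remote A (no P)   -> FLOW_CTRL_RX
 *   local A (no P), remote P|A   -> FLOW_CTRL_TX
 *   anything else                -> 0 (no pause)
 *
 * e.g. tg3_resolve_flowctrl_1000X(ADVERTISE_1000XPAUSE |
 * ADVERTISE_1000XPSE_ASYM, ADVERTISE_1000XPSE_ASYM) returns
 * FLOW_CTRL_RX.
 */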
1905
1906 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1907 {
1908 u8 autoneg;
1909 u8 flowctrl = 0;
1910 u32 old_rx_mode = tp->rx_mode;
1911 u32 old_tx_mode = tp->tx_mode;
1912
1913 if (tg3_flag(tp, USE_PHYLIB))
1914 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1915 else
1916 autoneg = tp->link_config.autoneg;
1917
1918 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1919 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1920 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1921 else
1922 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1923 } else
1924 flowctrl = tp->link_config.flowctrl;
1925
1926 tp->link_config.active_flowctrl = flowctrl;
1927
1928 if (flowctrl & FLOW_CTRL_RX)
1929 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1930 else
1931 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1932
1933 if (old_rx_mode != tp->rx_mode)
1934 tw32_f(MAC_RX_MODE, tp->rx_mode);
1935
1936 if (flowctrl & FLOW_CTRL_TX)
1937 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1938 else
1939 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1940
1941 if (old_tx_mode != tp->tx_mode)
1942 tw32_f(MAC_TX_MODE, tp->tx_mode);
1943 }
1944
1945 static void tg3_adjust_link(struct net_device *dev)
1946 {
1947 u8 oldflowctrl, linkmesg = 0;
1948 u32 mac_mode, lcl_adv, rmt_adv;
1949 struct tg3 *tp = netdev_priv(dev);
1950 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1951
1952 spin_lock_bh(&tp->lock);
1953
1954 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1955 MAC_MODE_HALF_DUPLEX);
1956
1957 oldflowctrl = tp->link_config.active_flowctrl;
1958
1959 if (phydev->link) {
1960 lcl_adv = 0;
1961 rmt_adv = 0;
1962
1963 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1964 mac_mode |= MAC_MODE_PORT_MODE_MII;
1965 else if (phydev->speed == SPEED_1000 ||
1966 tg3_asic_rev(tp) != ASIC_REV_5785)
1967 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1968 else
1969 mac_mode |= MAC_MODE_PORT_MODE_MII;
1970
1971 if (phydev->duplex == DUPLEX_HALF)
1972 mac_mode |= MAC_MODE_HALF_DUPLEX;
1973 else {
1974 lcl_adv = mii_advertise_flowctrl(
1975 tp->link_config.flowctrl);
1976
1977 if (phydev->pause)
1978 rmt_adv = LPA_PAUSE_CAP;
1979 if (phydev->asym_pause)
1980 rmt_adv |= LPA_PAUSE_ASYM;
1981 }
1982
1983 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1984 } else
1985 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1986
1987 if (mac_mode != tp->mac_mode) {
1988 tp->mac_mode = mac_mode;
1989 tw32_f(MAC_MODE, tp->mac_mode);
1990 udelay(40);
1991 }
1992
1993 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1994 if (phydev->speed == SPEED_10)
1995 tw32(MAC_MI_STAT,
1996 MAC_MI_STAT_10MBPS_MODE |
1997 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1998 else
1999 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2000 }
2001
2002 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2003 tw32(MAC_TX_LENGTHS,
2004 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2005 (6 << TX_LENGTHS_IPG_SHIFT) |
2006 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2007 else
2008 tw32(MAC_TX_LENGTHS,
2009 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2010 (6 << TX_LENGTHS_IPG_SHIFT) |
2011 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2012
2013 if (phydev->link != tp->old_link ||
2014 phydev->speed != tp->link_config.active_speed ||
2015 phydev->duplex != tp->link_config.active_duplex ||
2016 oldflowctrl != tp->link_config.active_flowctrl)
2017 linkmesg = 1;
2018
2019 tp->old_link = phydev->link;
2020 tp->link_config.active_speed = phydev->speed;
2021 tp->link_config.active_duplex = phydev->duplex;
2022
2023 spin_unlock_bh(&tp->lock);
2024
2025 if (linkmesg)
2026 tg3_link_report(tp);
2027 }
2028
2029 static int tg3_phy_init(struct tg3 *tp)
2030 {
2031 struct phy_device *phydev;
2032
2033 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2034 return 0;
2035
2036 /* Bring the PHY back to a known state. */
2037 tg3_bmcr_reset(tp);
2038
2039 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2040
2041 /* Attach the MAC to the PHY. */
2042 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2043 tg3_adjust_link, phydev->interface);
2044 if (IS_ERR(phydev)) {
2045 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2046 return PTR_ERR(phydev);
2047 }
2048
2049 /* Mask with MAC supported features. */
2050 switch (phydev->interface) {
2051 case PHY_INTERFACE_MODE_GMII:
2052 case PHY_INTERFACE_MODE_RGMII:
2053 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2054 phydev->supported &= (PHY_GBIT_FEATURES |
2055 SUPPORTED_Pause |
2056 SUPPORTED_Asym_Pause);
2057 break;
2058 }
2059 /* fallthru */
2060 case PHY_INTERFACE_MODE_MII:
2061 phydev->supported &= (PHY_BASIC_FEATURES |
2062 SUPPORTED_Pause |
2063 SUPPORTED_Asym_Pause);
2064 break;
2065 default:
2066 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2067 return -EINVAL;
2068 }
2069
2070 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2071
2072 phydev->advertising = phydev->supported;
2073
2074 return 0;
2075 }
2076
2077 static void tg3_phy_start(struct tg3 *tp)
2078 {
2079 struct phy_device *phydev;
2080
2081 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2082 return;
2083
2084 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2085
2086 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2087 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2088 phydev->speed = tp->link_config.speed;
2089 phydev->duplex = tp->link_config.duplex;
2090 phydev->autoneg = tp->link_config.autoneg;
2091 phydev->advertising = tp->link_config.advertising;
2092 }
2093
2094 phy_start(phydev);
2095
2096 phy_start_aneg(phydev);
2097 }
2098
2099 static void tg3_phy_stop(struct tg3 *tp)
2100 {
2101 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2102 return;
2103
2104 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2105 }
2106
2107 static void tg3_phy_fini(struct tg3 *tp)
2108 {
2109 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2110 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2111 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2112 }
2113 }
2114
2115 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2116 {
2117 int err;
2118 u32 val;
2119
2120 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2121 return 0;
2122
2123 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2124 /* Cannot do read-modify-write on 5401 */
2125 err = tg3_phy_auxctl_write(tp,
2126 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2127 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2128 0x4c20);
2129 goto done;
2130 }
2131
2132 err = tg3_phy_auxctl_read(tp,
2133 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2134 if (err)
2135 return err;
2136
2137 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2138 err = tg3_phy_auxctl_write(tp,
2139 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2140
2141 done:
2142 return err;
2143 }
2144
2145 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2146 {
2147 u32 phytest;
2148
2149 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2150 u32 phy;
2151
2152 tg3_writephy(tp, MII_TG3_FET_TEST,
2153 phytest | MII_TG3_FET_SHADOW_EN);
2154 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2155 if (enable)
2156 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2157 else
2158 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2159 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2160 }
2161 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2162 }
2163 }
2164
2165 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2166 {
2167 u32 reg;
2168
2169 if (!tg3_flag(tp, 5705_PLUS) ||
2170 (tg3_flag(tp, 5717_PLUS) &&
2171 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2172 return;
2173
2174 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2175 tg3_phy_fet_toggle_apd(tp, enable);
2176 return;
2177 }
2178
2179 reg = MII_TG3_MISC_SHDW_WREN |
2180 MII_TG3_MISC_SHDW_SCR5_SEL |
2181 MII_TG3_MISC_SHDW_SCR5_LPED |
2182 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2183 MII_TG3_MISC_SHDW_SCR5_SDTL |
2184 MII_TG3_MISC_SHDW_SCR5_C125OE;
2185 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2186 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2187
2188 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2189
2191 reg = MII_TG3_MISC_SHDW_WREN |
2192 MII_TG3_MISC_SHDW_APD_SEL |
2193 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2194 if (enable)
2195 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2196
2197 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2198 }
2199
2200 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2201 {
2202 u32 phy;
2203
2204 if (!tg3_flag(tp, 5705_PLUS) ||
2205 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2206 return;
2207
2208 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2209 u32 ephy;
2210
2211 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2212 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2213
2214 tg3_writephy(tp, MII_TG3_FET_TEST,
2215 ephy | MII_TG3_FET_SHADOW_EN);
2216 if (!tg3_readphy(tp, reg, &phy)) {
2217 if (enable)
2218 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2219 else
2220 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2221 tg3_writephy(tp, reg, phy);
2222 }
2223 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2224 }
2225 } else {
2226 int ret;
2227
2228 ret = tg3_phy_auxctl_read(tp,
2229 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2230 if (!ret) {
2231 if (enable)
2232 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2233 else
2234 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2235 tg3_phy_auxctl_write(tp,
2236 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2237 }
2238 }
2239 }
2240
2241 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2242 {
2243 int ret;
2244 u32 val;
2245
2246 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2247 return;
2248
2249 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2250 if (!ret)
2251 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2252 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2253 }
2254
2255 static void tg3_phy_apply_otp(struct tg3 *tp)
2256 {
2257 u32 otp, phy;
2258
2259 if (!tp->phy_otp)
2260 return;
2261
2262 otp = tp->phy_otp;
2263
2264 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2265 return;
2266
2267 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2268 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2269 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2270
2271 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2272 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2273 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2274
2275 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2276 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2277 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2278
2279 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2280 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2281
2282 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2283 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2284
2285 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2286 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2287 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2288
2289 tg3_phy_toggle_auxctl_smdsp(tp, false);
2290 }
2291
2292 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2293 {
2294 u32 val;
2295
2296 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2297 return;
2298
2299 tp->setlpicnt = 0;
2300
2301 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2302 current_link_up == 1 &&
2303 tp->link_config.active_duplex == DUPLEX_FULL &&
2304 (tp->link_config.active_speed == SPEED_100 ||
2305 tp->link_config.active_speed == SPEED_1000)) {
2306 u32 eeectl;
2307
2308 if (tp->link_config.active_speed == SPEED_1000)
2309 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2310 else
2311 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2312
2313 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2314
2315 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2316 TG3_CL45_D7_EEERES_STAT, &val);
2317
2318 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2319 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2320 tp->setlpicnt = 2;
2321 }
2322
2323 if (!tp->setlpicnt) {
2324 if (current_link_up == 1 &&
2325 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2326 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2327 tg3_phy_toggle_auxctl_smdsp(tp, false);
2328 }
2329
2330 val = tr32(TG3_CPMU_EEE_MODE);
2331 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2332 }
2333 }
2334
2335 static void tg3_phy_eee_enable(struct tg3 *tp)
2336 {
2337 u32 val;
2338
2339 if (tp->link_config.active_speed == SPEED_1000 &&
2340 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2341 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2342 tg3_flag(tp, 57765_CLASS)) &&
2343 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2344 val = MII_TG3_DSP_TAP26_ALNOKO |
2345 MII_TG3_DSP_TAP26_RMRXSTO;
2346 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2347 tg3_phy_toggle_auxctl_smdsp(tp, false);
2348 }
2349
2350 val = tr32(TG3_CPMU_EEE_MODE);
2351 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2352 }
2353
2354 static int tg3_wait_macro_done(struct tg3 *tp)
2355 {
2356 int limit = 100;
2357
2358 while (limit--) {
2359 u32 tmp32;
2360
2361 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2362 if ((tmp32 & 0x1000) == 0)
2363 break;
2364 }
2365 }
2366 if (limit < 0)
2367 return -EBUSY;
2368
2369 return 0;
2370 }
2371
2372 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2373 {
2374 static const u32 test_pat[4][6] = {
2375 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2376 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2377 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2378 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2379 };
2380 int chan;
2381
2382 for (chan = 0; chan < 4; chan++) {
2383 int i;
2384
2385 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2386 (chan * 0x2000) | 0x0200);
2387 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2388
2389 for (i = 0; i < 6; i++)
2390 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2391 test_pat[chan][i]);
2392
2393 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2394 if (tg3_wait_macro_done(tp)) {
2395 *resetp = 1;
2396 return -EBUSY;
2397 }
2398
2399 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2400 (chan * 0x2000) | 0x0200);
2401 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2402 if (tg3_wait_macro_done(tp)) {
2403 *resetp = 1;
2404 return -EBUSY;
2405 }
2406
2407 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2408 if (tg3_wait_macro_done(tp)) {
2409 *resetp = 1;
2410 return -EBUSY;
2411 }
2412
2413 for (i = 0; i < 6; i += 2) {
2414 u32 low, high;
2415
2416 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2417 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2418 tg3_wait_macro_done(tp)) {
2419 *resetp = 1;
2420 return -EBUSY;
2421 }
2422 low &= 0x7fff;
2423 high &= 0x000f;
2424 if (low != test_pat[chan][i] ||
2425 high != test_pat[chan][i+1]) {
2426 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2427 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2428 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2429
2430 return -EBUSY;
2431 }
2432 }
2433 }
2434
2435 return 0;
2436 }
2437
2438 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2439 {
2440 int chan;
2441
2442 for (chan = 0; chan < 4; chan++) {
2443 int i;
2444
2445 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2446 (chan * 0x2000) | 0x0200);
2447 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2448 for (i = 0; i < 6; i++)
2449 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2450 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2451 if (tg3_wait_macro_done(tp))
2452 return -EBUSY;
2453 }
2454
2455 return 0;
2456 }
2457
2458 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2459 {
2460 u32 reg32, phy9_orig;
2461 int retries, do_phy_reset, err;
2462
2463 retries = 10;
2464 do_phy_reset = 1;
2465 do {
2466 if (do_phy_reset) {
2467 err = tg3_bmcr_reset(tp);
2468 if (err)
2469 return err;
2470 do_phy_reset = 0;
2471 }
2472
2473 /* Disable transmitter and interrupt. */
2474 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2475 continue;
2476
2477 reg32 |= 0x3000;
2478 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2479
2480 /* Set full-duplex, 1000 mbps. */
2481 tg3_writephy(tp, MII_BMCR,
2482 BMCR_FULLDPLX | BMCR_SPEED1000);
2483
2484 /* Set to master mode. */
2485 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2486 continue;
2487
2488 tg3_writephy(tp, MII_CTRL1000,
2489 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2490
2491 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2492 if (err)
2493 return err;
2494
2495 /* Block the PHY control access. */
2496 tg3_phydsp_write(tp, 0x8005, 0x0800);
2497
2498 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2499 if (!err)
2500 break;
2501 } while (--retries);
2502
2503 err = tg3_phy_reset_chanpat(tp);
2504 if (err)
2505 return err;
2506
2507 tg3_phydsp_write(tp, 0x8005, 0x0000);
2508
2509 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2510 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2511
2512 tg3_phy_toggle_auxctl_smdsp(tp, false);
2513
2514 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2515
2516 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2517 reg32 &= ~0x3000;
2518 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2519 } else if (!err)
2520 err = -EBUSY;
2521
2522 return err;
2523 }
2524
2525 static void tg3_carrier_on(struct tg3 *tp)
2526 {
2527 netif_carrier_on(tp->dev);
2528 tp->link_up = true;
2529 }
2530
2531 static void tg3_carrier_off(struct tg3 *tp)
2532 {
2533 netif_carrier_off(tp->dev);
2534 tp->link_up = false;
2535 }
2536
2537 /* Reset the tigon3 PHY unconditionally and reapply the chip's
2538  * PHY workarounds, OTP values and wirespeed settings.
2539  */
2540 static int tg3_phy_reset(struct tg3 *tp)
2541 {
2542 u32 val, cpmuctrl;
2543 int err;
2544
2545 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2546 val = tr32(GRC_MISC_CFG);
2547 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2548 udelay(40);
2549 }
2550 err = tg3_readphy(tp, MII_BMSR, &val);
2551 err |= tg3_readphy(tp, MII_BMSR, &val);
2552 if (err != 0)
2553 return -EBUSY;
2554
2555 if (netif_running(tp->dev) && tp->link_up) {
2556 tg3_carrier_off(tp);
2557 tg3_link_report(tp);
2558 }
2559
2560 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2561 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2562 tg3_asic_rev(tp) == ASIC_REV_5705) {
2563 err = tg3_phy_reset_5703_4_5(tp);
2564 if (err)
2565 return err;
2566 goto out;
2567 }
2568
2569 cpmuctrl = 0;
2570 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2571 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2572 cpmuctrl = tr32(TG3_CPMU_CTRL);
2573 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2574 tw32(TG3_CPMU_CTRL,
2575 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2576 }
2577
2578 err = tg3_bmcr_reset(tp);
2579 if (err)
2580 return err;
2581
2582 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2583 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2584 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2585
2586 tw32(TG3_CPMU_CTRL, cpmuctrl);
2587 }
2588
2589 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2590 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2591 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2592 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2593 CPMU_LSPD_1000MB_MACCLK_12_5) {
2594 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2595 udelay(40);
2596 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2597 }
2598 }
2599
2600 if (tg3_flag(tp, 5717_PLUS) &&
2601 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2602 return 0;
2603
2604 tg3_phy_apply_otp(tp);
2605
2606 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2607 tg3_phy_toggle_apd(tp, true);
2608 else
2609 tg3_phy_toggle_apd(tp, false);
2610
2611 out:
2612 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2613 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2614 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2615 tg3_phydsp_write(tp, 0x000a, 0x0323);
2616 tg3_phy_toggle_auxctl_smdsp(tp, false);
2617 }
2618
2619 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2620 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2621 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2622 }
2623
2624 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2625 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2626 tg3_phydsp_write(tp, 0x000a, 0x310b);
2627 tg3_phydsp_write(tp, 0x201f, 0x9506);
2628 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2629 tg3_phy_toggle_auxctl_smdsp(tp, false);
2630 }
2631 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2632 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2633 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2634 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2635 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2636 tg3_writephy(tp, MII_TG3_TEST1,
2637 MII_TG3_TEST1_TRIM_EN | 0x4);
2638 } else
2639 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2640
2641 tg3_phy_toggle_auxctl_smdsp(tp, false);
2642 }
2643 }
2644
2645 /* Set Extended packet length bit (bit 14) on all chips that
2646  * support jumbo frames. */
2647 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2648 /* Cannot do read-modify-write on 5401 */
2649 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2650 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2651 /* Set bit 14 with read-modify-write to preserve other bits */
2652 err = tg3_phy_auxctl_read(tp,
2653 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2654 if (!err)
2655 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2656 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2657 }
2658
2659 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2660 * jumbo frames transmission.
2661 */
2662 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2663 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2664 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2665 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2666 }
2667
2668 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2669 /* adjust output voltage */
2670 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2671 }
2672
2673 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2674 tg3_phydsp_write(tp, 0xffb, 0x4000);
2675
2676 tg3_phy_toggle_automdix(tp, 1);
2677 tg3_phy_set_wirespeed(tp);
2678 return 0;
2679 }
2680
2681 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2682 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2683 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2684 TG3_GPIO_MSG_NEED_VAUX)
2685 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2686 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2687 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2688 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2689 (TG3_GPIO_MSG_DRVR_PRES << 12))
2690
2691 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2692 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2693 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2694 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2695 (TG3_GPIO_MSG_NEED_VAUX << 12))
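/* Bit layout note (informational): each PCI function owns one 4-bit
 * nibble of the GPIO message word, so the two flags above repeat at
 * bit offsets 0, 4, 8 and 12:
 *
 *   bit (4 * fn) + 0 : TG3_GPIO_MSG_DRVR_PRES for function fn
 *   bit (4 * fn) + 1 : TG3_GPIO_MSG_NEED_VAUX for function fn
 *
 * The ALL_*_MASK macros simply OR the four per-function copies, and
 * tg3_set_function_status() below shifts a new per-function value by
 * TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn into place.
 */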
2696
2697 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2698 {
2699 u32 status, shift;
2700
2701 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2702 tg3_asic_rev(tp) == ASIC_REV_5719)
2703 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2704 else
2705 status = tr32(TG3_CPMU_DRV_STATUS);
2706
2707 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2708 status &= ~(TG3_GPIO_MSG_MASK << shift);
2709 status |= (newstat << shift);
2710
2711 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2712 tg3_asic_rev(tp) == ASIC_REV_5719)
2713 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2714 else
2715 tw32(TG3_CPMU_DRV_STATUS, status);
2716
2717 return status >> TG3_APE_GPIO_MSG_SHIFT;
2718 }
2719
2720 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2721 {
2722 if (!tg3_flag(tp, IS_NIC))
2723 return 0;
2724
2725 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2726 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2727 tg3_asic_rev(tp) == ASIC_REV_5720) {
2728 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2729 return -EIO;
2730
2731 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2732
2733 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2734 TG3_GRC_LCLCTL_PWRSW_DELAY);
2735
2736 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2737 } else {
2738 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2739 TG3_GRC_LCLCTL_PWRSW_DELAY);
2740 }
2741
2742 return 0;
2743 }
2744
2745 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2746 {
2747 u32 grc_local_ctrl;
2748
2749 if (!tg3_flag(tp, IS_NIC) ||
2750 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2751 tg3_asic_rev(tp) == ASIC_REV_5701)
2752 return;
2753
2754 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2755
2756 tw32_wait_f(GRC_LOCAL_CTRL,
2757 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2758 TG3_GRC_LCLCTL_PWRSW_DELAY);
2759
2760 tw32_wait_f(GRC_LOCAL_CTRL,
2761 grc_local_ctrl,
2762 TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764 tw32_wait_f(GRC_LOCAL_CTRL,
2765 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2767 }
2768
2769 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2770 {
2771 if (!tg3_flag(tp, IS_NIC))
2772 return;
2773
2774 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2775 tg3_asic_rev(tp) == ASIC_REV_5701) {
2776 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2777 (GRC_LCLCTRL_GPIO_OE0 |
2778 GRC_LCLCTRL_GPIO_OE1 |
2779 GRC_LCLCTRL_GPIO_OE2 |
2780 GRC_LCLCTRL_GPIO_OUTPUT0 |
2781 GRC_LCLCTRL_GPIO_OUTPUT1),
2782 TG3_GRC_LCLCTL_PWRSW_DELAY);
2783 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2784 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2785 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2786 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2787 GRC_LCLCTRL_GPIO_OE1 |
2788 GRC_LCLCTRL_GPIO_OE2 |
2789 GRC_LCLCTRL_GPIO_OUTPUT0 |
2790 GRC_LCLCTRL_GPIO_OUTPUT1 |
2791 tp->grc_local_ctrl;
2792 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2793 TG3_GRC_LCLCTL_PWRSW_DELAY);
2794
2795 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2796 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY);
2798
2799 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2800 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801 TG3_GRC_LCLCTL_PWRSW_DELAY);
2802 } else {
2803 u32 no_gpio2;
2804 u32 grc_local_ctrl = 0;
2805
2806 /* Workaround to prevent overdrawing Amps. */
2807 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2808 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2809 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2810 grc_local_ctrl,
2811 TG3_GRC_LCLCTL_PWRSW_DELAY);
2812 }
2813
2814 /* On 5753 and variants, GPIO2 cannot be used. */
2815 no_gpio2 = tp->nic_sram_data_cfg &
2816 NIC_SRAM_DATA_CFG_NO_GPIO2;
2817
2818 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT1 |
2822 GRC_LCLCTRL_GPIO_OUTPUT2;
2823 if (no_gpio2) {
2824 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2825 GRC_LCLCTRL_GPIO_OUTPUT2);
2826 }
2827 tw32_wait_f(GRC_LOCAL_CTRL,
2828 tp->grc_local_ctrl | grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830
2831 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2832
2833 tw32_wait_f(GRC_LOCAL_CTRL,
2834 tp->grc_local_ctrl | grc_local_ctrl,
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836
2837 if (!no_gpio2) {
2838 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2839 tw32_wait_f(GRC_LOCAL_CTRL,
2840 tp->grc_local_ctrl | grc_local_ctrl,
2841 TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 }
2843 }
2844 }
2845
2846 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2847 {
2848 u32 msg = 0;
2849
2850 /* Serialize power state transitions */
2851 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2852 return;
2853
2854 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2855 msg = TG3_GPIO_MSG_NEED_VAUX;
2856
2857 msg = tg3_set_function_status(tp, msg);
2858
2859 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2860 goto done;
2861
2862 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2863 tg3_pwrsrc_switch_to_vaux(tp);
2864 else
2865 tg3_pwrsrc_die_with_vmain(tp);
2866
2867 done:
2868 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2869 }
2870
2871 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2872 {
2873 bool need_vaux = false;
2874
2875 /* The GPIOs do something completely different on 57765. */
2876 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2877 return;
2878
2879 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2880 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2881 tg3_asic_rev(tp) == ASIC_REV_5720) {
2882 tg3_frob_aux_power_5717(tp, include_wol ?
2883 tg3_flag(tp, WOL_ENABLE) != 0 : false);
2884 return;
2885 }
2886
2887 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2888 struct net_device *dev_peer;
2889
2890 dev_peer = pci_get_drvdata(tp->pdev_peer);
2891
2892 /* remove_one() may have been run on the peer. */
2893 if (dev_peer) {
2894 struct tg3 *tp_peer = netdev_priv(dev_peer);
2895
2896 if (tg3_flag(tp_peer, INIT_COMPLETE))
2897 return;
2898
2899 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2900 tg3_flag(tp_peer, ENABLE_ASF))
2901 need_vaux = true;
2902 }
2903 }
2904
2905 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2906 tg3_flag(tp, ENABLE_ASF))
2907 need_vaux = true;
2908
2909 if (need_vaux)
2910 tg3_pwrsrc_switch_to_vaux(tp);
2911 else
2912 tg3_pwrsrc_die_with_vmain(tp);
2913 }
2914
2915 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2916 {
2917 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2918 return 1;
2919 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2920 if (speed != SPEED_10)
2921 return 1;
2922 } else if (speed == SPEED_10)
2923 return 1;
2924
2925 return 0;
2926 }
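/* Behaviour sketch for the helper above (informational): it returns 1
 * when the caller should set MAC_MODE_LINK_POLARITY, namely
 *
 *   LED_CTRL_MODE_PHY_2  -> always
 *   BCM5411 phys         -> at any speed other than 10Mbps
 *   all other phys       -> only at 10Mbps
 */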
2927
2928 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2929 {
2930 u32 val;
2931
2932 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2933 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2934 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2935 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2936
2937 sg_dig_ctrl |=
2938 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2939 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2940 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2941 }
2942 return;
2943 }
2944
2945 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2946 tg3_bmcr_reset(tp);
2947 val = tr32(GRC_MISC_CFG);
2948 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2949 udelay(40);
2950 return;
2951 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2952 u32 phytest;
2953 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2954 u32 phy;
2955
2956 tg3_writephy(tp, MII_ADVERTISE, 0);
2957 tg3_writephy(tp, MII_BMCR,
2958 BMCR_ANENABLE | BMCR_ANRESTART);
2959
2960 tg3_writephy(tp, MII_TG3_FET_TEST,
2961 phytest | MII_TG3_FET_SHADOW_EN);
2962 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2963 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2964 tg3_writephy(tp,
2965 MII_TG3_FET_SHDW_AUXMODE4,
2966 phy);
2967 }
2968 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2969 }
2970 return;
2971 } else if (do_low_power) {
2972 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2973 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2974
2975 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2976 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2977 MII_TG3_AUXCTL_PCTL_VREG_11V;
2978 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2979 }
2980
2981 /* The PHY should not be powered down on some chips because
2982 * of bugs.
2983 */
2984 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2985 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2986 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2987 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2988 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2989 !tp->pci_fn))
2990 return;
2991
2992 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2993 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2994 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2995 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2996 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2997 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2998 }
2999
3000 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3001 }
3002
3003 /* tp->lock is held. */
3004 static int tg3_nvram_lock(struct tg3 *tp)
3005 {
3006 if (tg3_flag(tp, NVRAM)) {
3007 int i;
3008
3009 if (tp->nvram_lock_cnt == 0) {
3010 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3011 for (i = 0; i < 8000; i++) {
3012 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3013 break;
3014 udelay(20);
3015 }
3016 if (i == 8000) {
3017 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3018 return -ENODEV;
3019 }
3020 }
3021 tp->nvram_lock_cnt++;
3022 }
3023 return 0;
3024 }
3025
3026 /* tp->lock is held. */
3027 static void tg3_nvram_unlock(struct tg3 *tp)
3028 {
3029 if (tg3_flag(tp, NVRAM)) {
3030 if (tp->nvram_lock_cnt > 0)
3031 tp->nvram_lock_cnt--;
3032 if (tp->nvram_lock_cnt == 0)
3033 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3034 }
3035 }
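/* Typical pairing (a usage sketch mirroring tg3_nvram_read() below):
 *
 *	ret = tg3_nvram_lock(tp);
 *	if (ret)
 *		return ret;
 *	tg3_enable_nvram_access(tp);
 *	... issue NVRAM commands ...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 *
 * nvram_lock_cnt makes the lock recursive, so only the outermost
 * unlock releases the hardware arbiter via SWARB_REQ_CLR1.
 */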
3036
3037 /* tp->lock is held. */
3038 static void tg3_enable_nvram_access(struct tg3 *tp)
3039 {
3040 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3041 u32 nvaccess = tr32(NVRAM_ACCESS);
3042
3043 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3044 }
3045 }
3046
3047 /* tp->lock is held. */
3048 static void tg3_disable_nvram_access(struct tg3 *tp)
3049 {
3050 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3051 u32 nvaccess = tr32(NVRAM_ACCESS);
3052
3053 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3054 }
3055 }
3056
3057 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3058 u32 offset, u32 *val)
3059 {
3060 u32 tmp;
3061 int i;
3062
3063 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3064 return -EINVAL;
3065
3066 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3067 EEPROM_ADDR_DEVID_MASK |
3068 EEPROM_ADDR_READ);
3069 tw32(GRC_EEPROM_ADDR,
3070 tmp |
3071 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3072 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3073 EEPROM_ADDR_ADDR_MASK) |
3074 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3075
3076 for (i = 0; i < 1000; i++) {
3077 tmp = tr32(GRC_EEPROM_ADDR);
3078
3079 if (tmp & EEPROM_ADDR_COMPLETE)
3080 break;
3081 msleep(1);
3082 }
3083 if (!(tmp & EEPROM_ADDR_COMPLETE))
3084 return -EBUSY;
3085
3086 tmp = tr32(GRC_EEPROM_DATA);
3087
3088 /*
3089 * The data will always be opposite the native endian
3090 * format. Perform a blind byteswap to compensate.
3091 */
3092 *val = swab32(tmp);
3093
3094 return 0;
3095 }
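/* Worked example (informational): swab32() reverses the byte order,
 * so a raw read of tmp == 0x11223344 stores *val == 0x44332211.
 * Since the EEPROM data is always presented opposite the native
 * endianness, the same blind swap is correct on both little- and
 * big-endian hosts.
 */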
3096
3097 #define NVRAM_CMD_TIMEOUT 10000
3098
3099 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3100 {
3101 int i;
3102
3103 tw32(NVRAM_CMD, nvram_cmd);
3104 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3105 udelay(10);
3106 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3107 udelay(10);
3108 break;
3109 }
3110 }
3111
3112 if (i == NVRAM_CMD_TIMEOUT)
3113 return -EBUSY;
3114
3115 return 0;
3116 }
3117
3118 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3119 {
3120 if (tg3_flag(tp, NVRAM) &&
3121 tg3_flag(tp, NVRAM_BUFFERED) &&
3122 tg3_flag(tp, FLASH) &&
3123 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3124 (tp->nvram_jedecnum == JEDEC_ATMEL))
3125
3126 addr = ((addr / tp->nvram_pagesize) <<
3127 ATMEL_AT45DB0X1B_PAGE_POS) +
3128 (addr % tp->nvram_pagesize);
3129
3130 return addr;
3131 }
3132
3133 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3134 {
3135 if (tg3_flag(tp, NVRAM) &&
3136 tg3_flag(tp, NVRAM_BUFFERED) &&
3137 tg3_flag(tp, FLASH) &&
3138 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3139 (tp->nvram_jedecnum == JEDEC_ATMEL))
3140
3141 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3142 tp->nvram_pagesize) +
3143 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3144
3145 return addr;
3146 }
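/* Worked example for the two translations above (informational),
 * assuming the AT45DB0x1B's 264-byte pages with the page index
 * placed at ATMEL_AT45DB0X1B_PAGE_POS == 9 (constants quoted from
 * tg3.h purely for illustration):
 *
 *   linear 1000 = page 3, offset 208 -> phys (3 << 9) + 208 = 1744
 *   phys   1744 -> page 3 * 264 + 208 = linear 1000 again
 *
 * i.e. tg3_nvram_logical_addr() inverts tg3_nvram_phys_addr().
 */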
3147
3148 /* NOTE: Data read in from NVRAM is byteswapped according to
3149 * the byteswapping settings for all other register accesses.
3150 * tg3 devices are BE devices, so on a BE machine, the data
3151 * returned will be exactly as it is seen in NVRAM. On a LE
3152 * machine, the 32-bit value will be byteswapped.
3153 */
3154 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3155 {
3156 int ret;
3157
3158 if (!tg3_flag(tp, NVRAM))
3159 return tg3_nvram_read_using_eeprom(tp, offset, val);
3160
3161 offset = tg3_nvram_phys_addr(tp, offset);
3162
3163 if (offset > NVRAM_ADDR_MSK)
3164 return -EINVAL;
3165
3166 ret = tg3_nvram_lock(tp);
3167 if (ret)
3168 return ret;
3169
3170 tg3_enable_nvram_access(tp);
3171
3172 tw32(NVRAM_ADDR, offset);
3173 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3174 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3175
3176 if (ret == 0)
3177 *val = tr32(NVRAM_RDDATA);
3178
3179 tg3_disable_nvram_access(tp);
3180
3181 tg3_nvram_unlock(tp);
3182
3183 return ret;
3184 }
3185
3186 /* Ensures NVRAM data is in bytestream format. */
3187 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3188 {
3189 u32 v;
3190 int res = tg3_nvram_read(tp, offset, &v);
3191 if (!res)
3192 *val = cpu_to_be32(v);
3193 return res;
3194 }
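/* Usage sketch (informational): callers that want NVRAM contents as
 * a plain byte stream read through the helper above, e.g.
 *
 *	__be32 buf[4];
 *	for (i = 0; i < 4; i++)
 *		if (tg3_nvram_read_be32(tp, offset + 4 * i, &buf[i]))
 *			break;
 *
 * which leaves the bytes in buf[] exactly as they sit in the flash,
 * independent of host endianness.
 */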
3195
3196 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3197 u32 offset, u32 len, u8 *buf)
3198 {
3199 int i, j, rc = 0;
3200 u32 val;
3201
3202 for (i = 0; i < len; i += 4) {
3203 u32 addr;
3204 __be32 data;
3205
3206 addr = offset + i;
3207
3208 memcpy(&data, buf + i, 4);
3209
3210 /*
3211 * The SEEPROM interface expects the data to always be opposite
3212 * the native endian format. We accomplish this by reversing
3213 * all the operations that would have been performed on the
3214 * data from a call to tg3_nvram_read_be32().
3215 */
3216 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3217
3218 val = tr32(GRC_EEPROM_ADDR);
3219 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3220
3221 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3222 EEPROM_ADDR_READ);
3223 tw32(GRC_EEPROM_ADDR, val |
3224 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3225 (addr & EEPROM_ADDR_ADDR_MASK) |
3226 EEPROM_ADDR_START |
3227 EEPROM_ADDR_WRITE);
3228
3229 for (j = 0; j < 1000; j++) {
3230 val = tr32(GRC_EEPROM_ADDR);
3231
3232 if (val & EEPROM_ADDR_COMPLETE)
3233 break;
3234 msleep(1);
3235 }
3236 if (!(val & EEPROM_ADDR_COMPLETE)) {
3237 rc = -EBUSY;
3238 break;
3239 }
3240 }
3241
3242 return rc;
3243 }
3244
3245 /* offset and length are dword aligned */
3246 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3247 u8 *buf)
3248 {
3249 int ret = 0;
3250 u32 pagesize = tp->nvram_pagesize;
3251 u32 pagemask = pagesize - 1;
3252 u32 nvram_cmd;
3253 u8 *tmp;
3254
3255 tmp = kmalloc(pagesize, GFP_KERNEL);
3256 if (tmp == NULL)
3257 return -ENOMEM;
3258
3259 while (len) {
3260 int j;
3261 u32 phy_addr, page_off, size;
3262
3263 phy_addr = offset & ~pagemask;
3264
3265 for (j = 0; j < pagesize; j += 4) {
3266 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3267 (__be32 *) (tmp + j));
3268 if (ret)
3269 break;
3270 }
3271 if (ret)
3272 break;
3273
3274 page_off = offset & pagemask;
3275 size = pagesize;
3276 if (len < size)
3277 size = len;
3278
3279 len -= size;
3280
3281 memcpy(tmp + page_off, buf, size);
3282
3283 offset = offset + (pagesize - page_off);
3284
3285 tg3_enable_nvram_access(tp);
3286
3287 /*
3288 * Before we can erase the flash page, we need
3289 * to issue a special "write enable" command.
3290 */
3291 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3292
3293 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3294 break;
3295
3296 /* Erase the target page */
3297 tw32(NVRAM_ADDR, phy_addr);
3298
3299 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3301
3302 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3303 break;
3304
3305 /* Issue another write enable to start the write. */
3306 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3307
3308 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3309 break;
3310
3311 for (j = 0; j < pagesize; j += 4) {
3312 __be32 data;
3313
3314 data = *((__be32 *) (tmp + j));
3315
3316 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3317
3318 tw32(NVRAM_ADDR, phy_addr + j);
3319
3320 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3321 NVRAM_CMD_WR;
3322
3323 if (j == 0)
3324 nvram_cmd |= NVRAM_CMD_FIRST;
3325 else if (j == (pagesize - 4))
3326 nvram_cmd |= NVRAM_CMD_LAST;
3327
3328 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3329 if (ret)
3330 break;
3331 }
3332 if (ret)
3333 break;
3334 }
3335
3336 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3337 tg3_nvram_exec_cmd(tp, nvram_cmd);
3338
3339 kfree(tmp);
3340
3341 return ret;
3342 }
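/* Summary of the unbuffered path above (informational): flash parts
 * without a write buffer are updated one full page at a time, so each
 * loop iteration is a read-modify-write cycle:
 *
 *   1. read the whole target page into tmp[] with tg3_nvram_read_be32()
 *   2. memcpy() the caller's data over the affected span
 *   3. NVRAM_CMD_WREN, then erase the page (NVRAM_CMD_ERASE)
 *   4. NVRAM_CMD_WREN again, then program word by word, tagging the
 *      first word NVRAM_CMD_FIRST and the last NVRAM_CMD_LAST
 *
 * A final NVRAM_CMD_WRDI drops write permission when the loop exits.
 */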
3343
3344 /* offset and length are dword aligned */
3345 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3346 u8 *buf)
3347 {
3348 int i, ret = 0;
3349
3350 for (i = 0; i < len; i += 4, offset += 4) {
3351 u32 page_off, phy_addr, nvram_cmd;
3352 __be32 data;
3353
3354 memcpy(&data, buf + i, 4);
3355 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3356
3357 page_off = offset % tp->nvram_pagesize;
3358
3359 phy_addr = tg3_nvram_phys_addr(tp, offset);
3360
3361 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3362
3363 if (page_off == 0 || i == 0)
3364 nvram_cmd |= NVRAM_CMD_FIRST;
3365 if (page_off == (tp->nvram_pagesize - 4))
3366 nvram_cmd |= NVRAM_CMD_LAST;
3367
3368 if (i == (len - 4))
3369 nvram_cmd |= NVRAM_CMD_LAST;
3370
3371 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3372 !tg3_flag(tp, FLASH) ||
3373 !tg3_flag(tp, 57765_PLUS))
3374 tw32(NVRAM_ADDR, phy_addr);
3375
3376 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3377 !tg3_flag(tp, 5755_PLUS) &&
3378 (tp->nvram_jedecnum == JEDEC_ST) &&
3379 (nvram_cmd & NVRAM_CMD_FIRST)) {
3380 u32 cmd;
3381
3382 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3383 ret = tg3_nvram_exec_cmd(tp, cmd);
3384 if (ret)
3385 break;
3386 }
3387 if (!tg3_flag(tp, FLASH)) {
3388 /* We always do complete word writes to eeprom. */
3389 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3390 }
3391
3392 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3393 if (ret)
3394 break;
3395 }
3396 return ret;
3397 }
3398
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3401 {
3402 int ret;
3403
3404 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3405 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3406 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3407 udelay(40);
3408 }
3409
3410 if (!tg3_flag(tp, NVRAM)) {
3411 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3412 } else {
3413 u32 grc_mode;
3414
3415 ret = tg3_nvram_lock(tp);
3416 if (ret)
3417 return ret;
3418
3419 tg3_enable_nvram_access(tp);
3420 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3421 tw32(NVRAM_WRITE1, 0x406);
3422
3423 grc_mode = tr32(GRC_MODE);
3424 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3425
3426 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3427 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3428 buf);
3429 } else {
3430 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3431 buf);
3432 }
3433
3434 grc_mode = tr32(GRC_MODE);
3435 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3436
3437 tg3_disable_nvram_access(tp);
3438 tg3_nvram_unlock(tp);
3439 }
3440
3441 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3442 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3443 udelay(40);
3444 }
3445
3446 return ret;
3447 }
3448
3449 #define RX_CPU_SCRATCH_BASE 0x30000
3450 #define RX_CPU_SCRATCH_SIZE 0x04000
3451 #define TX_CPU_SCRATCH_BASE 0x34000
3452 #define TX_CPU_SCRATCH_SIZE 0x04000
3453
3454 /* tp->lock is held. */
3455 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3456 {
3457 int i;
3458 const int iters = 10000;
3459
3460 for (i = 0; i < iters; i++) {
3461 tw32(cpu_base + CPU_STATE, 0xffffffff);
3462 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3463 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3464 break;
3465 }
3466
3467 return (i == iters) ? -EBUSY : 0;
3468 }
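/* Note (informational): the loop above rewrites CPU_STATE and asserts
 * CPU_MODE_HALT on every iteration, succeeding once the HALT bit reads
 * back set.  tg3_rxcpu_pause() below adds one more forced halt plus a
 * 10us settle for the RX CPU; tg3_txcpu_pause() needs no such chaser.
 */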
3469
3470 /* tp->lock is held. */
3471 static int tg3_rxcpu_pause(struct tg3 *tp)
3472 {
3473 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3474
3475 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3476 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3477 udelay(10);
3478
3479 return rc;
3480 }
3481
3482 /* tp->lock is held. */
3483 static int tg3_txcpu_pause(struct tg3 *tp)
3484 {
3485 return tg3_pause_cpu(tp, TX_CPU_BASE);
3486 }
3487
3488 /* tp->lock is held. */
3489 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3490 {
3491 tw32(cpu_base + CPU_STATE, 0xffffffff);
3492 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3493 }
3494
3495 /* tp->lock is held. */
3496 static void tg3_rxcpu_resume(struct tg3 *tp)
3497 {
3498 tg3_resume_cpu(tp, RX_CPU_BASE);
3499 }
3500
3501 /* tp->lock is held. */
3502 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3503 {
3504 int rc;
3505
3506 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3507
3508 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3509 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3510
3511 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3512 return 0;
3513 }
3514 if (cpu_base == RX_CPU_BASE) {
3515 rc = tg3_rxcpu_pause(tp);
3516 } else {
3517 /*
3518 * There is only an Rx CPU for the 5750 derivative in the
3519 * BCM4785.
3520 */
3521 if (tg3_flag(tp, IS_SSB_CORE))
3522 return 0;
3523
3524 rc = tg3_txcpu_pause(tp);
3525 }
3526
3527 if (rc) {
3528 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3529 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3530 return -ENODEV;
3531 }
3532
3533 /* Clear firmware's nvram arbitration. */
3534 if (tg3_flag(tp, NVRAM))
3535 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3536 return 0;
3537 }
3538
3539 struct fw_info {
3540 unsigned int fw_base;
3541 unsigned int fw_len;
3542 const __be32 *fw_data;
3543 };
3544
3545 /* tp->lock is held. */
3546 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3547 u32 cpu_scratch_base, int cpu_scratch_size,
3548 struct fw_info *info)
3549 {
3550 int err, lock_err, i;
3551 void (*write_op)(struct tg3 *, u32, u32);
3552
3553 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3554 netdev_err(tp->dev,
3555 "%s: Trying to load TX cpu firmware which is 5705\n",
3556 __func__);
3557 return -EINVAL;
3558 }
3559
3560 if (tg3_flag(tp, 5705_PLUS))
3561 write_op = tg3_write_mem;
3562 else
3563 write_op = tg3_write_indirect_reg32;
3564
3565 /* It is possible that bootcode is still loading at this point.
3566 * Get the nvram lock first before halting the cpu.
3567 */
3568 lock_err = tg3_nvram_lock(tp);
3569 err = tg3_halt_cpu(tp, cpu_base);
3570 if (!lock_err)
3571 tg3_nvram_unlock(tp);
3572 if (err)
3573 goto out;
3574
3575 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3576 write_op(tp, cpu_scratch_base + i, 0);
3577 tw32(cpu_base + CPU_STATE, 0xffffffff);
3578 tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3579 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3580 write_op(tp, (cpu_scratch_base +
3581 (info->fw_base & 0xffff) +
3582 (i * sizeof(u32))),
3583 be32_to_cpu(info->fw_data[i]));
3584
3585 err = 0;
3586
3587 out:
3588 return err;
3589 }
3590
3591 /* tp->lock is held. */
3592 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3593 {
3594 int i;
3595 const int iters = 5;
3596
3597 tw32(cpu_base + CPU_STATE, 0xffffffff);
3598 tw32_f(cpu_base + CPU_PC, pc);
3599
3600 for (i = 0; i < iters; i++) {
3601 if (tr32(cpu_base + CPU_PC) == pc)
3602 break;
3603 tw32(cpu_base + CPU_STATE, 0xffffffff);
3604 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3605 tw32_f(cpu_base + CPU_PC, pc);
3606 udelay(1000);
3607 }
3608
3609 return (i == iters) ? -EBUSY : 0;
3610 }
3611
3612 /* tp->lock is held. */
3613 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3614 {
3615 struct fw_info info;
3616 const __be32 *fw_data;
3617 int err;
3618
3619 fw_data = (void *)tp->fw->data;
3620
3621 /* Firmware blob starts with version numbers, followed by
3622  * start address and length. We are setting complete length.
3623  * length = end_address_of_bss - start_address_of_text.
3624  * Remainder is the blob to be loaded contiguously
3625  * from start address. */
3626
3627 info.fw_base = be32_to_cpu(fw_data[1]);
3628 info.fw_len = tp->fw->size - 12;
3629 info.fw_data = &fw_data[3];
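	/* Header layout implied by the comment above (informational):
	 *
	 *   fw_data[0] : firmware version
	 *   fw_data[1] : load/start address   (-> info.fw_base)
	 *   fw_data[2] : advertised length    (recomputed instead)
	 *   fw_data[3] : first payload word   (-> info.fw_data)
	 *
	 * hence fw_len = tp->fw->size - 12, the total size minus the
	 * three 32-bit header words.
	 */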
3630
3631 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3632 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3633 &info);
3634 if (err)
3635 return err;
3636
3637 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3638 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3639 &info);
3640 if (err)
3641 return err;
3642
3643 /* Now start up only the RX cpu. */
3644 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, info.fw_base);
3645 if (err) {
3646 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3647 "should be %08x\n", __func__,
3648 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3649 return -ENODEV;
3650 }
3651
3652 tg3_rxcpu_resume(tp);
3653
3654 return 0;
3655 }
3656
3657 /* tp->lock is held. */
3658 static int tg3_load_tso_firmware(struct tg3 *tp)
3659 {
3660 struct fw_info info;
3661 const __be32 *fw_data;
3662 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3663 int err;
3664
3665 if (!tg3_flag(tp, FW_TSO))
3666 return 0;
3667
3668 fw_data = (void *)tp->fw->data;
3669
3670 /* Firmware blob starts with version numbers, followed by
3671  * start address and length. We are setting complete length.
3672  * length = end_address_of_bss - start_address_of_text.
3673  * Remainder is the blob to be loaded contiguously
3674  * from start address. */
3675
3676 info.fw_base = be32_to_cpu(fw_data[1]);
3677 cpu_scratch_size = tp->fw_len;
3678 info.fw_len = tp->fw->size - 12;
3679 info.fw_data = &fw_data[3];
3680
3681 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3682 cpu_base = RX_CPU_BASE;
3683 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3684 } else {
3685 cpu_base = TX_CPU_BASE;
3686 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3687 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3688 }
3689
3690 err = tg3_load_firmware_cpu(tp, cpu_base,
3691 cpu_scratch_base, cpu_scratch_size,
3692 &info);
3693 if (err)
3694 return err;
3695
3696 /* Now start up the cpu. */
3697 err = tg3_pause_cpu_and_set_pc(tp, cpu_base, info.fw_base);
3698 if (err) {
3699 netdev_err(tp->dev,
3700 "%s fails to set CPU PC, is %08x should be %08x\n",
3701 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3702 return -ENODEV;
3703 }
3704
3705 tg3_resume_cpu(tp, cpu_base);
3706 return 0;
3707 }
3708
3709
3710 /* tp->lock is held. */
3711 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3712 {
3713 u32 addr_high, addr_low;
3714 int i;
3715
3716 addr_high = ((tp->dev->dev_addr[0] << 8) |
3717 tp->dev->dev_addr[1]);
3718 addr_low = ((tp->dev->dev_addr[2] << 24) |
3719 (tp->dev->dev_addr[3] << 16) |
3720 (tp->dev->dev_addr[4] << 8) |
3721 (tp->dev->dev_addr[5] << 0));
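	/* Worked example (informational, sample address only): for a
	 * dev_addr of 00:10:18:aa:bb:cc the registers receive
	 *
	 *   addr_high = 0x00000010  (bytes 0-1)
	 *   addr_low  = 0x18aabbcc  (bytes 2-5)
	 *
	 * so the address reads left to right across the HIGH:LOW pair.
	 */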
3722 for (i = 0; i < 4; i++) {
3723 if (i == 1 && skip_mac_1)
3724 continue;
3725 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3726 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3727 }
3728
3729 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3730 tg3_asic_rev(tp) == ASIC_REV_5704) {
3731 for (i = 0; i < 12; i++) {
3732 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3733 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3734 }
3735 }
3736
3737 addr_high = (tp->dev->dev_addr[0] +
3738 tp->dev->dev_addr[1] +
3739 tp->dev->dev_addr[2] +
3740 tp->dev->dev_addr[3] +
3741 tp->dev->dev_addr[4] +
3742 tp->dev->dev_addr[5]) &
3743 TX_BACKOFF_SEED_MASK;
3744 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3745 }
3746
3747 static void tg3_enable_register_access(struct tg3 *tp)
3748 {
3749 /*
3750 * Make sure register accesses (indirect or otherwise) will function
3751 * correctly.
3752 */
3753 pci_write_config_dword(tp->pdev,
3754 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3755 }
3756
3757 static int tg3_power_up(struct tg3 *tp)
3758 {
3759 int err;
3760
3761 tg3_enable_register_access(tp);
3762
3763 err = pci_set_power_state(tp->pdev, PCI_D0);
3764 if (!err) {
3765 /* Switch out of Vaux if it is a NIC */
3766 tg3_pwrsrc_switch_to_vmain(tp);
3767 } else {
3768 netdev_err(tp->dev, "Transition to D0 failed\n");
3769 }
3770
3771 return err;
3772 }
3773
3774 static int tg3_setup_phy(struct tg3 *, int);
3775
3776 static int tg3_power_down_prepare(struct tg3 *tp)
3777 {
3778 u32 misc_host_ctrl;
3779 bool device_should_wake, do_low_power;
3780
3781 tg3_enable_register_access(tp);
3782
3783 /* Restore the CLKREQ setting. */
3784 if (tg3_flag(tp, CLKREQ_BUG))
3785 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3786 PCI_EXP_LNKCTL_CLKREQ_EN);
3787
3788 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3789 tw32(TG3PCI_MISC_HOST_CTRL,
3790 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3791
3792 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3793 tg3_flag(tp, WOL_ENABLE);
3794
3795 if (tg3_flag(tp, USE_PHYLIB)) {
3796 do_low_power = false;
3797 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3798 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3799 struct phy_device *phydev;
3800 u32 phyid, advertising;
3801
3802 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3803
3804 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3805
3806 tp->link_config.speed = phydev->speed;
3807 tp->link_config.duplex = phydev->duplex;
3808 tp->link_config.autoneg = phydev->autoneg;
3809 tp->link_config.advertising = phydev->advertising;
3810
3811 advertising = ADVERTISED_TP |
3812 ADVERTISED_Pause |
3813 ADVERTISED_Autoneg |
3814 ADVERTISED_10baseT_Half;
3815
3816 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3817 if (tg3_flag(tp, WOL_SPEED_100MB))
3818 advertising |=
3819 ADVERTISED_100baseT_Half |
3820 ADVERTISED_100baseT_Full |
3821 ADVERTISED_10baseT_Full;
3822 else
3823 advertising |= ADVERTISED_10baseT_Full;
3824 }
3825
3826 phydev->advertising = advertising;
3827
3828 phy_start_aneg(phydev);
3829
3830 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3831 if (phyid != PHY_ID_BCMAC131) {
3832 phyid &= PHY_BCM_OUI_MASK;
3833 if (phyid == PHY_BCM_OUI_1 ||
3834 phyid == PHY_BCM_OUI_2 ||
3835 phyid == PHY_BCM_OUI_3)
3836 do_low_power = true;
3837 }
3838 }
3839 } else {
3840 do_low_power = true;
3841
3842 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3843 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3844
3845 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3846 tg3_setup_phy(tp, 0);
3847 }
3848
3849 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3850 u32 val;
3851
3852 val = tr32(GRC_VCPU_EXT_CTRL);
3853 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3854 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3855 int i;
3856 u32 val;
3857
3858 for (i = 0; i < 200; i++) {
3859 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3860 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3861 break;
3862 msleep(1);
3863 }
3864 }
3865 if (tg3_flag(tp, WOL_CAP))
3866 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3867 WOL_DRV_STATE_SHUTDOWN |
3868 WOL_DRV_WOL |
3869 WOL_SET_MAGIC_PKT);
3870
3871 if (device_should_wake) {
3872 u32 mac_mode;
3873
3874 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3875 if (do_low_power &&
3876 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3877 tg3_phy_auxctl_write(tp,
3878 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3879 MII_TG3_AUXCTL_PCTL_WOL_EN |
3880 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3881 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3882 udelay(40);
3883 }
3884
3885 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3886 mac_mode = MAC_MODE_PORT_MODE_GMII;
3887 else
3888 mac_mode = MAC_MODE_PORT_MODE_MII;
3889
3890 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3891 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3892 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3893 SPEED_100 : SPEED_10;
3894 if (tg3_5700_link_polarity(tp, speed))
3895 mac_mode |= MAC_MODE_LINK_POLARITY;
3896 else
3897 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3898 }
3899 } else {
3900 mac_mode = MAC_MODE_PORT_MODE_TBI;
3901 }
3902
3903 if (!tg3_flag(tp, 5750_PLUS))
3904 tw32(MAC_LED_CTRL, tp->led_ctrl);
3905
3906 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3907 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3908 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3909 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3910
3911 if (tg3_flag(tp, ENABLE_APE))
3912 mac_mode |= MAC_MODE_APE_TX_EN |
3913 MAC_MODE_APE_RX_EN |
3914 MAC_MODE_TDE_ENABLE;
3915
3916 tw32_f(MAC_MODE, mac_mode);
3917 udelay(100);
3918
3919 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3920 udelay(10);
3921 }
3922
3923 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3924 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3925 tg3_asic_rev(tp) == ASIC_REV_5701)) {
3926 u32 base_val;
3927
3928 base_val = tp->pci_clock_ctrl;
3929 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3930 CLOCK_CTRL_TXCLK_DISABLE);
3931
3932 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3933 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3934 } else if (tg3_flag(tp, 5780_CLASS) ||
3935 tg3_flag(tp, CPMU_PRESENT) ||
3936 tg3_asic_rev(tp) == ASIC_REV_5906) {
3937 /* do nothing */
3938 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3939 u32 newbits1, newbits2;
3940
3941 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3942 tg3_asic_rev(tp) == ASIC_REV_5701) {
3943 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3944 CLOCK_CTRL_TXCLK_DISABLE |
3945 CLOCK_CTRL_ALTCLK);
3946 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3947 } else if (tg3_flag(tp, 5705_PLUS)) {
3948 newbits1 = CLOCK_CTRL_625_CORE;
3949 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3950 } else {
3951 newbits1 = CLOCK_CTRL_ALTCLK;
3952 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3953 }
3954
3955 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3956 40);
3957
3958 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3959 40);
3960
3961 if (!tg3_flag(tp, 5705_PLUS)) {
3962 u32 newbits3;
3963
3964 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3965 tg3_asic_rev(tp) == ASIC_REV_5701) {
3966 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3967 CLOCK_CTRL_TXCLK_DISABLE |
3968 CLOCK_CTRL_44MHZ_CORE);
3969 } else {
3970 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3971 }
3972
3973 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3974 tp->pci_clock_ctrl | newbits3, 40);
3975 }
3976 }
3977
3978 	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3979 tg3_power_down_phy(tp, do_low_power);
3980
3981 tg3_frob_aux_power(tp, true);
3982
3983 /* Workaround for unstable PLL clock */
3984 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
3985 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
3986 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
3987 u32 val = tr32(0x7d00);
3988
3989 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3990 tw32(0x7d00, val);
3991 if (!tg3_flag(tp, ENABLE_ASF)) {
3992 int err;
3993
3994 err = tg3_nvram_lock(tp);
3995 tg3_halt_cpu(tp, RX_CPU_BASE);
3996 if (!err)
3997 tg3_nvram_unlock(tp);
3998 }
3999 }
4000
4001 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4002
4003 return 0;
4004 }
4005
4006 static void tg3_power_down(struct tg3 *tp)
4007 {
4008 tg3_power_down_prepare(tp);
4009
4010 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4011 pci_set_power_state(tp->pdev, PCI_D3hot);
4012 }
4013
4014 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4015 {
4016 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4017 case MII_TG3_AUX_STAT_10HALF:
4018 *speed = SPEED_10;
4019 *duplex = DUPLEX_HALF;
4020 break;
4021
4022 case MII_TG3_AUX_STAT_10FULL:
4023 *speed = SPEED_10;
4024 *duplex = DUPLEX_FULL;
4025 break;
4026
4027 case MII_TG3_AUX_STAT_100HALF:
4028 *speed = SPEED_100;
4029 *duplex = DUPLEX_HALF;
4030 break;
4031
4032 case MII_TG3_AUX_STAT_100FULL:
4033 *speed = SPEED_100;
4034 *duplex = DUPLEX_FULL;
4035 break;
4036
4037 case MII_TG3_AUX_STAT_1000HALF:
4038 *speed = SPEED_1000;
4039 *duplex = DUPLEX_HALF;
4040 break;
4041
4042 case MII_TG3_AUX_STAT_1000FULL:
4043 *speed = SPEED_1000;
4044 *duplex = DUPLEX_FULL;
4045 break;
4046
4047 default:
4048 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4049 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4050 SPEED_10;
4051 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4052 DUPLEX_HALF;
4053 break;
4054 }
4055 *speed = SPEED_UNKNOWN;
4056 *duplex = DUPLEX_UNKNOWN;
4057 break;
4058 }
4059 }
4060
4061 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4062 {
4063 int err = 0;
4064 u32 val, new_adv;
4065
4066 new_adv = ADVERTISE_CSMA;
4067 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4068 new_adv |= mii_advertise_flowctrl(flowctrl);
4069
4070 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4071 if (err)
4072 goto done;
4073
4074 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4075 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4076
4077 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4078 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4079 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4080
4081 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4082 if (err)
4083 goto done;
4084 }
4085
4086 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4087 goto done;
4088
4089 tw32(TG3_CPMU_EEE_MODE,
4090 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4091
4092 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4093 if (!err) {
4094 		int err2;
4095
4096 val = 0;
4097 /* Advertise 100-BaseTX EEE ability */
4098 if (advertise & ADVERTISED_100baseT_Full)
4099 val |= MDIO_AN_EEE_ADV_100TX;
4100 /* Advertise 1000-BaseT EEE ability */
4101 if (advertise & ADVERTISED_1000baseT_Full)
4102 val |= MDIO_AN_EEE_ADV_1000T;
4103 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4104 if (err)
4105 val = 0;
4106
4107 switch (tg3_asic_rev(tp)) {
4108 case ASIC_REV_5717:
4109 case ASIC_REV_57765:
4110 case ASIC_REV_57766:
4111 case ASIC_REV_5719:
4112 			/* If we advertised any EEE abilities above... */
4113 if (val)
4114 val = MII_TG3_DSP_TAP26_ALNOKO |
4115 MII_TG3_DSP_TAP26_RMRXSTO |
4116 MII_TG3_DSP_TAP26_OPCSINPT;
4117 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4118 /* Fall through */
4119 case ASIC_REV_5720:
4120 case ASIC_REV_5762:
4121 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4122 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4123 MII_TG3_DSP_CH34TP2_HIBW01);
4124 }
4125
4126 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4127 if (!err)
4128 err = err2;
4129 }
4130
4131 done:
4132 return err;
4133 }
4134
4135 static void tg3_phy_copper_begin(struct tg3 *tp)
4136 {
4137 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4138 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4139 u32 adv, fc;
4140
4141 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4142 adv = ADVERTISED_10baseT_Half |
4143 ADVERTISED_10baseT_Full;
4144 if (tg3_flag(tp, WOL_SPEED_100MB))
4145 adv |= ADVERTISED_100baseT_Half |
4146 ADVERTISED_100baseT_Full;
4147
4148 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4149 } else {
4150 adv = tp->link_config.advertising;
4151 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4152 adv &= ~(ADVERTISED_1000baseT_Half |
4153 ADVERTISED_1000baseT_Full);
4154
4155 fc = tp->link_config.flowctrl;
4156 }
4157
4158 tg3_phy_autoneg_cfg(tp, adv, fc);
4159
4160 tg3_writephy(tp, MII_BMCR,
4161 BMCR_ANENABLE | BMCR_ANRESTART);
4162 } else {
4163 int i;
4164 u32 bmcr, orig_bmcr;
4165
4166 tp->link_config.active_speed = tp->link_config.speed;
4167 tp->link_config.active_duplex = tp->link_config.duplex;
4168
4169 bmcr = 0;
4170 switch (tp->link_config.speed) {
4171 default:
4172 case SPEED_10:
4173 break;
4174
4175 case SPEED_100:
4176 bmcr |= BMCR_SPEED100;
4177 break;
4178
4179 case SPEED_1000:
4180 bmcr |= BMCR_SPEED1000;
4181 break;
4182 }
4183
4184 if (tp->link_config.duplex == DUPLEX_FULL)
4185 bmcr |= BMCR_FULLDPLX;
4186
4187 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4188 (bmcr != orig_bmcr)) {
4189 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4190 for (i = 0; i < 1500; i++) {
4191 u32 tmp;
4192
4193 udelay(10);
4194 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4195 tg3_readphy(tp, MII_BMSR, &tmp))
4196 continue;
4197 if (!(tmp & BMSR_LSTATUS)) {
4198 udelay(40);
4199 break;
4200 }
4201 }
4202 tg3_writephy(tp, MII_BMCR, bmcr);
4203 udelay(40);
4204 }
4205 }
4206 }
4207
4208 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4209 {
4210 int err;
4211
4212 	/* Turn off tap power management and set the
4213 	 * extended packet length bit. */
4214 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4215
4216 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4217 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4218 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4219 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4220 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4221
4222 udelay(40);
4223
4224 return err;
4225 }
4226
4227 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4228 {
4229 u32 advmsk, tgtadv, advertising;
4230
4231 advertising = tp->link_config.advertising;
4232 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4233
4234 advmsk = ADVERTISE_ALL;
4235 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4236 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4237 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4238 }
4239
4240 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4241 return false;
4242
4243 if ((*lcladv & advmsk) != tgtadv)
4244 return false;
4245
4246 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4247 u32 tg3_ctrl;
4248
4249 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4250
4251 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4252 return false;
4253
4254 if (tgtadv &&
4255 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4256 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4257 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4258 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4259 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4260 } else {
4261 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4262 }
4263
4264 if (tg3_ctrl != tgtadv)
4265 return false;
4266 }
4267
4268 return true;
4269 }
4270
4271 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4272 {
4273 u32 lpeth = 0;
4274
4275 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4276 u32 val;
4277
4278 if (tg3_readphy(tp, MII_STAT1000, &val))
4279 return false;
4280
4281 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4282 }
4283
4284 if (tg3_readphy(tp, MII_LPA, rmtadv))
4285 return false;
4286
4287 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4288 tp->link_config.rmt_adv = lpeth;
4289
4290 return true;
4291 }
4292
4293 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4294 {
4295 if (curr_link_up != tp->link_up) {
4296 if (curr_link_up) {
4297 tg3_carrier_on(tp);
4298 } else {
4299 tg3_carrier_off(tp);
4300 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4301 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4302 }
4303
4304 tg3_link_report(tp);
4305 return true;
4306 }
4307
4308 return false;
4309 }
4310
4311 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4312 {
4313 int current_link_up;
4314 u32 bmsr, val;
4315 u32 lcl_adv, rmt_adv;
4316 u16 current_speed;
4317 u8 current_duplex;
4318 int i, err;
4319
4320 tw32(MAC_EVENT, 0);
4321
4322 tw32_f(MAC_STATUS,
4323 (MAC_STATUS_SYNC_CHANGED |
4324 MAC_STATUS_CFG_CHANGED |
4325 MAC_STATUS_MI_COMPLETION |
4326 MAC_STATUS_LNKSTATE_CHANGED));
4327 udelay(40);
4328
4329 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4330 tw32_f(MAC_MI_MODE,
4331 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4332 udelay(80);
4333 }
4334
4335 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4336
4337 /* Some third-party PHYs need to be reset on link going
4338 * down.
4339 */
4340 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4341 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4342 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4343 tp->link_up) {
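		/* BMSR latches link-down events, so read it twice;
		 * the second read reflects the current link state.
		 */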
4344 tg3_readphy(tp, MII_BMSR, &bmsr);
4345 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4346 !(bmsr & BMSR_LSTATUS))
4347 force_reset = 1;
4348 }
4349 if (force_reset)
4350 tg3_phy_reset(tp);
4351
4352 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4353 tg3_readphy(tp, MII_BMSR, &bmsr);
4354 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4355 !tg3_flag(tp, INIT_COMPLETE))
4356 bmsr = 0;
4357
4358 if (!(bmsr & BMSR_LSTATUS)) {
4359 err = tg3_init_5401phy_dsp(tp);
4360 if (err)
4361 return err;
4362
4363 tg3_readphy(tp, MII_BMSR, &bmsr);
4364 for (i = 0; i < 1000; i++) {
4365 udelay(10);
4366 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4367 (bmsr & BMSR_LSTATUS)) {
4368 udelay(40);
4369 break;
4370 }
4371 }
4372
4373 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4374 TG3_PHY_REV_BCM5401_B0 &&
4375 !(bmsr & BMSR_LSTATUS) &&
4376 tp->link_config.active_speed == SPEED_1000) {
4377 err = tg3_phy_reset(tp);
4378 if (!err)
4379 err = tg3_init_5401phy_dsp(tp);
4380 if (err)
4381 return err;
4382 }
4383 }
4384 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4385 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4386 /* 5701 {A0,B0} CRC bug workaround */
4387 tg3_writephy(tp, 0x15, 0x0a75);
4388 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4389 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4390 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4391 }
4392
4393 /* Clear pending interrupts... */
4394 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4395 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4396
4397 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4398 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4399 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4400 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4401
4402 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4403 tg3_asic_rev(tp) == ASIC_REV_5701) {
4404 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4405 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4406 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4407 else
4408 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4409 }
4410
4411 current_link_up = 0;
4412 current_speed = SPEED_UNKNOWN;
4413 current_duplex = DUPLEX_UNKNOWN;
4414 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4415 tp->link_config.rmt_adv = 0;
4416
4417 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4418 err = tg3_phy_auxctl_read(tp,
4419 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4420 &val);
4421 if (!err && !(val & (1 << 10))) {
4422 tg3_phy_auxctl_write(tp,
4423 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4424 val | (1 << 10));
4425 goto relink;
4426 }
4427 }
4428
4429 bmsr = 0;
4430 for (i = 0; i < 100; i++) {
4431 tg3_readphy(tp, MII_BMSR, &bmsr);
4432 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4433 (bmsr & BMSR_LSTATUS))
4434 break;
4435 udelay(40);
4436 }
4437
4438 if (bmsr & BMSR_LSTATUS) {
4439 u32 aux_stat, bmcr;
4440
4441 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4442 for (i = 0; i < 2000; i++) {
4443 udelay(10);
4444 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4445 aux_stat)
4446 break;
4447 }
4448
4449 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4450 &current_speed,
4451 &current_duplex);
4452
4453 bmcr = 0;
4454 for (i = 0; i < 200; i++) {
4455 tg3_readphy(tp, MII_BMCR, &bmcr);
4456 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4457 continue;
4458 if (bmcr && bmcr != 0x7fff)
4459 break;
4460 udelay(10);
4461 }
4462
4463 lcl_adv = 0;
4464 rmt_adv = 0;
4465
4466 tp->link_config.active_speed = current_speed;
4467 tp->link_config.active_duplex = current_duplex;
4468
4469 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4470 if ((bmcr & BMCR_ANENABLE) &&
4471 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4472 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4473 current_link_up = 1;
4474 } else {
4475 if (!(bmcr & BMCR_ANENABLE) &&
4476 tp->link_config.speed == current_speed &&
4477 tp->link_config.duplex == current_duplex &&
4478 tp->link_config.flowctrl ==
4479 tp->link_config.active_flowctrl) {
4480 current_link_up = 1;
4481 }
4482 }
4483
4484 if (current_link_up == 1 &&
4485 tp->link_config.active_duplex == DUPLEX_FULL) {
4486 u32 reg, bit;
4487
4488 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4489 reg = MII_TG3_FET_GEN_STAT;
4490 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4491 } else {
4492 reg = MII_TG3_EXT_STAT;
4493 bit = MII_TG3_EXT_STAT_MDIX;
4494 }
4495
4496 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4497 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4498
4499 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4500 }
4501 }
4502
4503 relink:
4504 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4505 tg3_phy_copper_begin(tp);
4506
4507 if (tg3_flag(tp, ROBOSWITCH)) {
4508 current_link_up = 1;
4509 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4510 current_speed = SPEED_1000;
4511 current_duplex = DUPLEX_FULL;
4512 tp->link_config.active_speed = current_speed;
4513 tp->link_config.active_duplex = current_duplex;
4514 }
4515
4516 tg3_readphy(tp, MII_BMSR, &bmsr);
4517 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4518 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4519 current_link_up = 1;
4520 }
4521
4522 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4523 if (current_link_up == 1) {
4524 if (tp->link_config.active_speed == SPEED_100 ||
4525 tp->link_config.active_speed == SPEED_10)
4526 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4527 else
4528 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4529 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4530 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4531 else
4532 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4533
4534 /* In order for the 5750 core in BCM4785 chip to work properly
4535 * in RGMII mode, the Led Control Register must be set up.
4536 */
4537 if (tg3_flag(tp, RGMII_MODE)) {
4538 u32 led_ctrl = tr32(MAC_LED_CTRL);
4539 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4540
4541 if (tp->link_config.active_speed == SPEED_10)
4542 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4543 else if (tp->link_config.active_speed == SPEED_100)
4544 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4545 LED_CTRL_100MBPS_ON);
4546 else if (tp->link_config.active_speed == SPEED_1000)
4547 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4548 LED_CTRL_1000MBPS_ON);
4549
4550 tw32(MAC_LED_CTRL, led_ctrl);
4551 udelay(40);
4552 }
4553
4554 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4555 if (tp->link_config.active_duplex == DUPLEX_HALF)
4556 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4557
4558 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4559 if (current_link_up == 1 &&
4560 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4561 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4562 else
4563 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4564 }
4565
4566 /* ??? Without this setting Netgear GA302T PHY does not
4567 * ??? send/receive packets...
4568 */
4569 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4570 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4571 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4572 tw32_f(MAC_MI_MODE, tp->mi_mode);
4573 udelay(80);
4574 }
4575
4576 tw32_f(MAC_MODE, tp->mac_mode);
4577 udelay(40);
4578
4579 tg3_phy_eee_adjust(tp, current_link_up);
4580
4581 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4582 /* Polled via timer. */
4583 tw32_f(MAC_EVENT, 0);
4584 } else {
4585 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4586 }
4587 udelay(40);
4588
4589 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4590 current_link_up == 1 &&
4591 tp->link_config.active_speed == SPEED_1000 &&
4592 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4593 udelay(120);
4594 tw32_f(MAC_STATUS,
4595 (MAC_STATUS_SYNC_CHANGED |
4596 MAC_STATUS_CFG_CHANGED));
4597 udelay(40);
4598 tg3_write_mem(tp,
4599 NIC_SRAM_FIRMWARE_MBOX,
4600 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4601 }
4602
4603 /* Prevent send BD corruption. */
4604 if (tg3_flag(tp, CLKREQ_BUG)) {
4605 if (tp->link_config.active_speed == SPEED_100 ||
4606 tp->link_config.active_speed == SPEED_10)
4607 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4608 PCI_EXP_LNKCTL_CLKREQ_EN);
4609 else
4610 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4611 PCI_EXP_LNKCTL_CLKREQ_EN);
4612 }
4613
4614 tg3_test_and_report_link_chg(tp, current_link_up);
4615
4616 return 0;
4617 }
4618
4619 struct tg3_fiber_aneginfo {
4620 int state;
4621 #define ANEG_STATE_UNKNOWN 0
4622 #define ANEG_STATE_AN_ENABLE 1
4623 #define ANEG_STATE_RESTART_INIT 2
4624 #define ANEG_STATE_RESTART 3
4625 #define ANEG_STATE_DISABLE_LINK_OK 4
4626 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4627 #define ANEG_STATE_ABILITY_DETECT 6
4628 #define ANEG_STATE_ACK_DETECT_INIT 7
4629 #define ANEG_STATE_ACK_DETECT 8
4630 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4631 #define ANEG_STATE_COMPLETE_ACK 10
4632 #define ANEG_STATE_IDLE_DETECT_INIT 11
4633 #define ANEG_STATE_IDLE_DETECT 12
4634 #define ANEG_STATE_LINK_OK 13
4635 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4636 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4637
4638 u32 flags;
4639 #define MR_AN_ENABLE 0x00000001
4640 #define MR_RESTART_AN 0x00000002
4641 #define MR_AN_COMPLETE 0x00000004
4642 #define MR_PAGE_RX 0x00000008
4643 #define MR_NP_LOADED 0x00000010
4644 #define MR_TOGGLE_TX 0x00000020
4645 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4646 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4647 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4648 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4649 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4650 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4651 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4652 #define MR_TOGGLE_RX 0x00002000
4653 #define MR_NP_RX 0x00004000
4654
4655 #define MR_LINK_OK 0x80000000
4656
4657 unsigned long link_time, cur_time;
4658
4659 u32 ability_match_cfg;
4660 int ability_match_count;
4661
4662 char ability_match, idle_match, ack_match;
4663
4664 u32 txconfig, rxconfig;
4665 #define ANEG_CFG_NP 0x00000080
4666 #define ANEG_CFG_ACK 0x00000040
4667 #define ANEG_CFG_RF2 0x00000020
4668 #define ANEG_CFG_RF1 0x00000010
4669 #define ANEG_CFG_PS2 0x00000001
4670 #define ANEG_CFG_PS1 0x00008000
4671 #define ANEG_CFG_HD 0x00004000
4672 #define ANEG_CFG_FD 0x00002000
4673 #define ANEG_CFG_INVAL 0x00001f06
4674
4675 };
4676 #define ANEG_OK 0
4677 #define ANEG_DONE 1
4678 #define ANEG_TIMER_ENAB 2
4679 #define ANEG_FAILED -1
4680
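/* Settle time is counted in state-machine ticks; fiber_autoneg()
 * below advances one tick per ~1 us poll, so 10000 ticks is
 * roughly 10 ms.
 */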
4681 #define ANEG_STATE_SETTLE_TIME 10000
4682
4683 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4684 struct tg3_fiber_aneginfo *ap)
4685 {
4686 u16 flowctrl;
4687 unsigned long delta;
4688 u32 rx_cfg_reg;
4689 int ret;
4690
4691 if (ap->state == ANEG_STATE_UNKNOWN) {
4692 ap->rxconfig = 0;
4693 ap->link_time = 0;
4694 ap->cur_time = 0;
4695 ap->ability_match_cfg = 0;
4696 ap->ability_match_count = 0;
4697 ap->ability_match = 0;
4698 ap->idle_match = 0;
4699 ap->ack_match = 0;
4700 }
4701 ap->cur_time++;
4702
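	/* An ability match requires the same config word to be seen
	 * on more than one consecutive poll.
	 */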
4703 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4704 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4705
4706 if (rx_cfg_reg != ap->ability_match_cfg) {
4707 ap->ability_match_cfg = rx_cfg_reg;
4708 ap->ability_match = 0;
4709 ap->ability_match_count = 0;
4710 } else {
4711 if (++ap->ability_match_count > 1) {
4712 ap->ability_match = 1;
4713 ap->ability_match_cfg = rx_cfg_reg;
4714 }
4715 }
4716 if (rx_cfg_reg & ANEG_CFG_ACK)
4717 ap->ack_match = 1;
4718 else
4719 ap->ack_match = 0;
4720
4721 ap->idle_match = 0;
4722 } else {
4723 ap->idle_match = 1;
4724 ap->ability_match_cfg = 0;
4725 ap->ability_match_count = 0;
4726 ap->ability_match = 0;
4727 ap->ack_match = 0;
4728
4729 rx_cfg_reg = 0;
4730 }
4731
4732 ap->rxconfig = rx_cfg_reg;
4733 ret = ANEG_OK;
4734
4735 switch (ap->state) {
4736 case ANEG_STATE_UNKNOWN:
4737 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4738 ap->state = ANEG_STATE_AN_ENABLE;
4739
4740 /* fallthru */
4741 case ANEG_STATE_AN_ENABLE:
4742 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4743 if (ap->flags & MR_AN_ENABLE) {
4744 ap->link_time = 0;
4745 ap->cur_time = 0;
4746 ap->ability_match_cfg = 0;
4747 ap->ability_match_count = 0;
4748 ap->ability_match = 0;
4749 ap->idle_match = 0;
4750 ap->ack_match = 0;
4751
4752 ap->state = ANEG_STATE_RESTART_INIT;
4753 } else {
4754 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4755 }
4756 break;
4757
4758 case ANEG_STATE_RESTART_INIT:
4759 ap->link_time = ap->cur_time;
4760 ap->flags &= ~(MR_NP_LOADED);
4761 ap->txconfig = 0;
4762 tw32(MAC_TX_AUTO_NEG, 0);
4763 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4764 tw32_f(MAC_MODE, tp->mac_mode);
4765 udelay(40);
4766
4767 ret = ANEG_TIMER_ENAB;
4768 ap->state = ANEG_STATE_RESTART;
4769
4770 /* fallthru */
4771 case ANEG_STATE_RESTART:
4772 delta = ap->cur_time - ap->link_time;
4773 if (delta > ANEG_STATE_SETTLE_TIME)
4774 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4775 else
4776 ret = ANEG_TIMER_ENAB;
4777 break;
4778
4779 case ANEG_STATE_DISABLE_LINK_OK:
4780 ret = ANEG_DONE;
4781 break;
4782
4783 case ANEG_STATE_ABILITY_DETECT_INIT:
4784 ap->flags &= ~(MR_TOGGLE_TX);
4785 ap->txconfig = ANEG_CFG_FD;
4786 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4787 if (flowctrl & ADVERTISE_1000XPAUSE)
4788 ap->txconfig |= ANEG_CFG_PS1;
4789 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4790 ap->txconfig |= ANEG_CFG_PS2;
4791 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4792 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4793 tw32_f(MAC_MODE, tp->mac_mode);
4794 udelay(40);
4795
4796 ap->state = ANEG_STATE_ABILITY_DETECT;
4797 break;
4798
4799 case ANEG_STATE_ABILITY_DETECT:
4800 if (ap->ability_match != 0 && ap->rxconfig != 0)
4801 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4802 break;
4803
4804 case ANEG_STATE_ACK_DETECT_INIT:
4805 ap->txconfig |= ANEG_CFG_ACK;
4806 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4807 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4808 tw32_f(MAC_MODE, tp->mac_mode);
4809 udelay(40);
4810
4811 ap->state = ANEG_STATE_ACK_DETECT;
4812
4813 /* fallthru */
4814 case ANEG_STATE_ACK_DETECT:
4815 if (ap->ack_match != 0) {
4816 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4817 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4818 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4819 } else {
4820 ap->state = ANEG_STATE_AN_ENABLE;
4821 }
4822 } else if (ap->ability_match != 0 &&
4823 ap->rxconfig == 0) {
4824 ap->state = ANEG_STATE_AN_ENABLE;
4825 }
4826 break;
4827
4828 case ANEG_STATE_COMPLETE_ACK_INIT:
4829 if (ap->rxconfig & ANEG_CFG_INVAL) {
4830 ret = ANEG_FAILED;
4831 break;
4832 }
4833 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4834 MR_LP_ADV_HALF_DUPLEX |
4835 MR_LP_ADV_SYM_PAUSE |
4836 MR_LP_ADV_ASYM_PAUSE |
4837 MR_LP_ADV_REMOTE_FAULT1 |
4838 MR_LP_ADV_REMOTE_FAULT2 |
4839 MR_LP_ADV_NEXT_PAGE |
4840 MR_TOGGLE_RX |
4841 MR_NP_RX);
4842 if (ap->rxconfig & ANEG_CFG_FD)
4843 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4844 if (ap->rxconfig & ANEG_CFG_HD)
4845 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4846 if (ap->rxconfig & ANEG_CFG_PS1)
4847 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4848 if (ap->rxconfig & ANEG_CFG_PS2)
4849 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4850 if (ap->rxconfig & ANEG_CFG_RF1)
4851 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4852 if (ap->rxconfig & ANEG_CFG_RF2)
4853 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4854 if (ap->rxconfig & ANEG_CFG_NP)
4855 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4856
4857 ap->link_time = ap->cur_time;
4858
4859 ap->flags ^= (MR_TOGGLE_TX);
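		/* Bit 0x0008 of the received config word drives
		 * MR_TOGGLE_RX; no ANEG_CFG_* name is defined for it
		 * above.
		 */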
4860 if (ap->rxconfig & 0x0008)
4861 ap->flags |= MR_TOGGLE_RX;
4862 if (ap->rxconfig & ANEG_CFG_NP)
4863 ap->flags |= MR_NP_RX;
4864 ap->flags |= MR_PAGE_RX;
4865
4866 ap->state = ANEG_STATE_COMPLETE_ACK;
4867 ret = ANEG_TIMER_ENAB;
4868 break;
4869
4870 case ANEG_STATE_COMPLETE_ACK:
4871 if (ap->ability_match != 0 &&
4872 ap->rxconfig == 0) {
4873 ap->state = ANEG_STATE_AN_ENABLE;
4874 break;
4875 }
4876 delta = ap->cur_time - ap->link_time;
4877 if (delta > ANEG_STATE_SETTLE_TIME) {
4878 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4879 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4880 } else {
4881 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4882 !(ap->flags & MR_NP_RX)) {
4883 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4884 } else {
4885 ret = ANEG_FAILED;
4886 }
4887 }
4888 }
4889 break;
4890
4891 case ANEG_STATE_IDLE_DETECT_INIT:
4892 ap->link_time = ap->cur_time;
4893 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4894 tw32_f(MAC_MODE, tp->mac_mode);
4895 udelay(40);
4896
4897 ap->state = ANEG_STATE_IDLE_DETECT;
4898 ret = ANEG_TIMER_ENAB;
4899 break;
4900
4901 case ANEG_STATE_IDLE_DETECT:
4902 if (ap->ability_match != 0 &&
4903 ap->rxconfig == 0) {
4904 ap->state = ANEG_STATE_AN_ENABLE;
4905 break;
4906 }
4907 delta = ap->cur_time - ap->link_time;
4908 if (delta > ANEG_STATE_SETTLE_TIME) {
4909 /* XXX another gem from the Broadcom driver :( */
4910 ap->state = ANEG_STATE_LINK_OK;
4911 }
4912 break;
4913
4914 case ANEG_STATE_LINK_OK:
4915 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4916 ret = ANEG_DONE;
4917 break;
4918
4919 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4920 /* ??? unimplemented */
4921 break;
4922
4923 case ANEG_STATE_NEXT_PAGE_WAIT:
4924 /* ??? unimplemented */
4925 break;
4926
4927 default:
4928 ret = ANEG_FAILED;
4929 break;
4930 }
4931
4932 return ret;
4933 }
4934
4935 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4936 {
4937 int res = 0;
4938 struct tg3_fiber_aneginfo aninfo;
4939 int status = ANEG_FAILED;
4940 unsigned int tick;
4941 u32 tmp;
4942
4943 tw32_f(MAC_TX_AUTO_NEG, 0);
4944
4945 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4946 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4947 udelay(40);
4948
4949 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4950 udelay(40);
4951
4952 memset(&aninfo, 0, sizeof(aninfo));
4953 aninfo.flags |= MR_AN_ENABLE;
4954 aninfo.state = ANEG_STATE_UNKNOWN;
4955 aninfo.cur_time = 0;
4956 tick = 0;
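	/* Run the software autoneg state machine in ~1 us steps for
	 * at most ~195 ms, until it reports done or failed.
	 */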
4957 while (++tick < 195000) {
4958 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4959 if (status == ANEG_DONE || status == ANEG_FAILED)
4960 break;
4961
4962 udelay(1);
4963 }
4964
4965 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4966 tw32_f(MAC_MODE, tp->mac_mode);
4967 udelay(40);
4968
4969 *txflags = aninfo.txconfig;
4970 *rxflags = aninfo.flags;
4971
4972 if (status == ANEG_DONE &&
4973 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4974 MR_LP_ADV_FULL_DUPLEX)))
4975 res = 1;
4976
4977 return res;
4978 }
4979
4980 static void tg3_init_bcm8002(struct tg3 *tp)
4981 {
4982 u32 mac_status = tr32(MAC_STATUS);
4983 int i;
4984
4985 	/* Reset when initializing for the first time or when we have a link. */
4986 if (tg3_flag(tp, INIT_COMPLETE) &&
4987 !(mac_status & MAC_STATUS_PCS_SYNCED))
4988 return;
4989
4990 /* Set PLL lock range. */
4991 tg3_writephy(tp, 0x16, 0x8007);
4992
4993 /* SW reset */
4994 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4995
4996 /* Wait for reset to complete. */
4997 /* XXX schedule_timeout() ... */
4998 for (i = 0; i < 500; i++)
4999 udelay(10);
5000
5001 /* Config mode; select PMA/Ch 1 regs. */
5002 tg3_writephy(tp, 0x10, 0x8411);
5003
5004 /* Enable auto-lock and comdet, select txclk for tx. */
5005 tg3_writephy(tp, 0x11, 0x0a10);
5006
5007 tg3_writephy(tp, 0x18, 0x00a0);
5008 tg3_writephy(tp, 0x16, 0x41ff);
5009
5010 /* Assert and deassert POR. */
5011 tg3_writephy(tp, 0x13, 0x0400);
5012 udelay(40);
5013 tg3_writephy(tp, 0x13, 0x0000);
5014
5015 tg3_writephy(tp, 0x11, 0x0a50);
5016 udelay(40);
5017 tg3_writephy(tp, 0x11, 0x0a10);
5018
5019 /* Wait for signal to stabilize */
5020 /* XXX schedule_timeout() ... */
5021 for (i = 0; i < 15000; i++)
5022 udelay(10);
5023
5024 /* Deselect the channel register so we can read the PHYID
5025 * later.
5026 */
5027 tg3_writephy(tp, 0x10, 0x8011);
5028 }
5029
5030 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5031 {
5032 u16 flowctrl;
5033 u32 sg_dig_ctrl, sg_dig_status;
5034 u32 serdes_cfg, expected_sg_dig_ctrl;
5035 int workaround, port_a;
5036 int current_link_up;
5037
5038 serdes_cfg = 0;
5039 expected_sg_dig_ctrl = 0;
5040 workaround = 0;
5041 port_a = 1;
5042 current_link_up = 0;
5043
5044 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5045 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5046 workaround = 1;
5047 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5048 port_a = 0;
5049
5050 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5051 /* preserve bits 20-23 for voltage regulator */
5052 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5053 }
5054
5055 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5056
5057 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5058 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5059 if (workaround) {
5060 u32 val = serdes_cfg;
5061
5062 if (port_a)
5063 val |= 0xc010000;
5064 else
5065 val |= 0x4010000;
5066 tw32_f(MAC_SERDES_CFG, val);
5067 }
5068
5069 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5070 }
5071 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5072 tg3_setup_flow_control(tp, 0, 0);
5073 current_link_up = 1;
5074 }
5075 goto out;
5076 }
5077
5078 /* Want auto-negotiation. */
5079 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5080
5081 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5082 if (flowctrl & ADVERTISE_1000XPAUSE)
5083 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5084 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5085 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5086
5087 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5088 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5089 tp->serdes_counter &&
5090 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5091 MAC_STATUS_RCVD_CFG)) ==
5092 MAC_STATUS_PCS_SYNCED)) {
5093 tp->serdes_counter--;
5094 current_link_up = 1;
5095 goto out;
5096 }
5097 restart_autoneg:
5098 if (workaround)
5099 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5100 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5101 udelay(5);
5102 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5103
5104 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5105 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5106 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5107 MAC_STATUS_SIGNAL_DET)) {
5108 sg_dig_status = tr32(SG_DIG_STATUS);
5109 mac_status = tr32(MAC_STATUS);
5110
5111 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5112 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5113 u32 local_adv = 0, remote_adv = 0;
5114
5115 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5116 local_adv |= ADVERTISE_1000XPAUSE;
5117 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5118 local_adv |= ADVERTISE_1000XPSE_ASYM;
5119
5120 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5121 remote_adv |= LPA_1000XPAUSE;
5122 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5123 remote_adv |= LPA_1000XPAUSE_ASYM;
5124
5125 tp->link_config.rmt_adv =
5126 mii_adv_to_ethtool_adv_x(remote_adv);
5127
5128 tg3_setup_flow_control(tp, local_adv, remote_adv);
5129 current_link_up = 1;
5130 tp->serdes_counter = 0;
5131 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5132 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5133 if (tp->serdes_counter)
5134 tp->serdes_counter--;
5135 else {
5136 if (workaround) {
5137 u32 val = serdes_cfg;
5138
5139 if (port_a)
5140 val |= 0xc010000;
5141 else
5142 val |= 0x4010000;
5143
5144 tw32_f(MAC_SERDES_CFG, val);
5145 }
5146
5147 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5148 udelay(40);
5149
5150 				/* Link parallel detection - link is up only
5151 				 * if we have PCS_SYNC and are not receiving
5152 				 * config code words. */
5153 mac_status = tr32(MAC_STATUS);
5154 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5155 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5156 tg3_setup_flow_control(tp, 0, 0);
5157 current_link_up = 1;
5158 tp->phy_flags |=
5159 TG3_PHYFLG_PARALLEL_DETECT;
5160 tp->serdes_counter =
5161 SERDES_PARALLEL_DET_TIMEOUT;
5162 } else
5163 goto restart_autoneg;
5164 }
5165 }
5166 } else {
5167 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5168 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5169 }
5170
5171 out:
5172 return current_link_up;
5173 }
5174
5175 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5176 {
5177 int current_link_up = 0;
5178
5179 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5180 goto out;
5181
5182 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5183 u32 txflags, rxflags;
5184 int i;
5185
5186 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5187 u32 local_adv = 0, remote_adv = 0;
5188
5189 if (txflags & ANEG_CFG_PS1)
5190 local_adv |= ADVERTISE_1000XPAUSE;
5191 if (txflags & ANEG_CFG_PS2)
5192 local_adv |= ADVERTISE_1000XPSE_ASYM;
5193
5194 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5195 remote_adv |= LPA_1000XPAUSE;
5196 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5197 remote_adv |= LPA_1000XPAUSE_ASYM;
5198
5199 tp->link_config.rmt_adv =
5200 mii_adv_to_ethtool_adv_x(remote_adv);
5201
5202 tg3_setup_flow_control(tp, local_adv, remote_adv);
5203
5204 current_link_up = 1;
5205 }
5206 for (i = 0; i < 30; i++) {
5207 udelay(20);
5208 tw32_f(MAC_STATUS,
5209 (MAC_STATUS_SYNC_CHANGED |
5210 MAC_STATUS_CFG_CHANGED));
5211 udelay(40);
5212 if ((tr32(MAC_STATUS) &
5213 (MAC_STATUS_SYNC_CHANGED |
5214 MAC_STATUS_CFG_CHANGED)) == 0)
5215 break;
5216 }
5217
5218 mac_status = tr32(MAC_STATUS);
5219 if (current_link_up == 0 &&
5220 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5221 !(mac_status & MAC_STATUS_RCVD_CFG))
5222 current_link_up = 1;
5223 } else {
5224 tg3_setup_flow_control(tp, 0, 0);
5225
5226 /* Forcing 1000FD link up. */
5227 current_link_up = 1;
5228
5229 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5230 udelay(40);
5231
5232 tw32_f(MAC_MODE, tp->mac_mode);
5233 udelay(40);
5234 }
5235
5236 out:
5237 return current_link_up;
5238 }
5239
5240 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5241 {
5242 u32 orig_pause_cfg;
5243 u16 orig_active_speed;
5244 u8 orig_active_duplex;
5245 u32 mac_status;
5246 int current_link_up;
5247 int i;
5248
5249 orig_pause_cfg = tp->link_config.active_flowctrl;
5250 orig_active_speed = tp->link_config.active_speed;
5251 orig_active_duplex = tp->link_config.active_duplex;
5252
5253 if (!tg3_flag(tp, HW_AUTONEG) &&
5254 tp->link_up &&
5255 tg3_flag(tp, INIT_COMPLETE)) {
5256 mac_status = tr32(MAC_STATUS);
5257 mac_status &= (MAC_STATUS_PCS_SYNCED |
5258 MAC_STATUS_SIGNAL_DET |
5259 MAC_STATUS_CFG_CHANGED |
5260 MAC_STATUS_RCVD_CFG);
5261 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5262 MAC_STATUS_SIGNAL_DET)) {
5263 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5264 MAC_STATUS_CFG_CHANGED));
5265 return 0;
5266 }
5267 }
5268
5269 tw32_f(MAC_TX_AUTO_NEG, 0);
5270
5271 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5272 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5273 tw32_f(MAC_MODE, tp->mac_mode);
5274 udelay(40);
5275
5276 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5277 tg3_init_bcm8002(tp);
5278
5279 /* Enable link change event even when serdes polling. */
5280 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5281 udelay(40);
5282
5283 current_link_up = 0;
5284 tp->link_config.rmt_adv = 0;
5285 mac_status = tr32(MAC_STATUS);
5286
5287 if (tg3_flag(tp, HW_AUTONEG))
5288 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5289 else
5290 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5291
5292 tp->napi[0].hw_status->status =
5293 (SD_STATUS_UPDATED |
5294 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5295
5296 for (i = 0; i < 100; i++) {
5297 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5298 MAC_STATUS_CFG_CHANGED));
5299 udelay(5);
5300 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5301 MAC_STATUS_CFG_CHANGED |
5302 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5303 break;
5304 }
5305
5306 mac_status = tr32(MAC_STATUS);
5307 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5308 current_link_up = 0;
5309 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5310 tp->serdes_counter == 0) {
5311 tw32_f(MAC_MODE, (tp->mac_mode |
5312 MAC_MODE_SEND_CONFIGS));
5313 udelay(1);
5314 tw32_f(MAC_MODE, tp->mac_mode);
5315 }
5316 }
5317
5318 if (current_link_up == 1) {
5319 tp->link_config.active_speed = SPEED_1000;
5320 tp->link_config.active_duplex = DUPLEX_FULL;
5321 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5322 LED_CTRL_LNKLED_OVERRIDE |
5323 LED_CTRL_1000MBPS_ON));
5324 } else {
5325 tp->link_config.active_speed = SPEED_UNKNOWN;
5326 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5327 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5328 LED_CTRL_LNKLED_OVERRIDE |
5329 LED_CTRL_TRAFFIC_OVERRIDE));
5330 }
5331
5332 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5333 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5334 if (orig_pause_cfg != now_pause_cfg ||
5335 orig_active_speed != tp->link_config.active_speed ||
5336 orig_active_duplex != tp->link_config.active_duplex)
5337 tg3_link_report(tp);
5338 }
5339
5340 return 0;
5341 }
5342
5343 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5344 {
5345 int current_link_up, err = 0;
5346 u32 bmsr, bmcr;
5347 u16 current_speed;
5348 u8 current_duplex;
5349 u32 local_adv, remote_adv;
5350
5351 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5352 tw32_f(MAC_MODE, tp->mac_mode);
5353 udelay(40);
5354
5355 tw32(MAC_EVENT, 0);
5356
5357 tw32_f(MAC_STATUS,
5358 (MAC_STATUS_SYNC_CHANGED |
5359 MAC_STATUS_CFG_CHANGED |
5360 MAC_STATUS_MI_COMPLETION |
5361 MAC_STATUS_LNKSTATE_CHANGED));
5362 udelay(40);
5363
5364 if (force_reset)
5365 tg3_phy_reset(tp);
5366
5367 current_link_up = 0;
5368 current_speed = SPEED_UNKNOWN;
5369 current_duplex = DUPLEX_UNKNOWN;
5370 tp->link_config.rmt_adv = 0;
5371
5372 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5373 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5374 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5375 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5376 bmsr |= BMSR_LSTATUS;
5377 else
5378 bmsr &= ~BMSR_LSTATUS;
5379 }
5380
5381 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5382
5383 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5384 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5385 /* do nothing, just check for link up at the end */
5386 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5387 u32 adv, newadv;
5388
5389 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5390 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5391 ADVERTISE_1000XPAUSE |
5392 ADVERTISE_1000XPSE_ASYM |
5393 ADVERTISE_SLCT);
5394
5395 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5396 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5397
5398 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5399 tg3_writephy(tp, MII_ADVERTISE, newadv);
5400 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5401 tg3_writephy(tp, MII_BMCR, bmcr);
5402
5403 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5404 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5405 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5406
5407 return err;
5408 }
5409 } else {
5410 u32 new_bmcr;
5411
5412 bmcr &= ~BMCR_SPEED1000;
5413 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5414
5415 if (tp->link_config.duplex == DUPLEX_FULL)
5416 new_bmcr |= BMCR_FULLDPLX;
5417
5418 if (new_bmcr != bmcr) {
5419 /* BMCR_SPEED1000 is a reserved bit that needs
5420 * to be set on write.
5421 */
5422 new_bmcr |= BMCR_SPEED1000;
5423
5424 /* Force a linkdown */
5425 if (tp->link_up) {
5426 u32 adv;
5427
5428 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5429 adv &= ~(ADVERTISE_1000XFULL |
5430 ADVERTISE_1000XHALF |
5431 ADVERTISE_SLCT);
5432 tg3_writephy(tp, MII_ADVERTISE, adv);
5433 tg3_writephy(tp, MII_BMCR, bmcr |
5434 BMCR_ANRESTART |
5435 BMCR_ANENABLE);
5436 udelay(10);
5437 tg3_carrier_off(tp);
5438 }
5439 tg3_writephy(tp, MII_BMCR, new_bmcr);
5440 bmcr = new_bmcr;
5441 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5442 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5443 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5444 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5445 bmsr |= BMSR_LSTATUS;
5446 else
5447 bmsr &= ~BMSR_LSTATUS;
5448 }
5449 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5450 }
5451 }
5452
5453 if (bmsr & BMSR_LSTATUS) {
5454 current_speed = SPEED_1000;
5455 current_link_up = 1;
5456 if (bmcr & BMCR_FULLDPLX)
5457 current_duplex = DUPLEX_FULL;
5458 else
5459 current_duplex = DUPLEX_HALF;
5460
5461 local_adv = 0;
5462 remote_adv = 0;
5463
5464 if (bmcr & BMCR_ANENABLE) {
5465 u32 common;
5466
5467 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5468 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5469 common = local_adv & remote_adv;
5470 if (common & (ADVERTISE_1000XHALF |
5471 ADVERTISE_1000XFULL)) {
5472 if (common & ADVERTISE_1000XFULL)
5473 current_duplex = DUPLEX_FULL;
5474 else
5475 current_duplex = DUPLEX_HALF;
5476
5477 tp->link_config.rmt_adv =
5478 mii_adv_to_ethtool_adv_x(remote_adv);
5479 } else if (!tg3_flag(tp, 5780_CLASS)) {
5480 /* Link is up via parallel detect */
5481 } else {
5482 current_link_up = 0;
5483 }
5484 }
5485 }
5486
5487 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5488 tg3_setup_flow_control(tp, local_adv, remote_adv);
5489
5490 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5491 if (tp->link_config.active_duplex == DUPLEX_HALF)
5492 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5493
5494 tw32_f(MAC_MODE, tp->mac_mode);
5495 udelay(40);
5496
5497 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5498
5499 tp->link_config.active_speed = current_speed;
5500 tp->link_config.active_duplex = current_duplex;
5501
5502 tg3_test_and_report_link_chg(tp, current_link_up);
5503 return err;
5504 }
5505
5506 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5507 {
5508 if (tp->serdes_counter) {
5509 /* Give autoneg time to complete. */
5510 tp->serdes_counter--;
5511 return;
5512 }
5513
5514 if (!tp->link_up &&
5515 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5516 u32 bmcr;
5517
5518 tg3_readphy(tp, MII_BMCR, &bmcr);
5519 if (bmcr & BMCR_ANENABLE) {
5520 u32 phy1, phy2;
5521
5522 /* Select shadow register 0x1f */
5523 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5524 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5525
5526 /* Select expansion interrupt status register */
5527 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5528 MII_TG3_DSP_EXP1_INT_STAT);
5529 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5530 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5531
5532 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5533 /* We have signal detect and not receiving
5534 * config code words, link is up by parallel
5535 * detection.
5536 */
5537
5538 bmcr &= ~BMCR_ANENABLE;
5539 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5540 tg3_writephy(tp, MII_BMCR, bmcr);
5541 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5542 }
5543 }
5544 } else if (tp->link_up &&
5545 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5546 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5547 u32 phy2;
5548
5549 /* Select expansion interrupt status register */
5550 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5551 MII_TG3_DSP_EXP1_INT_STAT);
5552 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5553 if (phy2 & 0x20) {
5554 u32 bmcr;
5555
5556 /* Config code words received, turn on autoneg. */
5557 tg3_readphy(tp, MII_BMCR, &bmcr);
5558 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5559
5560 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5561
5562 }
5563 }
5564 }
5565
5566 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5567 {
5568 u32 val;
5569 int err;
5570
5571 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5572 err = tg3_setup_fiber_phy(tp, force_reset);
5573 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5574 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5575 else
5576 err = tg3_setup_copper_phy(tp, force_reset);
5577
5578 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5579 u32 scale;
5580
5581 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5582 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5583 scale = 65;
5584 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5585 scale = 6;
5586 else
5587 scale = 12;
5588
5589 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5590 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5591 tw32(GRC_MISC_CFG, val);
5592 }
5593
5594 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5595 (6 << TX_LENGTHS_IPG_SHIFT);
5596 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5597 tg3_asic_rev(tp) == ASIC_REV_5762)
5598 val |= tr32(MAC_TX_LENGTHS) &
5599 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5600 TX_LENGTHS_CNT_DWN_VAL_MSK);
5601
5602 if (tp->link_config.active_speed == SPEED_1000 &&
5603 tp->link_config.active_duplex == DUPLEX_HALF)
5604 tw32(MAC_TX_LENGTHS, val |
5605 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5606 else
5607 tw32(MAC_TX_LENGTHS, val |
5608 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5609
5610 if (!tg3_flag(tp, 5705_PLUS)) {
5611 if (tp->link_up) {
5612 tw32(HOSTCC_STAT_COAL_TICKS,
5613 tp->coal.stats_block_coalesce_usecs);
5614 } else {
5615 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5616 }
5617 }
5618
5619 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5620 val = tr32(PCIE_PWR_MGMT_THRESH);
5621 if (!tp->link_up)
5622 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5623 tp->pwrmgmt_thresh;
5624 else
5625 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5626 tw32(PCIE_PWR_MGMT_THRESH, val);
5627 }
5628
5629 return err;
5630 }
5631
5632 /* tp->lock must be held */
5633 static u64 tg3_refclk_read(struct tg3 *tp)
5634 {
5635 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5636 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5637 }
5638
5639 /* tp->lock must be held */
5640 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5641 {
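	/* Stop the reference clock so it cannot advance between the
	 * two 32-bit half writes, then resume it afterwards.
	 */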
5642 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5643 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5644 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5645 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5646 }
5647
5648 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5649 static inline void tg3_full_unlock(struct tg3 *tp);
5650 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5651 {
5652 struct tg3 *tp = netdev_priv(dev);
5653
5654 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5655 SOF_TIMESTAMPING_RX_SOFTWARE |
5656 SOF_TIMESTAMPING_SOFTWARE |
5657 SOF_TIMESTAMPING_TX_HARDWARE |
5658 SOF_TIMESTAMPING_RX_HARDWARE |
5659 SOF_TIMESTAMPING_RAW_HARDWARE;
5660
5661 if (tp->ptp_clock)
5662 info->phc_index = ptp_clock_index(tp->ptp_clock);
5663 else
5664 info->phc_index = -1;
5665
5666 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5667
5668 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5669 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5670 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5671 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5672 return 0;
5673 }
5674
5675 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5676 {
5677 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5678 bool neg_adj = false;
5679 u32 correction = 0;
5680
5681 if (ppb < 0) {
5682 neg_adj = true;
5683 ppb = -ppb;
5684 }
5685
5686 /* Frequency adjustment is performed using hardware with a 24 bit
5687 * accumulator and a programmable correction value. On each clk, the
5688 * correction value gets added to the accumulator and when it
5689 * overflows, the time counter is incremented/decremented.
5690 *
5691 * So conversion from ppb to correction value is
5692 * ppb * (1 << 24) / 1000000000
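	 *
	 * For example, ppb = 1000 (i.e. 1 ppm) gives
	 * 1000 * (1 << 24) / 1000000000 ~= 16.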
5693 */
5694 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5695 TG3_EAV_REF_CLK_CORRECT_MASK;
5696
5697 tg3_full_lock(tp, 0);
5698
5699 if (correction)
5700 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5701 TG3_EAV_REF_CLK_CORRECT_EN |
5702 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5703 else
5704 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5705
5706 tg3_full_unlock(tp);
5707
5708 return 0;
5709 }
5710
5711 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5712 {
5713 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5714
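	/* The adjustment is purely software: the delta accumulates in
	 * ptp_adjust and is applied whenever timestamps are read back.
	 */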
5715 tg3_full_lock(tp, 0);
5716 tp->ptp_adjust += delta;
5717 tg3_full_unlock(tp);
5718
5719 return 0;
5720 }
5721
5722 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5723 {
5724 u64 ns;
5725 u32 remainder;
5726 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5727
5728 tg3_full_lock(tp, 0);
5729 ns = tg3_refclk_read(tp);
5730 ns += tp->ptp_adjust;
5731 tg3_full_unlock(tp);
5732
5733 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5734 ts->tv_nsec = remainder;
5735
5736 return 0;
5737 }
5738
5739 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5740 const struct timespec *ts)
5741 {
5742 u64 ns;
5743 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5744
5745 ns = timespec_to_ns(ts);
5746
5747 tg3_full_lock(tp, 0);
5748 tg3_refclk_write(tp, ns);
5749 tp->ptp_adjust = 0;
5750 tg3_full_unlock(tp);
5751
5752 return 0;
5753 }
5754
5755 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5756 struct ptp_clock_request *rq, int on)
5757 {
5758 return -EOPNOTSUPP;
5759 }
5760
5761 static const struct ptp_clock_info tg3_ptp_caps = {
5762 .owner = THIS_MODULE,
5763 .name = "tg3 clock",
5764 .max_adj = 250000000,
5765 .n_alarm = 0,
5766 .n_ext_ts = 0,
5767 .n_per_out = 0,
5768 .pps = 0,
5769 .adjfreq = tg3_ptp_adjfreq,
5770 .adjtime = tg3_ptp_adjtime,
5771 .gettime = tg3_ptp_gettime,
5772 .settime = tg3_ptp_settime,
5773 .enable = tg3_ptp_enable,
5774 };
5775
5776 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5777 struct skb_shared_hwtstamps *timestamp)
5778 {
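	/* Mask the raw counter down to its valid width and fold in the
	 * software offset accumulated by tg3_ptp_adjtime().
	 */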
5779 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5780 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5781 tp->ptp_adjust);
5782 }
5783
5784 /* tp->lock must be held */
5785 static void tg3_ptp_init(struct tg3 *tp)
5786 {
5787 if (!tg3_flag(tp, PTP_CAPABLE))
5788 return;
5789
5790 /* Initialize the hardware clock to the system time. */
5791 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5792 tp->ptp_adjust = 0;
5793 tp->ptp_info = tg3_ptp_caps;
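	/* ptp_clock_register() itself is called later, outside this
	 * helper; tg3_ptp_fini() below does the matching unregister.
	 */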
5794 }
5795
5796 /* tp->lock must be held */
5797 static void tg3_ptp_resume(struct tg3 *tp)
5798 {
5799 if (!tg3_flag(tp, PTP_CAPABLE))
5800 return;
5801
5802 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5803 tp->ptp_adjust = 0;
5804 }
5805
5806 static void tg3_ptp_fini(struct tg3 *tp)
5807 {
5808 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5809 return;
5810
5811 ptp_clock_unregister(tp->ptp_clock);
5812 tp->ptp_clock = NULL;
5813 tp->ptp_adjust = 0;
5814 }
5815
5816 static inline int tg3_irq_sync(struct tg3 *tp)
5817 {
5818 return tp->irq_sync;
5819 }
5820
5821 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5822 {
5823 int i;
5824
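	/* Store each register block at the same byte offset within the
	 * dump buffer that the block occupies in register space.
	 */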
5825 dst = (u32 *)((u8 *)dst + off);
5826 for (i = 0; i < len; i += sizeof(u32))
5827 *dst++ = tr32(off + i);
5828 }
5829
5830 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5831 {
5832 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5833 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5834 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5835 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5836 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5837 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5838 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5839 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5840 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5841 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5842 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5843 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5844 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5845 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5846 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5847 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5848 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5849 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5850 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5851
5852 if (tg3_flag(tp, SUPPORT_MSIX))
5853 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5854
5855 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5856 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5857 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5858 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5859 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5860 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5861 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5862 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5863
5864 if (!tg3_flag(tp, 5705_PLUS)) {
5865 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5866 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5867 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5868 }
5869
5870 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5871 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5872 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5873 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5874 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5875
5876 if (tg3_flag(tp, NVRAM))
5877 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5878 }
5879
5880 static void tg3_dump_state(struct tg3 *tp)
5881 {
5882 int i;
5883 u32 *regs;
5884
5885 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5886 if (!regs)
5887 return;
5888
5889 if (tg3_flag(tp, PCI_EXPRESS)) {
5890 /* Read up to but not including private PCI registers */
5891 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5892 regs[i / sizeof(u32)] = tr32(i);
5893 } else
5894 tg3_dump_legacy_regs(tp, regs);
5895
5896 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5897 if (!regs[i + 0] && !regs[i + 1] &&
5898 !regs[i + 2] && !regs[i + 3])
5899 continue;
5900
5901 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5902 i * 4,
5903 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5904 }
5905
5906 kfree(regs);
5907
5908 for (i = 0; i < tp->irq_cnt; i++) {
5909 struct tg3_napi *tnapi = &tp->napi[i];
5910
5911 /* SW status block */
5912 netdev_err(tp->dev,
5913 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5914 i,
5915 tnapi->hw_status->status,
5916 tnapi->hw_status->status_tag,
5917 tnapi->hw_status->rx_jumbo_consumer,
5918 tnapi->hw_status->rx_consumer,
5919 tnapi->hw_status->rx_mini_consumer,
5920 tnapi->hw_status->idx[0].rx_producer,
5921 tnapi->hw_status->idx[0].tx_consumer);
5922
5923 netdev_err(tp->dev,
5924 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5925 i,
5926 tnapi->last_tag, tnapi->last_irq_tag,
5927 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5928 tnapi->rx_rcb_ptr,
5929 tnapi->prodring.rx_std_prod_idx,
5930 tnapi->prodring.rx_std_cons_idx,
5931 tnapi->prodring.rx_jmb_prod_idx,
5932 tnapi->prodring.rx_jmb_cons_idx);
5933 }
5934 }
5935
5936 /* This is called whenever we suspect that the system chipset is re-
5937 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5938 * is bogus tx completions. We try to recover by setting the
5939 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5940 * in the workqueue.
5941 */
5942 static void tg3_tx_recover(struct tg3 *tp)
5943 {
5944 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5945 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5946
5947 netdev_warn(tp->dev,
5948 "The system may be re-ordering memory-mapped I/O "
5949 "cycles to the network device, attempting to recover. "
5950 "Please report the problem to the driver maintainer "
5951 "and include system chipset information.\n");
5952
5953 spin_lock(&tp->lock);
5954 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5955 spin_unlock(&tp->lock);
5956 }
5957
5958 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5959 {
5960 /* Tell compiler to fetch tx indices from memory. */
5961 barrier();
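	/* Example, assuming TG3_TX_RING_SIZE is 512: tx_prod = 5 and
	 * tx_cons = 510 give (5 - 510) & 511 == 7 descriptors still
	 * in flight, which is subtracted from tx_pending.
	 */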
5962 return tnapi->tx_pending -
5963 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5964 }
5965
5966 /* Tigon3 never reports partial packet sends. So we do not
5967 * need special logic to handle SKBs that have not had all
5968 * of their frags sent yet, like SunGEM does.
5969 */
5970 static void tg3_tx(struct tg3_napi *tnapi)
5971 {
5972 struct tg3 *tp = tnapi->tp;
5973 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5974 u32 sw_idx = tnapi->tx_cons;
5975 struct netdev_queue *txq;
5976 int index = tnapi - tp->napi;
5977 unsigned int pkts_compl = 0, bytes_compl = 0;
5978
5979 if (tg3_flag(tp, ENABLE_TSS))
5980 index--;
5981
5982 txq = netdev_get_tx_queue(tp->dev, index);
5983
5984 while (sw_idx != hw_idx) {
5985 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5986 struct sk_buff *skb = ri->skb;
5987 int i, tx_bug = 0;
5988
5989 if (unlikely(skb == NULL)) {
5990 tg3_tx_recover(tp);
5991 return;
5992 }
5993
5994 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5995 struct skb_shared_hwtstamps timestamp;
5996 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5997 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5998
5999 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6000
6001 skb_tstamp_tx(skb, &timestamp);
6002 }
6003
6004 pci_unmap_single(tp->pdev,
6005 dma_unmap_addr(ri, mapping),
6006 skb_headlen(skb),
6007 PCI_DMA_TODEVICE);
6008
6009 ri->skb = NULL;
6010
6011 while (ri->fragmented) {
6012 ri->fragmented = false;
6013 sw_idx = NEXT_TX(sw_idx);
6014 ri = &tnapi->tx_buffers[sw_idx];
6015 }
6016
6017 sw_idx = NEXT_TX(sw_idx);
6018
6019 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6020 ri = &tnapi->tx_buffers[sw_idx];
6021 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6022 tx_bug = 1;
6023
6024 pci_unmap_page(tp->pdev,
6025 dma_unmap_addr(ri, mapping),
6026 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6027 PCI_DMA_TODEVICE);
6028
6029 while (ri->fragmented) {
6030 ri->fragmented = false;
6031 sw_idx = NEXT_TX(sw_idx);
6032 ri = &tnapi->tx_buffers[sw_idx];
6033 }
6034
6035 sw_idx = NEXT_TX(sw_idx);
6036 }
6037
6038 pkts_compl++;
6039 bytes_compl += skb->len;
6040
6041 dev_kfree_skb(skb);
6042
6043 if (unlikely(tx_bug)) {
6044 tg3_tx_recover(tp);
6045 return;
6046 }
6047 }
6048
6049 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6050
6051 tnapi->tx_cons = sw_idx;
6052
6053 /* Need to make the tx_cons update visible to tg3_start_xmit()
6054 * before checking for netif_queue_stopped(). Without the
6055 * memory barrier, there is a small possibility that tg3_start_xmit()
6056 * will miss it and cause the queue to be stopped forever.
6057 */
6058 smp_mb();
6059
6060 if (unlikely(netif_tx_queue_stopped(txq) &&
6061 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6062 __netif_tx_lock(txq, smp_processor_id());
6063 if (netif_tx_queue_stopped(txq) &&
6064 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6065 netif_tx_wake_queue(txq);
6066 __netif_tx_unlock(txq);
6067 }
6068 }
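/* Rough sketch of the race the smp_mb() above closes (an assumed
 * interleaving, not taken from a trace):
 *
 *   tg3_start_xmit()                 tg3_tx()
 *   ----------------                 --------
 *   tg3_tx_avail() -> too low        tnapi->tx_cons = sw_idx;
 *   netif_tx_stop_queue(txq);        netif_tx_queue_stopped() -> false
 *
 * Without a barrier on each side, tg3_tx() can observe the queue as
 * still awake while tg3_start_xmit() missed the new tx_cons, leaving
 * the queue stopped with no further completion left to wake it.
 */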
6069
6070 static void tg3_frag_free(bool is_frag, void *data)
6071 {
6072 if (is_frag)
6073 put_page(virt_to_head_page(data));
6074 else
6075 kfree(data);
6076 }
6077
6078 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6079 {
6080 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6081 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6082
6083 if (!ri->data)
6084 return;
6085
6086 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6087 map_sz, PCI_DMA_FROMDEVICE);
6088 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6089 ri->data = NULL;
6090 }
6091
6092
6093 /* Returns size of skb allocated or < 0 on error.
6094 *
6095 * We only need to fill in the address because the other members
6096 * of the RX descriptor are invariant, see tg3_init_rings.
6097 *
6098 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6099 * posting buffers we only dirty the first cache line of the RX
6100 * descriptor (containing the address). Whereas for the RX status
6101 * buffers the cpu only reads the last cacheline of the RX descriptor
6102 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6103 */
6104 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6105 u32 opaque_key, u32 dest_idx_unmasked,
6106 unsigned int *frag_size)
6107 {
6108 struct tg3_rx_buffer_desc *desc;
6109 struct ring_info *map;
6110 u8 *data;
6111 dma_addr_t mapping;
6112 int skb_size, data_size, dest_idx;
6113
6114 switch (opaque_key) {
6115 case RXD_OPAQUE_RING_STD:
6116 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6117 desc = &tpr->rx_std[dest_idx];
6118 map = &tpr->rx_std_buffers[dest_idx];
6119 data_size = tp->rx_pkt_map_sz;
6120 break;
6121
6122 case RXD_OPAQUE_RING_JUMBO:
6123 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6124 desc = &tpr->rx_jmb[dest_idx].std;
6125 map = &tpr->rx_jmb_buffers[dest_idx];
6126 data_size = TG3_RX_JMB_MAP_SZ;
6127 break;
6128
6129 default:
6130 return -EINVAL;
6131 }
6132
6133 /* Do not overwrite any of the map or rp information
6134 * until we are sure we can commit to a new buffer.
6135 *
6136 * Callers depend upon this behavior and assume that
6137 * we leave everything unchanged if we fail.
6138 */
6139 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6140 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6141 if (skb_size <= PAGE_SIZE) {
6142 data = netdev_alloc_frag(skb_size);
6143 *frag_size = skb_size;
6144 } else {
6145 data = kmalloc(skb_size, GFP_ATOMIC);
6146 *frag_size = 0;
6147 }
6148 if (!data)
6149 return -ENOMEM;
6150
6151 mapping = pci_map_single(tp->pdev,
6152 data + TG3_RX_OFFSET(tp),
6153 data_size,
6154 PCI_DMA_FROMDEVICE);
6155 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6156 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6157 return -EIO;
6158 }
6159
6160 map->data = data;
6161 dma_unmap_addr_set(map, mapping, mapping);
6162
6163 desc->addr_hi = ((u64)mapping >> 32);
6164 desc->addr_lo = ((u64)mapping & 0xffffffff);
6165
6166 return data_size;
6167 }
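/* Illustrative sizing, assuming PAGE_SIZE == 4096 and a standard
 * 1500-byte MTU: data_size is on the order of 1536 bytes, so
 *
 *   skb_size = SKB_DATA_ALIGN(1536 + TG3_RX_OFFSET(tp)) +
 *              SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * fits in a single page and the cheap page-frag allocator is used.
 * A jumbo buffer pushes skb_size past PAGE_SIZE, so the code falls
 * back to kmalloc() and signals that to the build_skb() caller by
 * returning *frag_size == 0.
 */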
6168
6169 /* We only need to copy the address over because the other
6170 * members of the RX descriptor are invariant. See notes above
6171 * tg3_alloc_rx_data for full details.
6172 */
6173 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6174 struct tg3_rx_prodring_set *dpr,
6175 u32 opaque_key, int src_idx,
6176 u32 dest_idx_unmasked)
6177 {
6178 struct tg3 *tp = tnapi->tp;
6179 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6180 struct ring_info *src_map, *dest_map;
6181 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6182 int dest_idx;
6183
6184 switch (opaque_key) {
6185 case RXD_OPAQUE_RING_STD:
6186 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6187 dest_desc = &dpr->rx_std[dest_idx];
6188 dest_map = &dpr->rx_std_buffers[dest_idx];
6189 src_desc = &spr->rx_std[src_idx];
6190 src_map = &spr->rx_std_buffers[src_idx];
6191 break;
6192
6193 case RXD_OPAQUE_RING_JUMBO:
6194 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6195 dest_desc = &dpr->rx_jmb[dest_idx].std;
6196 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6197 src_desc = &spr->rx_jmb[src_idx].std;
6198 src_map = &spr->rx_jmb_buffers[src_idx];
6199 break;
6200
6201 default:
6202 return;
6203 }
6204
6205 dest_map->data = src_map->data;
6206 dma_unmap_addr_set(dest_map, mapping,
6207 dma_unmap_addr(src_map, mapping));
6208 dest_desc->addr_hi = src_desc->addr_hi;
6209 dest_desc->addr_lo = src_desc->addr_lo;
6210
6211 /* Ensure that the update to the skb happens after the physical
6212 * addresses have been transferred to the new BD location.
6213 */
6214 smp_wmb();
6215
6216 src_map->data = NULL;
6217 }
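/* The smp_wmb() above appears to pair with the smp_rmb() calls in
 * tg3_rx_prodring_xfer(): a reader that sees src_map->data == NULL
 * (buffer already recycled) is then guaranteed to also see the
 * address copied into the destination BD, rather than a stale
 * mapping.
 */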
6218
6219 /* The RX ring scheme is composed of multiple rings which post fresh
6220 * buffers to the chip, and one special ring the chip uses to report
6221 * status back to the host.
6222 *
6223 * The special ring reports the status of received packets to the
6224 * host. The chip does not write into the original descriptor the
6225 * RX buffer was obtained from. The chip simply takes the original
6226 * descriptor as provided by the host, updates the status and length
6227 * field, then writes this into the next status ring entry.
6228 *
6229 * Each ring the host uses to post buffers to the chip is described
6230 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6231 * it is first placed into the on-chip ram. When the packet's length
6232 * is known, it walks down the TG3_BDINFO entries to select the ring.
6233 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6234 * which is within the range of the new packet's length is chosen.
6235 *
6236 * The "separate ring for rx status" scheme may sound queer, but it makes
6237 * sense from a cache coherency perspective. If only the host writes
6238 * to the buffer post rings, and only the chip writes to the rx status
6239 * rings, then cache lines never move beyond shared-modified state.
6240 * If both the host and chip were to write into the same ring, cache line
6241 * eviction could occur since both entities want it in an exclusive state.
6242 */
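/* Sketch of one packet's round trip under this scheme (illustrative):
 *
 *   host: rx_std[i].opaque = RXD_OPAQUE_RING_STD |
 *                            (i << RXD_OPAQUE_INDEX_SHIFT);
 *   chip: DMAs the frame, then copies the descriptor into the next
 *         status ring entry with the length/error fields filled in
 *   host: decodes ring and index back out of ->opaque and looks up
 *         the matching ring_info to unmap and consume the buffer
 *
 * The opaque cookie is what lets tg3_rx() find its buffer without
 * the chip ever writing back into the producer rings.
 */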
6243 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6244 {
6245 struct tg3 *tp = tnapi->tp;
6246 u32 work_mask, rx_std_posted = 0;
6247 u32 std_prod_idx, jmb_prod_idx;
6248 u32 sw_idx = tnapi->rx_rcb_ptr;
6249 u16 hw_idx;
6250 int received;
6251 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6252
6253 hw_idx = *(tnapi->rx_rcb_prod_idx);
6254 /*
6255 * We need to order the read of hw_idx and the read of
6256 * the opaque cookie.
6257 */
6258 rmb();
6259 work_mask = 0;
6260 received = 0;
6261 std_prod_idx = tpr->rx_std_prod_idx;
6262 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6263 while (sw_idx != hw_idx && budget > 0) {
6264 struct ring_info *ri;
6265 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6266 unsigned int len;
6267 struct sk_buff *skb;
6268 dma_addr_t dma_addr;
6269 u32 opaque_key, desc_idx, *post_ptr;
6270 u8 *data;
6271 u64 tstamp = 0;
6272
6273 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6274 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6275 if (opaque_key == RXD_OPAQUE_RING_STD) {
6276 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6277 dma_addr = dma_unmap_addr(ri, mapping);
6278 data = ri->data;
6279 post_ptr = &std_prod_idx;
6280 rx_std_posted++;
6281 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6282 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6283 dma_addr = dma_unmap_addr(ri, mapping);
6284 data = ri->data;
6285 post_ptr = &jmb_prod_idx;
6286 } else
6287 goto next_pkt_nopost;
6288
6289 work_mask |= opaque_key;
6290
6291 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6292 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6293 drop_it:
6294 tg3_recycle_rx(tnapi, tpr, opaque_key,
6295 desc_idx, *post_ptr);
6296 drop_it_no_recycle:
6297 /* Other statistics are tracked by the card. */
6298 tp->rx_dropped++;
6299 goto next_pkt;
6300 }
6301
6302 prefetch(data + TG3_RX_OFFSET(tp));
6303 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6304 ETH_FCS_LEN;
6305
6306 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6307 RXD_FLAG_PTPSTAT_PTPV1 ||
6308 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6309 RXD_FLAG_PTPSTAT_PTPV2) {
6310 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6311 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6312 }
6313
6314 if (len > TG3_RX_COPY_THRESH(tp)) {
6315 int skb_size;
6316 unsigned int frag_size;
6317
6318 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6319 *post_ptr, &frag_size);
6320 if (skb_size < 0)
6321 goto drop_it;
6322
6323 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6324 PCI_DMA_FROMDEVICE);
6325
6326 skb = build_skb(data, frag_size);
6327 if (!skb) {
6328 tg3_frag_free(frag_size != 0, data);
6329 goto drop_it_no_recycle;
6330 }
6331 skb_reserve(skb, TG3_RX_OFFSET(tp));
6332 /* Ensure that the update to the data happens
6333 * after the usage of the old DMA mapping.
6334 */
6335 smp_wmb();
6336
6337 ri->data = NULL;
6338
6339 } else {
6340 tg3_recycle_rx(tnapi, tpr, opaque_key,
6341 desc_idx, *post_ptr);
6342
6343 skb = netdev_alloc_skb(tp->dev,
6344 len + TG3_RAW_IP_ALIGN);
6345 if (skb == NULL)
6346 goto drop_it_no_recycle;
6347
6348 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6349 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6350 memcpy(skb->data,
6351 data + TG3_RX_OFFSET(tp),
6352 len);
6353 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6354 }
6355
6356 skb_put(skb, len);
6357 if (tstamp)
6358 tg3_hwclock_to_timestamp(tp, tstamp,
6359 skb_hwtstamps(skb));
6360
6361 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6362 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6363 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6364 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6365 skb->ip_summed = CHECKSUM_UNNECESSARY;
6366 else
6367 skb_checksum_none_assert(skb);
6368
6369 skb->protocol = eth_type_trans(skb, tp->dev);
6370
6371 if (len > (tp->dev->mtu + ETH_HLEN) &&
6372 skb->protocol != htons(ETH_P_8021Q)) {
6373 dev_kfree_skb(skb);
6374 goto drop_it_no_recycle;
6375 }
6376
6377 if (desc->type_flags & RXD_FLAG_VLAN &&
6378 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6379 __vlan_hwaccel_put_tag(skb,
6380 desc->err_vlan & RXD_VLAN_MASK);
6381
6382 napi_gro_receive(&tnapi->napi, skb);
6383
6384 received++;
6385 budget--;
6386
6387 next_pkt:
6388 (*post_ptr)++;
6389
6390 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6391 tpr->rx_std_prod_idx = std_prod_idx &
6392 tp->rx_std_ring_mask;
6393 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6394 tpr->rx_std_prod_idx);
6395 work_mask &= ~RXD_OPAQUE_RING_STD;
6396 rx_std_posted = 0;
6397 }
6398 next_pkt_nopost:
6399 sw_idx++;
6400 sw_idx &= tp->rx_ret_ring_mask;
6401
6402 /* Refresh hw_idx to see if there is new work */
6403 if (sw_idx == hw_idx) {
6404 hw_idx = *(tnapi->rx_rcb_prod_idx);
6405 rmb();
6406 }
6407 }
6408
6409 /* ACK the status ring. */
6410 tnapi->rx_rcb_ptr = sw_idx;
6411 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6412
6413 /* Refill RX ring(s). */
6414 if (!tg3_flag(tp, ENABLE_RSS)) {
6415 /* Sync BD data before updating mailbox */
6416 wmb();
6417
6418 if (work_mask & RXD_OPAQUE_RING_STD) {
6419 tpr->rx_std_prod_idx = std_prod_idx &
6420 tp->rx_std_ring_mask;
6421 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6422 tpr->rx_std_prod_idx);
6423 }
6424 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6425 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6426 tp->rx_jmb_ring_mask;
6427 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6428 tpr->rx_jmb_prod_idx);
6429 }
6430 mmiowb();
6431 } else if (work_mask) {
6432 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6433 * updated before the producer indices can be updated.
6434 */
6435 smp_wmb();
6436
6437 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6438 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6439
6440 if (tnapi != &tp->napi[1]) {
6441 tp->rx_refill = true;
6442 napi_schedule(&tp->napi[1].napi);
6443 }
6444 }
6445
6446 return received;
6447 }
6448
6449 static void tg3_poll_link(struct tg3 *tp)
6450 {
6451 /* handle link change and other phy events */
6452 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6453 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6454
6455 if (sblk->status & SD_STATUS_LINK_CHG) {
6456 sblk->status = SD_STATUS_UPDATED |
6457 (sblk->status & ~SD_STATUS_LINK_CHG);
6458 spin_lock(&tp->lock);
6459 if (tg3_flag(tp, USE_PHYLIB)) {
6460 tw32_f(MAC_STATUS,
6461 (MAC_STATUS_SYNC_CHANGED |
6462 MAC_STATUS_CFG_CHANGED |
6463 MAC_STATUS_MI_COMPLETION |
6464 MAC_STATUS_LNKSTATE_CHANGED));
6465 udelay(40);
6466 } else
6467 tg3_setup_phy(tp, 0);
6468 spin_unlock(&tp->lock);
6469 }
6470 }
6471 }
6472
6473 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6474 struct tg3_rx_prodring_set *dpr,
6475 struct tg3_rx_prodring_set *spr)
6476 {
6477 u32 si, di, cpycnt, src_prod_idx;
6478 int i, err = 0;
6479
6480 while (1) {
6481 src_prod_idx = spr->rx_std_prod_idx;
6482
6483 /* Make sure updates to the rx_std_buffers[] entries and the
6484 * standard producer index are seen in the correct order.
6485 */
6486 smp_rmb();
6487
6488 if (spr->rx_std_cons_idx == src_prod_idx)
6489 break;
6490
6491 if (spr->rx_std_cons_idx < src_prod_idx)
6492 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6493 else
6494 cpycnt = tp->rx_std_ring_mask + 1 -
6495 spr->rx_std_cons_idx;
6496
6497 cpycnt = min(cpycnt,
6498 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6499
6500 si = spr->rx_std_cons_idx;
6501 di = dpr->rx_std_prod_idx;
6502
6503 for (i = di; i < di + cpycnt; i++) {
6504 if (dpr->rx_std_buffers[i].data) {
6505 cpycnt = i - di;
6506 err = -ENOSPC;
6507 break;
6508 }
6509 }
6510
6511 if (!cpycnt)
6512 break;
6513
6514 /* Ensure that updates to the rx_std_buffers ring and the
6515 * shadowed hardware producer ring from tg3_recycle_skb() are
6516 * ordered correctly WRT the skb check above.
6517 */
6518 smp_rmb();
6519
6520 memcpy(&dpr->rx_std_buffers[di],
6521 &spr->rx_std_buffers[si],
6522 cpycnt * sizeof(struct ring_info));
6523
6524 for (i = 0; i < cpycnt; i++, di++, si++) {
6525 struct tg3_rx_buffer_desc *sbd, *dbd;
6526 sbd = &spr->rx_std[si];
6527 dbd = &dpr->rx_std[di];
6528 dbd->addr_hi = sbd->addr_hi;
6529 dbd->addr_lo = sbd->addr_lo;
6530 }
6531
6532 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6533 tp->rx_std_ring_mask;
6534 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6535 tp->rx_std_ring_mask;
6536 }
6537
6538 while (1) {
6539 src_prod_idx = spr->rx_jmb_prod_idx;
6540
6541 /* Make sure updates to the rx_jmb_buffers[] entries and
6542 * the jumbo producer index are seen in the correct order.
6543 */
6544 smp_rmb();
6545
6546 if (spr->rx_jmb_cons_idx == src_prod_idx)
6547 break;
6548
6549 if (spr->rx_jmb_cons_idx < src_prod_idx)
6550 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6551 else
6552 cpycnt = tp->rx_jmb_ring_mask + 1 -
6553 spr->rx_jmb_cons_idx;
6554
6555 cpycnt = min(cpycnt,
6556 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6557
6558 si = spr->rx_jmb_cons_idx;
6559 di = dpr->rx_jmb_prod_idx;
6560
6561 for (i = di; i < di + cpycnt; i++) {
6562 if (dpr->rx_jmb_buffers[i].data) {
6563 cpycnt = i - di;
6564 err = -ENOSPC;
6565 break;
6566 }
6567 }
6568
6569 if (!cpycnt)
6570 break;
6571
6572 /* Ensure that updates to the rx_jmb_buffers ring and the
6573 * shadowed hardware producer ring from tg3_recycle_skb() are
6574 * ordered correctly WRT the skb check above.
6575 */
6576 smp_rmb();
6577
6578 memcpy(&dpr->rx_jmb_buffers[di],
6579 &spr->rx_jmb_buffers[si],
6580 cpycnt * sizeof(struct ring_info));
6581
6582 for (i = 0; i < cpycnt; i++, di++, si++) {
6583 struct tg3_rx_buffer_desc *sbd, *dbd;
6584 sbd = &spr->rx_jmb[si].std;
6585 dbd = &dpr->rx_jmb[di].std;
6586 dbd->addr_hi = sbd->addr_hi;
6587 dbd->addr_lo = sbd->addr_lo;
6588 }
6589
6590 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6591 tp->rx_jmb_ring_mask;
6592 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6593 tp->rx_jmb_ring_mask;
6594 }
6595
6596 return err;
6597 }
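/* Worked example of the copy-count math above (illustrative): with a
 * 512-entry standard ring (mask 511), rx_std_cons_idx == 500 and
 * src_prod_idx == 20, the producer has wrapped, so the first pass
 * copies 512 - 500 = 12 entries up to the end of the ring and the
 * outer loop comes around again for the remaining 20. Each pass is
 * additionally clamped to the space in front of dpr->rx_std_prod_idx
 * and shrunk if a destination slot is still occupied.
 */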
6598
6599 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6600 {
6601 struct tg3 *tp = tnapi->tp;
6602
6603 /* run TX completion thread */
6604 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6605 tg3_tx(tnapi);
6606 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6607 return work_done;
6608 }
6609
6610 if (!tnapi->rx_rcb_prod_idx)
6611 return work_done;
6612
6613 /* run RX thread, within the bounds set by NAPI.
6614 * All RX "locking" is done by ensuring outside
6615 * code synchronizes with tg3->napi.poll()
6616 */
6617 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6618 work_done += tg3_rx(tnapi, budget - work_done);
6619
6620 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6621 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6622 int i, err = 0;
6623 u32 std_prod_idx = dpr->rx_std_prod_idx;
6624 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6625
6626 tp->rx_refill = false;
6627 for (i = 1; i <= tp->rxq_cnt; i++)
6628 err |= tg3_rx_prodring_xfer(tp, dpr,
6629 &tp->napi[i].prodring);
6630
6631 wmb();
6632
6633 if (std_prod_idx != dpr->rx_std_prod_idx)
6634 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6635 dpr->rx_std_prod_idx);
6636
6637 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6638 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6639 dpr->rx_jmb_prod_idx);
6640
6641 mmiowb();
6642
6643 if (err)
6644 tw32_f(HOSTCC_MODE, tp->coal_now);
6645 }
6646
6647 return work_done;
6648 }
6649
6650 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6651 {
6652 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6653 schedule_work(&tp->reset_task);
6654 }
6655
6656 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6657 {
6658 cancel_work_sync(&tp->reset_task);
6659 tg3_flag_clear(tp, RESET_TASK_PENDING);
6660 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6661 }
6662
6663 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6664 {
6665 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6666 struct tg3 *tp = tnapi->tp;
6667 int work_done = 0;
6668 struct tg3_hw_status *sblk = tnapi->hw_status;
6669
6670 while (1) {
6671 work_done = tg3_poll_work(tnapi, work_done, budget);
6672
6673 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6674 goto tx_recovery;
6675
6676 if (unlikely(work_done >= budget))
6677 break;
6678
6679 /* tp->last_tag is used in tg3_int_reenable() below
6680 * to tell the hw how much work has been processed,
6681 * so we must read it before checking for more work.
6682 */
6683 tnapi->last_tag = sblk->status_tag;
6684 tnapi->last_irq_tag = tnapi->last_tag;
6685 rmb();
6686
6687 /* check for RX/TX work to do */
6688 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6689 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6690
6691 /* This test here is not race free, but will reduce
6692 * the number of interrupts by looping again.
6693 */
6694 if (tnapi == &tp->napi[1] && tp->rx_refill)
6695 continue;
6696
6697 napi_complete(napi);
6698 /* Reenable interrupts. */
6699 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6700
6701 /* This test here is synchronized by napi_schedule()
6702 * and napi_complete() to close the race condition.
6703 */
6704 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6705 tw32(HOSTCC_MODE, tp->coalesce_mode |
6706 HOSTCC_MODE_ENABLE |
6707 tnapi->coal_now);
6708 }
6709 mmiowb();
6710 break;
6711 }
6712 }
6713
6714 return work_done;
6715
6716 tx_recovery:
6717 /* work_done is guaranteed to be less than budget. */
6718 napi_complete(napi);
6719 tg3_reset_task_schedule(tp);
6720 return work_done;
6721 }
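/* How the tag handshake above re-enables the IRQ (sketch): the chip
 * bumps status_tag on every status block update. Writing
 * last_tag << 24 to the interrupt mailbox both unmasks the vector
 * and reports how far the host has processed; if the chip has posted
 * a newer tag in the meantime, it raises another interrupt right
 * away, so an update cannot be lost between the last poll and
 * napi_complete().
 */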
6722
6723 static void tg3_process_error(struct tg3 *tp)
6724 {
6725 u32 val;
6726 bool real_error = false;
6727
6728 if (tg3_flag(tp, ERROR_PROCESSED))
6729 return;
6730
6731 /* Check Flow Attention register */
6732 val = tr32(HOSTCC_FLOW_ATTN);
6733 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6734 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6735 real_error = true;
6736 }
6737
6738 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6739 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6740 real_error = true;
6741 }
6742
6743 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6744 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6745 real_error = true;
6746 }
6747
6748 if (!real_error)
6749 return;
6750
6751 tg3_dump_state(tp);
6752
6753 tg3_flag_set(tp, ERROR_PROCESSED);
6754 tg3_reset_task_schedule(tp);
6755 }
6756
6757 static int tg3_poll(struct napi_struct *napi, int budget)
6758 {
6759 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6760 struct tg3 *tp = tnapi->tp;
6761 int work_done = 0;
6762 struct tg3_hw_status *sblk = tnapi->hw_status;
6763
6764 while (1) {
6765 if (sblk->status & SD_STATUS_ERROR)
6766 tg3_process_error(tp);
6767
6768 tg3_poll_link(tp);
6769
6770 work_done = tg3_poll_work(tnapi, work_done, budget);
6771
6772 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6773 goto tx_recovery;
6774
6775 if (unlikely(work_done >= budget))
6776 break;
6777
6778 if (tg3_flag(tp, TAGGED_STATUS)) {
6779 /* tp->last_tag is used in tg3_int_reenable() below
6780 * to tell the hw how much work has been processed,
6781 * so we must read it before checking for more work.
6782 */
6783 tnapi->last_tag = sblk->status_tag;
6784 tnapi->last_irq_tag = tnapi->last_tag;
6785 rmb();
6786 } else
6787 sblk->status &= ~SD_STATUS_UPDATED;
6788
6789 if (likely(!tg3_has_work(tnapi))) {
6790 napi_complete(napi);
6791 tg3_int_reenable(tnapi);
6792 break;
6793 }
6794 }
6795
6796 return work_done;
6797
6798 tx_recovery:
6799 /* work_done is guaranteed to be less than budget. */
6800 napi_complete(napi);
6801 tg3_reset_task_schedule(tp);
6802 return work_done;
6803 }
6804
6805 static void tg3_napi_disable(struct tg3 *tp)
6806 {
6807 int i;
6808
6809 for (i = tp->irq_cnt - 1; i >= 0; i--)
6810 napi_disable(&tp->napi[i].napi);
6811 }
6812
6813 static void tg3_napi_enable(struct tg3 *tp)
6814 {
6815 int i;
6816
6817 for (i = 0; i < tp->irq_cnt; i++)
6818 napi_enable(&tp->napi[i].napi);
6819 }
6820
6821 static void tg3_napi_init(struct tg3 *tp)
6822 {
6823 int i;
6824
6825 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6826 for (i = 1; i < tp->irq_cnt; i++)
6827 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6828 }
6829
6830 static void tg3_napi_fini(struct tg3 *tp)
6831 {
6832 int i;
6833
6834 for (i = 0; i < tp->irq_cnt; i++)
6835 netif_napi_del(&tp->napi[i].napi);
6836 }
6837
6838 static inline void tg3_netif_stop(struct tg3 *tp)
6839 {
6840 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6841 tg3_napi_disable(tp);
6842 netif_carrier_off(tp->dev);
6843 netif_tx_disable(tp->dev);
6844 }
6845
6846 /* tp->lock must be held */
6847 static inline void tg3_netif_start(struct tg3 *tp)
6848 {
6849 tg3_ptp_resume(tp);
6850
6851 /* NOTE: unconditional netif_tx_wake_all_queues is only
6852 * appropriate so long as all callers are assured to
6853 * have free tx slots (such as after tg3_init_hw)
6854 */
6855 netif_tx_wake_all_queues(tp->dev);
6856
6857 if (tp->link_up)
6858 netif_carrier_on(tp->dev);
6859
6860 tg3_napi_enable(tp);
6861 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6862 tg3_enable_ints(tp);
6863 }
6864
6865 static void tg3_irq_quiesce(struct tg3 *tp)
6866 {
6867 int i;
6868
6869 BUG_ON(tp->irq_sync);
6870
6871 tp->irq_sync = 1;
6872 smp_mb();
6873
6874 for (i = 0; i < tp->irq_cnt; i++)
6875 synchronize_irq(tp->napi[i].irq_vec);
6876 }
6877
6878 /* Fully shut down all tg3 driver activity elsewhere in the system.
6879 * If irq_sync is non-zero, then the IRQ handlers must be synchronized
6880 * as well. Most of the time, this is not necessary except when
6881 * shutting down the device.
6882 */
6883 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6884 {
6885 spin_lock_bh(&tp->lock);
6886 if (irq_sync)
6887 tg3_irq_quiesce(tp);
6888 }
6889
6890 static inline void tg3_full_unlock(struct tg3 *tp)
6891 {
6892 spin_unlock_bh(&tp->lock);
6893 }
6894
6895 /* One-shot MSI handler - Chip automatically disables interrupt
6896 * after sending MSI so driver doesn't have to do it.
6897 */
6898 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6899 {
6900 struct tg3_napi *tnapi = dev_id;
6901 struct tg3 *tp = tnapi->tp;
6902
6903 prefetch(tnapi->hw_status);
6904 if (tnapi->rx_rcb)
6905 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6906
6907 if (likely(!tg3_irq_sync(tp)))
6908 napi_schedule(&tnapi->napi);
6909
6910 return IRQ_HANDLED;
6911 }
6912
6913 /* MSI ISR - No need to check for interrupt sharing and no need to
6914 * flush status block and interrupt mailbox. PCI ordering rules
6915 * guarantee that MSI will arrive after the status block.
6916 */
6917 static irqreturn_t tg3_msi(int irq, void *dev_id)
6918 {
6919 struct tg3_napi *tnapi = dev_id;
6920 struct tg3 *tp = tnapi->tp;
6921
6922 prefetch(tnapi->hw_status);
6923 if (tnapi->rx_rcb)
6924 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6925 /*
6926 * Writing any value to intr-mbox-0 clears PCI INTA# and
6927 * chip-internal interrupt pending events.
6928 * Writing non-zero to intr-mbox-0 additionally tells the
6929 * NIC to stop sending us irqs, engaging "in-intr-handler"
6930 * event coalescing.
6931 */
6932 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6933 if (likely(!tg3_irq_sync(tp)))
6934 napi_schedule(&tnapi->napi);
6935
6936 return IRQ_RETVAL(1);
6937 }
6938
6939 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6940 {
6941 struct tg3_napi *tnapi = dev_id;
6942 struct tg3 *tp = tnapi->tp;
6943 struct tg3_hw_status *sblk = tnapi->hw_status;
6944 unsigned int handled = 1;
6945
6946 /* In INTx mode, it is possible for the interrupt to arrive at
6947 * the CPU before the status block that was posted prior to the interrupt.
6948 * Reading the PCI State register will confirm whether the
6949 * interrupt is ours and will flush the status block.
6950 */
6951 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6952 if (tg3_flag(tp, CHIP_RESETTING) ||
6953 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6954 handled = 0;
6955 goto out;
6956 }
6957 }
6958
6959 /*
6960 * Writing any value to intr-mbox-0 clears PCI INTA# and
6961 * chip-internal interrupt pending events.
6962 * Writing non-zero to intr-mbox-0 additionally tells the
6963 * NIC to stop sending us irqs, engaging "in-intr-handler"
6964 * event coalescing.
6965 *
6966 * Flush the mailbox to de-assert the IRQ immediately to prevent
6967 * spurious interrupts. The flush impacts performance but
6968 * excessive spurious interrupts can be worse in some cases.
6969 */
6970 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6971 if (tg3_irq_sync(tp))
6972 goto out;
6973 sblk->status &= ~SD_STATUS_UPDATED;
6974 if (likely(tg3_has_work(tnapi))) {
6975 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6976 napi_schedule(&tnapi->napi);
6977 } else {
6978 /* No work, shared interrupt perhaps? re-enable
6979 * interrupts, and flush that PCI write
6980 */
6981 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6982 0x00000000);
6983 }
6984 out:
6985 return IRQ_RETVAL(handled);
6986 }
6987
6988 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6989 {
6990 struct tg3_napi *tnapi = dev_id;
6991 struct tg3 *tp = tnapi->tp;
6992 struct tg3_hw_status *sblk = tnapi->hw_status;
6993 unsigned int handled = 1;
6994
6995 /* In INTx mode, it is possible for the interrupt to arrive at
6996 * the CPU before the status block that was posted prior to the interrupt.
6997 * Reading the PCI State register will confirm whether the
6998 * interrupt is ours and will flush the status block.
6999 */
7000 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7001 if (tg3_flag(tp, CHIP_RESETTING) ||
7002 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7003 handled = 0;
7004 goto out;
7005 }
7006 }
7007
7008 /*
7009 * Writing any value to intr-mbox-0 clears PCI INTA# and
7010 * chip-internal interrupt pending events.
7011 * Writing non-zero to intr-mbox-0 additionally tells the
7012 * NIC to stop sending us irqs, engaging "in-intr-handler"
7013 * event coalescing.
7014 *
7015 * Flush the mailbox to de-assert the IRQ immediately to prevent
7016 * spurious interrupts. The flush impacts performance but
7017 * excessive spurious interrupts can be worse in some cases.
7018 */
7019 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7020
7021 /*
7022 * In a shared interrupt configuration, sometimes other devices'
7023 * interrupts will scream. We record the current status tag here
7024 * so that the above check can report that the screaming interrupts
7025 * are unhandled. Eventually they will be silenced.
7026 */
7027 tnapi->last_irq_tag = sblk->status_tag;
7028
7029 if (tg3_irq_sync(tp))
7030 goto out;
7031
7032 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7033
7034 napi_schedule(&tnapi->napi);
7035
7036 out:
7037 return IRQ_RETVAL(handled);
7038 }
7039
7040 /* ISR for interrupt test */
7041 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7042 {
7043 struct tg3_napi *tnapi = dev_id;
7044 struct tg3 *tp = tnapi->tp;
7045 struct tg3_hw_status *sblk = tnapi->hw_status;
7046
7047 if ((sblk->status & SD_STATUS_UPDATED) ||
7048 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7049 tg3_disable_ints(tp);
7050 return IRQ_RETVAL(1);
7051 }
7052 return IRQ_RETVAL(0);
7053 }
7054
7055 #ifdef CONFIG_NET_POLL_CONTROLLER
7056 static void tg3_poll_controller(struct net_device *dev)
7057 {
7058 int i;
7059 struct tg3 *tp = netdev_priv(dev);
7060
7061 if (tg3_irq_sync(tp))
7062 return;
7063
7064 for (i = 0; i < tp->irq_cnt; i++)
7065 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7066 }
7067 #endif
7068
7069 static void tg3_tx_timeout(struct net_device *dev)
7070 {
7071 struct tg3 *tp = netdev_priv(dev);
7072
7073 if (netif_msg_tx_err(tp)) {
7074 netdev_err(dev, "transmit timed out, resetting\n");
7075 tg3_dump_state(tp);
7076 }
7077
7078 tg3_reset_task_schedule(tp);
7079 }
7080
7081 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
7082 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7083 {
7084 u32 base = (u32) mapping & 0xffffffff;
7085
7086 return (base > 0xffffdcc0) && (base + len + 8 < base);
7087 }
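/* Example (illustrative numbers): a mapping at base == 0xfffff000
 * with len == 0x2000 gives base + len + 8 == 0x1008 after 32-bit
 * truncation, which is less than base, so the buffer straddles a
 * 4GB boundary. The base > 0xffffdcc0 pre-check skips the wrap test
 * unless the buffer starts within 0x2340 (9024) bytes of a boundary,
 * roughly the largest jumbo frame the driver will post.
 */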
7088
7089 /* Test for DMA addresses > 40-bit */
7090 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7091 int len)
7092 {
7093 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7094 if (tg3_flag(tp, 40BIT_DMA_BUG))
7095 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7096 return 0;
7097 #else
7098 return 0;
7099 #endif
7100 }
7101
7102 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7103 dma_addr_t mapping, u32 len, u32 flags,
7104 u32 mss, u32 vlan)
7105 {
7106 txbd->addr_hi = ((u64) mapping >> 32);
7107 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7108 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7109 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7110 }
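/* Example BD encoding (illustrative): a 1514-byte frame mapped at
 * DMA address 0x123456000 with TXD_FLAG_END set becomes
 *
 *   addr_hi   = 0x00000001
 *   addr_lo   = 0x23456000
 *   len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END
 *   vlan_tag  = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT)
 *
 * i.e. the 64-bit address is split into high and low words and the
 * length shares a word with the flags.
 */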
7111
7112 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7113 dma_addr_t map, u32 len, u32 flags,
7114 u32 mss, u32 vlan)
7115 {
7116 struct tg3 *tp = tnapi->tp;
7117 bool hwbug = false;
7118
7119 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7120 hwbug = true;
7121
7122 if (tg3_4g_overflow_test(map, len))
7123 hwbug = true;
7124
7125 if (tg3_40bit_overflow_test(tp, map, len))
7126 hwbug = true;
7127
7128 if (tp->dma_limit) {
7129 u32 prvidx = *entry;
7130 u32 tmp_flag = flags & ~TXD_FLAG_END;
7131 while (len > tp->dma_limit && *budget) {
7132 u32 frag_len = tp->dma_limit;
7133 len -= tp->dma_limit;
7134
7135 /* Avoid the 8-byte DMA problem */
7136 if (len <= 8) {
7137 len += tp->dma_limit / 2;
7138 frag_len = tp->dma_limit / 2;
7139 }
7140
7141 tnapi->tx_buffers[*entry].fragmented = true;
7142
7143 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7144 frag_len, tmp_flag, mss, vlan);
7145 *budget -= 1;
7146 prvidx = *entry;
7147 *entry = NEXT_TX(*entry);
7148
7149 map += frag_len;
7150 }
7151
7152 if (len) {
7153 if (*budget) {
7154 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7155 len, flags, mss, vlan);
7156 *budget -= 1;
7157 *entry = NEXT_TX(*entry);
7158 } else {
7159 hwbug = true;
7160 tnapi->tx_buffers[prvidx].fragmented = false;
7161 }
7162 }
7163 } else {
7164 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7165 len, flags, mss, vlan);
7166 *entry = NEXT_TX(*entry);
7167 }
7168
7169 return hwbug;
7170 }
7171
7172 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7173 {
7174 int i;
7175 struct sk_buff *skb;
7176 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7177
7178 skb = txb->skb;
7179 txb->skb = NULL;
7180
7181 pci_unmap_single(tnapi->tp->pdev,
7182 dma_unmap_addr(txb, mapping),
7183 skb_headlen(skb),
7184 PCI_DMA_TODEVICE);
7185
7186 while (txb->fragmented) {
7187 txb->fragmented = false;
7188 entry = NEXT_TX(entry);
7189 txb = &tnapi->tx_buffers[entry];
7190 }
7191
7192 for (i = 0; i <= last; i++) {
7193 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7194
7195 entry = NEXT_TX(entry);
7196 txb = &tnapi->tx_buffers[entry];
7197
7198 pci_unmap_page(tnapi->tp->pdev,
7199 dma_unmap_addr(txb, mapping),
7200 skb_frag_size(frag), PCI_DMA_TODEVICE);
7201
7202 while (txb->fragmented) {
7203 txb->fragmented = false;
7204 entry = NEXT_TX(entry);
7205 txb = &tnapi->tx_buffers[entry];
7206 }
7207 }
7208 }
7209
7210 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7211 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7212 struct sk_buff **pskb,
7213 u32 *entry, u32 *budget,
7214 u32 base_flags, u32 mss, u32 vlan)
7215 {
7216 struct tg3 *tp = tnapi->tp;
7217 struct sk_buff *new_skb, *skb = *pskb;
7218 dma_addr_t new_addr = 0;
7219 int ret = 0;
7220
7221 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7222 new_skb = skb_copy(skb, GFP_ATOMIC);
7223 else {
7224 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7225
7226 new_skb = skb_copy_expand(skb,
7227 skb_headroom(skb) + more_headroom,
7228 skb_tailroom(skb), GFP_ATOMIC);
7229 }
7230
7231 if (!new_skb) {
7232 ret = -1;
7233 } else {
7234 /* New SKB is guaranteed to be linear. */
7235 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7236 PCI_DMA_TODEVICE);
7237 /* Make sure the mapping succeeded */
7238 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7239 dev_kfree_skb(new_skb);
7240 ret = -1;
7241 } else {
7242 u32 save_entry = *entry;
7243
7244 base_flags |= TXD_FLAG_END;
7245
7246 tnapi->tx_buffers[*entry].skb = new_skb;
7247 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7248 mapping, new_addr);
7249
7250 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7251 new_skb->len, base_flags,
7252 mss, vlan)) {
7253 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7254 dev_kfree_skb(new_skb);
7255 ret = -1;
7256 }
7257 }
7258 }
7259
7260 dev_kfree_skb(skb);
7261 *pskb = new_skb;
7262 return ret;
7263 }
7264
7265 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7266
7267 /* Use GSO to work around a rare TSO bug that may be triggered when the
7268 * TSO header is greater than 80 bytes.
7269 */
7270 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7271 {
7272 struct sk_buff *segs, *nskb;
7273 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7274
7275 /* Estimate the number of fragments in the worst case */
7276 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7277 netif_stop_queue(tp->dev);
7278
7279 /* netif_tx_stop_queue() must be done before checking
7280 * the tx index in tg3_tx_avail() below, because in
7281 * tg3_tx(), we update tx index before checking for
7282 * netif_tx_queue_stopped().
7283 */
7284 smp_mb();
7285 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7286 return NETDEV_TX_BUSY;
7287
7288 netif_wake_queue(tp->dev);
7289 }
7290
7291 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7292 if (IS_ERR(segs))
7293 goto tg3_tso_bug_end;
7294
7295 do {
7296 nskb = segs;
7297 segs = segs->next;
7298 nskb->next = NULL;
7299 tg3_start_xmit(nskb, tp->dev);
7300 } while (segs);
7301
7302 tg3_tso_bug_end:
7303 dev_kfree_skb(skb);
7304
7305 return NETDEV_TX_OK;
7306 }
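/* Rough arithmetic behind frag_cnt_est (an estimate, not a limit the
 * hardware enforces): each segment produced below needs roughly a
 * linear header BD plus payload BDs, so gso_segs * 3 is used as a
 * worst-case reservation; e.g. a TSO skb that segments into 10
 * frames reserves 30 descriptors before segmentation is attempted.
 */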
7307
7308 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7309 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7310 */
7311 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7312 {
7313 struct tg3 *tp = netdev_priv(dev);
7314 u32 len, entry, base_flags, mss, vlan = 0;
7315 u32 budget;
7316 int i = -1, would_hit_hwbug;
7317 dma_addr_t mapping;
7318 struct tg3_napi *tnapi;
7319 struct netdev_queue *txq;
7320 unsigned int last;
7321
7322 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7323 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7324 if (tg3_flag(tp, ENABLE_TSS))
7325 tnapi++;
7326
7327 budget = tg3_tx_avail(tnapi);
7328
7329 /* We are running in BH disabled context with netif_tx_lock
7330 * and TX reclaim runs via tp->napi.poll inside of a software
7331 * interrupt. Furthermore, IRQ processing runs lockless so we have
7332 * no IRQ context deadlocks to worry about either. Rejoice!
7333 */
7334 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7335 if (!netif_tx_queue_stopped(txq)) {
7336 netif_tx_stop_queue(txq);
7337
7338 /* This is a hard error, log it. */
7339 netdev_err(dev,
7340 "BUG! Tx Ring full when queue awake!\n");
7341 }
7342 return NETDEV_TX_BUSY;
7343 }
7344
7345 entry = tnapi->tx_prod;
7346 base_flags = 0;
7347 if (skb->ip_summed == CHECKSUM_PARTIAL)
7348 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7349
7350 mss = skb_shinfo(skb)->gso_size;
7351 if (mss) {
7352 struct iphdr *iph;
7353 u32 tcp_opt_len, hdr_len;
7354
7355 if (skb_header_cloned(skb) &&
7356 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7357 goto drop;
7358
7359 iph = ip_hdr(skb);
7360 tcp_opt_len = tcp_optlen(skb);
7361
7362 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7363
7364 if (!skb_is_gso_v6(skb)) {
7365 iph->check = 0;
7366 iph->tot_len = htons(mss + hdr_len);
7367 }
7368
7369 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7370 tg3_flag(tp, TSO_BUG))
7371 return tg3_tso_bug(tp, skb);
7372
7373 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7374 TXD_FLAG_CPU_POST_DMA);
7375
7376 if (tg3_flag(tp, HW_TSO_1) ||
7377 tg3_flag(tp, HW_TSO_2) ||
7378 tg3_flag(tp, HW_TSO_3)) {
7379 tcp_hdr(skb)->check = 0;
7380 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7381 } else
7382 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7383 iph->daddr, 0,
7384 IPPROTO_TCP,
7385 0);
7386
7387 if (tg3_flag(tp, HW_TSO_3)) {
7388 mss |= (hdr_len & 0xc) << 12;
7389 if (hdr_len & 0x10)
7390 base_flags |= 0x00000010;
7391 base_flags |= (hdr_len & 0x3e0) << 5;
7392 } else if (tg3_flag(tp, HW_TSO_2))
7393 mss |= hdr_len << 9;
7394 else if (tg3_flag(tp, HW_TSO_1) ||
7395 tg3_asic_rev(tp) == ASIC_REV_5705) {
7396 if (tcp_opt_len || iph->ihl > 5) {
7397 int tsflags;
7398
7399 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7400 mss |= (tsflags << 11);
7401 }
7402 } else {
7403 if (tcp_opt_len || iph->ihl > 5) {
7404 int tsflags;
7405
7406 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7407 base_flags |= tsflags << 12;
7408 }
7409 }
7410 }
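/* Sketch of the HW_TSO_3 header-length packing above, assuming
 * hdr_len == 0x34 (20-byte IP header + 32-byte TCP header): bits 2-3
 * land in mss ((0x34 & 0xc) << 12 == 0x4000), bit 4 becomes flag
 * bit 4, and bits 5-9 land in base_flags ((0x34 & 0x3e0) << 5 ==
 * 0x400). The low two bits can be dropped because IP and TCP header
 * lengths are 4-byte multiples.
 */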
7411
7412 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7413 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7414 base_flags |= TXD_FLAG_JMB_PKT;
7415
7416 if (vlan_tx_tag_present(skb)) {
7417 base_flags |= TXD_FLAG_VLAN;
7418 vlan = vlan_tx_tag_get(skb);
7419 }
7420
7421 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7422 tg3_flag(tp, TX_TSTAMP_EN)) {
7423 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7424 base_flags |= TXD_FLAG_HWTSTAMP;
7425 }
7426
7427 len = skb_headlen(skb);
7428
7429 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7430 if (pci_dma_mapping_error(tp->pdev, mapping))
7431 goto drop;
7432
7433
7434 tnapi->tx_buffers[entry].skb = skb;
7435 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7436
7437 would_hit_hwbug = 0;
7438
7439 if (tg3_flag(tp, 5701_DMA_BUG))
7440 would_hit_hwbug = 1;
7441
7442 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7443 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7444 mss, vlan)) {
7445 would_hit_hwbug = 1;
7446 } else if (skb_shinfo(skb)->nr_frags > 0) {
7447 u32 tmp_mss = mss;
7448
7449 if (!tg3_flag(tp, HW_TSO_1) &&
7450 !tg3_flag(tp, HW_TSO_2) &&
7451 !tg3_flag(tp, HW_TSO_3))
7452 tmp_mss = 0;
7453
7454 /* Now loop through additional data
7455 * fragments, and queue them.
7456 */
7457 last = skb_shinfo(skb)->nr_frags - 1;
7458 for (i = 0; i <= last; i++) {
7459 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7460
7461 len = skb_frag_size(frag);
7462 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7463 len, DMA_TO_DEVICE);
7464
7465 tnapi->tx_buffers[entry].skb = NULL;
7466 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7467 mapping);
7468 if (dma_mapping_error(&tp->pdev->dev, mapping))
7469 goto dma_error;
7470
7471 if (!budget ||
7472 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7473 len, base_flags |
7474 ((i == last) ? TXD_FLAG_END : 0),
7475 tmp_mss, vlan)) {
7476 would_hit_hwbug = 1;
7477 break;
7478 }
7479 }
7480 }
7481
7482 if (would_hit_hwbug) {
7483 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7484
7485 /* If the workaround fails due to memory/mapping
7486 * failure, silently drop this packet.
7487 */
7488 entry = tnapi->tx_prod;
7489 budget = tg3_tx_avail(tnapi);
7490 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7491 base_flags, mss, vlan))
7492 goto drop_nofree;
7493 }
7494
7495 skb_tx_timestamp(skb);
7496 netdev_tx_sent_queue(txq, skb->len);
7497
7498 /* Sync BD data before updating mailbox */
7499 wmb();
7500
7501 /* Packets are ready, update Tx producer idx local and on card. */
7502 tw32_tx_mbox(tnapi->prodmbox, entry);
7503
7504 tnapi->tx_prod = entry;
7505 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7506 netif_tx_stop_queue(txq);
7507
7508 /* netif_tx_stop_queue() must be done before checking
7509 * the tx index in tg3_tx_avail() below, because in
7510 * tg3_tx(), we update tx index before checking for
7511 * netif_tx_queue_stopped().
7512 */
7513 smp_mb();
7514 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7515 netif_tx_wake_queue(txq);
7516 }
7517
7518 mmiowb();
7519 return NETDEV_TX_OK;
7520
7521 dma_error:
7522 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7523 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7524 drop:
7525 dev_kfree_skb(skb);
7526 drop_nofree:
7527 tp->tx_dropped++;
7528 return NETDEV_TX_OK;
7529 }
7530
7531 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7532 {
7533 if (enable) {
7534 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7535 MAC_MODE_PORT_MODE_MASK);
7536
7537 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7538
7539 if (!tg3_flag(tp, 5705_PLUS))
7540 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7541
7542 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7543 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7544 else
7545 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7546 } else {
7547 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7548
7549 if (tg3_flag(tp, 5705_PLUS) ||
7550 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7551 tg3_asic_rev(tp) == ASIC_REV_5700)
7552 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7553 }
7554
7555 tw32(MAC_MODE, tp->mac_mode);
7556 udelay(40);
7557 }
7558
7559 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7560 {
7561 u32 val, bmcr, mac_mode, ptest = 0;
7562
7563 tg3_phy_toggle_apd(tp, false);
7564 tg3_phy_toggle_automdix(tp, 0);
7565
7566 if (extlpbk && tg3_phy_set_extloopbk(tp))
7567 return -EIO;
7568
7569 bmcr = BMCR_FULLDPLX;
7570 switch (speed) {
7571 case SPEED_10:
7572 break;
7573 case SPEED_100:
7574 bmcr |= BMCR_SPEED100;
7575 break;
7576 case SPEED_1000:
7577 default:
7578 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7579 speed = SPEED_100;
7580 bmcr |= BMCR_SPEED100;
7581 } else {
7582 speed = SPEED_1000;
7583 bmcr |= BMCR_SPEED1000;
7584 }
7585 }
7586
7587 if (extlpbk) {
7588 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7589 tg3_readphy(tp, MII_CTRL1000, &val);
7590 val |= CTL1000_AS_MASTER |
7591 CTL1000_ENABLE_MASTER;
7592 tg3_writephy(tp, MII_CTRL1000, val);
7593 } else {
7594 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7595 MII_TG3_FET_PTEST_TRIM_2;
7596 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7597 }
7598 } else
7599 bmcr |= BMCR_LOOPBACK;
7600
7601 tg3_writephy(tp, MII_BMCR, bmcr);
7602
7603 /* The write needs to be flushed for the FETs */
7604 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7605 tg3_readphy(tp, MII_BMCR, &bmcr);
7606
7607 udelay(40);
7608
7609 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7610 tg3_asic_rev(tp) == ASIC_REV_5785) {
7611 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7612 MII_TG3_FET_PTEST_FRC_TX_LINK |
7613 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7614
7615 /* The write needs to be flushed for the AC131 */
7616 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7617 }
7618
7619 /* Reset to prevent losing 1st rx packet intermittently */
7620 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7621 tg3_flag(tp, 5780_CLASS)) {
7622 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7623 udelay(10);
7624 tw32_f(MAC_RX_MODE, tp->rx_mode);
7625 }
7626
7627 mac_mode = tp->mac_mode &
7628 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7629 if (speed == SPEED_1000)
7630 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7631 else
7632 mac_mode |= MAC_MODE_PORT_MODE_MII;
7633
7634 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7635 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7636
7637 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7638 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7639 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7640 mac_mode |= MAC_MODE_LINK_POLARITY;
7641
7642 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7643 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7644 }
7645
7646 tw32(MAC_MODE, mac_mode);
7647 udelay(40);
7648
7649 return 0;
7650 }
7651
7652 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7653 {
7654 struct tg3 *tp = netdev_priv(dev);
7655
7656 if (features & NETIF_F_LOOPBACK) {
7657 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7658 return;
7659
7660 spin_lock_bh(&tp->lock);
7661 tg3_mac_loopback(tp, true);
7662 netif_carrier_on(tp->dev);
7663 spin_unlock_bh(&tp->lock);
7664 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7665 } else {
7666 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7667 return;
7668
7669 spin_lock_bh(&tp->lock);
7670 tg3_mac_loopback(tp, false);
7671 /* Force link status check */
7672 tg3_setup_phy(tp, 1);
7673 spin_unlock_bh(&tp->lock);
7674 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7675 }
7676 }
7677
7678 static netdev_features_t tg3_fix_features(struct net_device *dev,
7679 netdev_features_t features)
7680 {
7681 struct tg3 *tp = netdev_priv(dev);
7682
7683 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7684 features &= ~NETIF_F_ALL_TSO;
7685
7686 return features;
7687 }
7688
7689 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7690 {
7691 netdev_features_t changed = dev->features ^ features;
7692
7693 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7694 tg3_set_loopback(dev, features);
7695
7696 return 0;
7697 }
7698
7699 static void tg3_rx_prodring_free(struct tg3 *tp,
7700 struct tg3_rx_prodring_set *tpr)
7701 {
7702 int i;
7703
7704 if (tpr != &tp->napi[0].prodring) {
7705 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7706 i = (i + 1) & tp->rx_std_ring_mask)
7707 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7708 tp->rx_pkt_map_sz);
7709
7710 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7711 for (i = tpr->rx_jmb_cons_idx;
7712 i != tpr->rx_jmb_prod_idx;
7713 i = (i + 1) & tp->rx_jmb_ring_mask) {
7714 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7715 TG3_RX_JMB_MAP_SZ);
7716 }
7717 }
7718
7719 return;
7720 }
7721
7722 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7723 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7724 tp->rx_pkt_map_sz);
7725
7726 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7727 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7728 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7729 TG3_RX_JMB_MAP_SZ);
7730 }
7731 }
7732
7733 /* Initialize rx rings for packet processing.
7734 *
7735 * The chip has been shut down and the driver detached from
7736 * the networking, so no interrupts or new tx packets will
7737 * end up in the driver. tp->{tx,}lock are held and thus
7738 * we may not sleep.
7739 */
7740 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7741 struct tg3_rx_prodring_set *tpr)
7742 {
7743 u32 i, rx_pkt_dma_sz;
7744
7745 tpr->rx_std_cons_idx = 0;
7746 tpr->rx_std_prod_idx = 0;
7747 tpr->rx_jmb_cons_idx = 0;
7748 tpr->rx_jmb_prod_idx = 0;
7749
7750 if (tpr != &tp->napi[0].prodring) {
7751 memset(&tpr->rx_std_buffers[0], 0,
7752 TG3_RX_STD_BUFF_RING_SIZE(tp));
7753 if (tpr->rx_jmb_buffers)
7754 memset(&tpr->rx_jmb_buffers[0], 0,
7755 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7756 goto done;
7757 }
7758
7759 /* Zero out all descriptors. */
7760 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7761
7762 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7763 if (tg3_flag(tp, 5780_CLASS) &&
7764 tp->dev->mtu > ETH_DATA_LEN)
7765 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7766 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7767
7768 /* Initialize invariants of the rings; we only set this
7769 * stuff once. This works because the card does not
7770 * write into the rx buffer posting rings.
7771 */
7772 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7773 struct tg3_rx_buffer_desc *rxd;
7774
7775 rxd = &tpr->rx_std[i];
7776 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7777 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7778 rxd->opaque = (RXD_OPAQUE_RING_STD |
7779 (i << RXD_OPAQUE_INDEX_SHIFT));
7780 }
7781
7782 /* Now allocate fresh SKBs for each rx ring. */
7783 for (i = 0; i < tp->rx_pending; i++) {
7784 unsigned int frag_size;
7785
7786 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7787 &frag_size) < 0) {
7788 netdev_warn(tp->dev,
7789 "Using a smaller RX standard ring. Only "
7790 "%d out of %d buffers were allocated "
7791 "successfully\n", i, tp->rx_pending);
7792 if (i == 0)
7793 goto initfail;
7794 tp->rx_pending = i;
7795 break;
7796 }
7797 }
7798
7799 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7800 goto done;
7801
7802 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7803
7804 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7805 goto done;
7806
7807 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7808 struct tg3_rx_buffer_desc *rxd;
7809
7810 rxd = &tpr->rx_jmb[i].std;
7811 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7812 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7813 RXD_FLAG_JUMBO;
7814 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7815 (i << RXD_OPAQUE_INDEX_SHIFT));
7816 }
7817
7818 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7819 unsigned int frag_size;
7820
7821 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7822 &frag_size) < 0) {
7823 netdev_warn(tp->dev,
7824 "Using a smaller RX jumbo ring. Only %d "
7825 "out of %d buffers were allocated "
7826 "successfully\n", i, tp->rx_jumbo_pending);
7827 if (i == 0)
7828 goto initfail;
7829 tp->rx_jumbo_pending = i;
7830 break;
7831 }
7832 }
7833
7834 done:
7835 return 0;
7836
7837 initfail:
7838 tg3_rx_prodring_free(tp, tpr);
7839 return -ENOMEM;
7840 }
7841
7842 static void tg3_rx_prodring_fini(struct tg3 *tp,
7843 struct tg3_rx_prodring_set *tpr)
7844 {
7845 kfree(tpr->rx_std_buffers);
7846 tpr->rx_std_buffers = NULL;
7847 kfree(tpr->rx_jmb_buffers);
7848 tpr->rx_jmb_buffers = NULL;
7849 if (tpr->rx_std) {
7850 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7851 tpr->rx_std, tpr->rx_std_mapping);
7852 tpr->rx_std = NULL;
7853 }
7854 if (tpr->rx_jmb) {
7855 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7856 tpr->rx_jmb, tpr->rx_jmb_mapping);
7857 tpr->rx_jmb = NULL;
7858 }
7859 }
7860
7861 static int tg3_rx_prodring_init(struct tg3 *tp,
7862 struct tg3_rx_prodring_set *tpr)
7863 {
7864 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7865 GFP_KERNEL);
7866 if (!tpr->rx_std_buffers)
7867 return -ENOMEM;
7868
7869 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7870 TG3_RX_STD_RING_BYTES(tp),
7871 &tpr->rx_std_mapping,
7872 GFP_KERNEL);
7873 if (!tpr->rx_std)
7874 goto err_out;
7875
7876 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7877 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7878 GFP_KERNEL);
7879 if (!tpr->rx_jmb_buffers)
7880 goto err_out;
7881
7882 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7883 TG3_RX_JMB_RING_BYTES(tp),
7884 &tpr->rx_jmb_mapping,
7885 GFP_KERNEL);
7886 if (!tpr->rx_jmb)
7887 goto err_out;
7888 }
7889
7890 return 0;
7891
7892 err_out:
7893 tg3_rx_prodring_fini(tp, tpr);
7894 return -ENOMEM;
7895 }
7896
7897 /* Free up pending packets in all rx/tx rings.
7898 *
7899 * The chip has been shut down and the driver detached from
7900 * the networking, so no interrupts or new tx packets will
7901 * end up in the driver. tp->{tx,}lock is not held and we are not
7902 * in an interrupt context and thus may sleep.
7903 */
7904 static void tg3_free_rings(struct tg3 *tp)
7905 {
7906 int i, j;
7907
7908 for (j = 0; j < tp->irq_cnt; j++) {
7909 struct tg3_napi *tnapi = &tp->napi[j];
7910
7911 tg3_rx_prodring_free(tp, &tnapi->prodring);
7912
7913 if (!tnapi->tx_buffers)
7914 continue;
7915
7916 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7917 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7918
7919 if (!skb)
7920 continue;
7921
7922 tg3_tx_skb_unmap(tnapi, i,
7923 skb_shinfo(skb)->nr_frags - 1);
7924
7925 dev_kfree_skb_any(skb);
7926 }
7927 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7928 }
7929 }
7930
7931 /* Initialize tx/rx rings for packet processing.
7932 *
7933 * The chip has been shut down and the driver detached from
7934 * the networking, so no interrupts or new tx packets will
7935 * end up in the driver. tp->{tx,}lock are held and thus
7936 * we may not sleep.
7937 */
7938 static int tg3_init_rings(struct tg3 *tp)
7939 {
7940 int i;
7941
7942 /* Free up all the SKBs. */
7943 tg3_free_rings(tp);
7944
7945 for (i = 0; i < tp->irq_cnt; i++) {
7946 struct tg3_napi *tnapi = &tp->napi[i];
7947
7948 tnapi->last_tag = 0;
7949 tnapi->last_irq_tag = 0;
7950 tnapi->hw_status->status = 0;
7951 tnapi->hw_status->status_tag = 0;
7952 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7953
7954 tnapi->tx_prod = 0;
7955 tnapi->tx_cons = 0;
7956 if (tnapi->tx_ring)
7957 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7958
7959 tnapi->rx_rcb_ptr = 0;
7960 if (tnapi->rx_rcb)
7961 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7962
7963 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7964 tg3_free_rings(tp);
7965 return -ENOMEM;
7966 }
7967 }
7968
7969 return 0;
7970 }
7971
7972 static void tg3_mem_tx_release(struct tg3 *tp)
7973 {
7974 int i;
7975
7976 for (i = 0; i < tp->irq_max; i++) {
7977 struct tg3_napi *tnapi = &tp->napi[i];
7978
7979 if (tnapi->tx_ring) {
7980 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7981 tnapi->tx_ring, tnapi->tx_desc_mapping);
7982 tnapi->tx_ring = NULL;
7983 }
7984
7985 kfree(tnapi->tx_buffers);
7986 tnapi->tx_buffers = NULL;
7987 }
7988 }
7989
7990 static int tg3_mem_tx_acquire(struct tg3 *tp)
7991 {
7992 int i;
7993 struct tg3_napi *tnapi = &tp->napi[0];
7994
7995 /* If multivector TSS is enabled, vector 0 does not handle
7996 * tx interrupts. Don't allocate any resources for it.
7997 */
7998 if (tg3_flag(tp, ENABLE_TSS))
7999 tnapi++;
8000
8001 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8002 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8003 TG3_TX_RING_SIZE, GFP_KERNEL);
8004 if (!tnapi->tx_buffers)
8005 goto err_out;
8006
8007 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8008 TG3_TX_RING_BYTES,
8009 &tnapi->tx_desc_mapping,
8010 GFP_KERNEL);
8011 if (!tnapi->tx_ring)
8012 goto err_out;
8013 }
8014
8015 return 0;
8016
8017 err_out:
8018 tg3_mem_tx_release(tp);
8019 return -ENOMEM;
8020 }
8021
8022 static void tg3_mem_rx_release(struct tg3 *tp)
8023 {
8024 int i;
8025
8026 for (i = 0; i < tp->irq_max; i++) {
8027 struct tg3_napi *tnapi = &tp->napi[i];
8028
8029 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8030
8031 if (!tnapi->rx_rcb)
8032 continue;
8033
8034 dma_free_coherent(&tp->pdev->dev,
8035 TG3_RX_RCB_RING_BYTES(tp),
8036 tnapi->rx_rcb,
8037 tnapi->rx_rcb_mapping);
8038 tnapi->rx_rcb = NULL;
8039 }
8040 }
8041
8042 static int tg3_mem_rx_acquire(struct tg3 *tp)
8043 {
8044 unsigned int i, limit;
8045
8046 limit = tp->rxq_cnt;
8047
8048 /* If RSS is enabled, we need a (dummy) producer ring
8049 * set on vector zero. This is the true hw prodring.
8050 */
8051 if (tg3_flag(tp, ENABLE_RSS))
8052 limit++;
8053
8054 for (i = 0; i < limit; i++) {
8055 struct tg3_napi *tnapi = &tp->napi[i];
8056
8057 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8058 goto err_out;
8059
8060 /* If multivector RSS is enabled, vector 0
8061 * does not handle rx or tx interrupts.
8062 * Don't allocate any resources for it.
8063 */
8064 if (!i && tg3_flag(tp, ENABLE_RSS))
8065 continue;
8066
8067 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8068 TG3_RX_RCB_RING_BYTES(tp),
8069 &tnapi->rx_rcb_mapping,
8070 GFP_KERNEL);
8071 if (!tnapi->rx_rcb)
8072 goto err_out;
8073
8074 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8075 }
8076
8077 return 0;
8078
8079 err_out:
8080 tg3_mem_rx_release(tp);
8081 return -ENOMEM;
8082 }
8083
8084 /*
8085 * Must not be invoked with interrupt sources disabled and
8086 * the hardware shut down.
8087 */
8088 static void tg3_free_consistent(struct tg3 *tp)
8089 {
8090 int i;
8091
8092 for (i = 0; i < tp->irq_cnt; i++) {
8093 struct tg3_napi *tnapi = &tp->napi[i];
8094
8095 if (tnapi->hw_status) {
8096 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8097 tnapi->hw_status,
8098 tnapi->status_mapping);
8099 tnapi->hw_status = NULL;
8100 }
8101 }
8102
8103 tg3_mem_rx_release(tp);
8104 tg3_mem_tx_release(tp);
8105
8106 if (tp->hw_stats) {
8107 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8108 tp->hw_stats, tp->stats_mapping);
8109 tp->hw_stats = NULL;
8110 }
8111 }
8112
8113 /*
8114 * Must not be invoked with interrupt sources disabled and
8115 * the hardware shut down. Can sleep.
8116 */
8117 static int tg3_alloc_consistent(struct tg3 *tp)
8118 {
8119 int i;
8120
8121 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8122 sizeof(struct tg3_hw_stats),
8123 &tp->stats_mapping,
8124 GFP_KERNEL);
8125 if (!tp->hw_stats)
8126 goto err_out;
8127
8128 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8129
8130 for (i = 0; i < tp->irq_cnt; i++) {
8131 struct tg3_napi *tnapi = &tp->napi[i];
8132 struct tg3_hw_status *sblk;
8133
8134 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8135 TG3_HW_STATUS_SIZE,
8136 &tnapi->status_mapping,
8137 GFP_KERNEL);
8138 if (!tnapi->hw_status)
8139 goto err_out;
8140
8141 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8142 sblk = tnapi->hw_status;
8143
8144 if (tg3_flag(tp, ENABLE_RSS)) {
8145 u16 *prodptr = NULL;
8146
8147 /*
8148 * When RSS is enabled, the status block format changes
8149 * slightly. The "rx_jumbo_consumer", "reserved",
8150 * and "rx_mini_consumer" members get mapped to the
8151 * other three rx return ring producer indexes.
8152 */
8153 switch (i) {
8154 case 1:
8155 prodptr = &sblk->idx[0].rx_producer;
8156 break;
8157 case 2:
8158 prodptr = &sblk->rx_jumbo_consumer;
8159 break;
8160 case 3:
8161 prodptr = &sblk->reserved;
8162 break;
8163 case 4:
8164 prodptr = &sblk->rx_mini_consumer;
8165 break;
8166 }
8167 tnapi->rx_rcb_prod_idx = prodptr;
8168 } else {
8169 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8170 }
8171 }
8172
8173 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8174 goto err_out;
8175
8176 return 0;
8177
8178 err_out:
8179 tg3_free_consistent(tp);
8180 return -ENOMEM;
8181 }
8182
8183 #define MAX_WAIT_CNT 1000
8184
8185 /* To stop a block, clear the enable bit and poll till it
8186 * clears. tp->lock is held.
8187 */
8188 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8189 {
8190 unsigned int i;
8191 u32 val;
8192
8193 if (tg3_flag(tp, 5705_PLUS)) {
8194 switch (ofs) {
8195 case RCVLSC_MODE:
8196 case DMAC_MODE:
8197 case MBFREE_MODE:
8198 case BUFMGR_MODE:
8199 case MEMARB_MODE:
8200 /* We can't enable/disable these bits on the
8201 * 5705/5750, so just report success.
8202 */
8203 return 0;
8204
8205 default:
8206 break;
8207 }
8208 }
8209
8210 val = tr32(ofs);
8211 val &= ~enable_bit;
8212 tw32_f(ofs, val);
8213
8214 for (i = 0; i < MAX_WAIT_CNT; i++) {
8215 udelay(100);
8216 val = tr32(ofs);
8217 if ((val & enable_bit) == 0)
8218 break;
8219 }
8220
8221 if (i == MAX_WAIT_CNT && !silent) {
8222 dev_err(&tp->pdev->dev,
8223 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8224 ofs, enable_bit);
8225 return -ENODEV;
8226 }
8227
8228 return 0;
8229 }
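
/*
 * Editor's sketch (not part of the driver): tg3_stop_block() above is the
 * generic "clear the enable bit, flush the posted write, poll until the
 * bit reads back clear" pattern.  With MAX_WAIT_CNT == 1000 iterations of
 * udelay(100), the timeout works out to roughly 100 ms.  A minimal
 * standalone rendition, where read_reg()/write_reg()/delay_us() are
 * hypothetical stand-ins for tr32()/tw32_f()/udelay():
 */
#if 0	/* illustrative only, excluded from the build */
static int stop_block_sketch(unsigned long ofs, unsigned int enable_bit)
{
	unsigned int i;

	/* Clear the enable bit and flush the posted write. */
	write_reg(ofs, read_reg(ofs) & ~enable_bit);

	for (i = 0; i < 1000; i++) {	/* MAX_WAIT_CNT */
		delay_us(100);
		if (!(read_reg(ofs) & enable_bit))
			return 0;	/* block has halted */
	}
	return -1;			/* ~100 ms elapsed, give up */
}
#endif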
8230
8231 /* tp->lock is held. */
8232 static int tg3_abort_hw(struct tg3 *tp, int silent)
8233 {
8234 int i, err;
8235
8236 tg3_disable_ints(tp);
8237
8238 tp->rx_mode &= ~RX_MODE_ENABLE;
8239 tw32_f(MAC_RX_MODE, tp->rx_mode);
8240 udelay(10);
8241
8242 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8243 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8244 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8245 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8246 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8247 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8248
8249 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8250 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8251 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8252 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8253 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8254 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8255 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8256
8257 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8258 tw32_f(MAC_MODE, tp->mac_mode);
8259 udelay(40);
8260
8261 tp->tx_mode &= ~TX_MODE_ENABLE;
8262 tw32_f(MAC_TX_MODE, tp->tx_mode);
8263
8264 for (i = 0; i < MAX_WAIT_CNT; i++) {
8265 udelay(100);
8266 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8267 break;
8268 }
8269 if (i >= MAX_WAIT_CNT) {
8270 dev_err(&tp->pdev->dev,
8271 "%s timed out, TX_MODE_ENABLE will not clear "
8272 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8273 err |= -ENODEV;
8274 }
8275
8276 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8277 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8278 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8279
8280 tw32(FTQ_RESET, 0xffffffff);
8281 tw32(FTQ_RESET, 0x00000000);
8282
8283 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8284 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8285
8286 for (i = 0; i < tp->irq_cnt; i++) {
8287 struct tg3_napi *tnapi = &tp->napi[i];
8288 if (tnapi->hw_status)
8289 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8290 }
8291
8292 return err;
8293 }
8294
8295 /* Save PCI command register before chip reset */
8296 static void tg3_save_pci_state(struct tg3 *tp)
8297 {
8298 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8299 }
8300
8301 /* Restore PCI state after chip reset */
8302 static void tg3_restore_pci_state(struct tg3 *tp)
8303 {
8304 u32 val;
8305
8306 /* Re-enable indirect register accesses. */
8307 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8308 tp->misc_host_ctrl);
8309
8310 /* Set MAX PCI retry to zero. */
8311 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8312 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8313 tg3_flag(tp, PCIX_MODE))
8314 val |= PCISTATE_RETRY_SAME_DMA;
8315 /* Allow reads and writes to the APE register and memory space. */
8316 if (tg3_flag(tp, ENABLE_APE))
8317 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8318 PCISTATE_ALLOW_APE_SHMEM_WR |
8319 PCISTATE_ALLOW_APE_PSPACE_WR;
8320 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8321
8322 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8323
8324 if (!tg3_flag(tp, PCI_EXPRESS)) {
8325 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8326 tp->pci_cacheline_sz);
8327 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8328 tp->pci_lat_timer);
8329 }
8330
8331 /* Make sure PCI-X relaxed ordering bit is clear. */
8332 if (tg3_flag(tp, PCIX_MODE)) {
8333 u16 pcix_cmd;
8334
8335 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8336 &pcix_cmd);
8337 pcix_cmd &= ~PCI_X_CMD_ERO;
8338 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8339 pcix_cmd);
8340 }
8341
8342 if (tg3_flag(tp, 5780_CLASS)) {
8343
8344 /* Chip reset on 5780 will reset MSI enable bit,
8345 * so we need to restore it.
8346 */
8347 if (tg3_flag(tp, USING_MSI)) {
8348 u16 ctrl;
8349
8350 pci_read_config_word(tp->pdev,
8351 tp->msi_cap + PCI_MSI_FLAGS,
8352 &ctrl);
8353 pci_write_config_word(tp->pdev,
8354 tp->msi_cap + PCI_MSI_FLAGS,
8355 ctrl | PCI_MSI_FLAGS_ENABLE);
8356 val = tr32(MSGINT_MODE);
8357 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8358 }
8359 }
8360 }
8361
8362 /* tp->lock is held. */
8363 static int tg3_chip_reset(struct tg3 *tp)
8364 {
8365 u32 val;
8366 void (*write_op)(struct tg3 *, u32, u32);
8367 int i, err;
8368
8369 tg3_nvram_lock(tp);
8370
8371 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8372
8373 /* No matching tg3_nvram_unlock() after this because
8374 * chip reset below will undo the nvram lock.
8375 */
8376 tp->nvram_lock_cnt = 0;
8377
8378 /* GRC_MISC_CFG core clock reset will clear the memory
8379 * enable bit in PCI register 4 and the MSI enable bit
8380 * on some chips, so we save relevant registers here.
8381 */
8382 tg3_save_pci_state(tp);
8383
8384 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8385 tg3_flag(tp, 5755_PLUS))
8386 tw32(GRC_FASTBOOT_PC, 0);
8387
8388 /*
8389 * We must avoid the readl() that normally takes place.
8390 * It locks machines, causes machine checks, and other
8391 * fun things. So, temporarily disable the 5701
8392 * hardware workaround, while we do the reset.
8393 */
8394 write_op = tp->write32;
8395 if (write_op == tg3_write_flush_reg32)
8396 tp->write32 = tg3_write32;
8397
8398 /* Prevent the irq handler from reading or writing PCI registers
8399 * during chip reset when the memory enable bit in the PCI command
8400 * register may be cleared. The chip does not generate interrupts
8401 * at this time, but the irq handler may still be called due to irq
8402 * sharing or irqpoll.
8403 */
8404 tg3_flag_set(tp, CHIP_RESETTING);
8405 for (i = 0; i < tp->irq_cnt; i++) {
8406 struct tg3_napi *tnapi = &tp->napi[i];
8407 if (tnapi->hw_status) {
8408 tnapi->hw_status->status = 0;
8409 tnapi->hw_status->status_tag = 0;
8410 }
8411 tnapi->last_tag = 0;
8412 tnapi->last_irq_tag = 0;
8413 }
8414 smp_mb();
8415
8416 for (i = 0; i < tp->irq_cnt; i++)
8417 synchronize_irq(tp->napi[i].irq_vec);
8418
8419 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8420 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8421 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8422 }
8423
8424 /* do the reset */
8425 val = GRC_MISC_CFG_CORECLK_RESET;
8426
8427 if (tg3_flag(tp, PCI_EXPRESS)) {
8428 /* Force PCIe 1.0a mode */
8429 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8430 !tg3_flag(tp, 57765_PLUS) &&
8431 tr32(TG3_PCIE_PHY_TSTCTL) ==
8432 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8433 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8434
8435 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8436 tw32(GRC_MISC_CFG, (1 << 29));
8437 val |= (1 << 29);
8438 }
8439 }
8440
8441 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8442 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8443 tw32(GRC_VCPU_EXT_CTRL,
8444 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8445 }
8446
8447 /* Manage gphy power for all CPMU absent PCIe devices. */
8448 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8449 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8450
8451 tw32(GRC_MISC_CFG, val);
8452
8453 /* restore 5701 hardware bug workaround write method */
8454 tp->write32 = write_op;
8455
8456 /* Unfortunately, we have to delay before the PCI read back.
8457 * Some 575X chips will not even respond to a PCI cfg access
8458 * when the reset command is given to the chip.
8459 *
8460 * How do these hardware designers expect things to work
8461 * properly if the PCI write is posted for a long period
8462 * of time? It is always necessary to have some method by
8463 * which a register read back can occur to push the posted
8464 * write out, which is what triggers the reset.
8465 *
8466 * For most tg3 variants the trick below has worked.
8467 * Ho hum...
8468 */
8469 udelay(120);
8470
8471 /* Flush PCI posted writes. The normal MMIO registers
8472 * are inaccessible at this time, so this is the only
8473 * way to do this reliably (actually, this is no longer
8474 * the case, see above). I tried to use indirect
8475 * register read/write but this upset some 5701 variants.
8476 */
8477 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8478
8479 udelay(120);
8480
8481 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8482 u16 val16;
8483
8484 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8485 int j;
8486 u32 cfg_val;
8487
8488 /* Wait for link training to complete. */
8489 for (j = 0; j < 5000; j++)
8490 udelay(100);
8491
8492 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8493 pci_write_config_dword(tp->pdev, 0xc4,
8494 cfg_val | (1 << 15));
8495 }
8496
8497 /* Clear the "no snoop" and "relaxed ordering" bits. */
8498 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8499 /*
8500 * Older PCIe devices only support the 128 byte
8501 * MPS setting. Enforce the restriction.
8502 */
8503 if (!tg3_flag(tp, CPMU_PRESENT))
8504 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8505 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8506
8507 /* Clear error status */
8508 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8509 PCI_EXP_DEVSTA_CED |
8510 PCI_EXP_DEVSTA_NFED |
8511 PCI_EXP_DEVSTA_FED |
8512 PCI_EXP_DEVSTA_URD);
8513 }
8514
8515 tg3_restore_pci_state(tp);
8516
8517 tg3_flag_clear(tp, CHIP_RESETTING);
8518 tg3_flag_clear(tp, ERROR_PROCESSED);
8519
8520 val = 0;
8521 if (tg3_flag(tp, 5780_CLASS))
8522 val = tr32(MEMARB_MODE);
8523 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8524
8525 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8526 tg3_stop_fw(tp);
8527 tw32(0x5000, 0x400);
8528 }
8529
8530 if (tg3_flag(tp, IS_SSB_CORE)) {
8531 /*
8532 * BCM4785: In order to avoid repercussions from using
8533 * potentially defective internal ROM, stop the Rx RISC CPU,
8534 * which is not required on this platform.
8535 */
8536 tg3_stop_fw(tp);
8537 tg3_halt_cpu(tp, RX_CPU_BASE);
8538 }
8539
8540 tw32(GRC_MODE, tp->grc_mode);
8541
8542 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8543 val = tr32(0xc4);
8544
8545 tw32(0xc4, val | (1 << 15));
8546 }
8547
8548 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8549 tg3_asic_rev(tp) == ASIC_REV_5705) {
8550 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8551 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8552 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8553 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8554 }
8555
8556 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8557 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8558 val = tp->mac_mode;
8559 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8560 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8561 val = tp->mac_mode;
8562 } else
8563 val = 0;
8564
8565 tw32_f(MAC_MODE, val);
8566 udelay(40);
8567
8568 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8569
8570 err = tg3_poll_fw(tp);
8571 if (err)
8572 return err;
8573
8574 tg3_mdio_start(tp);
8575
8576 if (tg3_flag(tp, PCI_EXPRESS) &&
8577 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8578 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8579 !tg3_flag(tp, 57765_PLUS)) {
8580 val = tr32(0x7c00);
8581
8582 tw32(0x7c00, val | (1 << 25));
8583 }
8584
8585 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8586 val = tr32(TG3_CPMU_CLCK_ORIDE);
8587 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8588 }
8589
8590 /* Reprobe ASF enable state. */
8591 tg3_flag_clear(tp, ENABLE_ASF);
8592 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8593 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8594 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8595 u32 nic_cfg;
8596
8597 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8598 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8599 tg3_flag_set(tp, ENABLE_ASF);
8600 tp->last_event_jiffies = jiffies;
8601 if (tg3_flag(tp, 5750_PLUS))
8602 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8603 }
8604 }
8605
8606 return 0;
8607 }
8608
8609 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8610 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8611
8612 /* tp->lock is held. */
8613 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8614 {
8615 int err;
8616
8617 tg3_stop_fw(tp);
8618
8619 tg3_write_sig_pre_reset(tp, kind);
8620
8621 tg3_abort_hw(tp, silent);
8622 err = tg3_chip_reset(tp);
8623
8624 __tg3_set_mac_addr(tp, 0);
8625
8626 tg3_write_sig_legacy(tp, kind);
8627 tg3_write_sig_post_reset(tp, kind);
8628
8629 if (tp->hw_stats) {
8630 /* Save the stats across chip resets... */
8631 tg3_get_nstats(tp, &tp->net_stats_prev);
8632 tg3_get_estats(tp, &tp->estats_prev);
8633
8634 /* And make sure the next sample is new data */
8635 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8636 }
8637
8638 if (err)
8639 return err;
8640
8641 return 0;
8642 }
8643
8644 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8645 {
8646 struct tg3 *tp = netdev_priv(dev);
8647 struct sockaddr *addr = p;
8648 int err = 0, skip_mac_1 = 0;
8649
8650 if (!is_valid_ether_addr(addr->sa_data))
8651 return -EADDRNOTAVAIL;
8652
8653 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8654
8655 if (!netif_running(dev))
8656 return 0;
8657
8658 if (tg3_flag(tp, ENABLE_ASF)) {
8659 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8660
8661 addr0_high = tr32(MAC_ADDR_0_HIGH);
8662 addr0_low = tr32(MAC_ADDR_0_LOW);
8663 addr1_high = tr32(MAC_ADDR_1_HIGH);
8664 addr1_low = tr32(MAC_ADDR_1_LOW);
8665
8666 /* Skip MAC addr 1 if ASF is using it. */
8667 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8668 !(addr1_high == 0 && addr1_low == 0))
8669 skip_mac_1 = 1;
8670 }
8671 spin_lock_bh(&tp->lock);
8672 __tg3_set_mac_addr(tp, skip_mac_1);
8673 spin_unlock_bh(&tp->lock);
8674
8675 return err;
8676 }
8677
8678 /* tp->lock is held. */
8679 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8680 dma_addr_t mapping, u32 maxlen_flags,
8681 u32 nic_addr)
8682 {
8683 tg3_write_mem(tp,
8684 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8685 ((u64) mapping >> 32));
8686 tg3_write_mem(tp,
8687 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8688 ((u64) mapping & 0xffffffff));
8689 tg3_write_mem(tp,
8690 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8691 maxlen_flags);
8692
8693 if (!tg3_flag(tp, 5705_PLUS))
8694 tg3_write_mem(tp,
8695 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8696 nic_addr);
8697 }
8698
8699
8700 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8701 {
8702 int i = 0;
8703
8704 if (!tg3_flag(tp, ENABLE_TSS)) {
8705 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8706 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8707 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8708 } else {
8709 tw32(HOSTCC_TXCOL_TICKS, 0);
8710 tw32(HOSTCC_TXMAX_FRAMES, 0);
8711 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8712
8713 for (; i < tp->txq_cnt; i++) {
8714 u32 reg;
8715
8716 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8717 tw32(reg, ec->tx_coalesce_usecs);
8718 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8719 tw32(reg, ec->tx_max_coalesced_frames);
8720 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8721 tw32(reg, ec->tx_max_coalesced_frames_irq);
8722 }
8723 }
8724
8725 for (; i < tp->irq_max - 1; i++) {
8726 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8727 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8728 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8729 }
8730 }
8731
8732 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8733 {
8734 int i = 0;
8735 u32 limit = tp->rxq_cnt;
8736
8737 if (!tg3_flag(tp, ENABLE_RSS)) {
8738 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8739 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8740 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8741 limit--;
8742 } else {
8743 tw32(HOSTCC_RXCOL_TICKS, 0);
8744 tw32(HOSTCC_RXMAX_FRAMES, 0);
8745 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8746 }
8747
8748 for (; i < limit; i++) {
8749 u32 reg;
8750
8751 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8752 tw32(reg, ec->rx_coalesce_usecs);
8753 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8754 tw32(reg, ec->rx_max_coalesced_frames);
8755 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8756 tw32(reg, ec->rx_max_coalesced_frames_irq);
8757 }
8758
8759 for (; i < tp->irq_max - 1; i++) {
8760 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8761 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8762 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8763 }
8764 }
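
/*
 * Editor's note (illustrative): the per-vector host-coalescing registers
 * used in the two functions above are laid out at a fixed 0x18-byte
 * stride from their *_VEC1 base, so vector i's copy of a register is
 * simply base + i * 0x18:
 */
#if 0	/* illustrative only, excluded from the build */
static unsigned long coal_vec_reg_sketch(unsigned long vec1_base,
					 unsigned int i)
{
	return vec1_base + i * 0x18;	/* 0x18 bytes of registers/vector */
}
#endif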
8765
8766 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8767 {
8768 tg3_coal_tx_init(tp, ec);
8769 tg3_coal_rx_init(tp, ec);
8770
8771 if (!tg3_flag(tp, 5705_PLUS)) {
8772 u32 val = ec->stats_block_coalesce_usecs;
8773
8774 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8775 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8776
8777 if (!tp->link_up)
8778 val = 0;
8779
8780 tw32(HOSTCC_STAT_COAL_TICKS, val);
8781 }
8782 }
8783
8784 /* tp->lock is held. */
8785 static void tg3_rings_reset(struct tg3 *tp)
8786 {
8787 int i;
8788 u32 stblk, txrcb, rxrcb, limit;
8789 struct tg3_napi *tnapi = &tp->napi[0];
8790
8791 /* Disable all transmit rings but the first. */
8792 if (!tg3_flag(tp, 5705_PLUS))
8793 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8794 else if (tg3_flag(tp, 5717_PLUS))
8795 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8796 else if (tg3_flag(tp, 57765_CLASS) ||
8797 tg3_asic_rev(tp) == ASIC_REV_5762)
8798 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8799 else
8800 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8801
8802 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8803 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8804 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8805 BDINFO_FLAGS_DISABLED);
8806
8807
8808 /* Disable all receive return rings but the first. */
8809 if (tg3_flag(tp, 5717_PLUS))
8810 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8811 else if (!tg3_flag(tp, 5705_PLUS))
8812 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8813 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8814 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8815 tg3_flag(tp, 57765_CLASS))
8816 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8817 else
8818 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8819
8820 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8821 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8822 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8823 BDINFO_FLAGS_DISABLED);
8824
8825 /* Disable interrupts */
8826 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8827 tp->napi[0].chk_msi_cnt = 0;
8828 tp->napi[0].last_rx_cons = 0;
8829 tp->napi[0].last_tx_cons = 0;
8830
8831 /* Zero mailbox registers. */
8832 if (tg3_flag(tp, SUPPORT_MSIX)) {
8833 for (i = 1; i < tp->irq_max; i++) {
8834 tp->napi[i].tx_prod = 0;
8835 tp->napi[i].tx_cons = 0;
8836 if (tg3_flag(tp, ENABLE_TSS))
8837 tw32_mailbox(tp->napi[i].prodmbox, 0);
8838 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8839 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8840 tp->napi[i].chk_msi_cnt = 0;
8841 tp->napi[i].last_rx_cons = 0;
8842 tp->napi[i].last_tx_cons = 0;
8843 }
8844 if (!tg3_flag(tp, ENABLE_TSS))
8845 tw32_mailbox(tp->napi[0].prodmbox, 0);
8846 } else {
8847 tp->napi[0].tx_prod = 0;
8848 tp->napi[0].tx_cons = 0;
8849 tw32_mailbox(tp->napi[0].prodmbox, 0);
8850 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8851 }
8852
8853 /* Make sure the NIC-based send BD rings are disabled. */
8854 if (!tg3_flag(tp, 5705_PLUS)) {
8855 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8856 for (i = 0; i < 16; i++)
8857 tw32_tx_mbox(mbox + i * 8, 0);
8858 }
8859
8860 txrcb = NIC_SRAM_SEND_RCB;
8861 rxrcb = NIC_SRAM_RCV_RET_RCB;
8862
8863 /* Clear status block in ram. */
8864 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8865
8866 /* Set status block DMA address */
8867 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8868 ((u64) tnapi->status_mapping >> 32));
8869 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8870 ((u64) tnapi->status_mapping & 0xffffffff));
8871
8872 if (tnapi->tx_ring) {
8873 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8874 (TG3_TX_RING_SIZE <<
8875 BDINFO_FLAGS_MAXLEN_SHIFT),
8876 NIC_SRAM_TX_BUFFER_DESC);
8877 txrcb += TG3_BDINFO_SIZE;
8878 }
8879
8880 if (tnapi->rx_rcb) {
8881 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8882 (tp->rx_ret_ring_mask + 1) <<
8883 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8884 rxrcb += TG3_BDINFO_SIZE;
8885 }
8886
8887 stblk = HOSTCC_STATBLCK_RING1;
8888
8889 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8890 u64 mapping = (u64)tnapi->status_mapping;
8891 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8892 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8893
8894 /* Clear status block in ram. */
8895 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8896
8897 if (tnapi->tx_ring) {
8898 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8899 (TG3_TX_RING_SIZE <<
8900 BDINFO_FLAGS_MAXLEN_SHIFT),
8901 NIC_SRAM_TX_BUFFER_DESC);
8902 txrcb += TG3_BDINFO_SIZE;
8903 }
8904
8905 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8906 ((tp->rx_ret_ring_mask + 1) <<
8907 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8908
8909 stblk += 8;
8910 rxrcb += TG3_BDINFO_SIZE;
8911 }
8912 }
8913
8914 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8915 {
8916 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8917
8918 if (!tg3_flag(tp, 5750_PLUS) ||
8919 tg3_flag(tp, 5780_CLASS) ||
8920 tg3_asic_rev(tp) == ASIC_REV_5750 ||
8921 tg3_asic_rev(tp) == ASIC_REV_5752 ||
8922 tg3_flag(tp, 57765_PLUS))
8923 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8924 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8925 tg3_asic_rev(tp) == ASIC_REV_5787)
8926 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8927 else
8928 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8929
8930 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8931 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8932
8933 val = min(nic_rep_thresh, host_rep_thresh);
8934 tw32(RCVBDI_STD_THRESH, val);
8935
8936 if (tg3_flag(tp, 57765_PLUS))
8937 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8938
8939 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8940 return;
8941
8942 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8943
8944 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8945
8946 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8947 tw32(RCVBDI_JUMBO_THRESH, val);
8948
8949 if (tg3_flag(tp, 57765_PLUS))
8950 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8951 }
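
/*
 * Editor's sketch (not part of the driver): the standard-ring replenish
 * threshold programmed above is the smaller of a NIC-side limit,
 * min(bdcache_maxcnt / 2, tp->rx_std_max_post), and a host-side limit,
 * max(tp->rx_pending / 8, 1).  Restated as a standalone helper:
 */
#if 0	/* illustrative only, excluded from the build */
static u32 std_replenish_thresh_sketch(u32 bdcache_maxcnt,
				       u32 rx_std_max_post, u32 rx_pending)
{
	u32 nic_thresh = min(bdcache_maxcnt / 2, rx_std_max_post);
	u32 host_thresh = max_t(u32, rx_pending / 8, 1);

	return min(nic_thresh, host_thresh);	/* e.g. 200 pending -> 25 */
}
#endif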
8952
8953 static inline u32 calc_crc(unsigned char *buf, int len)
8954 {
8955 u32 reg;
8956 u32 tmp;
8957 int j, k;
8958
8959 reg = 0xffffffff;
8960
8961 for (j = 0; j < len; j++) {
8962 reg ^= buf[j];
8963
8964 for (k = 0; k < 8; k++) {
8965 tmp = reg & 0x01;
8966
8967 reg >>= 1;
8968
8969 if (tmp)
8970 reg ^= 0xedb88320;
8971 }
8972 }
8973
8974 return ~reg;
8975 }
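
/*
 * Editor's note (illustrative): calc_crc() above is the standard
 * bit-reflected CRC-32 (polynomial 0xedb88320) used for the Ethernet FCS.
 * __tg3_set_rx_mode() below folds it into a 128-bit multicast hash: the
 * low 7 bits of the complemented CRC select one bit spread across the
 * four 32-bit MAC_HASH_REG_n registers.  A standalone sketch of that
 * mapping:
 */
#if 0	/* illustrative only, excluded from the build */
static void mc_hash_slot_sketch(u32 crc, unsigned int *regidx,
				unsigned int *bitpos)
{
	unsigned int bit = ~crc & 0x7f;	/* low 7 bits of ~CRC */

	*regidx = (bit & 0x60) >> 5;	/* picks MAC_HASH_REG_0 .. _3 */
	*bitpos = bit & 0x1f;		/* bit within that register */
}
#endif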
8976
8977 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8978 {
8979 /* accept or reject all multicast frames */
8980 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8981 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8982 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8983 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8984 }
8985
8986 static void __tg3_set_rx_mode(struct net_device *dev)
8987 {
8988 struct tg3 *tp = netdev_priv(dev);
8989 u32 rx_mode;
8990
8991 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8992 RX_MODE_KEEP_VLAN_TAG);
8993
8994 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8995 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8996 * flag clear.
8997 */
8998 if (!tg3_flag(tp, ENABLE_ASF))
8999 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9000 #endif
9001
9002 if (dev->flags & IFF_PROMISC) {
9003 /* Promiscuous mode. */
9004 rx_mode |= RX_MODE_PROMISC;
9005 } else if (dev->flags & IFF_ALLMULTI) {
9006 /* Accept all multicast. */
9007 tg3_set_multi(tp, 1);
9008 } else if (netdev_mc_empty(dev)) {
9009 /* Reject all multicast. */
9010 tg3_set_multi(tp, 0);
9011 } else {
9012 /* Accept one or more multicast(s). */
9013 struct netdev_hw_addr *ha;
9014 u32 mc_filter[4] = { 0, };
9015 u32 regidx;
9016 u32 bit;
9017 u32 crc;
9018
9019 netdev_for_each_mc_addr(ha, dev) {
9020 crc = calc_crc(ha->addr, ETH_ALEN);
9021 bit = ~crc & 0x7f;
9022 regidx = (bit & 0x60) >> 5;
9023 bit &= 0x1f;
9024 mc_filter[regidx] |= (1 << bit);
9025 }
9026
9027 tw32(MAC_HASH_REG_0, mc_filter[0]);
9028 tw32(MAC_HASH_REG_1, mc_filter[1]);
9029 tw32(MAC_HASH_REG_2, mc_filter[2]);
9030 tw32(MAC_HASH_REG_3, mc_filter[3]);
9031 }
9032
9033 if (rx_mode != tp->rx_mode) {
9034 tp->rx_mode = rx_mode;
9035 tw32_f(MAC_RX_MODE, rx_mode);
9036 udelay(10);
9037 }
9038 }
9039
9040 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9041 {
9042 int i;
9043
9044 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9045 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9046 }
9047
9048 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9049 {
9050 int i;
9051
9052 if (!tg3_flag(tp, SUPPORT_MSIX))
9053 return;
9054
9055 if (tp->rxq_cnt == 1) {
9056 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9057 return;
9058 }
9059
9060 /* Validate table against current IRQ count */
9061 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9062 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9063 break;
9064 }
9065
9066 if (i != TG3_RSS_INDIR_TBL_SIZE)
9067 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9068 }
9069
9070 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9071 {
9072 int i = 0;
9073 u32 reg = MAC_RSS_INDIR_TBL_0;
9074
9075 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9076 u32 val = tp->rss_ind_tbl[i];
9077 i++;
9078 for (; i % 8; i++) {
9079 val <<= 4;
9080 val |= tp->rss_ind_tbl[i];
9081 }
9082 tw32(reg, val);
9083 reg += 4;
9084 }
9085 }
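
/*
 * Editor's note (illustrative): tg3_rss_write_indir_tbl() above packs
 * eight 4-bit indirection-table entries into each 32-bit register, with
 * the first entry landing in the most significant nibble.  (The default
 * table built by tg3_rss_init_dflt_indir_tbl() spreads entries
 * round-robin across the rx queues via ethtool_rxfh_indir_default().)
 * Standalone sketch of the packing for one register:
 */
#if 0	/* illustrative only, excluded from the build */
static u32 pack_indir_reg_sketch(const u8 *tbl)	/* 8 entries, each < 16 */
{
	u32 val = 0;
	int k;

	for (k = 0; k < 8; k++)
		val = (val << 4) | (tbl[k] & 0xf);	/* tbl[0] -> bits 31:28 */
	return val;
}
#endif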
9086
9087 /* tp->lock is held. */
9088 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9089 {
9090 u32 val, rdmac_mode;
9091 int i, err, limit;
9092 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9093
9094 tg3_disable_ints(tp);
9095
9096 tg3_stop_fw(tp);
9097
9098 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9099
9100 if (tg3_flag(tp, INIT_COMPLETE))
9101 tg3_abort_hw(tp, 1);
9102
9103 /* Enable MAC control of LPI */
9104 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9105 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9106 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9107 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9108 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9109
9110 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9111
9112 tw32_f(TG3_CPMU_EEE_CTRL,
9113 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9114
9115 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9116 TG3_CPMU_EEEMD_LPI_IN_TX |
9117 TG3_CPMU_EEEMD_LPI_IN_RX |
9118 TG3_CPMU_EEEMD_EEE_ENABLE;
9119
9120 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9121 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9122
9123 if (tg3_flag(tp, ENABLE_APE))
9124 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9125
9126 tw32_f(TG3_CPMU_EEE_MODE, val);
9127
9128 tw32_f(TG3_CPMU_EEE_DBTMR1,
9129 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9130 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9131
9132 tw32_f(TG3_CPMU_EEE_DBTMR2,
9133 TG3_CPMU_DBTMR2_APE_TX_2047US |
9134 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9135 }
9136
9137 if (reset_phy)
9138 tg3_phy_reset(tp);
9139
9140 err = tg3_chip_reset(tp);
9141 if (err)
9142 return err;
9143
9144 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9145
9146 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9147 val = tr32(TG3_CPMU_CTRL);
9148 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9149 tw32(TG3_CPMU_CTRL, val);
9150
9151 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9152 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9153 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9154 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9155
9156 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9157 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9158 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9159 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9160
9161 val = tr32(TG3_CPMU_HST_ACC);
9162 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9163 val |= CPMU_HST_ACC_MACCLK_6_25;
9164 tw32(TG3_CPMU_HST_ACC, val);
9165 }
9166
9167 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9168 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9169 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9170 PCIE_PWR_MGMT_L1_THRESH_4MS;
9171 tw32(PCIE_PWR_MGMT_THRESH, val);
9172
9173 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9174 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9175
9176 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9177
9178 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9179 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9180 }
9181
9182 if (tg3_flag(tp, L1PLLPD_EN)) {
9183 u32 grc_mode = tr32(GRC_MODE);
9184
9185 /* Access the lower 1K of PL PCIE block registers. */
9186 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9187 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9188
9189 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9190 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9191 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9192
9193 tw32(GRC_MODE, grc_mode);
9194 }
9195
9196 if (tg3_flag(tp, 57765_CLASS)) {
9197 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9198 u32 grc_mode = tr32(GRC_MODE);
9199
9200 /* Access the lower 1K of PL PCIE block registers. */
9201 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9202 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9203
9204 val = tr32(TG3_PCIE_TLDLPL_PORT +
9205 TG3_PCIE_PL_LO_PHYCTL5);
9206 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9207 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9208
9209 tw32(GRC_MODE, grc_mode);
9210 }
9211
9212 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9213 u32 grc_mode;
9214
9215 /* Fix transmit hangs */
9216 val = tr32(TG3_CPMU_PADRNG_CTL);
9217 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9218 tw32(TG3_CPMU_PADRNG_CTL, val);
9219
9220 grc_mode = tr32(GRC_MODE);
9221
9222 /* Access the lower 1K of DL PCIE block registers. */
9223 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9224 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9225
9226 val = tr32(TG3_PCIE_TLDLPL_PORT +
9227 TG3_PCIE_DL_LO_FTSMAX);
9228 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9229 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9230 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9231
9232 tw32(GRC_MODE, grc_mode);
9233 }
9234
9235 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9236 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9237 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9238 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9239 }
9240
9241 /* This works around an issue with Athlon chipsets on
9242 * B3 tigon3 silicon. This bit has no effect on any
9243 * other revision. But do not set this on PCI Express
9244 * chips and don't even touch the clocks if the CPMU is present.
9245 */
9246 if (!tg3_flag(tp, CPMU_PRESENT)) {
9247 if (!tg3_flag(tp, PCI_EXPRESS))
9248 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9249 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9250 }
9251
9252 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9253 tg3_flag(tp, PCIX_MODE)) {
9254 val = tr32(TG3PCI_PCISTATE);
9255 val |= PCISTATE_RETRY_SAME_DMA;
9256 tw32(TG3PCI_PCISTATE, val);
9257 }
9258
9259 if (tg3_flag(tp, ENABLE_APE)) {
9260 /* Allow reads and writes to the
9261 * APE register and memory space.
9262 */
9263 val = tr32(TG3PCI_PCISTATE);
9264 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9265 PCISTATE_ALLOW_APE_SHMEM_WR |
9266 PCISTATE_ALLOW_APE_PSPACE_WR;
9267 tw32(TG3PCI_PCISTATE, val);
9268 }
9269
9270 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9271 /* Enable some hw fixes. */
9272 val = tr32(TG3PCI_MSI_DATA);
9273 val |= (1 << 26) | (1 << 28) | (1 << 29);
9274 tw32(TG3PCI_MSI_DATA, val);
9275 }
9276
9277 /* Descriptor ring init may make accesses to the
9278 * NIC SRAM area to set up the TX descriptors, so we
9279 * can only do this after the hardware has been
9280 * successfully reset.
9281 */
9282 err = tg3_init_rings(tp);
9283 if (err)
9284 return err;
9285
9286 if (tg3_flag(tp, 57765_PLUS)) {
9287 val = tr32(TG3PCI_DMA_RW_CTRL) &
9288 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9289 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9290 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9291 if (!tg3_flag(tp, 57765_CLASS) &&
9292 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9293 tg3_asic_rev(tp) != ASIC_REV_5762)
9294 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9295 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9296 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9297 tg3_asic_rev(tp) != ASIC_REV_5761) {
9298 /* This value is determined during the probe time DMA
9299 * engine test, tg3_test_dma.
9300 */
9301 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9302 }
9303
9304 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9305 GRC_MODE_4X_NIC_SEND_RINGS |
9306 GRC_MODE_NO_TX_PHDR_CSUM |
9307 GRC_MODE_NO_RX_PHDR_CSUM);
9308 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9309
9310 /* Pseudo-header checksum is done by hardware logic and not
9311 * the offload processors, so make the chip do the pseudo-
9312 * header checksums on receive. For transmit it is more
9313 * convenient to do the pseudo-header checksum in software
9314 * as Linux does that on transmit for us in all cases.
9315 */
9316 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9317
9318 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9319 if (tp->rxptpctl)
9320 tw32(TG3_RX_PTP_CTL,
9321 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9322
9323 if (tg3_flag(tp, PTP_CAPABLE))
9324 val |= GRC_MODE_TIME_SYNC_ENABLE;
9325
9326 tw32(GRC_MODE, tp->grc_mode | val);
9327
9328 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9329 val = tr32(GRC_MISC_CFG);
9330 val &= ~0xff;
9331 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9332 tw32(GRC_MISC_CFG, val);
9333
9334 /* Initialize MBUF/DESC pool. */
9335 if (tg3_flag(tp, 5750_PLUS)) {
9336 /* Do nothing. */
9337 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9338 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9339 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9340 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9341 else
9342 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9343 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9344 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9345 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9346 int fw_len;
9347
9348 fw_len = tp->fw_len;
9349 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9350 tw32(BUFMGR_MB_POOL_ADDR,
9351 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9352 tw32(BUFMGR_MB_POOL_SIZE,
9353 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9354 }
9355
9356 if (tp->dev->mtu <= ETH_DATA_LEN) {
9357 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9358 tp->bufmgr_config.mbuf_read_dma_low_water);
9359 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9360 tp->bufmgr_config.mbuf_mac_rx_low_water);
9361 tw32(BUFMGR_MB_HIGH_WATER,
9362 tp->bufmgr_config.mbuf_high_water);
9363 } else {
9364 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9365 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9366 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9367 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9368 tw32(BUFMGR_MB_HIGH_WATER,
9369 tp->bufmgr_config.mbuf_high_water_jumbo);
9370 }
9371 tw32(BUFMGR_DMA_LOW_WATER,
9372 tp->bufmgr_config.dma_low_water);
9373 tw32(BUFMGR_DMA_HIGH_WATER,
9374 tp->bufmgr_config.dma_high_water);
9375
9376 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9377 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9378 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9379 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9380 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9381 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9382 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9383 tw32(BUFMGR_MODE, val);
9384 for (i = 0; i < 2000; i++) {
9385 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9386 break;
9387 udelay(10);
9388 }
9389 if (i >= 2000) {
9390 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9391 return -ENODEV;
9392 }
9393
9394 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9395 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9396
9397 tg3_setup_rxbd_thresholds(tp);
9398
9399 /* Initialize TG3_BDINFO's at:
9400 * RCVDBDI_STD_BD: standard eth size rx ring
9401 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9402 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9403 *
9404 * like so:
9405 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9406 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9407 * ring attribute flags
9408 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9409 *
9410 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9411 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9412 *
9413 * The size of each ring is fixed in the firmware, but the location is
9414 * configurable.
9415 */
9416 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9417 ((u64) tpr->rx_std_mapping >> 32));
9418 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9419 ((u64) tpr->rx_std_mapping & 0xffffffff));
9420 if (!tg3_flag(tp, 5717_PLUS))
9421 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9422 NIC_SRAM_RX_BUFFER_DESC);
9423
9424 /* Disable the mini ring */
9425 if (!tg3_flag(tp, 5705_PLUS))
9426 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9427 BDINFO_FLAGS_DISABLED);
9428
9429 /* Program the jumbo buffer descriptor ring control
9430 * blocks on those devices that have them.
9431 */
9432 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9433 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9434
9435 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9436 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9437 ((u64) tpr->rx_jmb_mapping >> 32));
9438 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9439 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9440 val = TG3_RX_JMB_RING_SIZE(tp) <<
9441 BDINFO_FLAGS_MAXLEN_SHIFT;
9442 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9443 val | BDINFO_FLAGS_USE_EXT_RECV);
9444 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9445 tg3_flag(tp, 57765_CLASS) ||
9446 tg3_asic_rev(tp) == ASIC_REV_5762)
9447 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9448 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9449 } else {
9450 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9451 BDINFO_FLAGS_DISABLED);
9452 }
9453
9454 if (tg3_flag(tp, 57765_PLUS)) {
9455 val = TG3_RX_STD_RING_SIZE(tp);
9456 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9457 val |= (TG3_RX_STD_DMA_SZ << 2);
9458 } else
9459 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9460 } else
9461 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9462
9463 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9464
9465 tpr->rx_std_prod_idx = tp->rx_pending;
9466 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9467
9468 tpr->rx_jmb_prod_idx =
9469 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9470 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9471
9472 tg3_rings_reset(tp);
9473
9474 /* Initialize MAC address and backoff seed. */
9475 __tg3_set_mac_addr(tp, 0);
9476
9477 /* MTU + ethernet header + FCS + optional VLAN tag */
9478 tw32(MAC_RX_MTU_SIZE,
9479 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9480
9481 /* The slot time is changed by tg3_setup_phy if we
9482 * run at gigabit with half duplex.
9483 */
9484 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9485 (6 << TX_LENGTHS_IPG_SHIFT) |
9486 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9487
9488 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9489 tg3_asic_rev(tp) == ASIC_REV_5762)
9490 val |= tr32(MAC_TX_LENGTHS) &
9491 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9492 TX_LENGTHS_CNT_DWN_VAL_MSK);
9493
9494 tw32(MAC_TX_LENGTHS, val);
9495
9496 /* Receive rules. */
9497 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9498 tw32(RCVLPC_CONFIG, 0x0181);
9499
9500 /* Calculate RDMAC_MODE setting early, we need it to determine
9501 * the RCVLPC_STATE_ENABLE mask.
9502 */
9503 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9504 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9505 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9506 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9507 RDMAC_MODE_LNGREAD_ENAB);
9508
9509 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9510 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9511
9512 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9513 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9514 tg3_asic_rev(tp) == ASIC_REV_57780)
9515 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9516 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9517 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9518
9519 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9520 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9521 if (tg3_flag(tp, TSO_CAPABLE) &&
9522 tg3_asic_rev(tp) == ASIC_REV_5705) {
9523 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9524 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9525 !tg3_flag(tp, IS_5788)) {
9526 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9527 }
9528 }
9529
9530 if (tg3_flag(tp, PCI_EXPRESS))
9531 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9532
9533 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9534 tp->dma_limit = 0;
9535 if (tp->dev->mtu <= ETH_DATA_LEN) {
9536 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9537 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9538 }
9539 }
9540
9541 if (tg3_flag(tp, HW_TSO_1) ||
9542 tg3_flag(tp, HW_TSO_2) ||
9543 tg3_flag(tp, HW_TSO_3))
9544 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9545
9546 if (tg3_flag(tp, 57765_PLUS) ||
9547 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9548 tg3_asic_rev(tp) == ASIC_REV_57780)
9549 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9550
9551 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9552 tg3_asic_rev(tp) == ASIC_REV_5762)
9553 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9554
9555 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9556 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9557 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9558 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9559 tg3_flag(tp, 57765_PLUS)) {
9560 u32 tgtreg;
9561
9562 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9563 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9564 else
9565 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9566
9567 val = tr32(tgtreg);
9568 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9569 tg3_asic_rev(tp) == ASIC_REV_5762) {
9570 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9571 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9572 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9573 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9574 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9575 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9576 }
9577 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9578 }
9579
9580 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9581 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9582 tg3_asic_rev(tp) == ASIC_REV_5762) {
9583 u32 tgtreg;
9584
9585 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9586 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9587 else
9588 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9589
9590 val = tr32(tgtreg);
9591 tw32(tgtreg, val |
9592 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9593 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9594 }
9595
9596 /* Receive/send statistics. */
9597 if (tg3_flag(tp, 5750_PLUS)) {
9598 val = tr32(RCVLPC_STATS_ENABLE);
9599 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9600 tw32(RCVLPC_STATS_ENABLE, val);
9601 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9602 tg3_flag(tp, TSO_CAPABLE)) {
9603 val = tr32(RCVLPC_STATS_ENABLE);
9604 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9605 tw32(RCVLPC_STATS_ENABLE, val);
9606 } else {
9607 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9608 }
9609 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9610 tw32(SNDDATAI_STATSENAB, 0xffffff);
9611 tw32(SNDDATAI_STATSCTRL,
9612 (SNDDATAI_SCTRL_ENABLE |
9613 SNDDATAI_SCTRL_FASTUPD));
9614
9615 /* Setup host coalescing engine. */
9616 tw32(HOSTCC_MODE, 0);
9617 for (i = 0; i < 2000; i++) {
9618 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9619 break;
9620 udelay(10);
9621 }
9622
9623 __tg3_set_coalesce(tp, &tp->coal);
9624
9625 if (!tg3_flag(tp, 5705_PLUS)) {
9626 /* Status/statistics block address. See tg3_timer,
9627 * the tg3_periodic_fetch_stats call there, and
9628 * tg3_get_stats to see how this works for 5705/5750 chips.
9629 */
9630 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9631 ((u64) tp->stats_mapping >> 32));
9632 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9633 ((u64) tp->stats_mapping & 0xffffffff));
9634 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9635
9636 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9637
9638 /* Clear statistics and status block memory areas */
9639 for (i = NIC_SRAM_STATS_BLK;
9640 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9641 i += sizeof(u32)) {
9642 tg3_write_mem(tp, i, 0);
9643 udelay(40);
9644 }
9645 }
9646
9647 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9648
9649 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9650 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9651 if (!tg3_flag(tp, 5705_PLUS))
9652 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9653
9654 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9655 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9656 /* Reset to prevent intermittently losing the first rx packet */
9657 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9658 udelay(10);
9659 }
9660
9661 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9662 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9663 MAC_MODE_FHDE_ENABLE;
9664 if (tg3_flag(tp, ENABLE_APE))
9665 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9666 if (!tg3_flag(tp, 5705_PLUS) &&
9667 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9668 tg3_asic_rev(tp) != ASIC_REV_5700)
9669 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9670 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9671 udelay(40);
9672
9673 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9674 * If TG3_FLAG_IS_NIC is zero, we should read the
9675 * register to preserve the GPIO settings for LOMs. The GPIOs,
9676 * whether used as inputs or outputs, are set by boot code after
9677 * reset.
9678 */
9679 if (!tg3_flag(tp, IS_NIC)) {
9680 u32 gpio_mask;
9681
9682 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9683 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9684 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9685
9686 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9687 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9688 GRC_LCLCTRL_GPIO_OUTPUT3;
9689
9690 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9691 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9692
9693 tp->grc_local_ctrl &= ~gpio_mask;
9694 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9695
9696 /* GPIO1 must be driven high for eeprom write protect */
9697 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9698 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9699 GRC_LCLCTRL_GPIO_OUTPUT1);
9700 }
9701 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9702 udelay(100);
9703
9704 if (tg3_flag(tp, USING_MSIX)) {
9705 val = tr32(MSGINT_MODE);
9706 val |= MSGINT_MODE_ENABLE;
9707 if (tp->irq_cnt > 1)
9708 val |= MSGINT_MODE_MULTIVEC_EN;
9709 if (!tg3_flag(tp, 1SHOT_MSI))
9710 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9711 tw32(MSGINT_MODE, val);
9712 }
9713
9714 if (!tg3_flag(tp, 5705_PLUS)) {
9715 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9716 udelay(40);
9717 }
9718
9719 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9720 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9721 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9722 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9723 WDMAC_MODE_LNGREAD_ENAB);
9724
9725 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9726 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9727 if (tg3_flag(tp, TSO_CAPABLE) &&
9728 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9729 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9730 /* nothing */
9731 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9732 !tg3_flag(tp, IS_5788)) {
9733 val |= WDMAC_MODE_RX_ACCEL;
9734 }
9735 }
9736
9737 /* Enable host coalescing bug fix */
9738 if (tg3_flag(tp, 5755_PLUS))
9739 val |= WDMAC_MODE_STATUS_TAG_FIX;
9740
9741 if (tg3_asic_rev(tp) == ASIC_REV_5785)
9742 val |= WDMAC_MODE_BURST_ALL_DATA;
9743
9744 tw32_f(WDMAC_MODE, val);
9745 udelay(40);
9746
9747 if (tg3_flag(tp, PCIX_MODE)) {
9748 u16 pcix_cmd;
9749
9750 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9751 &pcix_cmd);
9752 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9753 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9754 pcix_cmd |= PCI_X_CMD_READ_2K;
9755 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9756 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9757 pcix_cmd |= PCI_X_CMD_READ_2K;
9758 }
9759 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9760 pcix_cmd);
9761 }
9762
9763 tw32_f(RDMAC_MODE, rdmac_mode);
9764 udelay(40);
9765
9766 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9767 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9768 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9769 break;
9770 }
9771 if (i < TG3_NUM_RDMA_CHANNELS) {
9772 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9773 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9774 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9775 tg3_flag_set(tp, 5719_RDMA_BUG);
9776 }
9777 }
9778
9779 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9780 if (!tg3_flag(tp, 5705_PLUS))
9781 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9782
9783 if (tg3_asic_rev(tp) == ASIC_REV_5761)
9784 tw32(SNDDATAC_MODE,
9785 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9786 else
9787 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9788
9789 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9790 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9791 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9792 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9793 val |= RCVDBDI_MODE_LRG_RING_SZ;
9794 tw32(RCVDBDI_MODE, val);
9795 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9796 if (tg3_flag(tp, HW_TSO_1) ||
9797 tg3_flag(tp, HW_TSO_2) ||
9798 tg3_flag(tp, HW_TSO_3))
9799 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9800 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9801 if (tg3_flag(tp, ENABLE_TSS))
9802 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9803 tw32(SNDBDI_MODE, val);
9804 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9805
9806 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9807 err = tg3_load_5701_a0_firmware_fix(tp);
9808 if (err)
9809 return err;
9810 }
9811
9812 if (tg3_flag(tp, TSO_CAPABLE)) {
9813 err = tg3_load_tso_firmware(tp);
9814 if (err)
9815 return err;
9816 }
9817
9818 tp->tx_mode = TX_MODE_ENABLE;
9819
9820 if (tg3_flag(tp, 5755_PLUS) ||
9821 tg3_asic_rev(tp) == ASIC_REV_5906)
9822 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9823
9824 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9825 tg3_asic_rev(tp) == ASIC_REV_5762) {
9826 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9827 tp->tx_mode &= ~val;
9828 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9829 }
9830
9831 tw32_f(MAC_TX_MODE, tp->tx_mode);
9832 udelay(100);
9833
9834 if (tg3_flag(tp, ENABLE_RSS)) {
9835 tg3_rss_write_indir_tbl(tp);
9836
9837 /* Set up the "secret" hash key. */
9838 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9839 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9840 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9841 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9842 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9843 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9844 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9845 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9846 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9847 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9848 }
9849
9850 tp->rx_mode = RX_MODE_ENABLE;
9851 if (tg3_flag(tp, 5755_PLUS))
9852 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9853
9854 if (tg3_flag(tp, ENABLE_RSS))
9855 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9856 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9857 RX_MODE_RSS_IPV6_HASH_EN |
9858 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9859 RX_MODE_RSS_IPV4_HASH_EN |
9860 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9861
9862 tw32_f(MAC_RX_MODE, tp->rx_mode);
9863 udelay(10);
9864
9865 tw32(MAC_LED_CTRL, tp->led_ctrl);
9866
9867 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9868 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9869 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9870 udelay(10);
9871 }
9872 tw32_f(MAC_RX_MODE, tp->rx_mode);
9873 udelay(10);
9874
9875 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9876 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9877 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9878 /* Set drive transmission level to 1.2V */
9879 /* only if the signal pre-emphasis bit is not set */
9880 val = tr32(MAC_SERDES_CFG);
9881 val &= 0xfffff000;
9882 val |= 0x880;
9883 tw32(MAC_SERDES_CFG, val);
9884 }
9885 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
9886 tw32(MAC_SERDES_CFG, 0x616000);
9887 }
9888
9889 /* Prevent chip from dropping frames when flow control
9890 * is enabled.
9891 */
9892 if (tg3_flag(tp, 57765_CLASS))
9893 val = 1;
9894 else
9895 val = 2;
9896 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9897
9898 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
9899 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9900 /* Use hardware link auto-negotiation */
9901 tg3_flag_set(tp, HW_AUTONEG);
9902 }
9903
9904 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9905 tg3_asic_rev(tp) == ASIC_REV_5714) {
9906 u32 tmp;
9907
9908 tmp = tr32(SERDES_RX_CTRL);
9909 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9910 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9911 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9912 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9913 }
9914
9915 if (!tg3_flag(tp, USE_PHYLIB)) {
9916 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9917 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9918
9919 err = tg3_setup_phy(tp, 0);
9920 if (err)
9921 return err;
9922
9923 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9924 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9925 u32 tmp;
9926
9927 /* Clear CRC stats. */
9928 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9929 tg3_writephy(tp, MII_TG3_TEST1,
9930 tmp | MII_TG3_TEST1_CRC_EN);
9931 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9932 }
9933 }
9934 }
9935
9936 __tg3_set_rx_mode(tp->dev);
9937
9938 /* Initialize receive rules. */
9939 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9940 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9941 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9942 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9943
9944 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9945 limit = 8;
9946 else
9947 limit = 16;
9948 if (tg3_flag(tp, ENABLE_ASF))
9949 limit -= 4;
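/* Each case falls through, disabling every receive rule from the
 * computed limit down to rule 4.  Rules 3 and 2 are deliberately
 * left untouched (commented out below); rules 1 and 0 were
 * programmed above.
 */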
9950 switch (limit) {
9951 case 16:
9952 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9953 case 15:
9954 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9955 case 14:
9956 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9957 case 13:
9958 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9959 case 12:
9960 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9961 case 11:
9962 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9963 case 10:
9964 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9965 case 9:
9966 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9967 case 8:
9968 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9969 case 7:
9970 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9971 case 6:
9972 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9973 case 5:
9974 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9975 case 4:
9976 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9977 case 3:
9978 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9979 case 2:
9980 case 1:
9981
9982 default:
9983 break;
9984 }
9985
9986 if (tg3_flag(tp, ENABLE_APE))
9987 /* Write our heartbeat update interval to APE. */
9988 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9989 APE_HOST_HEARTBEAT_INT_DISABLE);
9990
9991 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9992
9993 return 0;
9994 }
9995
9996 /* Called at device open time to get the chip ready for
9997 * packet processing. Invoked with tp->lock held.
9998 */
9999 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
10000 {
10001 tg3_switch_clocks(tp);
10002
10003 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10004
10005 return tg3_reset_hw(tp, reset_phy);
10006 }
10007
10008 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10009 {
10010 int i;
10011
10012 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10013 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10014
10015 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10016 off += len;
10017
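/* Discard any record that lacks the OCIR signature or is not
 * marked active.
 */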
10018 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10019 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10020 memset(ocir, 0, TG3_OCIR_LEN);
10021 }
10022 }
10023
10024 /* sysfs attributes for hwmon */
10025 static ssize_t tg3_show_temp(struct device *dev,
10026 struct device_attribute *devattr, char *buf)
10027 {
10028 struct pci_dev *pdev = to_pci_dev(dev);
10029 struct net_device *netdev = pci_get_drvdata(pdev);
10030 struct tg3 *tp = netdev_priv(netdev);
10031 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10032 u32 temperature;
10033
10034 spin_lock_bh(&tp->lock);
10035 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10036 sizeof(temperature));
10037 spin_unlock_bh(&tp->lock);
10038 return sprintf(buf, "%u\n", temperature * 1000); /* hwmon ABI: millidegrees C */
10039 }
10040
10041
10042 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10043 TG3_TEMP_SENSOR_OFFSET);
10044 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10045 TG3_TEMP_CAUTION_OFFSET);
10046 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10047 TG3_TEMP_MAX_OFFSET);
10048
10049 static struct attribute *tg3_attributes[] = {
10050 &sensor_dev_attr_temp1_input.dev_attr.attr,
10051 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10052 &sensor_dev_attr_temp1_max.dev_attr.attr,
10053 NULL
10054 };
10055
10056 static const struct attribute_group tg3_group = {
10057 .attrs = tg3_attributes,
10058 };
10059
10060 static void tg3_hwmon_close(struct tg3 *tp)
10061 {
10062 if (tp->hwmon_dev) {
10063 hwmon_device_unregister(tp->hwmon_dev);
10064 tp->hwmon_dev = NULL;
10065 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10066 }
10067 }
10068
10069 static void tg3_hwmon_open(struct tg3 *tp)
10070 {
10071 int i, err;
10072 u32 size = 0;
10073 struct pci_dev *pdev = tp->pdev;
10074 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10075
10076 tg3_sd_scan_scratchpad(tp, ocirs);
10077
10078 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10079 if (!ocirs[i].src_data_length)
10080 continue;
10081
10082 size += ocirs[i].src_hdr_length;
10083 size += ocirs[i].src_data_length;
10084 }
10085
10086 if (!size)
10087 return;
10088
10089 /* Register hwmon sysfs hooks */
10090 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10091 if (err) {
10092 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10093 return;
10094 }
10095
10096 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10097 if (IS_ERR(tp->hwmon_dev)) {
10098 tp->hwmon_dev = NULL;
10099 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10100 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10101 }
10102 }
10103
10104
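/* Accumulate a 32-bit hardware counter into a 64-bit software
 * counter: if the 32-bit sum wrapped, carry one into the high word.
 */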
10105 #define TG3_STAT_ADD32(PSTAT, REG) \
10106 do { u32 __val = tr32(REG); \
10107 (PSTAT)->low += __val; \
10108 if ((PSTAT)->low < __val) \
10109 (PSTAT)->high += 1; \
10110 } while (0)
10111
10112 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10113 {
10114 struct tg3_hw_stats *sp = tp->hw_stats;
10115
10116 if (!tp->link_up)
10117 return;
10118
10119 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10120 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10121 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10122 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10123 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10124 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10125 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10126 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10127 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10128 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10129 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10130 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10131 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
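/* Once more packets have been sent than there are RDMA channels,
 * the 5719 read-DMA TX-length workaround enabled in tg3_reset_hw()
 * can be undone.
 */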
10132 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10133 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10134 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10135 u32 val;
10136
10137 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10138 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10139 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10140 tg3_flag_clear(tp, 5719_RDMA_BUG);
10141 }
10142
10143 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10144 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10145 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10146 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10147 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10148 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10149 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10150 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10151 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10152 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10153 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10154 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10155 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10156 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10157
10158 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10159 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10160 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10161 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10162 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10163 } else {
10164 u32 val = tr32(HOSTCC_FLOW_ATTN);
10165 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10166 if (val) {
10167 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10168 sp->rx_discards.low += val;
10169 if (sp->rx_discards.low < val)
10170 sp->rx_discards.high += 1;
10171 }
10172 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10173 }
10174 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10175 }
10176
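/* Detect a lost MSI: if a NAPI context still has work pending but its
 * consumer indices have not moved since the last check, assume the
 * interrupt was missed and invoke the handler by hand.
 */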
10177 static void tg3_chk_missed_msi(struct tg3 *tp)
10178 {
10179 u32 i;
10180
10181 for (i = 0; i < tp->irq_cnt; i++) {
10182 struct tg3_napi *tnapi = &tp->napi[i];
10183
10184 if (tg3_has_work(tnapi)) {
10185 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10186 tnapi->last_tx_cons == tnapi->tx_cons) {
10187 if (tnapi->chk_msi_cnt < 1) {
10188 tnapi->chk_msi_cnt++;
10189 return;
10190 }
10191 tg3_msi(0, tnapi);
10192 }
10193 }
10194 tnapi->chk_msi_cnt = 0;
10195 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10196 tnapi->last_tx_cons = tnapi->tx_cons;
10197 }
10198 }
10199
10200 static void tg3_timer(unsigned long __opaque)
10201 {
10202 struct tg3 *tp = (struct tg3 *) __opaque;
10203
10204 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10205 goto restart_timer;
10206
10207 spin_lock(&tp->lock);
10208
10209 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10210 tg3_flag(tp, 57765_CLASS))
10211 tg3_chk_missed_msi(tp);
10212
10213 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10214 /* BCM4785: Flush posted writes from GbE to host memory. */
10215 tr32(HOSTCC_MODE);
10216 }
10217
10218 if (!tg3_flag(tp, TAGGED_STATUS)) {
10219 /* All of this garbage is because, when using non-tagged
10220 * IRQ status, the mailbox/status_block protocol the chip
10221 * uses with the CPU is race prone.
10222 */
10223 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10224 tw32(GRC_LOCAL_CTRL,
10225 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10226 } else {
10227 tw32(HOSTCC_MODE, tp->coalesce_mode |
10228 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10229 }
10230
10231 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10232 spin_unlock(&tp->lock);
10233 tg3_reset_task_schedule(tp);
10234 goto restart_timer;
10235 }
10236 }
10237
10238 /* This part only runs once per second. */
10239 if (!--tp->timer_counter) {
10240 if (tg3_flag(tp, 5705_PLUS))
10241 tg3_periodic_fetch_stats(tp);
10242
10243 if (tp->setlpicnt && !--tp->setlpicnt)
10244 tg3_phy_eee_enable(tp);
10245
10246 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10247 u32 mac_stat;
10248 int phy_event;
10249
10250 mac_stat = tr32(MAC_STATUS);
10251
10252 phy_event = 0;
10253 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10254 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10255 phy_event = 1;
10256 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10257 phy_event = 1;
10258
10259 if (phy_event)
10260 tg3_setup_phy(tp, 0);
10261 } else if (tg3_flag(tp, POLL_SERDES)) {
10262 u32 mac_stat = tr32(MAC_STATUS);
10263 int need_setup = 0;
10264
10265 if (tp->link_up &&
10266 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10267 need_setup = 1;
10268 }
10269 if (!tp->link_up &&
10270 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10271 MAC_STATUS_SIGNAL_DET))) {
10272 need_setup = 1;
10273 }
10274 if (need_setup) {
10275 if (!tp->serdes_counter) {
10276 tw32_f(MAC_MODE,
10277 (tp->mac_mode &
10278 ~MAC_MODE_PORT_MODE_MASK));
10279 udelay(40);
10280 tw32_f(MAC_MODE, tp->mac_mode);
10281 udelay(40);
10282 }
10283 tg3_setup_phy(tp, 0);
10284 }
10285 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10286 tg3_flag(tp, 5780_CLASS)) {
10287 tg3_serdes_parallel_detect(tp);
10288 }
10289
10290 tp->timer_counter = tp->timer_multiplier;
10291 }
10292
10293 /* Heartbeat is only sent once every 2 seconds.
10294 *
10295 * The heartbeat is to tell the ASF firmware that the host
10296 * driver is still alive. In the event that the OS crashes,
10297 * ASF needs to reset the hardware to free up the FIFO space
10298 * that may be filled with rx packets destined for the host.
10299 * If the FIFO is full, ASF will no longer function properly.
10300 *
10301 * Unintended resets have been reported on real-time kernels,
10302 * where the timer doesn't run on time. Netpoll has the
10303 * same problem.
10304 *
10305 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10306 * to check the ring condition when the heartbeat is expiring
10307 * before doing the reset. This will prevent most unintended
10308 * resets.
10309 */
10310 if (!--tp->asf_counter) {
10311 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10312 tg3_wait_for_event_ack(tp);
10313
10314 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10315 FWCMD_NICDRV_ALIVE3);
10316 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10317 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10318 TG3_FW_UPDATE_TIMEOUT_SEC);
10319
10320 tg3_generate_fw_event(tp);
10321 }
10322 tp->asf_counter = tp->asf_multiplier;
10323 }
10324
10325 spin_unlock(&tp->lock);
10326
10327 restart_timer:
10328 tp->timer.expires = jiffies + tp->timer_offset;
10329 add_timer(&tp->timer);
10330 }
10331
10332 static void tg3_timer_init(struct tg3 *tp)
10333 {
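/* Tagged-status chips (other than the 5717 and the 57765 class,
 * which need the missed-MSI check in tg3_timer()) only require a
 * 1 Hz service timer; everything else is polled at 10 Hz.
 */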
10334 if (tg3_flag(tp, TAGGED_STATUS) &&
10335 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10336 !tg3_flag(tp, 57765_CLASS))
10337 tp->timer_offset = HZ;
10338 else
10339 tp->timer_offset = HZ / 10;
10340
10341 BUG_ON(tp->timer_offset > HZ);
10342
10343 tp->timer_multiplier = (HZ / tp->timer_offset);
10344 tp->asf_multiplier = (HZ / tp->timer_offset) *
10345 TG3_FW_UPDATE_FREQ_SEC;
10346
10347 init_timer(&tp->timer);
10348 tp->timer.data = (unsigned long) tp;
10349 tp->timer.function = tg3_timer;
10350 }
10351
10352 static void tg3_timer_start(struct tg3 *tp)
10353 {
10354 tp->asf_counter = tp->asf_multiplier;
10355 tp->timer_counter = tp->timer_multiplier;
10356
10357 tp->timer.expires = jiffies + tp->timer_offset;
10358 add_timer(&tp->timer);
10359 }
10360
10361 static void tg3_timer_stop(struct tg3 *tp)
10362 {
10363 del_timer_sync(&tp->timer);
10364 }
10365
10366 /* Restart hardware after configuration changes, self-test, etc.
10367 * Invoked with tp->lock held.
10368 */
10369 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10370 __releases(tp->lock)
10371 __acquires(tp->lock)
10372 {
10373 int err;
10374
10375 err = tg3_init_hw(tp, reset_phy);
10376 if (err) {
10377 netdev_err(tp->dev,
10378 "Failed to re-initialize device, aborting\n");
10379 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
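/* dev_close() can sleep, so drop tp->lock around it and
 * reacquire it for the caller, as the sparse annotations
 * above document.
 */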
10380 tg3_full_unlock(tp);
10381 tg3_timer_stop(tp);
10382 tp->irq_sync = 0;
10383 tg3_napi_enable(tp);
10384 dev_close(tp->dev);
10385 tg3_full_lock(tp, 0);
10386 }
10387 return err;
10388 }
10389
10390 static void tg3_reset_task(struct work_struct *work)
10391 {
10392 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10393 int err;
10394
10395 tg3_full_lock(tp, 0);
10396
10397 if (!netif_running(tp->dev)) {
10398 tg3_flag_clear(tp, RESET_TASK_PENDING);
10399 tg3_full_unlock(tp);
10400 return;
10401 }
10402
10403 tg3_full_unlock(tp);
10404
10405 tg3_phy_stop(tp);
10406
10407 tg3_netif_stop(tp);
10408
10409 tg3_full_lock(tp, 1);
10410
10411 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10412 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10413 tp->write32_rx_mbox = tg3_write_flush_reg32;
10414 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10415 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10416 }
10417
10418 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10419 err = tg3_init_hw(tp, 1);
10420 if (err)
10421 goto out;
10422
10423 tg3_netif_start(tp);
10424
10425 out:
10426 tg3_full_unlock(tp);
10427
10428 if (!err)
10429 tg3_phy_start(tp);
10430
10431 tg3_flag_clear(tp, RESET_TASK_PENDING);
10432 }
10433
10434 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10435 {
10436 irq_handler_t fn;
10437 unsigned long flags;
10438 char *name;
10439 struct tg3_napi *tnapi = &tp->napi[irq_num];
10440
10441 if (tp->irq_cnt == 1)
10442 name = tp->dev->name;
10443 else {
10444 name = &tnapi->irq_lbl[0];
10445 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10446 name[IFNAMSIZ-1] = 0;
10447 }
10448
10449 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10450 fn = tg3_msi;
10451 if (tg3_flag(tp, 1SHOT_MSI))
10452 fn = tg3_msi_1shot;
10453 flags = 0;
10454 } else {
10455 fn = tg3_interrupt;
10456 if (tg3_flag(tp, TAGGED_STATUS))
10457 fn = tg3_interrupt_tagged;
10458 flags = IRQF_SHARED;
10459 }
10460
10461 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10462 }
10463
10464 static int tg3_test_interrupt(struct tg3 *tp)
10465 {
10466 struct tg3_napi *tnapi = &tp->napi[0];
10467 struct net_device *dev = tp->dev;
10468 int err, i, intr_ok = 0;
10469 u32 val;
10470
10471 if (!netif_running(dev))
10472 return -ENODEV;
10473
10474 tg3_disable_ints(tp);
10475
10476 free_irq(tnapi->irq_vec, tnapi);
10477
10478 /*
10479 * Turn off MSI one-shot mode. Otherwise this test has no
10480 * way to observe whether the interrupt was delivered.
10481 */
10482 if (tg3_flag(tp, 57765_PLUS)) {
10483 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10484 tw32(MSGINT_MODE, val);
10485 }
10486
10487 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10488 IRQF_SHARED, dev->name, tnapi);
10489 if (err)
10490 return err;
10491
10492 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10493 tg3_enable_ints(tp);
10494
10495 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10496 tnapi->coal_now);
10497
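/* Poll for up to ~50 ms for evidence that the test interrupt fired:
 * either the ISR wrote the interrupt mailbox or it masked PCI
 * interrupts via MISC_HOST_CTRL.
 */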
10498 for (i = 0; i < 5; i++) {
10499 u32 int_mbox, misc_host_ctrl;
10500
10501 int_mbox = tr32_mailbox(tnapi->int_mbox);
10502 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10503
10504 if ((int_mbox != 0) ||
10505 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10506 intr_ok = 1;
10507 break;
10508 }
10509
10510 if (tg3_flag(tp, 57765_PLUS) &&
10511 tnapi->hw_status->status_tag != tnapi->last_tag)
10512 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10513
10514 msleep(10);
10515 }
10516
10517 tg3_disable_ints(tp);
10518
10519 free_irq(tnapi->irq_vec, tnapi);
10520
10521 err = tg3_request_irq(tp, 0);
10522
10523 if (err)
10524 return err;
10525
10526 if (intr_ok) {
10527 /* Re-enable MSI one-shot mode. */
10528 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10529 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10530 tw32(MSGINT_MODE, val);
10531 }
10532 return 0;
10533 }
10534
10535 return -EIO;
10536 }
10537
10538 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10539 * INTx mode is successfully restored.
10540 */
10541 static int tg3_test_msi(struct tg3 *tp)
10542 {
10543 int err;
10544 u16 pci_cmd;
10545
10546 if (!tg3_flag(tp, USING_MSI))
10547 return 0;
10548
10549 /* Turn off SERR reporting in case MSI terminates with Master
10550 * Abort.
10551 */
10552 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10553 pci_write_config_word(tp->pdev, PCI_COMMAND,
10554 pci_cmd & ~PCI_COMMAND_SERR);
10555
10556 err = tg3_test_interrupt(tp);
10557
10558 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10559
10560 if (!err)
10561 return 0;
10562
10563 /* other failures */
10564 if (err != -EIO)
10565 return err;
10566
10567 /* MSI test failed, go back to INTx mode */
10568 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10569 "to INTx mode. Please report this failure to the PCI "
10570 "maintainer and include system chipset information\n");
10571
10572 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10573
10574 pci_disable_msi(tp->pdev);
10575
10576 tg3_flag_clear(tp, USING_MSI);
10577 tp->napi[0].irq_vec = tp->pdev->irq;
10578
10579 err = tg3_request_irq(tp, 0);
10580 if (err)
10581 return err;
10582
10583 /* Need to reset the chip because the MSI cycle may have terminated
10584 * with Master Abort.
10585 */
10586 tg3_full_lock(tp, 1);
10587
10588 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10589 err = tg3_init_hw(tp, 1);
10590
10591 tg3_full_unlock(tp);
10592
10593 if (err)
10594 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10595
10596 return err;
10597 }
10598
10599 static int tg3_request_firmware(struct tg3 *tp)
10600 {
10601 const __be32 *fw_data;
10602
10603 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10604 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10605 tp->fw_needed);
10606 return -ENOENT;
10607 }
10608
10609 fw_data = (void *)tp->fw->data;
10610
10611 /* Firmware blob starts with version numbers, followed by
10612 * start address and _full_ length including BSS sections
10613 * (which must be longer than the actual data, of course).
10614 */
10615
10616 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10617 if (tp->fw_len < (tp->fw->size - 12)) {
10618 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10619 tp->fw_len, tp->fw_needed);
10620 release_firmware(tp->fw);
10621 tp->fw = NULL;
10622 return -EINVAL;
10623 }
10624
10625 /* We no longer need firmware; we have it. */
10626 tp->fw_needed = NULL;
10627 return 0;
10628 }
10629
10630 static u32 tg3_irq_count(struct tg3 *tp)
10631 {
10632 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10633
10634 if (irq_cnt > 1) {
10635 /* We want as many RX rings enabled as there are CPUs.
10636 * In multiqueue MSI-X mode, the first MSI-X vector
10637 * only deals with link interrupts, etc., so we add
10638 * one to the number of vectors we are requesting.
10639 */
10640 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10641 }
10642
10643 return irq_cnt;
10644 }
10645
10646 static bool tg3_enable_msix(struct tg3 *tp)
10647 {
10648 int i, rc;
10649 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10650
10651 tp->txq_cnt = tp->txq_req;
10652 tp->rxq_cnt = tp->rxq_req;
10653 if (!tp->rxq_cnt)
10654 tp->rxq_cnt = netif_get_num_default_rss_queues();
10655 if (tp->rxq_cnt > tp->rxq_max)
10656 tp->rxq_cnt = tp->rxq_max;
10657
10658 /* Disable multiple TX rings by default. Simple round-robin hardware
10659 * scheduling of the TX rings can cause starvation of rings with
10660 * small packets when other rings have TSO or jumbo packets.
10661 */
10662 if (!tp->txq_req)
10663 tp->txq_cnt = 1;
10664
10665 tp->irq_cnt = tg3_irq_count(tp);
10666
10667 for (i = 0; i < tp->irq_max; i++) {
10668 msix_ent[i].entry = i;
10669 msix_ent[i].vector = 0;
10670 }
10671
10672 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10673 if (rc < 0) {
10674 return false;
10675 } else if (rc != 0) {
10676 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10677 return false;
10678 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10679 tp->irq_cnt, rc);
10680 tp->irq_cnt = rc;
10681 tp->rxq_cnt = max(rc - 1, 1);
10682 if (tp->txq_cnt)
10683 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10684 }
10685
10686 for (i = 0; i < tp->irq_max; i++)
10687 tp->napi[i].irq_vec = msix_ent[i].vector;
10688
10689 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10690 pci_disable_msix(tp->pdev);
10691 return false;
10692 }
10693
10694 if (tp->irq_cnt == 1)
10695 return true;
10696
10697 tg3_flag_set(tp, ENABLE_RSS);
10698
10699 if (tp->txq_cnt > 1)
10700 tg3_flag_set(tp, ENABLE_TSS);
10701
10702 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10703
10704 return true;
10705 }
10706
10707 static void tg3_ints_init(struct tg3 *tp)
10708 {
10709 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10710 !tg3_flag(tp, TAGGED_STATUS)) {
10711 /* All MSI-supporting chips should support tagged
10712 * status. Assert that this is the case.
10713 */
10714 netdev_warn(tp->dev,
10715 "MSI without TAGGED_STATUS? Not using MSI\n");
10716 goto defcfg;
10717 }
10718
10719 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10720 tg3_flag_set(tp, USING_MSIX);
10721 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10722 tg3_flag_set(tp, USING_MSI);
10723
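/* Program the chip's MSI mode register to match: enable multivector
 * delivery when using MSI-X with more than one vector, and disable
 * one-shot mode when the 1SHOT_MSI flag is not set.
 */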
10724 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10725 u32 msi_mode = tr32(MSGINT_MODE);
10726 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10727 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10728 if (!tg3_flag(tp, 1SHOT_MSI))
10729 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10730 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10731 }
10732 defcfg:
10733 if (!tg3_flag(tp, USING_MSIX)) {
10734 tp->irq_cnt = 1;
10735 tp->napi[0].irq_vec = tp->pdev->irq;
10736 }
10737
10738 if (tp->irq_cnt == 1) {
10739 tp->txq_cnt = 1;
10740 tp->rxq_cnt = 1;
10741 netif_set_real_num_tx_queues(tp->dev, 1);
10742 netif_set_real_num_rx_queues(tp->dev, 1);
10743 }
10744 }
10745
10746 static void tg3_ints_fini(struct tg3 *tp)
10747 {
10748 if (tg3_flag(tp, USING_MSIX))
10749 pci_disable_msix(tp->pdev);
10750 else if (tg3_flag(tp, USING_MSI))
10751 pci_disable_msi(tp->pdev);
10752 tg3_flag_clear(tp, USING_MSI);
10753 tg3_flag_clear(tp, USING_MSIX);
10754 tg3_flag_clear(tp, ENABLE_RSS);
10755 tg3_flag_clear(tp, ENABLE_TSS);
10756 }
10757
10758 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10759 bool init)
10760 {
10761 struct net_device *dev = tp->dev;
10762 int i, err;
10763
10764 /*
10765 * Set up interrupts first so we know how
10766 * many NAPI resources to allocate.
10767 */
10768 tg3_ints_init(tp);
10769
10770 tg3_rss_check_indir_tbl(tp);
10771
10772 /* The placement of this call is tied
10773 * to the setup and use of Host TX descriptors.
10774 */
10775 err = tg3_alloc_consistent(tp);
10776 if (err)
10777 goto err_out1;
10778
10779 tg3_napi_init(tp);
10780
10781 tg3_napi_enable(tp);
10782
10783 for (i = 0; i < tp->irq_cnt; i++) {
10784 struct tg3_napi *tnapi = &tp->napi[i];
10785 err = tg3_request_irq(tp, i);
10786 if (err) {
10787 for (i--; i >= 0; i--) {
10788 tnapi = &tp->napi[i];
10789 free_irq(tnapi->irq_vec, tnapi);
10790 }
10791 goto err_out2;
10792 }
10793 }
10794
10795 tg3_full_lock(tp, 0);
10796
10797 err = tg3_init_hw(tp, reset_phy);
10798 if (err) {
10799 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10800 tg3_free_rings(tp);
10801 }
10802
10803 tg3_full_unlock(tp);
10804
10805 if (err)
10806 goto err_out3;
10807
10808 if (test_irq && tg3_flag(tp, USING_MSI)) {
10809 err = tg3_test_msi(tp);
10810
10811 if (err) {
10812 tg3_full_lock(tp, 0);
10813 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10814 tg3_free_rings(tp);
10815 tg3_full_unlock(tp);
10816
10817 goto err_out2;
10818 }
10819
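/* On pre-57765 chips, one-shot MSI is enabled through the PCIe
 * transaction configuration register rather than MSGINT_MODE.
 */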
10820 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10821 u32 val = tr32(PCIE_TRANSACTION_CFG);
10822
10823 tw32(PCIE_TRANSACTION_CFG,
10824 val | PCIE_TRANS_CFG_1SHOT_MSI);
10825 }
10826 }
10827
10828 tg3_phy_start(tp);
10829
10830 tg3_hwmon_open(tp);
10831
10832 tg3_full_lock(tp, 0);
10833
10834 tg3_timer_start(tp);
10835 tg3_flag_set(tp, INIT_COMPLETE);
10836 tg3_enable_ints(tp);
10837
10838 if (init)
10839 tg3_ptp_init(tp);
10840 else
10841 tg3_ptp_resume(tp);
10842
10843
10844 tg3_full_unlock(tp);
10845
10846 netif_tx_start_all_queues(dev);
10847
10848 /*
10849 * Reset the loopback feature if it was turned on while the device
10850 * was down; make sure that it is reinstalled properly now.
10851 */
10852 if (dev->features & NETIF_F_LOOPBACK)
10853 tg3_set_loopback(dev, dev->features);
10854
10855 return 0;
10856
10857 err_out3:
10858 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10859 struct tg3_napi *tnapi = &tp->napi[i];
10860 free_irq(tnapi->irq_vec, tnapi);
10861 }
10862
10863 err_out2:
10864 tg3_napi_disable(tp);
10865 tg3_napi_fini(tp);
10866 tg3_free_consistent(tp);
10867
10868 err_out1:
10869 tg3_ints_fini(tp);
10870
10871 return err;
10872 }
10873
10874 static void tg3_stop(struct tg3 *tp)
10875 {
10876 int i;
10877
10878 tg3_reset_task_cancel(tp);
10879 tg3_netif_stop(tp);
10880
10881 tg3_timer_stop(tp);
10882
10883 tg3_hwmon_close(tp);
10884
10885 tg3_phy_stop(tp);
10886
10887 tg3_full_lock(tp, 1);
10888
10889 tg3_disable_ints(tp);
10890
10891 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10892 tg3_free_rings(tp);
10893 tg3_flag_clear(tp, INIT_COMPLETE);
10894
10895 tg3_full_unlock(tp);
10896
10897 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10898 struct tg3_napi *tnapi = &tp->napi[i];
10899 free_irq(tnapi->irq_vec, tnapi);
10900 }
10901
10902 tg3_ints_fini(tp);
10903
10904 tg3_napi_fini(tp);
10905
10906 tg3_free_consistent(tp);
10907 }
10908
10909 static int tg3_open(struct net_device *dev)
10910 {
10911 struct tg3 *tp = netdev_priv(dev);
10912 int err;
10913
10914 if (tp->fw_needed) {
10915 err = tg3_request_firmware(tp);
10916 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10917 if (err)
10918 return err;
10919 } else if (err) {
10920 netdev_warn(tp->dev, "TSO capability disabled\n");
10921 tg3_flag_clear(tp, TSO_CAPABLE);
10922 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10923 netdev_notice(tp->dev, "TSO capability restored\n");
10924 tg3_flag_set(tp, TSO_CAPABLE);
10925 }
10926 }
10927
10928 tg3_carrier_off(tp);
10929
10930 err = tg3_power_up(tp);
10931 if (err)
10932 return err;
10933
10934 tg3_full_lock(tp, 0);
10935
10936 tg3_disable_ints(tp);
10937 tg3_flag_clear(tp, INIT_COMPLETE);
10938
10939 tg3_full_unlock(tp);
10940
10941 err = tg3_start(tp, true, true, true);
10942 if (err) {
10943 tg3_frob_aux_power(tp, false);
10944 pci_set_power_state(tp->pdev, PCI_D3hot);
10945 }
10946
10947 if (tg3_flag(tp, PTP_CAPABLE)) {
10948 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10949 &tp->pdev->dev);
10950 if (IS_ERR(tp->ptp_clock))
10951 tp->ptp_clock = NULL;
10952 }
10953
10954 return err;
10955 }
10956
10957 static int tg3_close(struct net_device *dev)
10958 {
10959 struct tg3 *tp = netdev_priv(dev);
10960
10961 tg3_ptp_fini(tp);
10962
10963 tg3_stop(tp);
10964
10965 /* Clear stats across close / open calls */
10966 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10967 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10968
10969 tg3_power_down(tp);
10970
10971 tg3_carrier_off(tp);
10972
10973 return 0;
10974 }
10975
10976 static inline u64 get_stat64(tg3_stat64_t *val)
10977 {
10978 return ((u64)val->high << 32) | ((u64)val->low);
10979 }
10980
10981 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10982 {
10983 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10984
10985 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10986 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
10987 tg3_asic_rev(tp) == ASIC_REV_5701)) {
10988 u32 val;
10989
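/* On 5700/5701 copper devices the FCS error count is kept by the
 * PHY: turn on the CRC counter via TEST1 and read it from the RXR
 * counter register.  The counter appears to clear on read, hence
 * the software accumulation below.
 */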
10990 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10991 tg3_writephy(tp, MII_TG3_TEST1,
10992 val | MII_TG3_TEST1_CRC_EN);
10993 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10994 } else
10995 val = 0;
10996
10997 tp->phy_crc_errors += val;
10998
10999 return tp->phy_crc_errors;
11000 }
11001
11002 return get_stat64(&hw_stats->rx_fcs_errors);
11003 }
11004
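/* Ethtool stats accumulate across chip resets: each counter is the
 * live hardware value added on top of the snapshot saved in
 * tp->estats_prev (cleared across close/open).
 */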
11005 #define ESTAT_ADD(member) \
11006 estats->member = old_estats->member + \
11007 get_stat64(&hw_stats->member)
11008
11009 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11010 {
11011 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11012 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11013
11014 ESTAT_ADD(rx_octets);
11015 ESTAT_ADD(rx_fragments);
11016 ESTAT_ADD(rx_ucast_packets);
11017 ESTAT_ADD(rx_mcast_packets);
11018 ESTAT_ADD(rx_bcast_packets);
11019 ESTAT_ADD(rx_fcs_errors);
11020 ESTAT_ADD(rx_align_errors);
11021 ESTAT_ADD(rx_xon_pause_rcvd);
11022 ESTAT_ADD(rx_xoff_pause_rcvd);
11023 ESTAT_ADD(rx_mac_ctrl_rcvd);
11024 ESTAT_ADD(rx_xoff_entered);
11025 ESTAT_ADD(rx_frame_too_long_errors);
11026 ESTAT_ADD(rx_jabbers);
11027 ESTAT_ADD(rx_undersize_packets);
11028 ESTAT_ADD(rx_in_length_errors);
11029 ESTAT_ADD(rx_out_length_errors);
11030 ESTAT_ADD(rx_64_or_less_octet_packets);
11031 ESTAT_ADD(rx_65_to_127_octet_packets);
11032 ESTAT_ADD(rx_128_to_255_octet_packets);
11033 ESTAT_ADD(rx_256_to_511_octet_packets);
11034 ESTAT_ADD(rx_512_to_1023_octet_packets);
11035 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11036 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11037 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11038 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11039 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11040
11041 ESTAT_ADD(tx_octets);
11042 ESTAT_ADD(tx_collisions);
11043 ESTAT_ADD(tx_xon_sent);
11044 ESTAT_ADD(tx_xoff_sent);
11045 ESTAT_ADD(tx_flow_control);
11046 ESTAT_ADD(tx_mac_errors);
11047 ESTAT_ADD(tx_single_collisions);
11048 ESTAT_ADD(tx_mult_collisions);
11049 ESTAT_ADD(tx_deferred);
11050 ESTAT_ADD(tx_excessive_collisions);
11051 ESTAT_ADD(tx_late_collisions);
11052 ESTAT_ADD(tx_collide_2times);
11053 ESTAT_ADD(tx_collide_3times);
11054 ESTAT_ADD(tx_collide_4times);
11055 ESTAT_ADD(tx_collide_5times);
11056 ESTAT_ADD(tx_collide_6times);
11057 ESTAT_ADD(tx_collide_7times);
11058 ESTAT_ADD(tx_collide_8times);
11059 ESTAT_ADD(tx_collide_9times);
11060 ESTAT_ADD(tx_collide_10times);
11061 ESTAT_ADD(tx_collide_11times);
11062 ESTAT_ADD(tx_collide_12times);
11063 ESTAT_ADD(tx_collide_13times);
11064 ESTAT_ADD(tx_collide_14times);
11065 ESTAT_ADD(tx_collide_15times);
11066 ESTAT_ADD(tx_ucast_packets);
11067 ESTAT_ADD(tx_mcast_packets);
11068 ESTAT_ADD(tx_bcast_packets);
11069 ESTAT_ADD(tx_carrier_sense_errors);
11070 ESTAT_ADD(tx_discards);
11071 ESTAT_ADD(tx_errors);
11072
11073 ESTAT_ADD(dma_writeq_full);
11074 ESTAT_ADD(dma_write_prioq_full);
11075 ESTAT_ADD(rxbds_empty);
11076 ESTAT_ADD(rx_discards);
11077 ESTAT_ADD(rx_errors);
11078 ESTAT_ADD(rx_threshold_hit);
11079
11080 ESTAT_ADD(dma_readq_full);
11081 ESTAT_ADD(dma_read_prioq_full);
11082 ESTAT_ADD(tx_comp_queue_full);
11083
11084 ESTAT_ADD(ring_set_send_prod_index);
11085 ESTAT_ADD(ring_status_update);
11086 ESTAT_ADD(nic_irqs);
11087 ESTAT_ADD(nic_avoided_irqs);
11088 ESTAT_ADD(nic_tx_threshold_hit);
11089
11090 ESTAT_ADD(mbuf_lwm_thresh_hit);
11091 }
11092
11093 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11094 {
11095 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11096 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11097
11098 stats->rx_packets = old_stats->rx_packets +
11099 get_stat64(&hw_stats->rx_ucast_packets) +
11100 get_stat64(&hw_stats->rx_mcast_packets) +
11101 get_stat64(&hw_stats->rx_bcast_packets);
11102
11103 stats->tx_packets = old_stats->tx_packets +
11104 get_stat64(&hw_stats->tx_ucast_packets) +
11105 get_stat64(&hw_stats->tx_mcast_packets) +
11106 get_stat64(&hw_stats->tx_bcast_packets);
11107
11108 stats->rx_bytes = old_stats->rx_bytes +
11109 get_stat64(&hw_stats->rx_octets);
11110 stats->tx_bytes = old_stats->tx_bytes +
11111 get_stat64(&hw_stats->tx_octets);
11112
11113 stats->rx_errors = old_stats->rx_errors +
11114 get_stat64(&hw_stats->rx_errors);
11115 stats->tx_errors = old_stats->tx_errors +
11116 get_stat64(&hw_stats->tx_errors) +
11117 get_stat64(&hw_stats->tx_mac_errors) +
11118 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11119 get_stat64(&hw_stats->tx_discards);
11120
11121 stats->multicast = old_stats->multicast +
11122 get_stat64(&hw_stats->rx_mcast_packets);
11123 stats->collisions = old_stats->collisions +
11124 get_stat64(&hw_stats->tx_collisions);
11125
11126 stats->rx_length_errors = old_stats->rx_length_errors +
11127 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11128 get_stat64(&hw_stats->rx_undersize_packets);
11129
11130 stats->rx_over_errors = old_stats->rx_over_errors +
11131 get_stat64(&hw_stats->rxbds_empty);
11132 stats->rx_frame_errors = old_stats->rx_frame_errors +
11133 get_stat64(&hw_stats->rx_align_errors);
11134 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11135 get_stat64(&hw_stats->tx_discards);
11136 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11137 get_stat64(&hw_stats->tx_carrier_sense_errors);
11138
11139 stats->rx_crc_errors = old_stats->rx_crc_errors +
11140 tg3_calc_crc_errors(tp);
11141
11142 stats->rx_missed_errors = old_stats->rx_missed_errors +
11143 get_stat64(&hw_stats->rx_discards);
11144
11145 stats->rx_dropped = tp->rx_dropped;
11146 stats->tx_dropped = tp->tx_dropped;
11147 }
11148
11149 static int tg3_get_regs_len(struct net_device *dev)
11150 {
11151 return TG3_REG_BLK_SIZE;
11152 }
11153
11154 static void tg3_get_regs(struct net_device *dev,
11155 struct ethtool_regs *regs, void *_p)
11156 {
11157 struct tg3 *tp = netdev_priv(dev);
11158
11159 regs->version = 0;
11160
11161 memset(_p, 0, TG3_REG_BLK_SIZE);
11162
11163 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11164 return;
11165
11166 tg3_full_lock(tp, 0);
11167
11168 tg3_dump_legacy_regs(tp, (u32 *)_p);
11169
11170 tg3_full_unlock(tp);
11171 }
11172
11173 static int tg3_get_eeprom_len(struct net_device *dev)
11174 {
11175 struct tg3 *tp = netdev_priv(dev);
11176
11177 return tp->nvram_size;
11178 }
11179
11180 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11181 {
11182 struct tg3 *tp = netdev_priv(dev);
11183 int ret;
11184 u8 *pd;
11185 u32 i, offset, len, b_offset, b_count;
11186 __be32 val;
11187
11188 if (tg3_flag(tp, NO_NVRAM))
11189 return -EINVAL;
11190
11191 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11192 return -EAGAIN;
11193
11194 offset = eeprom->offset;
11195 len = eeprom->len;
11196 eeprom->len = 0;
11197
11198 eeprom->magic = TG3_EEPROM_MAGIC;
11199
11200 if (offset & 3) {
11201 /* adjustments to start on required 4 byte boundary */
11202 b_offset = offset & 3;
11203 b_count = 4 - b_offset;
11204 if (b_count > len) {
11205 /* i.e. offset=1 len=2 */
11206 b_count = len;
11207 }
11208 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11209 if (ret)
11210 return ret;
11211 memcpy(data, ((char *)&val) + b_offset, b_count);
11212 len -= b_count;
11213 offset += b_count;
11214 eeprom->len += b_count;
11215 }
11216
11217 /* read bytes up to the last 4 byte boundary */
11218 pd = &data[eeprom->len];
11219 for (i = 0; i < (len - (len & 3)); i += 4) {
11220 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11221 if (ret) {
11222 eeprom->len += i;
11223 return ret;
11224 }
11225 memcpy(pd + i, &val, 4);
11226 }
11227 eeprom->len += i;
11228
11229 if (len & 3) {
11230 /* read last bytes not ending on 4 byte boundary */
11231 pd = &data[eeprom->len];
11232 b_count = len & 3;
11233 b_offset = offset + len - b_count;
11234 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11235 if (ret)
11236 return ret;
11237 memcpy(pd, &val, b_count);
11238 eeprom->len += b_count;
11239 }
11240 return 0;
11241 }
11242
11243 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11244 {
11245 struct tg3 *tp = netdev_priv(dev);
11246 int ret;
11247 u32 offset, len, b_offset, odd_len;
11248 u8 *buf;
11249 __be32 start, end;
11250
11251 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11252 return -EAGAIN;
11253
11254 if (tg3_flag(tp, NO_NVRAM) ||
11255 eeprom->magic != TG3_EEPROM_MAGIC)
11256 return -EINVAL;
11257
11258 offset = eeprom->offset;
11259 len = eeprom->len;
11260
11261 if ((b_offset = (offset & 3))) {
11262 /* adjustments to start on required 4 byte boundary */
11263 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11264 if (ret)
11265 return ret;
11266 len += b_offset;
11267 offset &= ~3;
11268 if (len < 4)
11269 len = 4;
11270 }
11271
11272 odd_len = 0;
11273 if (len & 3) {
11274 /* adjustments to end on required 4 byte boundary */
11275 odd_len = 1;
11276 len = (len + 3) & ~3;
11277 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11278 if (ret)
11279 return ret;
11280 }
11281
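/* NVRAM writes must be 32-bit aligned at both ends: splice the
 * user's bytes into a bounce buffer between the flanking words
 * read above.
 */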
11282 buf = data;
11283 if (b_offset || odd_len) {
11284 buf = kmalloc(len, GFP_KERNEL);
11285 if (!buf)
11286 return -ENOMEM;
11287 if (b_offset)
11288 memcpy(buf, &start, 4);
11289 if (odd_len)
11290 memcpy(buf+len-4, &end, 4);
11291 memcpy(buf + b_offset, data, eeprom->len);
11292 }
11293
11294 ret = tg3_nvram_write_block(tp, offset, len, buf);
11295
11296 if (buf != data)
11297 kfree(buf);
11298
11299 return ret;
11300 }
11301
11302 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11303 {
11304 struct tg3 *tp = netdev_priv(dev);
11305
11306 if (tg3_flag(tp, USE_PHYLIB)) {
11307 struct phy_device *phydev;
11308 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11309 return -EAGAIN;
11310 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11311 return phy_ethtool_gset(phydev, cmd);
11312 }
11313
11314 cmd->supported = (SUPPORTED_Autoneg);
11315
11316 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11317 cmd->supported |= (SUPPORTED_1000baseT_Half |
11318 SUPPORTED_1000baseT_Full);
11319
11320 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11321 cmd->supported |= (SUPPORTED_100baseT_Half |
11322 SUPPORTED_100baseT_Full |
11323 SUPPORTED_10baseT_Half |
11324 SUPPORTED_10baseT_Full |
11325 SUPPORTED_TP);
11326 cmd->port = PORT_TP;
11327 } else {
11328 cmd->supported |= SUPPORTED_FIBRE;
11329 cmd->port = PORT_FIBRE;
11330 }
11331
11332 cmd->advertising = tp->link_config.advertising;
11333 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11334 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11335 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11336 cmd->advertising |= ADVERTISED_Pause;
11337 } else {
11338 cmd->advertising |= ADVERTISED_Pause |
11339 ADVERTISED_Asym_Pause;
11340 }
11341 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11342 cmd->advertising |= ADVERTISED_Asym_Pause;
11343 }
11344 }
11345 if (netif_running(dev) && tp->link_up) {
11346 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11347 cmd->duplex = tp->link_config.active_duplex;
11348 cmd->lp_advertising = tp->link_config.rmt_adv;
11349 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11350 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11351 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11352 else
11353 cmd->eth_tp_mdix = ETH_TP_MDI;
11354 }
11355 } else {
11356 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11357 cmd->duplex = DUPLEX_UNKNOWN;
11358 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11359 }
11360 cmd->phy_address = tp->phy_addr;
11361 cmd->transceiver = XCVR_INTERNAL;
11362 cmd->autoneg = tp->link_config.autoneg;
11363 cmd->maxtxpkt = 0;
11364 cmd->maxrxpkt = 0;
11365 return 0;
11366 }
11367
11368 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11369 {
11370 struct tg3 *tp = netdev_priv(dev);
11371 u32 speed = ethtool_cmd_speed(cmd);
11372
11373 if (tg3_flag(tp, USE_PHYLIB)) {
11374 struct phy_device *phydev;
11375 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11376 return -EAGAIN;
11377 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11378 return phy_ethtool_sset(phydev, cmd);
11379 }
11380
11381 if (cmd->autoneg != AUTONEG_ENABLE &&
11382 cmd->autoneg != AUTONEG_DISABLE)
11383 return -EINVAL;
11384
11385 if (cmd->autoneg == AUTONEG_DISABLE &&
11386 cmd->duplex != DUPLEX_FULL &&
11387 cmd->duplex != DUPLEX_HALF)
11388 return -EINVAL;
11389
11390 if (cmd->autoneg == AUTONEG_ENABLE) {
11391 u32 mask = ADVERTISED_Autoneg |
11392 ADVERTISED_Pause |
11393 ADVERTISED_Asym_Pause;
11394
11395 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11396 mask |= ADVERTISED_1000baseT_Half |
11397 ADVERTISED_1000baseT_Full;
11398
11399 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11400 mask |= ADVERTISED_100baseT_Half |
11401 ADVERTISED_100baseT_Full |
11402 ADVERTISED_10baseT_Half |
11403 ADVERTISED_10baseT_Full |
11404 ADVERTISED_TP;
11405 else
11406 mask |= ADVERTISED_FIBRE;
11407
11408 if (cmd->advertising & ~mask)
11409 return -EINVAL;
11410
11411 mask &= (ADVERTISED_1000baseT_Half |
11412 ADVERTISED_1000baseT_Full |
11413 ADVERTISED_100baseT_Half |
11414 ADVERTISED_100baseT_Full |
11415 ADVERTISED_10baseT_Half |
11416 ADVERTISED_10baseT_Full);
11417
11418 cmd->advertising &= mask;
11419 } else {
11420 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11421 if (speed != SPEED_1000)
11422 return -EINVAL;
11423
11424 if (cmd->duplex != DUPLEX_FULL)
11425 return -EINVAL;
11426 } else {
11427 if (speed != SPEED_100 &&
11428 speed != SPEED_10)
11429 return -EINVAL;
11430 }
11431 }
11432
11433 tg3_full_lock(tp, 0);
11434
11435 tp->link_config.autoneg = cmd->autoneg;
11436 if (cmd->autoneg == AUTONEG_ENABLE) {
11437 tp->link_config.advertising = (cmd->advertising |
11438 ADVERTISED_Autoneg);
11439 tp->link_config.speed = SPEED_UNKNOWN;
11440 tp->link_config.duplex = DUPLEX_UNKNOWN;
11441 } else {
11442 tp->link_config.advertising = 0;
11443 tp->link_config.speed = speed;
11444 tp->link_config.duplex = cmd->duplex;
11445 }
11446
11447 if (netif_running(dev))
11448 tg3_setup_phy(tp, 1);
11449
11450 tg3_full_unlock(tp);
11451
11452 return 0;
11453 }
11454
11455 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11456 {
11457 struct tg3 *tp = netdev_priv(dev);
11458
11459 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11460 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11461 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11462 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11463 }
11464
11465 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11466 {
11467 struct tg3 *tp = netdev_priv(dev);
11468
11469 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11470 wol->supported = WAKE_MAGIC;
11471 else
11472 wol->supported = 0;
11473 wol->wolopts = 0;
11474 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11475 wol->wolopts = WAKE_MAGIC;
11476 memset(&wol->sopass, 0, sizeof(wol->sopass));
11477 }
11478
11479 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11480 {
11481 struct tg3 *tp = netdev_priv(dev);
11482 struct device *dp = &tp->pdev->dev;
11483
11484 if (wol->wolopts & ~WAKE_MAGIC)
11485 return -EINVAL;
11486 if ((wol->wolopts & WAKE_MAGIC) &&
11487 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11488 return -EINVAL;
11489
11490 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11491
11492 spin_lock_bh(&tp->lock);
11493 if (device_may_wakeup(dp))
11494 tg3_flag_set(tp, WOL_ENABLE);
11495 else
11496 tg3_flag_clear(tp, WOL_ENABLE);
11497 spin_unlock_bh(&tp->lock);
11498
11499 return 0;
11500 }
11501
11502 static u32 tg3_get_msglevel(struct net_device *dev)
11503 {
11504 struct tg3 *tp = netdev_priv(dev);
11505 return tp->msg_enable;
11506 }
11507
11508 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11509 {
11510 struct tg3 *tp = netdev_priv(dev);
11511 tp->msg_enable = value;
11512 }
11513
11514 static int tg3_nway_reset(struct net_device *dev)
11515 {
11516 struct tg3 *tp = netdev_priv(dev);
11517 int r;
11518
11519 if (!netif_running(dev))
11520 return -EAGAIN;
11521
11522 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11523 return -EINVAL;
11524
11525 if (tg3_flag(tp, USE_PHYLIB)) {
11526 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11527 return -EAGAIN;
11528 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11529 } else {
11530 u32 bmcr;
11531
11532 spin_lock_bh(&tp->lock);
11533 r = -EINVAL;
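/* BMCR is deliberately read twice; only the second, checked read
 * is used (the first is believed to flush a stale value).
 */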
11534 tg3_readphy(tp, MII_BMCR, &bmcr);
11535 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11536 ((bmcr & BMCR_ANENABLE) ||
11537 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11538 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11539 BMCR_ANENABLE);
11540 r = 0;
11541 }
11542 spin_unlock_bh(&tp->lock);
11543 }
11544
11545 return r;
11546 }
11547
11548 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11549 {
11550 struct tg3 *tp = netdev_priv(dev);
11551
11552 ering->rx_max_pending = tp->rx_std_ring_mask;
11553 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11554 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11555 else
11556 ering->rx_jumbo_max_pending = 0;
11557
11558 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11559
11560 ering->rx_pending = tp->rx_pending;
11561 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11562 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11563 else
11564 ering->rx_jumbo_pending = 0;
11565
11566 ering->tx_pending = tp->napi[0].tx_pending;
11567 }
11568
11569 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11570 {
11571 struct tg3 *tp = netdev_priv(dev);
11572 int i, irq_sync = 0, err = 0;
11573
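/* Reject sizes the rings cannot hold, and require enough TX
 * descriptors for a maximally fragmented skb (three times that
 * when the TSO workaround is in play).
 */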
11574 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11575 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11576 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11577 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11578 (tg3_flag(tp, TSO_BUG) &&
11579 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11580 return -EINVAL;
11581
11582 if (netif_running(dev)) {
11583 tg3_phy_stop(tp);
11584 tg3_netif_stop(tp);
11585 irq_sync = 1;
11586 }
11587
11588 tg3_full_lock(tp, irq_sync);
11589
11590 tp->rx_pending = ering->rx_pending;
11591
11592 if (tg3_flag(tp, MAX_RXPEND_64) &&
11593 tp->rx_pending > 63)
11594 tp->rx_pending = 63;
11595 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11596
11597 for (i = 0; i < tp->irq_max; i++)
11598 tp->napi[i].tx_pending = ering->tx_pending;
11599
11600 if (netif_running(dev)) {
11601 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11602 err = tg3_restart_hw(tp, 1);
11603 if (!err)
11604 tg3_netif_start(tp);
11605 }
11606
11607 tg3_full_unlock(tp);
11608
11609 if (irq_sync && !err)
11610 tg3_phy_start(tp);
11611
11612 return err;
11613 }
11614
11615 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11616 {
11617 struct tg3 *tp = netdev_priv(dev);
11618
11619 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11620
11621 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11622 epause->rx_pause = 1;
11623 else
11624 epause->rx_pause = 0;
11625
11626 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11627 epause->tx_pause = 1;
11628 else
11629 epause->tx_pause = 0;
11630 }
11631
11632 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11633 {
11634 struct tg3 *tp = netdev_priv(dev);
11635 int err = 0;
11636
11637 if (tg3_flag(tp, USE_PHYLIB)) {
11638 u32 newadv;
11639 struct phy_device *phydev;
11640
11641 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11642
11643 if (!(phydev->supported & SUPPORTED_Pause) ||
11644 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11645 (epause->rx_pause != epause->tx_pause)))
11646 return -EINVAL;
11647
11648 tp->link_config.flowctrl = 0;
11649 if (epause->rx_pause) {
11650 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11651
11652 if (epause->tx_pause) {
11653 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11654 newadv = ADVERTISED_Pause;
11655 } else
11656 newadv = ADVERTISED_Pause |
11657 ADVERTISED_Asym_Pause;
11658 } else if (epause->tx_pause) {
11659 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11660 newadv = ADVERTISED_Asym_Pause;
11661 } else
11662 newadv = 0;
11663
11664 if (epause->autoneg)
11665 tg3_flag_set(tp, PAUSE_AUTONEG);
11666 else
11667 tg3_flag_clear(tp, PAUSE_AUTONEG);
11668
11669 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11670 u32 oldadv = phydev->advertising &
11671 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11672 if (oldadv != newadv) {
11673 phydev->advertising &=
11674 ~(ADVERTISED_Pause |
11675 ADVERTISED_Asym_Pause);
11676 phydev->advertising |= newadv;
11677 if (phydev->autoneg) {
11678 /*
11679 * Always renegotiate the link to
11680 * inform our link partner of our
11681 * flow control settings, even if the
11682 * flow control is forced. Let
11683 * tg3_adjust_link() do the final
11684 * flow control setup.
11685 */
11686 return phy_start_aneg(phydev);
11687 }
11688 }
11689
11690 if (!epause->autoneg)
11691 tg3_setup_flow_control(tp, 0, 0);
11692 } else {
11693 tp->link_config.advertising &=
11694 ~(ADVERTISED_Pause |
11695 ADVERTISED_Asym_Pause);
11696 tp->link_config.advertising |= newadv;
11697 }
11698 } else {
11699 int irq_sync = 0;
11700
11701 if (netif_running(dev)) {
11702 tg3_netif_stop(tp);
11703 irq_sync = 1;
11704 }
11705
11706 tg3_full_lock(tp, irq_sync);
11707
11708 if (epause->autoneg)
11709 tg3_flag_set(tp, PAUSE_AUTONEG);
11710 else
11711 tg3_flag_clear(tp, PAUSE_AUTONEG);
11712 if (epause->rx_pause)
11713 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11714 else
11715 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11716 if (epause->tx_pause)
11717 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11718 else
11719 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11720
11721 if (netif_running(dev)) {
11722 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11723 err = tg3_restart_hw(tp, 1);
11724 if (!err)
11725 tg3_netif_start(tp);
11726 }
11727
11728 tg3_full_unlock(tp);
11729 }
11730
11731 return err;
11732 }
11733
11734 static int tg3_get_sset_count(struct net_device *dev, int sset)
11735 {
11736 switch (sset) {
11737 case ETH_SS_TEST:
11738 return TG3_NUM_TEST;
11739 case ETH_SS_STATS:
11740 return TG3_NUM_STATS;
11741 default:
11742 return -EOPNOTSUPP;
11743 }
11744 }
11745
11746 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11747 u32 *rules __always_unused)
11748 {
11749 struct tg3 *tp = netdev_priv(dev);
11750
11751 if (!tg3_flag(tp, SUPPORT_MSIX))
11752 return -EOPNOTSUPP;
11753
11754 switch (info->cmd) {
11755 case ETHTOOL_GRXRINGS:
11756 if (netif_running(tp->dev))
11757 info->data = tp->rxq_cnt;
11758 else {
11759 info->data = num_online_cpus();
11760 if (info->data > TG3_RSS_MAX_NUM_QS)
11761 info->data = TG3_RSS_MAX_NUM_QS;
11762 }
11763
11764 /* The first interrupt vector only
11765 * handles link interrupts.
11766 */
11767 info->data -= 1;
11768 return 0;
11769
11770 default:
11771 return -EOPNOTSUPP;
11772 }
11773 }
11774
11775 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11776 {
11777 u32 size = 0;
11778 struct tg3 *tp = netdev_priv(dev);
11779
11780 if (tg3_flag(tp, SUPPORT_MSIX))
11781 size = TG3_RSS_INDIR_TBL_SIZE;
11782
11783 return size;
11784 }
11785
11786 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11787 {
11788 struct tg3 *tp = netdev_priv(dev);
11789 int i;
11790
11791 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11792 indir[i] = tp->rss_ind_tbl[i];
11793
11794 return 0;
11795 }
11796
11797 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11798 {
11799 struct tg3 *tp = netdev_priv(dev);
11800 size_t i;
11801
11802 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11803 tp->rss_ind_tbl[i] = indir[i];
11804
11805 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11806 return 0;
11807
11808 /* It is legal to write the indirection
11809 * table while the device is running.
11810 */
11811 tg3_full_lock(tp, 0);
11812 tg3_rss_write_indir_tbl(tp);
11813 tg3_full_unlock(tp);
11814
11815 return 0;
11816 }
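
/* Usage sketch (assuming a reasonably recent ethtool binary): a command
 * such as "ethtool -X eth0 equal 4" reaches this handler and spreads the
 * RSS indirection table evenly across four RX rings; per the comment
 * above, the table may be rewritten while the interface is up.
 */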
11817
11818 static void tg3_get_channels(struct net_device *dev,
11819 struct ethtool_channels *channel)
11820 {
11821 struct tg3 *tp = netdev_priv(dev);
11822 u32 deflt_qs = netif_get_num_default_rss_queues();
11823
11824 channel->max_rx = tp->rxq_max;
11825 channel->max_tx = tp->txq_max;
11826
11827 if (netif_running(dev)) {
11828 channel->rx_count = tp->rxq_cnt;
11829 channel->tx_count = tp->txq_cnt;
11830 } else {
11831 if (tp->rxq_req)
11832 channel->rx_count = tp->rxq_req;
11833 else
11834 channel->rx_count = min(deflt_qs, tp->rxq_max);
11835
11836 if (tp->txq_req)
11837 channel->tx_count = tp->txq_req;
11838 else
11839 channel->tx_count = min(deflt_qs, tp->txq_max);
11840 }
11841 }
11842
11843 static int tg3_set_channels(struct net_device *dev,
11844 struct ethtool_channels *channel)
11845 {
11846 struct tg3 *tp = netdev_priv(dev);
11847
11848 if (!tg3_flag(tp, SUPPORT_MSIX))
11849 return -EOPNOTSUPP;
11850
11851 if (channel->rx_count > tp->rxq_max ||
11852 channel->tx_count > tp->txq_max)
11853 return -EINVAL;
11854
11855 tp->rxq_req = channel->rx_count;
11856 tp->txq_req = channel->tx_count;
11857
11858 if (!netif_running(dev))
11859 return 0;
11860
11861 tg3_stop(tp);
11862
11863 tg3_carrier_off(tp);
11864
11865 tg3_start(tp, true, false, false);
11866
11867 return 0;
11868 }
11869
11870 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11871 {
11872 switch (stringset) {
11873 case ETH_SS_STATS:
11874 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11875 break;
11876 case ETH_SS_TEST:
11877 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11878 break;
11879 default:
11880 		WARN_ON(1);	/* unknown stringset - should never be reached */
11881 break;
11882 }
11883 }
11884
11885 static int tg3_set_phys_id(struct net_device *dev,
11886 enum ethtool_phys_id_state state)
11887 {
11888 struct tg3 *tp = netdev_priv(dev);
11889
11890 if (!netif_running(tp->dev))
11891 return -EAGAIN;
11892
11893 switch (state) {
11894 case ETHTOOL_ID_ACTIVE:
11895 return 1; /* cycle on/off once per second */
11896
11897 case ETHTOOL_ID_ON:
11898 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11899 LED_CTRL_1000MBPS_ON |
11900 LED_CTRL_100MBPS_ON |
11901 LED_CTRL_10MBPS_ON |
11902 LED_CTRL_TRAFFIC_OVERRIDE |
11903 LED_CTRL_TRAFFIC_BLINK |
11904 LED_CTRL_TRAFFIC_LED);
11905 break;
11906
11907 case ETHTOOL_ID_OFF:
11908 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11909 LED_CTRL_TRAFFIC_OVERRIDE);
11910 break;
11911
11912 case ETHTOOL_ID_INACTIVE:
11913 tw32(MAC_LED_CTRL, tp->led_ctrl);
11914 break;
11915 }
11916
11917 return 0;
11918 }
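
/* Usage sketch: "ethtool -p eth0 5" is the typical trigger. The core
 * first calls this handler with ETHTOOL_ID_ACTIVE; returning 1 above
 * asks it to alternate ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per second for
 * the requested five seconds, after which ETHTOOL_ID_INACTIVE restores
 * tp->led_ctrl.
 */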
11919
11920 static void tg3_get_ethtool_stats(struct net_device *dev,
11921 struct ethtool_stats *estats, u64 *tmp_stats)
11922 {
11923 struct tg3 *tp = netdev_priv(dev);
11924
11925 if (tp->hw_stats)
11926 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11927 else
11928 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11929 }
11930
11931 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11932 {
11933 int i;
11934 __be32 *buf;
11935 u32 offset = 0, len = 0;
11936 u32 magic, val;
11937
11938 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11939 return NULL;
11940
11941 if (magic == TG3_EEPROM_MAGIC) {
11942 for (offset = TG3_NVM_DIR_START;
11943 offset < TG3_NVM_DIR_END;
11944 offset += TG3_NVM_DIRENT_SIZE) {
11945 if (tg3_nvram_read(tp, offset, &val))
11946 return NULL;
11947
11948 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11949 TG3_NVM_DIRTYPE_EXTVPD)
11950 break;
11951 }
11952
11953 if (offset != TG3_NVM_DIR_END) {
11954 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11955 if (tg3_nvram_read(tp, offset + 4, &offset))
11956 return NULL;
11957
11958 offset = tg3_nvram_logical_addr(tp, offset);
11959 }
11960 }
11961
11962 if (!offset || !len) {
11963 offset = TG3_NVM_VPD_OFF;
11964 len = TG3_NVM_VPD_LEN;
11965 }
11966
11967 buf = kmalloc(len, GFP_KERNEL);
11968 if (buf == NULL)
11969 return NULL;
11970
11971 if (magic == TG3_EEPROM_MAGIC) {
11972 for (i = 0; i < len; i += 4) {
11973 /* The data is in little-endian format in NVRAM.
11974 * Use the big-endian read routines to preserve
11975 * the byte order as it exists in NVRAM.
11976 */
11977 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11978 goto error;
11979 }
11980 } else {
11981 u8 *ptr;
11982 ssize_t cnt;
11983 unsigned int pos = 0;
11984
11985 ptr = (u8 *)&buf[0];
11986 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11987 cnt = pci_read_vpd(tp->pdev, pos,
11988 len - pos, ptr);
11989 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11990 cnt = 0;
11991 else if (cnt < 0)
11992 goto error;
11993 }
11994 if (pos != len)
11995 goto error;
11996 }
11997
11998 *vpdlen = len;
11999
12000 return buf;
12001
12002 error:
12003 kfree(buf);
12004 return NULL;
12005 }
12006
12007 #define NVRAM_TEST_SIZE 0x100
12008 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12009 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12010 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12011 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12012 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12013 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12014 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12015 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12016
12017 static int tg3_test_nvram(struct tg3 *tp)
12018 {
12019 u32 csum, magic, len;
12020 __be32 *buf;
12021 int i, j, k, err = 0, size;
12022
12023 if (tg3_flag(tp, NO_NVRAM))
12024 return 0;
12025
12026 if (tg3_nvram_read(tp, 0, &magic) != 0)
12027 return -EIO;
12028
12029 if (magic == TG3_EEPROM_MAGIC)
12030 size = NVRAM_TEST_SIZE;
12031 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12032 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12033 TG3_EEPROM_SB_FORMAT_1) {
12034 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12035 case TG3_EEPROM_SB_REVISION_0:
12036 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12037 break;
12038 case TG3_EEPROM_SB_REVISION_2:
12039 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12040 break;
12041 case TG3_EEPROM_SB_REVISION_3:
12042 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12043 break;
12044 case TG3_EEPROM_SB_REVISION_4:
12045 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12046 break;
12047 case TG3_EEPROM_SB_REVISION_5:
12048 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12049 break;
12050 case TG3_EEPROM_SB_REVISION_6:
12051 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12052 break;
12053 default:
12054 return -EIO;
12055 }
12056 } else
12057 return 0;
12058 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12059 size = NVRAM_SELFBOOT_HW_SIZE;
12060 else
12061 return -EIO;
12062
12063 buf = kmalloc(size, GFP_KERNEL);
12064 if (buf == NULL)
12065 return -ENOMEM;
12066
12067 err = -EIO;
12068 for (i = 0, j = 0; i < size; i += 4, j++) {
12069 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12070 if (err)
12071 break;
12072 }
12073 if (i < size)
12074 goto out;
12075
12076 /* Selfboot format */
12077 magic = be32_to_cpu(buf[0]);
12078 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12079 TG3_EEPROM_MAGIC_FW) {
12080 u8 *buf8 = (u8 *) buf, csum8 = 0;
12081
12082 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12083 TG3_EEPROM_SB_REVISION_2) {
12084 /* For rev 2, the csum doesn't include the MBA. */
12085 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12086 csum8 += buf8[i];
12087 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12088 csum8 += buf8[i];
12089 } else {
12090 for (i = 0; i < size; i++)
12091 csum8 += buf8[i];
12092 }
12093
12094 if (csum8 == 0) {
12095 err = 0;
12096 goto out;
12097 }
12098
12099 err = -EIO;
12100 goto out;
12101 }
12102
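	/* The hardware selfboot format checked below protects each data
	 * byte with an odd-parity bit: once the bits are separated, a
	 * byte with an even number of set bits must have its parity bit
	 * set and a byte with an odd number must have it clear, so the
	 * combined weight is always odd. The hweight8() checks fail the
	 * test otherwise.
	 */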
12103 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12104 TG3_EEPROM_MAGIC_HW) {
12105 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12106 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12107 u8 *buf8 = (u8 *) buf;
12108
12109 /* Separate the parity bits and the data bytes. */
12110 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12111 if ((i == 0) || (i == 8)) {
12112 int l;
12113 u8 msk;
12114
12115 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12116 parity[k++] = buf8[i] & msk;
12117 i++;
12118 } else if (i == 16) {
12119 int l;
12120 u8 msk;
12121
12122 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12123 parity[k++] = buf8[i] & msk;
12124 i++;
12125
12126 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12127 parity[k++] = buf8[i] & msk;
12128 i++;
12129 }
12130 data[j++] = buf8[i];
12131 }
12132
12133 err = -EIO;
12134 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12135 u8 hw8 = hweight8(data[i]);
12136
12137 if ((hw8 & 0x1) && parity[i])
12138 goto out;
12139 else if (!(hw8 & 0x1) && !parity[i])
12140 goto out;
12141 }
12142 err = 0;
12143 goto out;
12144 }
12145
12146 err = -EIO;
12147
12148 /* Bootstrap checksum at offset 0x10 */
12149 csum = calc_crc((unsigned char *) buf, 0x10);
12150 if (csum != le32_to_cpu(buf[0x10/4]))
12151 goto out;
12152
12153 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12154 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12155 if (csum != le32_to_cpu(buf[0xfc/4]))
12156 goto out;
12157
12158 kfree(buf);
12159
12160 buf = tg3_vpd_readblock(tp, &len);
12161 if (!buf)
12162 return -ENOMEM;
12163
12164 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12165 if (i > 0) {
12166 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12167 if (j < 0)
12168 goto out;
12169
12170 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12171 goto out;
12172
12173 i += PCI_VPD_LRDT_TAG_SIZE;
12174 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12175 PCI_VPD_RO_KEYWORD_CHKSUM);
12176 if (j > 0) {
12177 u8 csum8 = 0;
12178
12179 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12180
12181 for (i = 0; i <= j; i++)
12182 csum8 += ((u8 *)buf)[i];
12183
12184 if (csum8)
12185 goto out;
12186 }
12187 }
12188
12189 err = 0;
12190
12191 out:
12192 kfree(buf);
12193 return err;
12194 }
12195
12196 #define TG3_SERDES_TIMEOUT_SEC 2
12197 #define TG3_COPPER_TIMEOUT_SEC 6
12198
12199 static int tg3_test_link(struct tg3 *tp)
12200 {
12201 int i, max;
12202
12203 if (!netif_running(tp->dev))
12204 return -ENODEV;
12205
12206 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12207 max = TG3_SERDES_TIMEOUT_SEC;
12208 else
12209 max = TG3_COPPER_TIMEOUT_SEC;
12210
12211 for (i = 0; i < max; i++) {
12212 if (tp->link_up)
12213 return 0;
12214
12215 if (msleep_interruptible(1000))
12216 break;
12217 }
12218
12219 return -EIO;
12220 }
12221
12222 /* Only test the commonly used registers */
12223 static int tg3_test_registers(struct tg3 *tp)
12224 {
12225 int i, is_5705, is_5750;
12226 u32 offset, read_mask, write_mask, val, save_val, read_val;
12227 static struct {
12228 u16 offset;
12229 u16 flags;
12230 #define TG3_FL_5705 0x1
12231 #define TG3_FL_NOT_5705 0x2
12232 #define TG3_FL_NOT_5788 0x4
12233 #define TG3_FL_NOT_5750 0x8
12234 u32 read_mask;
12235 u32 write_mask;
12236 } reg_tbl[] = {
12237 /* MAC Control Registers */
12238 { MAC_MODE, TG3_FL_NOT_5705,
12239 0x00000000, 0x00ef6f8c },
12240 { MAC_MODE, TG3_FL_5705,
12241 0x00000000, 0x01ef6b8c },
12242 { MAC_STATUS, TG3_FL_NOT_5705,
12243 0x03800107, 0x00000000 },
12244 { MAC_STATUS, TG3_FL_5705,
12245 0x03800100, 0x00000000 },
12246 { MAC_ADDR_0_HIGH, 0x0000,
12247 0x00000000, 0x0000ffff },
12248 { MAC_ADDR_0_LOW, 0x0000,
12249 0x00000000, 0xffffffff },
12250 { MAC_RX_MTU_SIZE, 0x0000,
12251 0x00000000, 0x0000ffff },
12252 { MAC_TX_MODE, 0x0000,
12253 0x00000000, 0x00000070 },
12254 { MAC_TX_LENGTHS, 0x0000,
12255 0x00000000, 0x00003fff },
12256 { MAC_RX_MODE, TG3_FL_NOT_5705,
12257 0x00000000, 0x000007fc },
12258 { MAC_RX_MODE, TG3_FL_5705,
12259 0x00000000, 0x000007dc },
12260 { MAC_HASH_REG_0, 0x0000,
12261 0x00000000, 0xffffffff },
12262 { MAC_HASH_REG_1, 0x0000,
12263 0x00000000, 0xffffffff },
12264 { MAC_HASH_REG_2, 0x0000,
12265 0x00000000, 0xffffffff },
12266 { MAC_HASH_REG_3, 0x0000,
12267 0x00000000, 0xffffffff },
12268
12269 /* Receive Data and Receive BD Initiator Control Registers. */
12270 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12271 0x00000000, 0xffffffff },
12272 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12273 0x00000000, 0xffffffff },
12274 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12275 0x00000000, 0x00000003 },
12276 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12277 0x00000000, 0xffffffff },
12278 { RCVDBDI_STD_BD+0, 0x0000,
12279 0x00000000, 0xffffffff },
12280 { RCVDBDI_STD_BD+4, 0x0000,
12281 0x00000000, 0xffffffff },
12282 { RCVDBDI_STD_BD+8, 0x0000,
12283 0x00000000, 0xffff0002 },
12284 { RCVDBDI_STD_BD+0xc, 0x0000,
12285 0x00000000, 0xffffffff },
12286
12287 /* Receive BD Initiator Control Registers. */
12288 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12289 0x00000000, 0xffffffff },
12290 { RCVBDI_STD_THRESH, TG3_FL_5705,
12291 0x00000000, 0x000003ff },
12292 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12293 0x00000000, 0xffffffff },
12294
12295 /* Host Coalescing Control Registers. */
12296 { HOSTCC_MODE, TG3_FL_NOT_5705,
12297 0x00000000, 0x00000004 },
12298 { HOSTCC_MODE, TG3_FL_5705,
12299 0x00000000, 0x000000f6 },
12300 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12301 0x00000000, 0xffffffff },
12302 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12303 0x00000000, 0x000003ff },
12304 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12305 0x00000000, 0xffffffff },
12306 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12307 0x00000000, 0x000003ff },
12308 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12309 0x00000000, 0xffffffff },
12310 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12311 0x00000000, 0x000000ff },
12312 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12313 0x00000000, 0xffffffff },
12314 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12315 0x00000000, 0x000000ff },
12316 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12317 0x00000000, 0xffffffff },
12318 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12319 0x00000000, 0xffffffff },
12320 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12321 0x00000000, 0xffffffff },
12322 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12323 0x00000000, 0x000000ff },
12324 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12325 0x00000000, 0xffffffff },
12326 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12327 0x00000000, 0x000000ff },
12328 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12329 0x00000000, 0xffffffff },
12330 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12331 0x00000000, 0xffffffff },
12332 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12333 0x00000000, 0xffffffff },
12334 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12335 0x00000000, 0xffffffff },
12336 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12337 0x00000000, 0xffffffff },
12338 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12339 0xffffffff, 0x00000000 },
12340 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12341 0xffffffff, 0x00000000 },
12342
12343 /* Buffer Manager Control Registers. */
12344 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12345 0x00000000, 0x007fff80 },
12346 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12347 0x00000000, 0x007fffff },
12348 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12349 0x00000000, 0x0000003f },
12350 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12351 0x00000000, 0x000001ff },
12352 { BUFMGR_MB_HIGH_WATER, 0x0000,
12353 0x00000000, 0x000001ff },
12354 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12355 0xffffffff, 0x00000000 },
12356 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12357 0xffffffff, 0x00000000 },
12358
12359 /* Mailbox Registers */
12360 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12361 0x00000000, 0x000001ff },
12362 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12363 0x00000000, 0x000001ff },
12364 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12365 0x00000000, 0x000007ff },
12366 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12367 0x00000000, 0x000001ff },
12368
12369 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12370 };
12371
12372 is_5705 = is_5750 = 0;
12373 if (tg3_flag(tp, 5705_PLUS)) {
12374 is_5705 = 1;
12375 if (tg3_flag(tp, 5750_PLUS))
12376 is_5750 = 1;
12377 }
12378
12379 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12380 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12381 continue;
12382
12383 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12384 continue;
12385
12386 if (tg3_flag(tp, IS_5788) &&
12387 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12388 continue;
12389
12390 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12391 continue;
12392
12393 offset = (u32) reg_tbl[i].offset;
12394 read_mask = reg_tbl[i].read_mask;
12395 write_mask = reg_tbl[i].write_mask;
12396
12397 /* Save the original register content */
12398 save_val = tr32(offset);
12399
12400 /* Determine the read-only value. */
12401 read_val = save_val & read_mask;
12402
12403 /* Write zero to the register, then make sure the read-only bits
12404 * are not changed and the read/write bits are all zeros.
12405 */
12406 tw32(offset, 0);
12407
12408 val = tr32(offset);
12409
12410 /* Test the read-only and read/write bits. */
12411 if (((val & read_mask) != read_val) || (val & write_mask))
12412 goto out;
12413
12414 		/* Write ones to all the bits defined by read_mask and
12415 		 * write_mask, then make sure the read-only bits are not
12416 		 * changed and the read/write bits are all ones.
12417 		 */
12418 tw32(offset, read_mask | write_mask);
12419
12420 val = tr32(offset);
12421
12422 /* Test the read-only bits. */
12423 if ((val & read_mask) != read_val)
12424 goto out;
12425
12426 /* Test the read/write bits. */
12427 if ((val & write_mask) != write_mask)
12428 goto out;
12429
12430 tw32(offset, save_val);
12431 }
12432
12433 return 0;
12434
12435 out:
12436 if (netif_msg_hw(tp))
12437 netdev_err(tp->dev,
12438 "Register test failed at offset %x\n", offset);
12439 tw32(offset, save_val);
12440 return -EIO;
12441 }
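
/* Worked example of the mask scheme above: MAC_ADDR_0_HIGH is listed
 * with read_mask 0x00000000 and write_mask 0x0000ffff. Writing 0 must
 * read back with every write_mask bit clear; writing
 * read_mask | write_mask = 0x0000ffff must read back with all sixteen
 * low bits set. Any other result fails the register test at that
 * offset.
 */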
12442
12443 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12444 {
12445 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12446 int i;
12447 u32 j;
12448
12449 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12450 for (j = 0; j < len; j += 4) {
12451 u32 val;
12452
12453 tg3_write_mem(tp, offset + j, test_pattern[i]);
12454 tg3_read_mem(tp, offset + j, &val);
12455 if (val != test_pattern[i])
12456 return -EIO;
12457 }
12458 }
12459 return 0;
12460 }
12461
12462 static int tg3_test_memory(struct tg3 *tp)
12463 {
12464 static struct mem_entry {
12465 u32 offset;
12466 u32 len;
12467 } mem_tbl_570x[] = {
12468 { 0x00000000, 0x00b50},
12469 { 0x00002000, 0x1c000},
12470 { 0xffffffff, 0x00000}
12471 }, mem_tbl_5705[] = {
12472 { 0x00000100, 0x0000c},
12473 { 0x00000200, 0x00008},
12474 { 0x00004000, 0x00800},
12475 { 0x00006000, 0x01000},
12476 { 0x00008000, 0x02000},
12477 { 0x00010000, 0x0e000},
12478 { 0xffffffff, 0x00000}
12479 }, mem_tbl_5755[] = {
12480 { 0x00000200, 0x00008},
12481 { 0x00004000, 0x00800},
12482 { 0x00006000, 0x00800},
12483 { 0x00008000, 0x02000},
12484 { 0x00010000, 0x0c000},
12485 { 0xffffffff, 0x00000}
12486 }, mem_tbl_5906[] = {
12487 { 0x00000200, 0x00008},
12488 { 0x00004000, 0x00400},
12489 { 0x00006000, 0x00400},
12490 { 0x00008000, 0x01000},
12491 { 0x00010000, 0x01000},
12492 { 0xffffffff, 0x00000}
12493 }, mem_tbl_5717[] = {
12494 { 0x00000200, 0x00008},
12495 { 0x00010000, 0x0a000},
12496 { 0x00020000, 0x13c00},
12497 { 0xffffffff, 0x00000}
12498 }, mem_tbl_57765[] = {
12499 { 0x00000200, 0x00008},
12500 { 0x00004000, 0x00800},
12501 { 0x00006000, 0x09800},
12502 { 0x00010000, 0x0a000},
12503 { 0xffffffff, 0x00000}
12504 };
12505 struct mem_entry *mem_tbl;
12506 int err = 0;
12507 int i;
12508
12509 if (tg3_flag(tp, 5717_PLUS))
12510 mem_tbl = mem_tbl_5717;
12511 else if (tg3_flag(tp, 57765_CLASS) ||
12512 tg3_asic_rev(tp) == ASIC_REV_5762)
12513 mem_tbl = mem_tbl_57765;
12514 else if (tg3_flag(tp, 5755_PLUS))
12515 mem_tbl = mem_tbl_5755;
12516 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12517 mem_tbl = mem_tbl_5906;
12518 else if (tg3_flag(tp, 5705_PLUS))
12519 mem_tbl = mem_tbl_5705;
12520 else
12521 mem_tbl = mem_tbl_570x;
12522
12523 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12524 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12525 if (err)
12526 break;
12527 }
12528
12529 return err;
12530 }
12531
12532 #define TG3_TSO_MSS 500
12533
12534 #define TG3_TSO_IP_HDR_LEN 20
12535 #define TG3_TSO_TCP_HDR_LEN 20
12536 #define TG3_TSO_TCP_OPT_LEN 12
12537
12538 static const u8 tg3_tso_header[] = {
12539 0x08, 0x00,
12540 0x45, 0x00, 0x00, 0x00,
12541 0x00, 0x00, 0x40, 0x00,
12542 0x40, 0x06, 0x00, 0x00,
12543 0x0a, 0x00, 0x00, 0x01,
12544 0x0a, 0x00, 0x00, 0x02,
12545 0x0d, 0x00, 0xe0, 0x00,
12546 0x00, 0x00, 0x01, 0x00,
12547 0x00, 0x00, 0x02, 0x00,
12548 0x80, 0x10, 0x10, 0x00,
12549 0x14, 0x09, 0x00, 0x00,
12550 0x01, 0x01, 0x08, 0x0a,
12551 0x11, 0x11, 0x11, 0x11,
12552 0x11, 0x11, 0x11, 0x11,
12553 };
12554
12555 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12556 {
12557 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12558 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12559 u32 budget;
12560 struct sk_buff *skb;
12561 u8 *tx_data, *rx_data;
12562 dma_addr_t map;
12563 int num_pkts, tx_len, rx_len, i, err;
12564 struct tg3_rx_buffer_desc *desc;
12565 struct tg3_napi *tnapi, *rnapi;
12566 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12567
12568 tnapi = &tp->napi[0];
12569 rnapi = &tp->napi[0];
12570 if (tp->irq_cnt > 1) {
12571 if (tg3_flag(tp, ENABLE_RSS))
12572 rnapi = &tp->napi[1];
12573 if (tg3_flag(tp, ENABLE_TSS))
12574 tnapi = &tp->napi[1];
12575 }
12576 coal_now = tnapi->coal_now | rnapi->coal_now;
12577
12578 err = -EIO;
12579
12580 tx_len = pktsz;
12581 skb = netdev_alloc_skb(tp->dev, tx_len);
12582 if (!skb)
12583 return -ENOMEM;
12584
12585 tx_data = skb_put(skb, tx_len);
12586 memcpy(tx_data, tp->dev->dev_addr, 6);
12587 memset(tx_data + 6, 0x0, 8);
12588
12589 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12590
12591 if (tso_loopback) {
12592 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12593
12594 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12595 TG3_TSO_TCP_OPT_LEN;
12596
12597 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12598 sizeof(tg3_tso_header));
12599 mss = TG3_TSO_MSS;
12600
12601 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12602 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12603
12604 /* Set the total length field in the IP header */
12605 iph->tot_len = htons((u16)(mss + hdr_len));
12606
12607 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12608 TXD_FLAG_CPU_POST_DMA);
12609
12610 if (tg3_flag(tp, HW_TSO_1) ||
12611 tg3_flag(tp, HW_TSO_2) ||
12612 tg3_flag(tp, HW_TSO_3)) {
12613 struct tcphdr *th;
12614 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12615 th = (struct tcphdr *)&tx_data[val];
12616 th->check = 0;
12617 } else
12618 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12619
12620 if (tg3_flag(tp, HW_TSO_3)) {
12621 mss |= (hdr_len & 0xc) << 12;
12622 if (hdr_len & 0x10)
12623 base_flags |= 0x00000010;
12624 base_flags |= (hdr_len & 0x3e0) << 5;
12625 } else if (tg3_flag(tp, HW_TSO_2))
12626 mss |= hdr_len << 9;
12627 else if (tg3_flag(tp, HW_TSO_1) ||
12628 tg3_asic_rev(tp) == ASIC_REV_5705) {
12629 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12630 } else {
12631 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12632 }
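
		/* Worked example of the encodings above: with the default
		 * test header, hdr_len = 20 + 20 + 12 = 52 (0x34).
		 * HW_TSO_2 devices carry the whole header length in the
		 * mss field (52 << 9), while HW_TSO_3 splits it:
		 * (0x34 & 0xc) << 12 lands in mss, bit 4 of hdr_len sets
		 * base_flags bit 4, and (0x34 & 0x3e0) << 5 is merged
		 * into base_flags.
		 */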
12633
12634 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12635 } else {
12636 num_pkts = 1;
12637 data_off = ETH_HLEN;
12638
12639 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12640 tx_len > VLAN_ETH_FRAME_LEN)
12641 base_flags |= TXD_FLAG_JMB_PKT;
12642 }
12643
12644 for (i = data_off; i < tx_len; i++)
12645 tx_data[i] = (u8) (i & 0xff);
12646
12647 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12648 if (pci_dma_mapping_error(tp->pdev, map)) {
12649 dev_kfree_skb(skb);
12650 return -EIO;
12651 }
12652
12653 val = tnapi->tx_prod;
12654 tnapi->tx_buffers[val].skb = skb;
12655 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12656
12657 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12658 rnapi->coal_now);
12659
12660 udelay(10);
12661
12662 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12663
12664 budget = tg3_tx_avail(tnapi);
12665 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12666 base_flags | TXD_FLAG_END, mss, 0)) {
12667 tnapi->tx_buffers[val].skb = NULL;
12668 dev_kfree_skb(skb);
12669 return -EIO;
12670 }
12671
12672 tnapi->tx_prod++;
12673
12674 /* Sync BD data before updating mailbox */
12675 wmb();
12676
12677 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12678 tr32_mailbox(tnapi->prodmbox);
12679
12680 udelay(10);
12681
12682 	/* Poll for up to 350 usec to allow enough time on some 10/100 Mbps devices. */
12683 for (i = 0; i < 35; i++) {
12684 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12685 coal_now);
12686
12687 udelay(10);
12688
12689 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12690 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12691 if ((tx_idx == tnapi->tx_prod) &&
12692 (rx_idx == (rx_start_idx + num_pkts)))
12693 break;
12694 }
12695
12696 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12697 dev_kfree_skb(skb);
12698
12699 if (tx_idx != tnapi->tx_prod)
12700 goto out;
12701
12702 if (rx_idx != rx_start_idx + num_pkts)
12703 goto out;
12704
12705 val = data_off;
12706 while (rx_idx != rx_start_idx) {
12707 desc = &rnapi->rx_rcb[rx_start_idx++];
12708 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12709 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12710
12711 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12712 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12713 goto out;
12714
12715 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12716 - ETH_FCS_LEN;
12717
12718 if (!tso_loopback) {
12719 if (rx_len != tx_len)
12720 goto out;
12721
12722 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12723 if (opaque_key != RXD_OPAQUE_RING_STD)
12724 goto out;
12725 } else {
12726 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12727 goto out;
12728 }
12729 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12730 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12731 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12732 goto out;
12733 }
12734
12735 if (opaque_key == RXD_OPAQUE_RING_STD) {
12736 rx_data = tpr->rx_std_buffers[desc_idx].data;
12737 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12738 mapping);
12739 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12740 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12741 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12742 mapping);
12743 } else
12744 goto out;
12745
12746 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12747 PCI_DMA_FROMDEVICE);
12748
12749 rx_data += TG3_RX_OFFSET(tp);
12750 for (i = data_off; i < rx_len; i++, val++) {
12751 if (*(rx_data + i) != (u8) (val & 0xff))
12752 goto out;
12753 }
12754 }
12755
12756 err = 0;
12757
12758 /* tg3_free_rings will unmap and free the rx_data */
12759 out:
12760 return err;
12761 }
12762
12763 #define TG3_STD_LOOPBACK_FAILED 1
12764 #define TG3_JMB_LOOPBACK_FAILED 2
12765 #define TG3_TSO_LOOPBACK_FAILED 4
12766 #define TG3_LOOPBACK_FAILED \
12767 (TG3_STD_LOOPBACK_FAILED | \
12768 TG3_JMB_LOOPBACK_FAILED | \
12769 TG3_TSO_LOOPBACK_FAILED)
12770
12771 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12772 {
12773 int err = -EIO;
12774 u32 eee_cap;
12775 u32 jmb_pkt_sz = 9000;
12776
12777 if (tp->dma_limit)
12778 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12779
12780 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12781 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12782
12783 if (!netif_running(tp->dev)) {
12784 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12785 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12786 if (do_extlpbk)
12787 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12788 goto done;
12789 }
12790
12791 err = tg3_reset_hw(tp, 1);
12792 if (err) {
12793 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12794 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12795 if (do_extlpbk)
12796 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12797 goto done;
12798 }
12799
12800 if (tg3_flag(tp, ENABLE_RSS)) {
12801 int i;
12802
12803 /* Reroute all rx packets to the 1st queue */
12804 for (i = MAC_RSS_INDIR_TBL_0;
12805 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12806 tw32(i, 0x0);
12807 }
12808
12809 	/* HW erratum - MAC loopback fails in some cases on 5780.
12810 	 * Normal traffic and PHY loopback are not affected by
12811 	 * this erratum.  Also, the MAC loopback test is deprecated
12812 	 * for all newer ASIC revisions.
12813 	 */
12814 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12815 !tg3_flag(tp, CPMU_PRESENT)) {
12816 tg3_mac_loopback(tp, true);
12817
12818 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12819 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12820
12821 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12822 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12823 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12824
12825 tg3_mac_loopback(tp, false);
12826 }
12827
12828 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12829 !tg3_flag(tp, USE_PHYLIB)) {
12830 int i;
12831
12832 tg3_phy_lpbk_set(tp, 0, false);
12833
12834 /* Wait for link */
12835 for (i = 0; i < 100; i++) {
12836 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12837 break;
12838 mdelay(1);
12839 }
12840
12841 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12842 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12843 if (tg3_flag(tp, TSO_CAPABLE) &&
12844 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12845 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12846 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12847 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12848 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12849
12850 if (do_extlpbk) {
12851 tg3_phy_lpbk_set(tp, 0, true);
12852
12853 /* All link indications report up, but the hardware
12854 * isn't really ready for about 20 msec. Double it
12855 * to be sure.
12856 */
12857 mdelay(40);
12858
12859 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12860 data[TG3_EXT_LOOPB_TEST] |=
12861 TG3_STD_LOOPBACK_FAILED;
12862 if (tg3_flag(tp, TSO_CAPABLE) &&
12863 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12864 data[TG3_EXT_LOOPB_TEST] |=
12865 TG3_TSO_LOOPBACK_FAILED;
12866 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12867 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12868 data[TG3_EXT_LOOPB_TEST] |=
12869 TG3_JMB_LOOPBACK_FAILED;
12870 }
12871
12872 /* Re-enable gphy autopowerdown. */
12873 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12874 tg3_phy_toggle_apd(tp, true);
12875 }
12876
12877 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12878 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12879
12880 done:
12881 tp->phy_flags |= eee_cap;
12882
12883 return err;
12884 }
12885
12886 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12887 u64 *data)
12888 {
12889 struct tg3 *tp = netdev_priv(dev);
12890 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12891
12892 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12893 tg3_power_up(tp)) {
12894 etest->flags |= ETH_TEST_FL_FAILED;
12895 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12896 return;
12897 }
12898
12899 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12900
12901 if (tg3_test_nvram(tp) != 0) {
12902 etest->flags |= ETH_TEST_FL_FAILED;
12903 data[TG3_NVRAM_TEST] = 1;
12904 }
12905 if (!doextlpbk && tg3_test_link(tp)) {
12906 etest->flags |= ETH_TEST_FL_FAILED;
12907 data[TG3_LINK_TEST] = 1;
12908 }
12909 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12910 int err, err2 = 0, irq_sync = 0;
12911
12912 if (netif_running(dev)) {
12913 tg3_phy_stop(tp);
12914 tg3_netif_stop(tp);
12915 irq_sync = 1;
12916 }
12917
12918 tg3_full_lock(tp, irq_sync);
12919 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12920 err = tg3_nvram_lock(tp);
12921 tg3_halt_cpu(tp, RX_CPU_BASE);
12922 if (!tg3_flag(tp, 5705_PLUS))
12923 tg3_halt_cpu(tp, TX_CPU_BASE);
12924 if (!err)
12925 tg3_nvram_unlock(tp);
12926
12927 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12928 tg3_phy_reset(tp);
12929
12930 if (tg3_test_registers(tp) != 0) {
12931 etest->flags |= ETH_TEST_FL_FAILED;
12932 data[TG3_REGISTER_TEST] = 1;
12933 }
12934
12935 if (tg3_test_memory(tp) != 0) {
12936 etest->flags |= ETH_TEST_FL_FAILED;
12937 data[TG3_MEMORY_TEST] = 1;
12938 }
12939
12940 if (doextlpbk)
12941 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12942
12943 if (tg3_test_loopback(tp, data, doextlpbk))
12944 etest->flags |= ETH_TEST_FL_FAILED;
12945
12946 tg3_full_unlock(tp);
12947
12948 if (tg3_test_interrupt(tp) != 0) {
12949 etest->flags |= ETH_TEST_FL_FAILED;
12950 data[TG3_INTERRUPT_TEST] = 1;
12951 }
12952
12953 tg3_full_lock(tp, 0);
12954
12955 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12956 if (netif_running(dev)) {
12957 tg3_flag_set(tp, INIT_COMPLETE);
12958 err2 = tg3_restart_hw(tp, 1);
12959 if (!err2)
12960 tg3_netif_start(tp);
12961 }
12962
12963 tg3_full_unlock(tp);
12964
12965 if (irq_sync && !err2)
12966 tg3_phy_start(tp);
12967 }
12968 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12969 tg3_power_down(tp);
12971 }
12972
12973 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12974 struct ifreq *ifr, int cmd)
12975 {
12976 struct tg3 *tp = netdev_priv(dev);
12977 struct hwtstamp_config stmpconf;
12978
12979 if (!tg3_flag(tp, PTP_CAPABLE))
12980 return -EINVAL;
12981
12982 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12983 return -EFAULT;
12984
12985 if (stmpconf.flags)
12986 return -EINVAL;
12987
12988 switch (stmpconf.tx_type) {
12989 case HWTSTAMP_TX_ON:
12990 tg3_flag_set(tp, TX_TSTAMP_EN);
12991 break;
12992 case HWTSTAMP_TX_OFF:
12993 tg3_flag_clear(tp, TX_TSTAMP_EN);
12994 break;
12995 default:
12996 return -ERANGE;
12997 }
12998
12999 switch (stmpconf.rx_filter) {
13000 case HWTSTAMP_FILTER_NONE:
13001 tp->rxptpctl = 0;
13002 break;
13003 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13004 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13005 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13006 break;
13007 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13008 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13009 TG3_RX_PTP_CTL_SYNC_EVNT;
13010 break;
13011 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13012 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13013 TG3_RX_PTP_CTL_DELAY_REQ;
13014 break;
13015 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13016 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13017 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13018 break;
13019 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13020 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13021 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13022 break;
13023 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13024 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13025 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13026 break;
13027 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13028 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13029 TG3_RX_PTP_CTL_SYNC_EVNT;
13030 break;
13031 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13032 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13033 TG3_RX_PTP_CTL_SYNC_EVNT;
13034 break;
13035 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13036 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13037 TG3_RX_PTP_CTL_SYNC_EVNT;
13038 break;
13039 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13040 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13041 TG3_RX_PTP_CTL_DELAY_REQ;
13042 break;
13043 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13044 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13045 TG3_RX_PTP_CTL_DELAY_REQ;
13046 break;
13047 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13048 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13049 TG3_RX_PTP_CTL_DELAY_REQ;
13050 break;
13051 default:
13052 return -ERANGE;
13053 }
13054
13055 if (netif_running(dev) && tp->rxptpctl)
13056 tw32(TG3_RX_PTP_CTL,
13057 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13058
13059 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13060 -EFAULT : 0;
13061 }
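
/* User-space usage sketch (the standard SIOCSHWTSTAMP flow, nothing
 * tg3-specific):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * With those values the switch above enables TX timestamping and
 * programs TG3_RX_PTP_CTL to stamp all PTPv2 event packets.
 */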
13062
13063 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13064 {
13065 struct mii_ioctl_data *data = if_mii(ifr);
13066 struct tg3 *tp = netdev_priv(dev);
13067 int err;
13068
13069 if (tg3_flag(tp, USE_PHYLIB)) {
13070 struct phy_device *phydev;
13071 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13072 return -EAGAIN;
13073 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13074 return phy_mii_ioctl(phydev, ifr, cmd);
13075 }
13076
13077 switch (cmd) {
13078 case SIOCGMIIPHY:
13079 data->phy_id = tp->phy_addr;
13080
13081 /* fallthru */
13082 case SIOCGMIIREG: {
13083 u32 mii_regval;
13084
13085 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13086 break; /* We have no PHY */
13087
13088 if (!netif_running(dev))
13089 return -EAGAIN;
13090
13091 spin_lock_bh(&tp->lock);
13092 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13093 data->reg_num & 0x1f, &mii_regval);
13094 spin_unlock_bh(&tp->lock);
13095
13096 data->val_out = mii_regval;
13097
13098 return err;
13099 }
13100
13101 case SIOCSMIIREG:
13102 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13103 break; /* We have no PHY */
13104
13105 if (!netif_running(dev))
13106 return -EAGAIN;
13107
13108 spin_lock_bh(&tp->lock);
13109 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13110 data->reg_num & 0x1f, data->val_in);
13111 spin_unlock_bh(&tp->lock);
13112
13113 return err;
13114
13115 case SIOCSHWTSTAMP:
13116 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13117
13118 default:
13119 /* do nothing */
13120 break;
13121 }
13122 return -EOPNOTSUPP;
13123 }
13124
13125 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13126 {
13127 struct tg3 *tp = netdev_priv(dev);
13128
13129 memcpy(ec, &tp->coal, sizeof(*ec));
13130 return 0;
13131 }
13132
13133 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13134 {
13135 struct tg3 *tp = netdev_priv(dev);
13136 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13137 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13138
13139 if (!tg3_flag(tp, 5705_PLUS)) {
13140 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13141 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13142 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13143 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13144 }
13145
13146 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13147 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13148 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13149 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13150 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13151 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13152 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13153 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13154 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13155 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13156 return -EINVAL;
13157
13158 /* No rx interrupts will be generated if both are zero */
13159 if ((ec->rx_coalesce_usecs == 0) &&
13160 (ec->rx_max_coalesced_frames == 0))
13161 return -EINVAL;
13162
13163 /* No tx interrupts will be generated if both are zero */
13164 if ((ec->tx_coalesce_usecs == 0) &&
13165 (ec->tx_max_coalesced_frames == 0))
13166 return -EINVAL;
13167
13168 /* Only copy relevant parameters, ignore all others. */
13169 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13170 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13171 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13172 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13173 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13174 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13175 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13176 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13177 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13178
13179 if (netif_running(dev)) {
13180 tg3_full_lock(tp, 0);
13181 __tg3_set_coalesce(tp, &tp->coal);
13182 tg3_full_unlock(tp);
13183 }
13184 return 0;
13185 }
13186
13187 static const struct ethtool_ops tg3_ethtool_ops = {
13188 .get_settings = tg3_get_settings,
13189 .set_settings = tg3_set_settings,
13190 .get_drvinfo = tg3_get_drvinfo,
13191 .get_regs_len = tg3_get_regs_len,
13192 .get_regs = tg3_get_regs,
13193 .get_wol = tg3_get_wol,
13194 .set_wol = tg3_set_wol,
13195 .get_msglevel = tg3_get_msglevel,
13196 .set_msglevel = tg3_set_msglevel,
13197 .nway_reset = tg3_nway_reset,
13198 .get_link = ethtool_op_get_link,
13199 .get_eeprom_len = tg3_get_eeprom_len,
13200 .get_eeprom = tg3_get_eeprom,
13201 .set_eeprom = tg3_set_eeprom,
13202 .get_ringparam = tg3_get_ringparam,
13203 .set_ringparam = tg3_set_ringparam,
13204 .get_pauseparam = tg3_get_pauseparam,
13205 .set_pauseparam = tg3_set_pauseparam,
13206 .self_test = tg3_self_test,
13207 .get_strings = tg3_get_strings,
13208 .set_phys_id = tg3_set_phys_id,
13209 .get_ethtool_stats = tg3_get_ethtool_stats,
13210 .get_coalesce = tg3_get_coalesce,
13211 .set_coalesce = tg3_set_coalesce,
13212 .get_sset_count = tg3_get_sset_count,
13213 .get_rxnfc = tg3_get_rxnfc,
13214 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13215 .get_rxfh_indir = tg3_get_rxfh_indir,
13216 .set_rxfh_indir = tg3_set_rxfh_indir,
13217 .get_channels = tg3_get_channels,
13218 .set_channels = tg3_set_channels,
13219 .get_ts_info = tg3_get_ts_info,
13220 };
13221
13222 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13223 struct rtnl_link_stats64 *stats)
13224 {
13225 struct tg3 *tp = netdev_priv(dev);
13226
13227 spin_lock_bh(&tp->lock);
13228 if (!tp->hw_stats) {
13229 spin_unlock_bh(&tp->lock);
13230 return &tp->net_stats_prev;
13231 }
13232
13233 tg3_get_nstats(tp, stats);
13234 spin_unlock_bh(&tp->lock);
13235
13236 return stats;
13237 }
13238
13239 static void tg3_set_rx_mode(struct net_device *dev)
13240 {
13241 struct tg3 *tp = netdev_priv(dev);
13242
13243 if (!netif_running(dev))
13244 return;
13245
13246 tg3_full_lock(tp, 0);
13247 __tg3_set_rx_mode(dev);
13248 tg3_full_unlock(tp);
13249 }
13250
13251 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13252 int new_mtu)
13253 {
13254 dev->mtu = new_mtu;
13255
13256 if (new_mtu > ETH_DATA_LEN) {
13257 if (tg3_flag(tp, 5780_CLASS)) {
13258 netdev_update_features(dev);
13259 tg3_flag_clear(tp, TSO_CAPABLE);
13260 } else {
13261 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13262 }
13263 } else {
13264 if (tg3_flag(tp, 5780_CLASS)) {
13265 tg3_flag_set(tp, TSO_CAPABLE);
13266 netdev_update_features(dev);
13267 }
13268 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13269 }
13270 }
13271
13272 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13273 {
13274 struct tg3 *tp = netdev_priv(dev);
13275 int err, reset_phy = 0;
13276
13277 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13278 return -EINVAL;
13279
13280 if (!netif_running(dev)) {
13281 		/* We'll just catch it later when the
13282 		 * device is brought up.
13283 		 */
13284 tg3_set_mtu(dev, tp, new_mtu);
13285 return 0;
13286 }
13287
13288 tg3_phy_stop(tp);
13289
13290 tg3_netif_stop(tp);
13291
13292 tg3_full_lock(tp, 1);
13293
13294 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13295
13296 tg3_set_mtu(dev, tp, new_mtu);
13297
13298 	/* Reset PHY, otherwise the read DMA engine will stay in a mode that
13299 	 * splits all DMA read requests into 256-byte chunks.
13300 	 */
13301 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13302 reset_phy = 1;
13303
13304 err = tg3_restart_hw(tp, reset_phy);
13305
13306 if (!err)
13307 tg3_netif_start(tp);
13308
13309 tg3_full_unlock(tp);
13310
13311 if (!err)
13312 tg3_phy_start(tp);
13313
13314 return err;
13315 }
13316
13317 static const struct net_device_ops tg3_netdev_ops = {
13318 .ndo_open = tg3_open,
13319 .ndo_stop = tg3_close,
13320 .ndo_start_xmit = tg3_start_xmit,
13321 .ndo_get_stats64 = tg3_get_stats64,
13322 .ndo_validate_addr = eth_validate_addr,
13323 .ndo_set_rx_mode = tg3_set_rx_mode,
13324 .ndo_set_mac_address = tg3_set_mac_addr,
13325 .ndo_do_ioctl = tg3_ioctl,
13326 .ndo_tx_timeout = tg3_tx_timeout,
13327 .ndo_change_mtu = tg3_change_mtu,
13328 .ndo_fix_features = tg3_fix_features,
13329 .ndo_set_features = tg3_set_features,
13330 #ifdef CONFIG_NET_POLL_CONTROLLER
13331 .ndo_poll_controller = tg3_poll_controller,
13332 #endif
13333 };
13334
13335 static void tg3_get_eeprom_size(struct tg3 *tp)
13336 {
13337 u32 cursize, val, magic;
13338
13339 tp->nvram_size = EEPROM_CHIP_SIZE;
13340
13341 if (tg3_nvram_read(tp, 0, &magic) != 0)
13342 return;
13343
13344 if ((magic != TG3_EEPROM_MAGIC) &&
13345 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13346 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13347 return;
13348
13349 /*
13350 * Size the chip by reading offsets at increasing powers of two.
13351 * When we encounter our validation signature, we know the addressing
13352 * has wrapped around, and thus have our chip size.
13353 */
13354 cursize = 0x10;
13355
13356 while (cursize < tp->nvram_size) {
13357 if (tg3_nvram_read(tp, cursize, &val) != 0)
13358 return;
13359
13360 if (val == magic)
13361 break;
13362
13363 cursize <<= 1;
13364 }
13365
13366 tp->nvram_size = cursize;
13367 }
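
/* Worked example of the probe above (assuming no data word other than
 * offset 0 matches the signature): on a 512-byte EEPROM, reads at
 * offsets 0x10, 0x20, ..., 0x100 return ordinary data; the read at
 * 0x200 wraps back to offset 0 and returns the magic value, so the
 * loop exits with cursize == 512 as the chip size.
 */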
13368
13369 static void tg3_get_nvram_size(struct tg3 *tp)
13370 {
13371 u32 val;
13372
13373 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13374 return;
13375
13376 /* Selfboot format */
13377 if (val != TG3_EEPROM_MAGIC) {
13378 tg3_get_eeprom_size(tp);
13379 return;
13380 }
13381
13382 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13383 if (val != 0) {
13384 /* This is confusing. We want to operate on the
13385 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13386 * call will read from NVRAM and byteswap the data
13387 * according to the byteswapping settings for all
13388 * other register accesses. This ensures the data we
13389 * want will always reside in the lower 16-bits.
13390 * However, the data in NVRAM is in LE format, which
13391 * means the data from the NVRAM read will always be
13392 * opposite the endianness of the CPU. The 16-bit
13393 * byteswap then brings the data to CPU endianness.
13394 */
13395 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13396 return;
13397 }
13398 }
13399 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13400 }
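
/* Worked example of the swab above (per the block comment): if the part
 * stores a size of 512 (in KB, little endian) at offset 0xf2, a
 * little-endian CPU sees val & 0xffff == 0x0002 after the register-style
 * read, and swab16(0x0002) == 0x0200 == 512 recovers the real value,
 * giving tp->nvram_size = 512 * 1024.
 */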
13401
13402 static void tg3_get_nvram_info(struct tg3 *tp)
13403 {
13404 u32 nvcfg1;
13405
13406 nvcfg1 = tr32(NVRAM_CFG1);
13407 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13408 tg3_flag_set(tp, FLASH);
13409 } else {
13410 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13411 tw32(NVRAM_CFG1, nvcfg1);
13412 }
13413
13414 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13415 tg3_flag(tp, 5780_CLASS)) {
13416 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13417 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13418 tp->nvram_jedecnum = JEDEC_ATMEL;
13419 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13420 tg3_flag_set(tp, NVRAM_BUFFERED);
13421 break;
13422 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13423 tp->nvram_jedecnum = JEDEC_ATMEL;
13424 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13425 break;
13426 case FLASH_VENDOR_ATMEL_EEPROM:
13427 tp->nvram_jedecnum = JEDEC_ATMEL;
13428 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13429 tg3_flag_set(tp, NVRAM_BUFFERED);
13430 break;
13431 case FLASH_VENDOR_ST:
13432 tp->nvram_jedecnum = JEDEC_ST;
13433 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13434 tg3_flag_set(tp, NVRAM_BUFFERED);
13435 break;
13436 case FLASH_VENDOR_SAIFUN:
13437 tp->nvram_jedecnum = JEDEC_SAIFUN;
13438 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13439 break;
13440 case FLASH_VENDOR_SST_SMALL:
13441 case FLASH_VENDOR_SST_LARGE:
13442 tp->nvram_jedecnum = JEDEC_SST;
13443 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13444 break;
13445 }
13446 } else {
13447 tp->nvram_jedecnum = JEDEC_ATMEL;
13448 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13449 tg3_flag_set(tp, NVRAM_BUFFERED);
13450 }
13451 }
13452
13453 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13454 {
13455 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13456 case FLASH_5752PAGE_SIZE_256:
13457 tp->nvram_pagesize = 256;
13458 break;
13459 case FLASH_5752PAGE_SIZE_512:
13460 tp->nvram_pagesize = 512;
13461 break;
13462 case FLASH_5752PAGE_SIZE_1K:
13463 tp->nvram_pagesize = 1024;
13464 break;
13465 case FLASH_5752PAGE_SIZE_2K:
13466 tp->nvram_pagesize = 2048;
13467 break;
13468 case FLASH_5752PAGE_SIZE_4K:
13469 tp->nvram_pagesize = 4096;
13470 break;
13471 case FLASH_5752PAGE_SIZE_264:
13472 tp->nvram_pagesize = 264;
13473 break;
13474 case FLASH_5752PAGE_SIZE_528:
13475 tp->nvram_pagesize = 528;
13476 break;
13477 }
13478 }
13479
13480 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13481 {
13482 u32 nvcfg1;
13483
13484 nvcfg1 = tr32(NVRAM_CFG1);
13485
13486 /* NVRAM protection for TPM */
13487 if (nvcfg1 & (1 << 27))
13488 tg3_flag_set(tp, PROTECTED_NVRAM);
13489
13490 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13491 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13492 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13493 tp->nvram_jedecnum = JEDEC_ATMEL;
13494 tg3_flag_set(tp, NVRAM_BUFFERED);
13495 break;
13496 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13497 tp->nvram_jedecnum = JEDEC_ATMEL;
13498 tg3_flag_set(tp, NVRAM_BUFFERED);
13499 tg3_flag_set(tp, FLASH);
13500 break;
13501 case FLASH_5752VENDOR_ST_M45PE10:
13502 case FLASH_5752VENDOR_ST_M45PE20:
13503 case FLASH_5752VENDOR_ST_M45PE40:
13504 tp->nvram_jedecnum = JEDEC_ST;
13505 tg3_flag_set(tp, NVRAM_BUFFERED);
13506 tg3_flag_set(tp, FLASH);
13507 break;
13508 }
13509
13510 if (tg3_flag(tp, FLASH)) {
13511 tg3_nvram_get_pagesize(tp, nvcfg1);
13512 } else {
13513 /* For eeprom, set pagesize to maximum eeprom size */
13514 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13515
13516 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13517 tw32(NVRAM_CFG1, nvcfg1);
13518 }
13519 }
13520
13521 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13522 {
13523 u32 nvcfg1, protect = 0;
13524
13525 nvcfg1 = tr32(NVRAM_CFG1);
13526
13527 /* NVRAM protection for TPM */
13528 if (nvcfg1 & (1 << 27)) {
13529 tg3_flag_set(tp, PROTECTED_NVRAM);
13530 protect = 1;
13531 }
13532
13533 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13534 switch (nvcfg1) {
13535 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13536 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13537 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13538 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13539 tp->nvram_jedecnum = JEDEC_ATMEL;
13540 tg3_flag_set(tp, NVRAM_BUFFERED);
13541 tg3_flag_set(tp, FLASH);
13542 tp->nvram_pagesize = 264;
13543 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13544 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13545 tp->nvram_size = (protect ? 0x3e200 :
13546 TG3_NVRAM_SIZE_512KB);
13547 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13548 tp->nvram_size = (protect ? 0x1f200 :
13549 TG3_NVRAM_SIZE_256KB);
13550 else
13551 tp->nvram_size = (protect ? 0x1f200 :
13552 TG3_NVRAM_SIZE_128KB);
13553 break;
13554 case FLASH_5752VENDOR_ST_M45PE10:
13555 case FLASH_5752VENDOR_ST_M45PE20:
13556 case FLASH_5752VENDOR_ST_M45PE40:
13557 tp->nvram_jedecnum = JEDEC_ST;
13558 tg3_flag_set(tp, NVRAM_BUFFERED);
13559 tg3_flag_set(tp, FLASH);
13560 tp->nvram_pagesize = 256;
13561 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13562 tp->nvram_size = (protect ?
13563 TG3_NVRAM_SIZE_64KB :
13564 TG3_NVRAM_SIZE_128KB);
13565 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13566 tp->nvram_size = (protect ?
13567 TG3_NVRAM_SIZE_64KB :
13568 TG3_NVRAM_SIZE_256KB);
13569 else
13570 tp->nvram_size = (protect ?
13571 TG3_NVRAM_SIZE_128KB :
13572 TG3_NVRAM_SIZE_512KB);
13573 break;
13574 }
13575 }
13576
13577 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13578 {
13579 u32 nvcfg1;
13580
13581 nvcfg1 = tr32(NVRAM_CFG1);
13582
13583 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13584 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13585 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13586 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13587 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13588 tp->nvram_jedecnum = JEDEC_ATMEL;
13589 tg3_flag_set(tp, NVRAM_BUFFERED);
13590 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13591
13592 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13593 tw32(NVRAM_CFG1, nvcfg1);
13594 break;
13595 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13596 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13597 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13598 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13599 tp->nvram_jedecnum = JEDEC_ATMEL;
13600 tg3_flag_set(tp, NVRAM_BUFFERED);
13601 tg3_flag_set(tp, FLASH);
13602 tp->nvram_pagesize = 264;
13603 break;
13604 case FLASH_5752VENDOR_ST_M45PE10:
13605 case FLASH_5752VENDOR_ST_M45PE20:
13606 case FLASH_5752VENDOR_ST_M45PE40:
13607 tp->nvram_jedecnum = JEDEC_ST;
13608 tg3_flag_set(tp, NVRAM_BUFFERED);
13609 tg3_flag_set(tp, FLASH);
13610 tp->nvram_pagesize = 256;
13611 break;
13612 }
13613 }
13614
13615 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13616 {
13617 u32 nvcfg1, protect = 0;
13618
13619 nvcfg1 = tr32(NVRAM_CFG1);
13620
13621 /* NVRAM protection for TPM */
13622 if (nvcfg1 & (1 << 27)) {
13623 tg3_flag_set(tp, PROTECTED_NVRAM);
13624 protect = 1;
13625 }
13626
13627 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13628 switch (nvcfg1) {
13629 case FLASH_5761VENDOR_ATMEL_ADB021D:
13630 case FLASH_5761VENDOR_ATMEL_ADB041D:
13631 case FLASH_5761VENDOR_ATMEL_ADB081D:
13632 case FLASH_5761VENDOR_ATMEL_ADB161D:
13633 case FLASH_5761VENDOR_ATMEL_MDB021D:
13634 case FLASH_5761VENDOR_ATMEL_MDB041D:
13635 case FLASH_5761VENDOR_ATMEL_MDB081D:
13636 case FLASH_5761VENDOR_ATMEL_MDB161D:
13637 tp->nvram_jedecnum = JEDEC_ATMEL;
13638 tg3_flag_set(tp, NVRAM_BUFFERED);
13639 tg3_flag_set(tp, FLASH);
13640 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13641 tp->nvram_pagesize = 256;
13642 break;
13643 case FLASH_5761VENDOR_ST_A_M45PE20:
13644 case FLASH_5761VENDOR_ST_A_M45PE40:
13645 case FLASH_5761VENDOR_ST_A_M45PE80:
13646 case FLASH_5761VENDOR_ST_A_M45PE16:
13647 case FLASH_5761VENDOR_ST_M_M45PE20:
13648 case FLASH_5761VENDOR_ST_M_M45PE40:
13649 case FLASH_5761VENDOR_ST_M_M45PE80:
13650 case FLASH_5761VENDOR_ST_M_M45PE16:
13651 tp->nvram_jedecnum = JEDEC_ST;
13652 tg3_flag_set(tp, NVRAM_BUFFERED);
13653 tg3_flag_set(tp, FLASH);
13654 tp->nvram_pagesize = 256;
13655 break;
13656 }
13657
13658 if (protect) {
13659 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13660 } else {
13661 switch (nvcfg1) {
13662 case FLASH_5761VENDOR_ATMEL_ADB161D:
13663 case FLASH_5761VENDOR_ATMEL_MDB161D:
13664 case FLASH_5761VENDOR_ST_A_M45PE16:
13665 case FLASH_5761VENDOR_ST_M_M45PE16:
13666 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13667 break;
13668 case FLASH_5761VENDOR_ATMEL_ADB081D:
13669 case FLASH_5761VENDOR_ATMEL_MDB081D:
13670 case FLASH_5761VENDOR_ST_A_M45PE80:
13671 case FLASH_5761VENDOR_ST_M_M45PE80:
13672 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13673 break;
13674 case FLASH_5761VENDOR_ATMEL_ADB041D:
13675 case FLASH_5761VENDOR_ATMEL_MDB041D:
13676 case FLASH_5761VENDOR_ST_A_M45PE40:
13677 case FLASH_5761VENDOR_ST_M_M45PE40:
13678 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13679 break;
13680 case FLASH_5761VENDOR_ATMEL_ADB021D:
13681 case FLASH_5761VENDOR_ATMEL_MDB021D:
13682 case FLASH_5761VENDOR_ST_A_M45PE20:
13683 case FLASH_5761VENDOR_ST_M_M45PE20:
13684 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13685 break;
13686 }
13687 }
13688 }
13689
13690 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13691 {
13692 tp->nvram_jedecnum = JEDEC_ATMEL;
13693 tg3_flag_set(tp, NVRAM_BUFFERED);
13694 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13695 }
13696
13697 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13698 {
13699 u32 nvcfg1;
13700
13701 nvcfg1 = tr32(NVRAM_CFG1);
13702
13703 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13704 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13705 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13706 tp->nvram_jedecnum = JEDEC_ATMEL;
13707 tg3_flag_set(tp, NVRAM_BUFFERED);
13708 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13709
13710 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13711 tw32(NVRAM_CFG1, nvcfg1);
13712 return;
13713 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13714 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13715 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13716 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13717 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13718 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13719 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13720 tp->nvram_jedecnum = JEDEC_ATMEL;
13721 tg3_flag_set(tp, NVRAM_BUFFERED);
13722 tg3_flag_set(tp, FLASH);
13723
13724 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13725 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13726 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13727 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13728 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13729 break;
13730 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13731 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13732 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13733 break;
13734 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13735 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13736 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13737 break;
13738 }
13739 break;
13740 case FLASH_5752VENDOR_ST_M45PE10:
13741 case FLASH_5752VENDOR_ST_M45PE20:
13742 case FLASH_5752VENDOR_ST_M45PE40:
13743 tp->nvram_jedecnum = JEDEC_ST;
13744 tg3_flag_set(tp, NVRAM_BUFFERED);
13745 tg3_flag_set(tp, FLASH);
13746
13747 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13748 case FLASH_5752VENDOR_ST_M45PE10:
13749 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13750 break;
13751 case FLASH_5752VENDOR_ST_M45PE20:
13752 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13753 break;
13754 case FLASH_5752VENDOR_ST_M45PE40:
13755 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13756 break;
13757 }
13758 break;
13759 default:
13760 tg3_flag_set(tp, NO_NVRAM);
13761 return;
13762 }
13763
13764 tg3_nvram_get_pagesize(tp, nvcfg1);
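/* 264- and 528-byte pages are the native page sizes of Atmel AT45DB
 * DataFlash parts, whose non-power-of-two pages need the driver's
 * extra NVRAM address translation; any other page size is addressed
 * linearly, so it is flagged NO_NVRAM_ADDR_TRANS below.
 */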
13765 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13766 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13767 }
13768
13769
13770 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13771 {
13772 u32 nvcfg1;
13773
13774 nvcfg1 = tr32(NVRAM_CFG1);
13775
13776 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13777 case FLASH_5717VENDOR_ATMEL_EEPROM:
13778 case FLASH_5717VENDOR_MICRO_EEPROM:
13779 tp->nvram_jedecnum = JEDEC_ATMEL;
13780 tg3_flag_set(tp, NVRAM_BUFFERED);
13781 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13782
13783 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13784 tw32(NVRAM_CFG1, nvcfg1);
13785 return;
13786 case FLASH_5717VENDOR_ATMEL_MDB011D:
13787 case FLASH_5717VENDOR_ATMEL_ADB011B:
13788 case FLASH_5717VENDOR_ATMEL_ADB011D:
13789 case FLASH_5717VENDOR_ATMEL_MDB021D:
13790 case FLASH_5717VENDOR_ATMEL_ADB021B:
13791 case FLASH_5717VENDOR_ATMEL_ADB021D:
13792 case FLASH_5717VENDOR_ATMEL_45USPT:
13793 tp->nvram_jedecnum = JEDEC_ATMEL;
13794 tg3_flag_set(tp, NVRAM_BUFFERED);
13795 tg3_flag_set(tp, FLASH);
13796
13797 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13798 case FLASH_5717VENDOR_ATMEL_MDB021D:
13799 /* Detect size with tg3_nvram_get_size() */
13800 break;
13801 case FLASH_5717VENDOR_ATMEL_ADB021B:
13802 case FLASH_5717VENDOR_ATMEL_ADB021D:
13803 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13804 break;
13805 default:
13806 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13807 break;
13808 }
13809 break;
13810 case FLASH_5717VENDOR_ST_M_M25PE10:
13811 case FLASH_5717VENDOR_ST_A_M25PE10:
13812 case FLASH_5717VENDOR_ST_M_M45PE10:
13813 case FLASH_5717VENDOR_ST_A_M45PE10:
13814 case FLASH_5717VENDOR_ST_M_M25PE20:
13815 case FLASH_5717VENDOR_ST_A_M25PE20:
13816 case FLASH_5717VENDOR_ST_M_M45PE20:
13817 case FLASH_5717VENDOR_ST_A_M45PE20:
13818 case FLASH_5717VENDOR_ST_25USPT:
13819 case FLASH_5717VENDOR_ST_45USPT:
13820 tp->nvram_jedecnum = JEDEC_ST;
13821 tg3_flag_set(tp, NVRAM_BUFFERED);
13822 tg3_flag_set(tp, FLASH);
13823
13824 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13825 case FLASH_5717VENDOR_ST_M_M25PE20:
13826 case FLASH_5717VENDOR_ST_M_M45PE20:
13827 /* Detect size with tg3_nvram_get_size() */
13828 break;
13829 case FLASH_5717VENDOR_ST_A_M25PE20:
13830 case FLASH_5717VENDOR_ST_A_M45PE20:
13831 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13832 break;
13833 default:
13834 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13835 break;
13836 }
13837 break;
13838 default:
13839 tg3_flag_set(tp, NO_NVRAM);
13840 return;
13841 }
13842
13843 tg3_nvram_get_pagesize(tp, nvcfg1);
13844 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13845 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13846 }
13847
13848 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13849 {
13850 u32 nvcfg1, nvmpinstrp;
13851
13852 nvcfg1 = tr32(NVRAM_CFG1);
13853 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13854
13855 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13856 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13857 tg3_flag_set(tp, NO_NVRAM);
13858 return;
13859 }
13860
13861 switch (nvmpinstrp) {
13862 case FLASH_5762_EEPROM_HD:
13863 nvmpinstrp = FLASH_5720_EEPROM_HD;
13864 break;
13865 case FLASH_5762_EEPROM_LD:
13866 nvmpinstrp = FLASH_5720_EEPROM_LD;
13867 break;
13868 }
13869 }
13870
13871 switch (nvmpinstrp) {
13872 case FLASH_5720_EEPROM_HD:
13873 case FLASH_5720_EEPROM_LD:
13874 tp->nvram_jedecnum = JEDEC_ATMEL;
13875 tg3_flag_set(tp, NVRAM_BUFFERED);
13876
13877 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13878 tw32(NVRAM_CFG1, nvcfg1);
13879 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13880 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13881 else
13882 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13883 return;
13884 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13885 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13886 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13887 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13888 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13889 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13890 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13891 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13892 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13893 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13894 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13895 case FLASH_5720VENDOR_ATMEL_45USPT:
13896 tp->nvram_jedecnum = JEDEC_ATMEL;
13897 tg3_flag_set(tp, NVRAM_BUFFERED);
13898 tg3_flag_set(tp, FLASH);
13899
13900 switch (nvmpinstrp) {
13901 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13902 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13903 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13904 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13905 break;
13906 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13907 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13908 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13909 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13910 break;
13911 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13912 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13913 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13914 break;
13915 default:
13916 if (tg3_asic_rev(tp) != ASIC_REV_5762)
13917 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13918 break;
13919 }
13920 break;
13921 case FLASH_5720VENDOR_M_ST_M25PE10:
13922 case FLASH_5720VENDOR_M_ST_M45PE10:
13923 case FLASH_5720VENDOR_A_ST_M25PE10:
13924 case FLASH_5720VENDOR_A_ST_M45PE10:
13925 case FLASH_5720VENDOR_M_ST_M25PE20:
13926 case FLASH_5720VENDOR_M_ST_M45PE20:
13927 case FLASH_5720VENDOR_A_ST_M25PE20:
13928 case FLASH_5720VENDOR_A_ST_M45PE20:
13929 case FLASH_5720VENDOR_M_ST_M25PE40:
13930 case FLASH_5720VENDOR_M_ST_M45PE40:
13931 case FLASH_5720VENDOR_A_ST_M25PE40:
13932 case FLASH_5720VENDOR_A_ST_M45PE40:
13933 case FLASH_5720VENDOR_M_ST_M25PE80:
13934 case FLASH_5720VENDOR_M_ST_M45PE80:
13935 case FLASH_5720VENDOR_A_ST_M25PE80:
13936 case FLASH_5720VENDOR_A_ST_M45PE80:
13937 case FLASH_5720VENDOR_ST_25USPT:
13938 case FLASH_5720VENDOR_ST_45USPT:
13939 tp->nvram_jedecnum = JEDEC_ST;
13940 tg3_flag_set(tp, NVRAM_BUFFERED);
13941 tg3_flag_set(tp, FLASH);
13942
13943 switch (nvmpinstrp) {
13944 case FLASH_5720VENDOR_M_ST_M25PE20:
13945 case FLASH_5720VENDOR_M_ST_M45PE20:
13946 case FLASH_5720VENDOR_A_ST_M25PE20:
13947 case FLASH_5720VENDOR_A_ST_M45PE20:
13948 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13949 break;
13950 case FLASH_5720VENDOR_M_ST_M25PE40:
13951 case FLASH_5720VENDOR_M_ST_M45PE40:
13952 case FLASH_5720VENDOR_A_ST_M25PE40:
13953 case FLASH_5720VENDOR_A_ST_M45PE40:
13954 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13955 break;
13956 case FLASH_5720VENDOR_M_ST_M25PE80:
13957 case FLASH_5720VENDOR_M_ST_M45PE80:
13958 case FLASH_5720VENDOR_A_ST_M25PE80:
13959 case FLASH_5720VENDOR_A_ST_M45PE80:
13960 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13961 break;
13962 default:
13963 if (tg3_asic_rev(tp) != ASIC_REV_5762)
13964 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13965 break;
13966 }
13967 break;
13968 default:
13969 tg3_flag_set(tp, NO_NVRAM);
13970 return;
13971 }
13972
13973 tg3_nvram_get_pagesize(tp, nvcfg1);
13974 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13975 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13976
13977 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13978 u32 val;
13979
13980 if (tg3_nvram_read(tp, 0, &val))
13981 return;
13982
13983 if (val != TG3_EEPROM_MAGIC &&
13984 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13985 tg3_flag_set(tp, NO_NVRAM);
13986 }
13987 }
13988
13989 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13990 static void tg3_nvram_init(struct tg3 *tp)
13991 {
13992 if (tg3_flag(tp, IS_SSB_CORE)) {
13993 		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
13994 tg3_flag_clear(tp, NVRAM);
13995 tg3_flag_clear(tp, NVRAM_BUFFERED);
13996 tg3_flag_set(tp, NO_NVRAM);
13997 return;
13998 }
13999
14000 tw32_f(GRC_EEPROM_ADDR,
14001 (EEPROM_ADDR_FSM_RESET |
14002 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14003 EEPROM_ADDR_CLKPERD_SHIFT)));
14004
14005 msleep(1);
14006
14007 /* Enable seeprom accesses. */
14008 tw32_f(GRC_LOCAL_CTRL,
14009 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14010 udelay(100);
14011
14012 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14013 tg3_asic_rev(tp) != ASIC_REV_5701) {
14014 tg3_flag_set(tp, NVRAM);
14015
14016 if (tg3_nvram_lock(tp)) {
14017 netdev_warn(tp->dev,
14018 "Cannot get nvram lock, %s failed\n",
14019 __func__);
14020 return;
14021 }
14022 tg3_enable_nvram_access(tp);
14023
14024 tp->nvram_size = 0;
14025
14026 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14027 tg3_get_5752_nvram_info(tp);
14028 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14029 tg3_get_5755_nvram_info(tp);
14030 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14031 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14032 tg3_asic_rev(tp) == ASIC_REV_5785)
14033 tg3_get_5787_nvram_info(tp);
14034 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14035 tg3_get_5761_nvram_info(tp);
14036 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14037 tg3_get_5906_nvram_info(tp);
14038 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14039 tg3_flag(tp, 57765_CLASS))
14040 tg3_get_57780_nvram_info(tp);
14041 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14042 tg3_asic_rev(tp) == ASIC_REV_5719)
14043 tg3_get_5717_nvram_info(tp);
14044 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14045 tg3_asic_rev(tp) == ASIC_REV_5762)
14046 tg3_get_5720_nvram_info(tp);
14047 else
14048 tg3_get_nvram_info(tp);
14049
14050 if (tp->nvram_size == 0)
14051 tg3_get_nvram_size(tp);
14052
14053 tg3_disable_nvram_access(tp);
14054 tg3_nvram_unlock(tp);
14055
14056 } else {
14057 tg3_flag_clear(tp, NVRAM);
14058 tg3_flag_clear(tp, NVRAM_BUFFERED);
14059
14060 tg3_get_eeprom_size(tp);
14061 }
14062 }
14063
14064 struct subsys_tbl_ent {
14065 u16 subsys_vendor, subsys_devid;
14066 u32 phy_id;
14067 };
14068
14069 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14070 /* Broadcom boards. */
14071 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14072 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14073 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14074 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14075 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14076 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14077 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14078 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14079 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14080 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14081 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14082 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14083 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14084 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14085 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14086 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14087 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14088 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14089 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14090 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14091 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14092 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14093
14094 /* 3com boards. */
14095 { TG3PCI_SUBVENDOR_ID_3COM,
14096 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14097 { TG3PCI_SUBVENDOR_ID_3COM,
14098 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14099 { TG3PCI_SUBVENDOR_ID_3COM,
14100 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14101 { TG3PCI_SUBVENDOR_ID_3COM,
14102 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14103 { TG3PCI_SUBVENDOR_ID_3COM,
14104 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14105
14106 /* DELL boards. */
14107 { TG3PCI_SUBVENDOR_ID_DELL,
14108 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14109 { TG3PCI_SUBVENDOR_ID_DELL,
14110 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14111 { TG3PCI_SUBVENDOR_ID_DELL,
14112 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14113 { TG3PCI_SUBVENDOR_ID_DELL,
14114 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14115
14116 /* Compaq boards. */
14117 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14118 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14119 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14120 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14121 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14122 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14123 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14124 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14125 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14126 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14127
14128 /* IBM boards. */
14129 { TG3PCI_SUBVENDOR_ID_IBM,
14130 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14131 };
14132
14133 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14134 {
14135 int i;
14136
14137 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14138 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14139 tp->pdev->subsystem_vendor) &&
14140 (subsys_id_to_phy_id[i].subsys_devid ==
14141 tp->pdev->subsystem_device))
14142 return &subsys_id_to_phy_id[i];
14143 }
14144 return NULL;
14145 }
14146
14147 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14148 {
14149 u32 val;
14150
14151 tp->phy_id = TG3_PHY_ID_INVALID;
14152 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14153
14154 	/* Assume an onboard, WOL-capable device by default. */
14155 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14156 tg3_flag_set(tp, WOL_CAP);
14157
14158 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14159 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14160 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14161 tg3_flag_set(tp, IS_NIC);
14162 }
14163 val = tr32(VCPU_CFGSHDW);
14164 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14165 tg3_flag_set(tp, ASPM_WORKAROUND);
14166 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14167 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14168 tg3_flag_set(tp, WOL_ENABLE);
14169 device_set_wakeup_enable(&tp->pdev->dev, true);
14170 }
14171 goto done;
14172 }
14173
14174 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14175 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14176 u32 nic_cfg, led_cfg;
14177 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14178 int eeprom_phy_serdes = 0;
14179
14180 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14181 tp->nic_sram_data_cfg = nic_cfg;
14182
14183 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14184 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14185 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14186 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14187 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14188 (ver > 0) && (ver < 0x100))
14189 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14190
14191 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14192 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14193
14194 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14195 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14196 eeprom_phy_serdes = 1;
14197
14198 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14199 if (nic_phy_id != 0) {
14200 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14201 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14202
14203 eeprom_phy_id = (id1 >> 16) << 10;
14204 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14205 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14206 } else
14207 eeprom_phy_id = 0;
14208
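/* The NIC_SRAM_DATA_PHY_ID word packs MII PHYSID1 into its high 16
 * bits and PHYSID2 into its low 16 bits (an assumption based on the
 * two masks above); the shifts repack those halves into the driver's
 * internal PHY ID format, matching what tg3_phy_probe() later builds
 * from the MII_PHYSID1/MII_PHYSID2 registers.
 */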
14209 tp->phy_id = eeprom_phy_id;
14210 if (eeprom_phy_serdes) {
14211 if (!tg3_flag(tp, 5705_PLUS))
14212 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14213 else
14214 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14215 }
14216
14217 if (tg3_flag(tp, 5750_PLUS))
14218 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14219 SHASTA_EXT_LED_MODE_MASK);
14220 else
14221 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14222
14223 switch (led_cfg) {
14224 default:
14225 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14226 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14227 break;
14228
14229 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14230 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14231 break;
14232
14233 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14234 tp->led_ctrl = LED_CTRL_MODE_MAC;
14235
14236 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14237 * read on some older 5700/5701 bootcode.
14238 */
14239 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14240 tg3_asic_rev(tp) == ASIC_REV_5701)
14241 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14242
14243 break;
14244
14245 case SHASTA_EXT_LED_SHARED:
14246 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14247 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14248 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14249 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14250 LED_CTRL_MODE_PHY_2);
14251 break;
14252
14253 case SHASTA_EXT_LED_MAC:
14254 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14255 break;
14256
14257 case SHASTA_EXT_LED_COMBO:
14258 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14259 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14260 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14261 LED_CTRL_MODE_PHY_2);
14262 break;
14263
14264 }
14265
14266 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14267 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14268 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14269 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14270
14271 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14272 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14273
14274 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14275 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14276 if ((tp->pdev->subsystem_vendor ==
14277 PCI_VENDOR_ID_ARIMA) &&
14278 (tp->pdev->subsystem_device == 0x205a ||
14279 tp->pdev->subsystem_device == 0x2063))
14280 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14281 } else {
14282 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14283 tg3_flag_set(tp, IS_NIC);
14284 }
14285
14286 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14287 tg3_flag_set(tp, ENABLE_ASF);
14288 if (tg3_flag(tp, 5750_PLUS))
14289 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14290 }
14291
14292 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14293 tg3_flag(tp, 5750_PLUS))
14294 tg3_flag_set(tp, ENABLE_APE);
14295
14296 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14297 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14298 tg3_flag_clear(tp, WOL_CAP);
14299
14300 if (tg3_flag(tp, WOL_CAP) &&
14301 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14302 tg3_flag_set(tp, WOL_ENABLE);
14303 device_set_wakeup_enable(&tp->pdev->dev, true);
14304 }
14305
14306 if (cfg2 & (1 << 17))
14307 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14308
14309 		/* SerDes signal pre-emphasis in register 0x590 is set by the
14310 		 * bootcode if bit 18 is set. */
14311 if (cfg2 & (1 << 18))
14312 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14313
14314 if ((tg3_flag(tp, 57765_PLUS) ||
14315 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14316 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14317 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14318 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14319
14320 if (tg3_flag(tp, PCI_EXPRESS) &&
14321 tg3_asic_rev(tp) != ASIC_REV_5785 &&
14322 !tg3_flag(tp, 57765_PLUS)) {
14323 u32 cfg3;
14324
14325 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14326 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14327 tg3_flag_set(tp, ASPM_WORKAROUND);
14328 }
14329
14330 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14331 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14332 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14333 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14334 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14335 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14336 }
14337 done:
14338 if (tg3_flag(tp, WOL_CAP))
14339 device_set_wakeup_enable(&tp->pdev->dev,
14340 tg3_flag(tp, WOL_ENABLE));
14341 else
14342 device_set_wakeup_capable(&tp->pdev->dev, false);
14343 }
14344
14345 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14346 {
14347 int i, err;
14348 u32 val2, off = offset * 8;
14349
14350 err = tg3_nvram_lock(tp);
14351 if (err)
14352 return err;
14353
14354 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14355 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14356 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14357 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14358 udelay(10);
14359
14360 for (i = 0; i < 100; i++) {
14361 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14362 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14363 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14364 break;
14365 }
14366 udelay(10);
14367 }
14368
14369 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14370
14371 tg3_nvram_unlock(tp);
14372 if (val2 & APE_OTP_STATUS_CMD_DONE)
14373 return 0;
14374
14375 return -EBUSY;
14376 }
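/* Usage sketch (see tg3_read_otp_ver() later in this file):
 *
 *	u32 val;
 *
 *	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val))
 *		...use val, the 32-bit OTP word at that offset...
 *
 * A zero return means APE_OTP_STATUS_CMD_DONE was observed within the
 * ~1 ms polling window (100 iterations of udelay(10)); -EBUSY means
 * the command timed out.
 */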
14377
14378 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14379 {
14380 int i;
14381 u32 val;
14382
14383 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14384 tw32(OTP_CTRL, cmd);
14385
14386 /* Wait for up to 1 ms for command to execute. */
14387 for (i = 0; i < 100; i++) {
14388 val = tr32(OTP_STATUS);
14389 if (val & OTP_STATUS_CMD_DONE)
14390 break;
14391 udelay(10);
14392 }
14393
14394 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14395 }
14396
14397 /* Read the gphy configuration from the OTP region of the chip. The gphy
14398 * configuration is a 32-bit value that straddles the alignment boundary.
14399 * We do two 32-bit reads and then shift and merge the results.
14400 */
14401 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14402 {
14403 u32 bhalf_otp, thalf_otp;
14404
14405 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14406
14407 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14408 return 0;
14409
14410 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14411
14412 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14413 return 0;
14414
14415 thalf_otp = tr32(OTP_READ_DATA);
14416
14417 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14418
14419 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14420 return 0;
14421
14422 bhalf_otp = tr32(OTP_READ_DATA);
14423
14424 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14425 }
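/* A worked example of the shift-and-merge above, with hypothetical OTP
 * contents: if the first read returns thalf_otp = 0x1111aaaa and the
 * second returns bhalf_otp = 0xbbbb2222, the function returns
 *
 *	((0x1111aaaa & 0x0000ffff) << 16) | (0xbbbb2222 >> 16) == 0xaaaabbbb
 *
 * i.e. the low half of the first word glued to the high half of the
 * second.
 */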
14426
14427 static void tg3_phy_init_link_config(struct tg3 *tp)
14428 {
14429 u32 adv = ADVERTISED_Autoneg;
14430
14431 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14432 adv |= ADVERTISED_1000baseT_Half |
14433 ADVERTISED_1000baseT_Full;
14434
14435 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14436 adv |= ADVERTISED_100baseT_Half |
14437 ADVERTISED_100baseT_Full |
14438 ADVERTISED_10baseT_Half |
14439 ADVERTISED_10baseT_Full |
14440 ADVERTISED_TP;
14441 else
14442 adv |= ADVERTISED_FIBRE;
14443
14444 tp->link_config.advertising = adv;
14445 tp->link_config.speed = SPEED_UNKNOWN;
14446 tp->link_config.duplex = DUPLEX_UNKNOWN;
14447 tp->link_config.autoneg = AUTONEG_ENABLE;
14448 tp->link_config.active_speed = SPEED_UNKNOWN;
14449 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14450
14451 tp->old_link = -1;
14452 }
14453
14454 static int tg3_phy_probe(struct tg3 *tp)
14455 {
14456 u32 hw_phy_id_1, hw_phy_id_2;
14457 u32 hw_phy_id, hw_phy_id_masked;
14458 int err;
14459
14460 /* flow control autonegotiation is default behavior */
14461 tg3_flag_set(tp, PAUSE_AUTONEG);
14462 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14463
14464 if (tg3_flag(tp, ENABLE_APE)) {
14465 switch (tp->pci_fn) {
14466 case 0:
14467 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14468 break;
14469 case 1:
14470 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14471 break;
14472 case 2:
14473 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14474 break;
14475 case 3:
14476 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14477 break;
14478 }
14479 }
14480
14481 if (tg3_flag(tp, USE_PHYLIB))
14482 return tg3_phy_init(tp);
14483
14484 /* Reading the PHY ID register can conflict with ASF
14485 * firmware access to the PHY hardware.
14486 */
14487 err = 0;
14488 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14489 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14490 } else {
14491 		/* Now read the physical PHY_ID from the chip and verify
14492 		 * that it is sane. If it doesn't look good, we fall back
14493 		 * to the PHY_ID recorded in the eeprom area, and failing
14494 		 * that to the hard-coded subsystem ID table.
14495 		 */
14496 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14497 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14498
14499 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14500 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14501 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14502
14503 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14504 }
14505
14506 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14507 tp->phy_id = hw_phy_id;
14508 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14509 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14510 else
14511 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14512 } else {
14513 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14514 /* Do nothing, phy ID already set up in
14515 * tg3_get_eeprom_hw_cfg().
14516 */
14517 } else {
14518 struct subsys_tbl_ent *p;
14519
14520 /* No eeprom signature? Try the hardcoded
14521 * subsys device table.
14522 */
14523 p = tg3_lookup_by_subsys(tp);
14524 if (p) {
14525 tp->phy_id = p->phy_id;
14526 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14527 				/* So far we have seen the IDs 0xbc050cd0,
14528 				 * 0xbc050f80 and 0xbc050c30 on devices
14529 				 * connected to a BCM4785, and there are
14530 				 * probably more. For now, just assume that
14531 				 * the phy is supported when it is connected
14532 				 * to an SSB core.
14533 				 */
14534 return -ENODEV;
14535 }
14536
14537 if (!tp->phy_id ||
14538 tp->phy_id == TG3_PHY_ID_BCM8002)
14539 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14540 }
14541 }
14542
14543 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14544 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14545 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14546 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14547 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14548 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14549 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14550 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14551 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14552
14553 tg3_phy_init_link_config(tp);
14554
14555 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14556 !tg3_flag(tp, ENABLE_APE) &&
14557 !tg3_flag(tp, ENABLE_ASF)) {
14558 u32 bmsr, dummy;
14559
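/* MII_BMSR latches link-down events until it is read, so the first
 * read below clears any stale latched status and the second read
 * reflects the current link state.
 */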
14560 tg3_readphy(tp, MII_BMSR, &bmsr);
14561 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14562 (bmsr & BMSR_LSTATUS))
14563 goto skip_phy_reset;
14564
14565 err = tg3_phy_reset(tp);
14566 if (err)
14567 return err;
14568
14569 tg3_phy_set_wirespeed(tp);
14570
14571 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14572 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14573 tp->link_config.flowctrl);
14574
14575 tg3_writephy(tp, MII_BMCR,
14576 BMCR_ANENABLE | BMCR_ANRESTART);
14577 }
14578 }
14579
14580 skip_phy_reset:
14581 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14582 err = tg3_init_5401phy_dsp(tp);
14583 if (err)
14584 return err;
14585
14586 err = tg3_init_5401phy_dsp(tp);
14587 }
14588
14589 return err;
14590 }
14591
14592 static void tg3_read_vpd(struct tg3 *tp)
14593 {
14594 u8 *vpd_data;
14595 unsigned int block_end, rosize, len;
14596 u32 vpdlen;
14597 int j, i = 0;
14598
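/* The PCI VPD read-only section parsed below is a large-resource
 * descriptor followed by keyword fields, roughly:
 *
 *	[0x90][len lo][len hi]		RO data resource header
 *	'M' 'N' [len] <mfr id>		PCI_VPD_RO_KEYWORD_MFR_ID
 *	'V' '0' [len] <vendor data>	PCI_VPD_RO_KEYWORD_VENDOR0
 *	'P' 'N' [len] <part number>	PCI_VPD_RO_KEYWORD_PARTNO
 *
 * Each keyword header is PCI_VPD_INFO_FLD_HDR_SIZE (3) bytes: two
 * keyword characters plus a one-byte length.
 */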
14599 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14600 if (!vpd_data)
14601 goto out_no_vpd;
14602
14603 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14604 if (i < 0)
14605 goto out_not_found;
14606
14607 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14608 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14609 i += PCI_VPD_LRDT_TAG_SIZE;
14610
14611 if (block_end > vpdlen)
14612 goto out_not_found;
14613
14614 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14615 PCI_VPD_RO_KEYWORD_MFR_ID);
14616 if (j > 0) {
14617 len = pci_vpd_info_field_size(&vpd_data[j]);
14618
14619 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14620 if (j + len > block_end || len != 4 ||
14621 memcmp(&vpd_data[j], "1028", 4))
14622 goto partno;
14623
14624 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14625 PCI_VPD_RO_KEYWORD_VENDOR0);
14626 if (j < 0)
14627 goto partno;
14628
14629 len = pci_vpd_info_field_size(&vpd_data[j]);
14630
14631 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14632 if (j + len > block_end)
14633 goto partno;
14634
14635 memcpy(tp->fw_ver, &vpd_data[j], len);
14636 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14637 }
14638
14639 partno:
14640 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14641 PCI_VPD_RO_KEYWORD_PARTNO);
14642 if (i < 0)
14643 goto out_not_found;
14644
14645 len = pci_vpd_info_field_size(&vpd_data[i]);
14646
14647 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14648 if (len > TG3_BPN_SIZE ||
14649 (len + i) > vpdlen)
14650 goto out_not_found;
14651
14652 memcpy(tp->board_part_number, &vpd_data[i], len);
14653
14654 out_not_found:
14655 kfree(vpd_data);
14656 if (tp->board_part_number[0])
14657 return;
14658
14659 out_no_vpd:
14660 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14661 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14662 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14663 strcpy(tp->board_part_number, "BCM5717");
14664 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14665 strcpy(tp->board_part_number, "BCM5718");
14666 else
14667 goto nomatch;
14668 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14669 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14670 strcpy(tp->board_part_number, "BCM57780");
14671 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14672 strcpy(tp->board_part_number, "BCM57760");
14673 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14674 strcpy(tp->board_part_number, "BCM57790");
14675 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14676 strcpy(tp->board_part_number, "BCM57788");
14677 else
14678 goto nomatch;
14679 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14680 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14681 strcpy(tp->board_part_number, "BCM57761");
14682 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14683 strcpy(tp->board_part_number, "BCM57765");
14684 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14685 strcpy(tp->board_part_number, "BCM57781");
14686 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14687 strcpy(tp->board_part_number, "BCM57785");
14688 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14689 strcpy(tp->board_part_number, "BCM57791");
14690 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14691 strcpy(tp->board_part_number, "BCM57795");
14692 else
14693 goto nomatch;
14694 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14695 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14696 strcpy(tp->board_part_number, "BCM57762");
14697 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14698 strcpy(tp->board_part_number, "BCM57766");
14699 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14700 strcpy(tp->board_part_number, "BCM57782");
14701 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14702 strcpy(tp->board_part_number, "BCM57786");
14703 else
14704 goto nomatch;
14705 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14706 strcpy(tp->board_part_number, "BCM95906");
14707 } else {
14708 nomatch:
14709 strcpy(tp->board_part_number, "none");
14710 }
14711 }
14712
14713 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14714 {
14715 u32 val;
14716
14717 if (tg3_nvram_read(tp, offset, &val) ||
14718 (val & 0xfc000000) != 0x0c000000 ||
14719 tg3_nvram_read(tp, offset + 4, &val) ||
14720 val != 0)
14721 return 0;
14722
14723 return 1;
14724 }
14725
14726 static void tg3_read_bc_ver(struct tg3 *tp)
14727 {
14728 u32 val, offset, start, ver_offset;
14729 int i, dst_off;
14730 bool newver = false;
14731
14732 if (tg3_nvram_read(tp, 0xc, &offset) ||
14733 tg3_nvram_read(tp, 0x4, &start))
14734 return;
14735
14736 offset = tg3_nvram_logical_addr(tp, offset);
14737
14738 if (tg3_nvram_read(tp, offset, &val))
14739 return;
14740
14741 if ((val & 0xfc000000) == 0x0c000000) {
14742 if (tg3_nvram_read(tp, offset + 4, &val))
14743 return;
14744
14745 if (val == 0)
14746 newver = true;
14747 }
14748
14749 dst_off = strlen(tp->fw_ver);
14750
14751 if (newver) {
14752 if (TG3_VER_SIZE - dst_off < 16 ||
14753 tg3_nvram_read(tp, offset + 8, &ver_offset))
14754 return;
14755
14756 offset = offset + ver_offset - start;
14757 for (i = 0; i < 16; i += 4) {
14758 __be32 v;
14759 if (tg3_nvram_read_be32(tp, offset + i, &v))
14760 return;
14761
14762 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14763 }
14764 } else {
14765 u32 major, minor;
14766
14767 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14768 return;
14769
14770 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14771 TG3_NVM_BCVER_MAJSFT;
14772 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14773 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14774 "v%d.%02d", major, minor);
14775 }
14776 }
14777
14778 static void tg3_read_hwsb_ver(struct tg3 *tp)
14779 {
14780 u32 val, major, minor;
14781
14782 /* Use native endian representation */
14783 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14784 return;
14785
14786 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14787 TG3_NVM_HWSB_CFG1_MAJSFT;
14788 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14789 TG3_NVM_HWSB_CFG1_MINSFT;
14790
14791 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14792 }
14793
14794 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14795 {
14796 u32 offset, major, minor, build;
14797
14798 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14799
14800 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14801 return;
14802
14803 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14804 case TG3_EEPROM_SB_REVISION_0:
14805 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14806 break;
14807 case TG3_EEPROM_SB_REVISION_2:
14808 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14809 break;
14810 case TG3_EEPROM_SB_REVISION_3:
14811 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14812 break;
14813 case TG3_EEPROM_SB_REVISION_4:
14814 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14815 break;
14816 case TG3_EEPROM_SB_REVISION_5:
14817 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14818 break;
14819 case TG3_EEPROM_SB_REVISION_6:
14820 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14821 break;
14822 default:
14823 return;
14824 }
14825
14826 if (tg3_nvram_read(tp, offset, &val))
14827 return;
14828
14829 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14830 TG3_EEPROM_SB_EDH_BLD_SHFT;
14831 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14832 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14833 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14834
14835 if (minor > 99 || build > 26)
14836 return;
14837
14838 offset = strlen(tp->fw_ver);
14839 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14840 " v%d.%02d", major, minor);
14841
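/* Nonzero builds are appended as a single letter: build 1 becomes 'a',
 * build 26 becomes 'z' (hence the build > 26 guard above). For example,
 * major 5, minor 2, build 3 appends " v5.02c" to the version string.
 */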
14842 if (build > 0) {
14843 offset = strlen(tp->fw_ver);
14844 if (offset < TG3_VER_SIZE - 1)
14845 tp->fw_ver[offset] = 'a' + build - 1;
14846 }
14847 }
14848
14849 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14850 {
14851 u32 val, offset, start;
14852 int i, vlen;
14853
14854 for (offset = TG3_NVM_DIR_START;
14855 offset < TG3_NVM_DIR_END;
14856 offset += TG3_NVM_DIRENT_SIZE) {
14857 if (tg3_nvram_read(tp, offset, &val))
14858 return;
14859
14860 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14861 break;
14862 }
14863
14864 if (offset == TG3_NVM_DIR_END)
14865 return;
14866
14867 if (!tg3_flag(tp, 5705_PLUS))
14868 start = 0x08000000;
14869 else if (tg3_nvram_read(tp, offset - 4, &start))
14870 return;
14871
14872 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14873 !tg3_fw_img_is_valid(tp, offset) ||
14874 tg3_nvram_read(tp, offset + 8, &val))
14875 return;
14876
14877 offset += val - start;
14878
14879 vlen = strlen(tp->fw_ver);
14880
14881 tp->fw_ver[vlen++] = ',';
14882 tp->fw_ver[vlen++] = ' ';
14883
14884 for (i = 0; i < 4; i++) {
14885 __be32 v;
14886 if (tg3_nvram_read_be32(tp, offset, &v))
14887 return;
14888
14889 offset += sizeof(v);
14890
14891 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14892 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14893 break;
14894 }
14895
14896 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14897 vlen += sizeof(v);
14898 }
14899 }
14900
14901 static void tg3_probe_ncsi(struct tg3 *tp)
14902 {
14903 u32 apedata;
14904
14905 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14906 if (apedata != APE_SEG_SIG_MAGIC)
14907 return;
14908
14909 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14910 if (!(apedata & APE_FW_STATUS_READY))
14911 return;
14912
14913 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14914 tg3_flag_set(tp, APE_HAS_NCSI);
14915 }
14916
14917 static void tg3_read_dash_ver(struct tg3 *tp)
14918 {
14919 int vlen;
14920 u32 apedata;
14921 char *fwtype;
14922
14923 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14924
14925 if (tg3_flag(tp, APE_HAS_NCSI))
14926 fwtype = "NCSI";
14927 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14928 fwtype = "SMASH";
14929 else
14930 fwtype = "DASH";
14931
14932 vlen = strlen(tp->fw_ver);
14933
14934 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14935 fwtype,
14936 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14937 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14938 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14939 (apedata & APE_FW_VERSION_BLDMSK));
14940 }
14941
14942 static void tg3_read_otp_ver(struct tg3 *tp)
14943 {
14944 u32 val, val2;
14945
14946 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14947 return;
14948
14949 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14950 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14951 TG3_OTP_MAGIC0_VALID(val)) {
14952 u64 val64 = (u64) val << 32 | val2;
14953 u32 ver = 0;
14954 int i, vlen;
14955
14956 for (i = 0; i < 7; i++) {
14957 if ((val64 & 0xff) == 0)
14958 break;
14959 ver = val64 & 0xff;
14960 val64 >>= 8;
14961 }
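/* ver is now the last non-zero byte found scanning val64 from the
 * least significant end, i.e. the byte just before the first zero
 * byte (or the seventh byte scanned).
 */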
14962 vlen = strlen(tp->fw_ver);
14963 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14964 }
14965 }
14966
14967 static void tg3_read_fw_ver(struct tg3 *tp)
14968 {
14969 u32 val;
14970 bool vpd_vers = false;
14971
14972 if (tp->fw_ver[0] != 0)
14973 vpd_vers = true;
14974
14975 if (tg3_flag(tp, NO_NVRAM)) {
14976 strcat(tp->fw_ver, "sb");
14977 tg3_read_otp_ver(tp);
14978 return;
14979 }
14980
14981 if (tg3_nvram_read(tp, 0, &val))
14982 return;
14983
14984 if (val == TG3_EEPROM_MAGIC)
14985 tg3_read_bc_ver(tp);
14986 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14987 tg3_read_sb_ver(tp, val);
14988 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14989 tg3_read_hwsb_ver(tp);
14990
14991 if (tg3_flag(tp, ENABLE_ASF)) {
14992 if (tg3_flag(tp, ENABLE_APE)) {
14993 tg3_probe_ncsi(tp);
14994 if (!vpd_vers)
14995 tg3_read_dash_ver(tp);
14996 } else if (!vpd_vers) {
14997 tg3_read_mgmtfw_ver(tp);
14998 }
14999 }
15000
15001 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15002 }
15003
15004 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15005 {
15006 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15007 return TG3_RX_RET_MAX_SIZE_5717;
15008 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15009 return TG3_RX_RET_MAX_SIZE_5700;
15010 else
15011 return TG3_RX_RET_MAX_SIZE_5705;
15012 }
15013
15014 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15015 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15016 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15017 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15018 { },
15019 };
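/* This table is matched later in tg3_get_invariants() via
 * pci_dev_present() to detect host bridges known to reorder mailbox
 * writes, which selects the MBOX_WRITE_REORDER read-back workaround.
 */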
15020
15021 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15022 {
15023 struct pci_dev *peer;
15024 unsigned int func, devnr = tp->pdev->devfn & ~7;
15025
15026 for (func = 0; func < 8; func++) {
15027 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15028 if (peer && peer != tp->pdev)
15029 break;
15030 pci_dev_put(peer);
15031 }
15032 	/* The 5704 can be configured in single-port mode; in that case
15033 	 * there is no peer device, so set peer to tp->pdev.
15034 	 */
15035 if (!peer) {
15036 peer = tp->pdev;
15037 return peer;
15038 }
15039
15040 /*
15041 * We don't need to keep the refcount elevated; there's no way
15042 	 * to remove one half of this device without removing the other.
15043 */
15044 pci_dev_put(peer);
15045
15046 return peer;
15047 }
15048
15049 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15050 {
15051 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15052 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15053 u32 reg;
15054
15055 /* All devices that use the alternate
15056 * ASIC REV location have a CPMU.
15057 */
15058 tg3_flag_set(tp, CPMU_PRESENT);
15059
15060 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15061 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15062 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15063 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15064 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15065 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15066 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15067 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15068 reg = TG3PCI_GEN2_PRODID_ASICREV;
15069 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15070 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15071 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15072 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15073 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15074 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15075 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15076 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15077 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15078 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15079 reg = TG3PCI_GEN15_PRODID_ASICREV;
15080 else
15081 reg = TG3PCI_PRODID_ASICREV;
15082
15083 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15084 }
15085
15086 /* Wrong chip ID in 5752 A0. This code can be removed later
15087 * as A0 is not in production.
15088 */
15089 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15090 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15091
15092 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15093 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15094
15095 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15096 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15097 tg3_asic_rev(tp) == ASIC_REV_5720)
15098 tg3_flag_set(tp, 5717_PLUS);
15099
15100 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15101 tg3_asic_rev(tp) == ASIC_REV_57766)
15102 tg3_flag_set(tp, 57765_CLASS);
15103
15104 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15105 tg3_asic_rev(tp) == ASIC_REV_5762)
15106 tg3_flag_set(tp, 57765_PLUS);
15107
15108 /* Intentionally exclude ASIC_REV_5906 */
15109 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15110 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15111 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15112 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15113 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15114 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15115 tg3_flag(tp, 57765_PLUS))
15116 tg3_flag_set(tp, 5755_PLUS);
15117
15118 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15119 tg3_asic_rev(tp) == ASIC_REV_5714)
15120 tg3_flag_set(tp, 5780_CLASS);
15121
15122 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15123 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15124 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15125 tg3_flag(tp, 5755_PLUS) ||
15126 tg3_flag(tp, 5780_CLASS))
15127 tg3_flag_set(tp, 5750_PLUS);
15128
15129 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15130 tg3_flag(tp, 5750_PLUS))
15131 tg3_flag_set(tp, 5705_PLUS);
15132 }
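/* The capability flags set above form a strict hierarchy, each newer
 * class implying all of the older ones:
 *
 *	5717_PLUS / 57765_CLASS / 5762 -> 57765_PLUS -> 5755_PLUS ->
 *		5750_PLUS -> 5705_PLUS
 *
 * with 5780_CLASS also feeding into 5750_PLUS.
 */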
15133
15134 static bool tg3_10_100_only_device(struct tg3 *tp,
15135 const struct pci_device_id *ent)
15136 {
15137 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15138
15139 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15140 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15141 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15142 return true;
15143
15144 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15145 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15146 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15147 return true;
15148 } else {
15149 return true;
15150 }
15151 }
15152
15153 return false;
15154 }
15155
15156 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15157 {
15158 u32 misc_ctrl_reg;
15159 u32 pci_state_reg, grc_misc_cfg;
15160 u32 val;
15161 u16 pci_cmd;
15162 int err;
15163
15164 	/* Force memory write invalidate off. If we leave it on,
15165 	 * then on 5700_BX chips we have to enable a workaround.
15166 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15167 	 * to match the cacheline size. The Broadcom driver has this
15168 	 * workaround but turns MWI off all the time, so it never uses
15169 	 * it. This seems to suggest that the workaround is insufficient.
15170 	 */
15171 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15172 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15173 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15174
15175 /* Important! -- Make sure register accesses are byteswapped
15176 * correctly. Also, for those chips that require it, make
15177 * sure that indirect register accesses are enabled before
15178 * the first operation.
15179 */
15180 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15181 &misc_ctrl_reg);
15182 tp->misc_host_ctrl |= (misc_ctrl_reg &
15183 MISC_HOST_CTRL_CHIPREV);
15184 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15185 tp->misc_host_ctrl);
15186
15187 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15188
15189 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15190 	 * we need to disable PCI memory space and use config cycles
15191 	 * only to access all registers. The 5702/03 chips
15192 * can mistakenly decode the special cycles from the
15193 * ICH chipsets as memory write cycles, causing corruption
15194 * of register and memory space. Only certain ICH bridges
15195 * will drive special cycles with non-zero data during the
15196 * address phase which can fall within the 5703's address
15197 * range. This is not an ICH bug as the PCI spec allows
15198 * non-zero address during special cycles. However, only
15199 * these ICH bridges are known to drive non-zero addresses
15200 * during special cycles.
15201 *
15202 * Since special cycles do not cross PCI bridges, we only
15203 * enable this workaround if the 5703 is on the secondary
15204 * bus of these ICH bridges.
15205 */
15206 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15207 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15208 static struct tg3_dev_id {
15209 u32 vendor;
15210 u32 device;
15211 u32 rev;
15212 } ich_chipsets[] = {
15213 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15214 PCI_ANY_ID },
15215 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15216 PCI_ANY_ID },
15217 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15218 0xa },
15219 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15220 PCI_ANY_ID },
15221 { },
15222 };
15223 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15224 struct pci_dev *bridge = NULL;
15225
15226 while (pci_id->vendor != 0) {
15227 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15228 bridge);
15229 if (!bridge) {
15230 pci_id++;
15231 continue;
15232 }
15233 if (pci_id->rev != PCI_ANY_ID) {
15234 if (bridge->revision > pci_id->rev)
15235 continue;
15236 }
15237 if (bridge->subordinate &&
15238 (bridge->subordinate->number ==
15239 tp->pdev->bus->number)) {
15240 tg3_flag_set(tp, ICH_WORKAROUND);
15241 pci_dev_put(bridge);
15242 break;
15243 }
15244 }
15245 }
15246
15247 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15248 static struct tg3_dev_id {
15249 u32 vendor;
15250 u32 device;
15251 } bridge_chipsets[] = {
15252 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15253 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15254 { },
15255 };
15256 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15257 struct pci_dev *bridge = NULL;
15258
15259 while (pci_id->vendor != 0) {
15260 bridge = pci_get_device(pci_id->vendor,
15261 pci_id->device,
15262 bridge);
15263 if (!bridge) {
15264 pci_id++;
15265 continue;
15266 }
15267 if (bridge->subordinate &&
15268 (bridge->subordinate->number <=
15269 tp->pdev->bus->number) &&
15270 (bridge->subordinate->busn_res.end >=
15271 tp->pdev->bus->number)) {
15272 tg3_flag_set(tp, 5701_DMA_BUG);
15273 pci_dev_put(bridge);
15274 break;
15275 }
15276 }
15277 }
15278
15279 	/* The EPB bridge inside the 5714, 5715, and 5780 cannot support
15280 	 * DMA addresses > 40 bits. This bridge may have additional 57xx
15281 	 * devices behind it in some 4-port NIC designs, for example.
15282 	 * Any tg3 device found behind the bridge will also need the
15283 	 * 40-bit DMA workaround.
15284 	 */
15285 if (tg3_flag(tp, 5780_CLASS)) {
15286 tg3_flag_set(tp, 40BIT_DMA_BUG);
15287 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15288 } else {
15289 struct pci_dev *bridge = NULL;
15290
15291 do {
15292 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15293 PCI_DEVICE_ID_SERVERWORKS_EPB,
15294 bridge);
15295 if (bridge && bridge->subordinate &&
15296 (bridge->subordinate->number <=
15297 tp->pdev->bus->number) &&
15298 (bridge->subordinate->busn_res.end >=
15299 tp->pdev->bus->number)) {
15300 tg3_flag_set(tp, 40BIT_DMA_BUG);
15301 pci_dev_put(bridge);
15302 break;
15303 }
15304 } while (bridge);
15305 }
15306
15307 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15308 tg3_asic_rev(tp) == ASIC_REV_5714)
15309 tp->pdev_peer = tg3_find_peer(tp);
15310
15311 /* Determine TSO capabilities */
15312 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15313 ; /* Do nothing. HW bug. */
15314 else if (tg3_flag(tp, 57765_PLUS))
15315 tg3_flag_set(tp, HW_TSO_3);
15316 else if (tg3_flag(tp, 5755_PLUS) ||
15317 tg3_asic_rev(tp) == ASIC_REV_5906)
15318 tg3_flag_set(tp, HW_TSO_2);
15319 else if (tg3_flag(tp, 5750_PLUS)) {
15320 tg3_flag_set(tp, HW_TSO_1);
15321 tg3_flag_set(tp, TSO_BUG);
15322 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15323 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15324 tg3_flag_clear(tp, TSO_BUG);
15325 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15326 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15327 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15328 tg3_flag_set(tp, FW_TSO);
15329 tg3_flag_set(tp, TSO_BUG);
15330 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15331 tp->fw_needed = FIRMWARE_TG3TSO5;
15332 else
15333 tp->fw_needed = FIRMWARE_TG3TSO;
15334 }
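/* Net effect of the chain above, newest to oldest:
 *
 *	5719 A0			no TSO at all (hardware bug)
 *	57765_PLUS		HW_TSO_3
 *	5755_PLUS or 5906	HW_TSO_2
 *	other 5750_PLUS		HW_TSO_1, with TSO_BUG unless 5750 >= C2
 *	remaining chips		FW_TSO + TSO_BUG + a firmware image,
 *				except the 5700, 5701 and 5705 A0
 */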
15335
15336 /* Selectively allow TSO based on operating conditions */
15337 if (tg3_flag(tp, HW_TSO_1) ||
15338 tg3_flag(tp, HW_TSO_2) ||
15339 tg3_flag(tp, HW_TSO_3) ||
15340 tg3_flag(tp, FW_TSO)) {
15341 /* For firmware TSO, assume ASF is disabled.
15342 * We'll disable TSO later if we discover ASF
15343 * is enabled in tg3_get_eeprom_hw_cfg().
15344 */
15345 tg3_flag_set(tp, TSO_CAPABLE);
15346 } else {
15347 tg3_flag_clear(tp, TSO_CAPABLE);
15348 tg3_flag_clear(tp, TSO_BUG);
15349 tp->fw_needed = NULL;
15350 }
15351
15352 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15353 tp->fw_needed = FIRMWARE_TG3;
15354
15355 tp->irq_max = 1;
15356
15357 if (tg3_flag(tp, 5750_PLUS)) {
15358 tg3_flag_set(tp, SUPPORT_MSI);
15359 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15360 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15361 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15362 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15363 tp->pdev_peer == tp->pdev))
15364 tg3_flag_clear(tp, SUPPORT_MSI);
15365
15366 if (tg3_flag(tp, 5755_PLUS) ||
15367 tg3_asic_rev(tp) == ASIC_REV_5906) {
15368 tg3_flag_set(tp, 1SHOT_MSI);
15369 }
15370
15371 if (tg3_flag(tp, 57765_PLUS)) {
15372 tg3_flag_set(tp, SUPPORT_MSIX);
15373 tp->irq_max = TG3_IRQ_MAX_VECS;
15374 }
15375 }
15376
15377 tp->txq_max = 1;
15378 tp->rxq_max = 1;
15379 if (tp->irq_max > 1) {
15380 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15381 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15382
15383 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15384 tg3_asic_rev(tp) == ASIC_REV_5720)
15385 tp->txq_max = tp->irq_max - 1;
15386 }
15387
15388 if (tg3_flag(tp, 5755_PLUS) ||
15389 tg3_asic_rev(tp) == ASIC_REV_5906)
15390 tg3_flag_set(tp, SHORT_DMA_BUG);
15391
15392 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15393 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15394
15395 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15396 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15397 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15398 tg3_asic_rev(tp) == ASIC_REV_5762)
15399 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15400
15401 if (tg3_flag(tp, 57765_PLUS) &&
15402 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15403 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15404
15405 if (!tg3_flag(tp, 5705_PLUS) ||
15406 tg3_flag(tp, 5780_CLASS) ||
15407 tg3_flag(tp, USE_JUMBO_BDFLAG))
15408 tg3_flag_set(tp, JUMBO_CAPABLE);
15409
15410 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15411 &pci_state_reg);
15412
15413 if (pci_is_pcie(tp->pdev)) {
15414 u16 lnkctl;
15415
15416 tg3_flag_set(tp, PCI_EXPRESS);
15417
15418 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15419 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15420 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15421 tg3_flag_clear(tp, HW_TSO_2);
15422 tg3_flag_clear(tp, TSO_CAPABLE);
15423 }
15424 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15425 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15426 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15427 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15428 tg3_flag_set(tp, CLKREQ_BUG);
15429 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15430 tg3_flag_set(tp, L1PLLPD_EN);
15431 }
15432 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15433 /* BCM5785 devices are effectively PCIe devices, and should
15434 * follow PCIe codepaths, but do not have a PCIe capabilities
15435 * section.
15436 */
15437 tg3_flag_set(tp, PCI_EXPRESS);
15438 } else if (!tg3_flag(tp, 5705_PLUS) ||
15439 tg3_flag(tp, 5780_CLASS)) {
15440 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15441 if (!tp->pcix_cap) {
15442 dev_err(&tp->pdev->dev,
15443 "Cannot find PCI-X capability, aborting\n");
15444 return -EIO;
15445 }
15446
15447 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15448 tg3_flag_set(tp, PCIX_MODE);
15449 }
15450
15451 /* If we have an AMD 762 or VIA K8T800 chipset, write
15452 * reordering to the mailbox registers done by the host
15453 * controller can cause major troubles. We read back from
15454 * every mailbox register write to force the writes to be
15455 * posted to the chip in order.
15456 */
15457 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15458 !tg3_flag(tp, PCI_EXPRESS))
15459 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15460
15461 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15462 &tp->pci_cacheline_sz);
15463 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15464 &tp->pci_lat_timer);
15465 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15466 tp->pci_lat_timer < 64) {
15467 tp->pci_lat_timer = 64;
15468 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15469 tp->pci_lat_timer);
15470 }
15471
15472 /* Important! -- It is critical that the PCI-X hw workaround
15473 * situation is decided before the first MMIO register access.
15474 */
15475 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15476 /* 5700 BX chips need to have their TX producer index
15477 * mailboxes written twice to workaround a bug.
15478 */
15479 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15480
15481 /* If we are in PCI-X mode, enable register write workaround.
15482 *
15483 * The workaround is to use indirect register accesses
15484 * for all chip writes not to mailbox registers.
15485 */
15486 if (tg3_flag(tp, PCIX_MODE)) {
15487 u32 pm_reg;
15488
15489 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15490
15491 			/* The chip can have its power management PCI config
15492 * space registers clobbered due to this bug.
15493 * So explicitly force the chip into D0 here.
15494 */
15495 pci_read_config_dword(tp->pdev,
15496 tp->pm_cap + PCI_PM_CTRL,
15497 &pm_reg);
15498 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15499 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15500 pci_write_config_dword(tp->pdev,
15501 tp->pm_cap + PCI_PM_CTRL,
15502 pm_reg);
15503
15504 /* Also, force SERR#/PERR# in PCI command. */
15505 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15506 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15507 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15508 }
15509 }
15510
15511 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15512 tg3_flag_set(tp, PCI_HIGH_SPEED);
15513 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15514 tg3_flag_set(tp, PCI_32BIT);
15515
15516 /* Chip-specific fixup from Broadcom driver */
15517 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15518 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15519 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15520 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15521 }
15522
15523 /* Default fast path register access methods */
15524 tp->read32 = tg3_read32;
15525 tp->write32 = tg3_write32;
15526 tp->read32_mbox = tg3_read32;
15527 tp->write32_mbox = tg3_write32;
15528 tp->write32_tx_mbox = tg3_write32;
15529 tp->write32_rx_mbox = tg3_write32;
15530
15531 /* Various workaround register access methods */
15532 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15533 tp->write32 = tg3_write_indirect_reg32;
15534 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15535 (tg3_flag(tp, PCI_EXPRESS) &&
15536 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15537 /*
15538 * Back-to-back register writes can cause problems on these
15539 * chips; the workaround is to read back all register writes
15540 * except those to the mailbox registers.
15541 *
15542 * See tg3_write_indirect_reg32().
15543 */
15544 tp->write32 = tg3_write_flush_reg32;
15545 }
15546
15547 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15548 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15549 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15550 tp->write32_rx_mbox = tg3_write_flush_reg32;
15551 }
15552
15553 if (tg3_flag(tp, ICH_WORKAROUND)) {
15554 tp->read32 = tg3_read_indirect_reg32;
15555 tp->write32 = tg3_write_indirect_reg32;
15556 tp->read32_mbox = tg3_read_indirect_mbox;
15557 tp->write32_mbox = tg3_write_indirect_mbox;
15558 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15559 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15560
15561 iounmap(tp->regs);
15562 tp->regs = NULL;
15563
15564 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15565 pci_cmd &= ~PCI_COMMAND_MEMORY;
15566 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15567 }
15568 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15569 tp->read32_mbox = tg3_read32_mbox_5906;
15570 tp->write32_mbox = tg3_write32_mbox_5906;
15571 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15572 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15573 }
15574
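/* Chips that need indirect register writes, and 5700/5701 devices in
 * PCI-X mode, cannot use the memory window reliably, so their SRAM
 * accesses are routed through PCI config space instead.
 */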
15575 if (tp->write32 == tg3_write_indirect_reg32 ||
15576 (tg3_flag(tp, PCIX_MODE) &&
15577 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15578 tg3_asic_rev(tp) == ASIC_REV_5701)))
15579 tg3_flag_set(tp, SRAM_USE_CONFIG);
15580
15581 /* The memory arbiter has to be enabled in order for SRAM accesses
15582 * to succeed. Normally on powerup the tg3 chip firmware will make
15583 * sure it is enabled, but other entities such as system netboot
15584 * code might disable it.
15585 */
15586 val = tr32(MEMARB_MODE);
15587 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15588
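/* Determine which PCI function this is. Multi-function devices may
 * report it via the PCI-X status register (5704 and the 5780 class)
 * or via the CPMU status register (5717/5719/5720), checked below.
 */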
15589 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15590 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15591 tg3_flag(tp, 5780_CLASS)) {
15592 if (tg3_flag(tp, PCIX_MODE)) {
15593 pci_read_config_dword(tp->pdev,
15594 tp->pcix_cap + PCI_X_STATUS,
15595 &val);
15596 tp->pci_fn = val & 0x7;
15597 }
15598 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15599 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15600 tg3_asic_rev(tp) == ASIC_REV_5720) {
15601 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15602 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15603 val = tr32(TG3_CPMU_STATUS);
15604
15605 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15606 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15607 else
15608 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15609 TG3_CPMU_STATUS_FSHFT_5719;
15610 }
15611
15612 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15613 tp->write32_tx_mbox = tg3_write_flush_reg32;
15614 tp->write32_rx_mbox = tg3_write_flush_reg32;
15615 }
15616
15617 /* Get eeprom hw config before calling tg3_set_power_state().
15618 * In particular, the TG3_FLAG_IS_NIC flag must be
15619 * determined before calling tg3_set_power_state() so that
15620 * we know whether or not to switch out of Vaux power.
15621 * When the flag is set, it means that GPIO1 is used for eeprom
15622 * write protect and also implies that it is a LOM where GPIOs
15623 * are not used to switch power.
15624 */
15625 tg3_get_eeprom_hw_cfg(tp);
15626
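/* Firmware-based TSO and the ASF management firmware share the
 * device's internal CPU, so firmware TSO has to be given up when
 * ASF is active.
 */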
15627 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15628 tg3_flag_clear(tp, TSO_CAPABLE);
15629 tg3_flag_clear(tp, TSO_BUG);
15630 tp->fw_needed = NULL;
15631 }
15632
15633 if (tg3_flag(tp, ENABLE_APE)) {
15634 /* Allow reads and writes to the
15635 * APE register and memory space.
15636 */
15637 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15638 PCISTATE_ALLOW_APE_SHMEM_WR |
15639 PCISTATE_ALLOW_APE_PSPACE_WR;
15640 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15641 pci_state_reg);
15642
15643 tg3_ape_lock_init(tp);
15644 }
15645
15646 /* Set up tp->grc_local_ctrl before calling
15647 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15648 * will bring 5700's external PHY out of reset.
15649 * It is also used as eeprom write protect on LOMs.
15650 */
15651 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15652 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15653 tg3_flag(tp, EEPROM_WRITE_PROT))
15654 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15655 GRC_LCLCTRL_GPIO_OUTPUT1);
15656 /* Unused GPIO3 must be driven as output on 5752 because there
15657 * are no pull-up resistors on unused GPIO pins.
15658 */
15659 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15660 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15661
15662 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15663 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15664 tg3_flag(tp, 57765_CLASS))
15665 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15666
15667 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15668 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15669 /* Turn off the debug UART. */
15670 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15671 if (tg3_flag(tp, IS_NIC))
15672 /* Keep VMain power. */
15673 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15674 GRC_LCLCTRL_GPIO_OUTPUT0;
15675 }
15676
15677 if (tg3_asic_rev(tp) == ASIC_REV_5762)
15678 tp->grc_local_ctrl |=
15679 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15680
15681 /* Switch out of Vaux if it is a NIC */
15682 tg3_pwrsrc_switch_to_vmain(tp);
15683
15684 /* Derive initial jumbo mode from MTU assigned in
15685 * ether_setup() via the alloc_etherdev() call.
15686 */
15687 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15688 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15689
15690 /* Determine WakeOnLan speed to use. */
15691 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15692 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15693 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15694 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15695 tg3_flag_clear(tp, WOL_SPEED_100MB);
15696 } else {
15697 tg3_flag_set(tp, WOL_SPEED_100MB);
15698 }
15699
15700 if (tg3_asic_rev(tp) == ASIC_REV_5906)
15701 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15702
15703 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15704 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15705 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15706 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15707 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15708 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15709 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15710 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15711
15712 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15713 tg3_chip_rev(tp) == CHIPREV_5704_AX)
15714 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15715 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15716 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15717
15718 if (tg3_flag(tp, 5705_PLUS) &&
15719 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15720 tg3_asic_rev(tp) != ASIC_REV_5785 &&
15721 tg3_asic_rev(tp) != ASIC_REV_57780 &&
15722 !tg3_flag(tp, 57765_PLUS)) {
15723 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15724 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15725 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15726 tg3_asic_rev(tp) == ASIC_REV_5761) {
15727 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15728 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15729 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15730 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15731 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15732 } else
15733 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15734 }
15735
15736 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15737 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15738 tp->phy_otp = tg3_read_otp_phycfg(tp);
15739 if (tp->phy_otp == 0)
15740 tp->phy_otp = TG3_OTP_DEFAULT;
15741 }
15742
15743 if (tg3_flag(tp, CPMU_PRESENT))
15744 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15745 else
15746 tp->mi_mode = MAC_MI_MODE_BASE;
15747
15748 tp->coalesce_mode = 0;
15749 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15750 tg3_chip_rev(tp) != CHIPREV_5700_BX)
15751 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15752
15753 /* Set these bits to enable statistics workaround. */
15754 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15755 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15756 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15757 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15758 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15759 }
15760
15761 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15762 tg3_asic_rev(tp) == ASIC_REV_57780)
15763 tg3_flag_set(tp, USE_PHYLIB);
15764
15765 err = tg3_mdio_init(tp);
15766 if (err)
15767 return err;
15768
15769 /* Initialize data/descriptor byte/word swapping. */
15770 val = tr32(GRC_MODE);
15771 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15772 tg3_asic_rev(tp) == ASIC_REV_5762)
15773 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15774 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15775 GRC_MODE_B2HRX_ENABLE |
15776 GRC_MODE_HTX2B_ENABLE |
15777 GRC_MODE_HOST_STACKUP);
15778 else
15779 val &= GRC_MODE_HOST_STACKUP;
15780
15781 tw32(GRC_MODE, val | tp->grc_mode);
15782
15783 tg3_switch_clocks(tp);
15784
15785 /* Clear this out for sanity. */
15786 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15787
15788 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15789 &pci_state_reg);
15790 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15791 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15792 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15793 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15794 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15795 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15796 void __iomem *sram_base;
15797
15798 /* Write some dummy words into the SRAM status block
15799 * area and see if they read back correctly. If the return
15800 * value is bad, force enable the PCIX workaround.
15801 */
15802 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15803
15804 writel(0x00000000, sram_base);
15805 writel(0x00000000, sram_base + 4);
15806 writel(0xffffffff, sram_base + 4);
15807 if (readl(sram_base) != 0x00000000)
15808 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15809 }
15810 }
15811
15812 udelay(50);
15813 tg3_nvram_init(tp);
15814
15815 grc_misc_cfg = tr32(GRC_MISC_CFG);
15816 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15817
15818 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15819 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15820 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15821 tg3_flag_set(tp, IS_5788);
15822
15823 if (!tg3_flag(tp, IS_5788) &&
15824 tg3_asic_rev(tp) != ASIC_REV_5700)
15825 tg3_flag_set(tp, TAGGED_STATUS);
15826 if (tg3_flag(tp, TAGGED_STATUS)) {
15827 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15828 HOSTCC_MODE_CLRTICK_TXBD);
15829
15830 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15831 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15832 tp->misc_host_ctrl);
15833 }
15834
15835 /* Preserve the APE MAC_MODE bits */
15836 if (tg3_flag(tp, ENABLE_APE))
15837 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15838 else
15839 tp->mac_mode = 0;
15840
15841 if (tg3_10_100_only_device(tp, ent))
15842 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15843
15844 err = tg3_phy_probe(tp);
15845 if (err) {
15846 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15847 /* ... but do not return immediately ... */
15848 tg3_mdio_fini(tp);
15849 }
15850
15851 tg3_read_vpd(tp);
15852 tg3_read_fw_ver(tp);
15853
15854 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15855 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15856 } else {
15857 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15858 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15859 else
15860 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15861 }
15862
15863 /* 5700 {AX,BX} chips have a broken status block link
15864 * change bit implementation, so we must use the
15865 * status register in those cases.
15866 */
15867 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15868 tg3_flag_set(tp, USE_LINKCHG_REG);
15869 else
15870 tg3_flag_clear(tp, USE_LINKCHG_REG);
15871
15872 /* The led_ctrl is set during tg3_phy_probe; here we might
15873 * have to force the link status polling mechanism based
15874 * upon subsystem IDs.
15875 */
15876 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15877 tg3_asic_rev(tp) == ASIC_REV_5701 &&
15878 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15879 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15880 tg3_flag_set(tp, USE_LINKCHG_REG);
15881 }
15882
15883 /* For all SERDES we poll the MAC status register. */
15884 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15885 tg3_flag_set(tp, POLL_SERDES);
15886 else
15887 tg3_flag_clear(tp, POLL_SERDES);
15888
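/* Reserve RX headroom. The 5701 in PCI-X mode cannot DMA to 2-byte
 * aligned addresses, so the NET_IP_ALIGN offset is dropped there and,
 * on hosts without efficient unaligned access, every packet is copied
 * instead (an rx_copy_thresh of ~0 copies all sizes).
 */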
15889 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15890 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15891 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
15892 tg3_flag(tp, PCIX_MODE)) {
15893 tp->rx_offset = NET_SKB_PAD;
15894 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15895 tp->rx_copy_thresh = ~(u16)0;
15896 #endif
15897 }
15898
15899 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15900 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15901 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15902
15903 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15904
15905 /* Increment the rx prod index on the rx std ring by at most
15906 * 8 for these chips to workaround hw errata.
15907 */
15908 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15909 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15910 tg3_asic_rev(tp) == ASIC_REV_5755)
15911 tp->rx_std_max_post = 8;
15912
15913 if (tg3_flag(tp, ASPM_WORKAROUND))
15914 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15915 PCIE_PWR_MGMT_L1_THRESH_MSK;
15916
15917 return err;
15918 }
15919
15920 #ifdef CONFIG_SPARC
15921 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15922 {
15923 struct net_device *dev = tp->dev;
15924 struct pci_dev *pdev = tp->pdev;
15925 struct device_node *dp = pci_device_to_OF_node(pdev);
15926 const unsigned char *addr;
15927 int len;
15928
15929 addr = of_get_property(dp, "local-mac-address", &len);
15930 if (addr && len == 6) {
15931 memcpy(dev->dev_addr, addr, 6);
15932 return 0;
15933 }
15934 return -ENODEV;
15935 }
15936
15937 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15938 {
15939 struct net_device *dev = tp->dev;
15940
15941 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15942 return 0;
15943 }
15944 #endif
15945
15946 static int tg3_get_device_address(struct tg3 *tp)
15947 {
15948 struct net_device *dev = tp->dev;
15949 u32 hi, lo, mac_offset;
15950 int addr_ok = 0;
15951 int err;
15952
15953 #ifdef CONFIG_SPARC
15954 if (!tg3_get_macaddr_sparc(tp))
15955 return 0;
15956 #endif
15957
15958 if (tg3_flag(tp, IS_SSB_CORE)) {
15959 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
15960 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
15961 return 0;
15962 }
15963
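/* Pick the NVRAM offset of the MAC address. The second MAC of
 * dual-MAC devices and the upper functions of 5717-class devices
 * store their addresses at alternate offsets; the 5906 keeps its
 * address at 0x10.
 */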
15964 mac_offset = 0x7c;
15965 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15966 tg3_flag(tp, 5780_CLASS)) {
15967 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15968 mac_offset = 0xcc;
15969 if (tg3_nvram_lock(tp))
15970 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15971 else
15972 tg3_nvram_unlock(tp);
15973 } else if (tg3_flag(tp, 5717_PLUS)) {
15974 if (tp->pci_fn & 1)
15975 mac_offset = 0xcc;
15976 if (tp->pci_fn > 1)
15977 mac_offset += 0x18c;
15978 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15979 mac_offset = 0x10;
15980
15981 /* First try to get it from MAC address mailbox. */
15982 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
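/* Bootcode tags a valid mailbox address with 0x484b ("HK") in the
 * upper 16 bits.
 */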
15983 if ((hi >> 16) == 0x484b) {
15984 dev->dev_addr[0] = (hi >> 8) & 0xff;
15985 dev->dev_addr[1] = (hi >> 0) & 0xff;
15986
15987 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15988 dev->dev_addr[2] = (lo >> 24) & 0xff;
15989 dev->dev_addr[3] = (lo >> 16) & 0xff;
15990 dev->dev_addr[4] = (lo >> 8) & 0xff;
15991 dev->dev_addr[5] = (lo >> 0) & 0xff;
15992
15993 /* Some old bootcode may report a 0 MAC address in SRAM */
15994 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15995 }
15996 if (!addr_ok) {
15997 /* Next, try NVRAM. */
15998 if (!tg3_flag(tp, NO_NVRAM) &&
15999 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16000 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
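/* The address is stored big-endian: its first two bytes sit in
 * the low half of the first word, and the remaining four fill
 * the second word.
 */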
16001 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16002 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16003 }
16004 /* Finally just fetch it out of the MAC control regs. */
16005 else {
16006 hi = tr32(MAC_ADDR_0_HIGH);
16007 lo = tr32(MAC_ADDR_0_LOW);
16008
16009 dev->dev_addr[5] = lo & 0xff;
16010 dev->dev_addr[4] = (lo >> 8) & 0xff;
16011 dev->dev_addr[3] = (lo >> 16) & 0xff;
16012 dev->dev_addr[2] = (lo >> 24) & 0xff;
16013 dev->dev_addr[1] = hi & 0xff;
16014 dev->dev_addr[0] = (hi >> 8) & 0xff;
16015 }
16016 }
16017
16018 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16019 #ifdef CONFIG_SPARC
16020 if (!tg3_get_default_macaddr_sparc(tp))
16021 return 0;
16022 #endif
16023 return -EINVAL;
16024 }
16025 return 0;
16026 }
16027
16028 #define BOUNDARY_SINGLE_CACHELINE 1
16029 #define BOUNDARY_MULTI_CACHELINE 2
16030
16031 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16032 {
16033 int cacheline_size;
16034 u8 byte;
16035 int goal;
16036
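/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of zero
 * means it was never programmed, so fall back to a conservative
 * 1024 bytes.
 */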
16037 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16038 if (byte == 0)
16039 cacheline_size = 1024;
16040 else
16041 cacheline_size = (int) byte * 4;
16042
16043 /* On 5703 and later chips, the boundary bits have no
16044 * effect.
16045 */
16046 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16047 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16048 !tg3_flag(tp, PCI_EXPRESS))
16049 goto out;
16050
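/* Choose a boundary policy for the host architecture: RISC hosts
 * with large cachelines disconnect on bursts that cross a cacheline,
 * so they constrain the DMA boundary; a goal of 0 leaves the chip
 * default untouched.
 */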
16051 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16052 goal = BOUNDARY_MULTI_CACHELINE;
16053 #else
16054 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16055 goal = BOUNDARY_SINGLE_CACHELINE;
16056 #else
16057 goal = 0;
16058 #endif
16059 #endif
16060
16061 if (tg3_flag(tp, 57765_PLUS)) {
16062 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16063 goto out;
16064 }
16065
16066 if (!goal)
16067 goto out;
16068
16069 /* PCI controllers on most RISC systems tend to disconnect
16070 * when a device tries to burst across a cache-line boundary.
16071 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16072 *
16073 * Unfortunately, for PCI-E there are only limited
16074 * write-side controls for this, and thus for reads
16075 * we will still get the disconnects. We'll also waste
16076 * these PCI cycles for both read and write for chips
16077 * other than 5700 and 5701 which do not implement the
16078 * boundary bits.
16079 */
16080 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16081 switch (cacheline_size) {
16082 case 16:
16083 case 32:
16084 case 64:
16085 case 128:
16086 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16087 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16088 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16089 } else {
16090 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16091 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16092 }
16093 break;
16094
16095 case 256:
16096 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16097 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16098 break;
16099
16100 default:
16101 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16102 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16103 break;
16104 }
16105 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16106 switch (cacheline_size) {
16107 case 16:
16108 case 32:
16109 case 64:
16110 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16111 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16112 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16113 break;
16114 }
16115 /* fallthrough */
16116 case 128:
16117 default:
16118 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16119 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16120 break;
16121 }
16122 } else {
16123 switch (cacheline_size) {
16124 case 16:
16125 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16126 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16127 DMA_RWCTRL_WRITE_BNDRY_16);
16128 break;
16129 }
16130 /* fallthrough */
16131 case 32:
16132 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16133 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16134 DMA_RWCTRL_WRITE_BNDRY_32);
16135 break;
16136 }
16137 /* fallthrough */
16138 case 64:
16139 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16140 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16141 DMA_RWCTRL_WRITE_BNDRY_64);
16142 break;
16143 }
16144 /* fallthrough */
16145 case 128:
16146 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16147 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16148 DMA_RWCTRL_WRITE_BNDRY_128);
16149 break;
16150 }
16151 /* fallthrough */
16152 case 256:
16153 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16154 DMA_RWCTRL_WRITE_BNDRY_256);
16155 break;
16156 case 512:
16157 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16158 DMA_RWCTRL_WRITE_BNDRY_512);
16159 break;
16160 case 1024:
16161 default:
16162 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16163 DMA_RWCTRL_WRITE_BNDRY_1024);
16164 break;
16165 }
16166 }
16167
16168 out:
16169 return val;
16170 }
16171
16172 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16173 int size, int to_device)
16174 {
16175 struct tg3_internal_buffer_desc test_desc;
16176 u32 sram_dma_descs;
16177 int i, ret;
16178
16179 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16180
16181 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16182 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16183 tw32(RDMAC_STATUS, 0);
16184 tw32(WDMAC_STATUS, 0);
16185
16186 tw32(BUFMGR_MODE, 0);
16187 tw32(FTQ_RESET, 0);
16188
16189 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16190 test_desc.addr_lo = buf_dma & 0xffffffff;
16191 test_desc.nic_mbuf = 0x00002100;
16192 test_desc.len = size;
16193
16194 /*
16195 * HP ZX1 systems were seeing DMA test failures with 5701 cards running
16196 * at 33MHz the *second* time the tg3 driver was loaded after an
16197 * initial scan.
16198 *
16199 * Broadcom tells me:
16200 * ...the DMA engine is connected to the GRC block and a DMA
16201 * reset may affect the GRC block in some unpredictable way...
16202 * The behavior of resets to individual blocks has not been tested.
16203 *
16204 * Broadcom noted the GRC reset will also reset all sub-components.
16205 */
16206 if (to_device) {
16207 test_desc.cqid_sqid = (13 << 8) | 2;
16208
16209 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16210 udelay(40);
16211 } else {
16212 test_desc.cqid_sqid = (16 << 8) | 7;
16213
16214 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16215 udelay(40);
16216 }
16217 test_desc.flags = 0x00000005;
16218
16219 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16220 u32 val;
16221
16222 val = *(((u32 *)&test_desc) + i);
16223 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16224 sram_dma_descs + (i * sizeof(u32)));
16225 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16226 }
16227 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16228
16229 if (to_device)
16230 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16231 else
16232 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16233
16234 ret = -ENODEV;
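/* Poll the completion FIFO for up to 4ms (40 * 100us) for the
 * descriptor to come back.
 */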
16235 for (i = 0; i < 40; i++) {
16236 u32 val;
16237
16238 if (to_device)
16239 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16240 else
16241 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16242 if ((val & 0xffff) == sram_dma_descs) {
16243 ret = 0;
16244 break;
16245 }
16246
16247 udelay(100);
16248 }
16249
16250 return ret;
16251 }
16252
16253 #define TEST_BUFFER_SIZE 0x2000
16254
16255 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16256 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16257 { },
16258 };
16259
16260 static int tg3_test_dma(struct tg3 *tp)
16261 {
16262 dma_addr_t buf_dma;
16263 u32 *buf, saved_dma_rwctrl;
16264 int ret = 0;
16265
16266 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16267 &buf_dma, GFP_KERNEL);
16268 if (!buf) {
16269 ret = -ENOMEM;
16270 goto out_nofree;
16271 }
16272
16273 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16274 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16275
16276 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16277
16278 if (tg3_flag(tp, 57765_PLUS))
16279 goto out;
16280
16281 if (tg3_flag(tp, PCI_EXPRESS)) {
16282 /* DMA read watermark not used on PCIE */
16283 tp->dma_rwctrl |= 0x00180000;
16284 } else if (!tg3_flag(tp, PCIX_MODE)) {
16285 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16286 tg3_asic_rev(tp) == ASIC_REV_5750)
16287 tp->dma_rwctrl |= 0x003f0000;
16288 else
16289 tp->dma_rwctrl |= 0x003f000f;
16290 } else {
16291 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16292 tg3_asic_rev(tp) == ASIC_REV_5704) {
16293 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16294 u32 read_water = 0x7;
16295
16296 /* If the 5704 is behind the EPB bridge, we can
16297 * do the less restrictive ONE_DMA workaround for
16298 * better performance.
16299 */
16300 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16301 tg3_asic_rev(tp) == ASIC_REV_5704)
16302 tp->dma_rwctrl |= 0x8000;
16303 else if (ccval == 0x6 || ccval == 0x7)
16304 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16305
16306 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16307 read_water = 4;
16308 /* Set bit 23 to enable PCIX hw bug fix */
16309 tp->dma_rwctrl |=
16310 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16311 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16312 (1 << 23);
16313 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16314 /* 5780 always in PCIX mode */
16315 tp->dma_rwctrl |= 0x00144000;
16316 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16317 /* 5714 always in PCIX mode */
16318 tp->dma_rwctrl |= 0x00148000;
16319 } else {
16320 tp->dma_rwctrl |= 0x001b000f;
16321 }
16322 }
16323 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16324 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16325
16326 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16327 tg3_asic_rev(tp) == ASIC_REV_5704)
16328 tp->dma_rwctrl &= 0xfffffff0;
16329
16330 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16331 tg3_asic_rev(tp) == ASIC_REV_5701) {
16332 /* Remove this if it causes problems for some boards. */
16333 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16334
16335 /* On 5700/5701 chips, we need to set this bit.
16336 * Otherwise the chip will issue cacheline transactions
16337 * to streamable DMA memory with not all the byte
16338 * enables turned on. This is an error on several
16339 * RISC PCI controllers, in particular sparc64.
16340 *
16341 * On 5703/5704 chips, this bit has been reassigned
16342 * a different meaning. In particular, it is used
16343 * on those chips to enable a PCI-X workaround.
16344 */
16345 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16346 }
16347
16348 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16349
16350 #if 0
16351 /* Unneeded, already done by tg3_get_invariants. */
16352 tg3_switch_clocks(tp);
16353 #endif
16354
16355 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16356 tg3_asic_rev(tp) != ASIC_REV_5701)
16357 goto out;
16358
16359 /* It is best to perform the DMA test with the maximum write burst size
16360 * to expose the 5700/5701 write DMA bug.
16361 */
16362 saved_dma_rwctrl = tp->dma_rwctrl;
16363 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16364 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16365
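/* The first pass runs with the unrestricted write boundary set above;
 * if the data read back is corrupted, drop to a 16-byte write boundary
 * and retry once before giving up.
 */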
16366 while (1) {
16367 u32 *p = buf, i;
16368
16369 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16370 p[i] = i;
16371
16372 /* Send the buffer to the chip. */
16373 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16374 if (ret) {
16375 dev_err(&tp->pdev->dev,
16376 "%s: Buffer write failed. err = %d\n",
16377 __func__, ret);
16378 break;
16379 }
16380
16381 #if 0
16382 /* validate data reached card RAM correctly. */
16383 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16384 u32 val;
16385 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16386 if (le32_to_cpu(val) != p[i]) {
16387 dev_err(&tp->pdev->dev,
16388 "%s: Buffer corrupted on device! "
16389 "(%d != %d)\n", __func__, val, i);
16390 /* ret = -ENODEV here? */
16391 }
16392 p[i] = 0;
16393 }
16394 #endif
16395 /* Now read it back. */
16396 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16397 if (ret) {
16398 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16399 "err = %d\n", __func__, ret);
16400 break;
16401 }
16402
16403 /* Verify it. */
16404 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16405 if (p[i] == i)
16406 continue;
16407
16408 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16409 DMA_RWCTRL_WRITE_BNDRY_16) {
16410 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16411 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16412 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16413 break;
16414 } else {
16415 dev_err(&tp->pdev->dev,
16416 "%s: Buffer corrupted on read back! "
16417 "(%d != %d)\n", __func__, p[i], i);
16418 ret = -ENODEV;
16419 goto out;
16420 }
16421 }
16422
16423 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16424 /* Success. */
16425 ret = 0;
16426 break;
16427 }
16428 }
16429 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16430 DMA_RWCTRL_WRITE_BNDRY_16) {
16431 /* The DMA test passed without adjusting the DMA boundary;
16432 * now look for chipsets that are known to expose the
16433 * DMA bug without failing the test.
16434 */
16435 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16436 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16437 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16438 } else {
16439 /* Safe to use the calculated DMA boundary. */
16440 tp->dma_rwctrl = saved_dma_rwctrl;
16441 }
16442
16443 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16444 }
16445
16446 out:
16447 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16448 out_nofree:
16449 return ret;
16450 }
16451
16452 static void tg3_init_bufmgr_config(struct tg3 *tp)
16453 {
16454 if (tg3_flag(tp, 57765_PLUS)) {
16455 tp->bufmgr_config.mbuf_read_dma_low_water =
16456 DEFAULT_MB_RDMA_LOW_WATER_5705;
16457 tp->bufmgr_config.mbuf_mac_rx_low_water =
16458 DEFAULT_MB_MACRX_LOW_WATER_57765;
16459 tp->bufmgr_config.mbuf_high_water =
16460 DEFAULT_MB_HIGH_WATER_57765;
16461
16462 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16463 DEFAULT_MB_RDMA_LOW_WATER_5705;
16464 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16465 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16466 tp->bufmgr_config.mbuf_high_water_jumbo =
16467 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16468 } else if (tg3_flag(tp, 5705_PLUS)) {
16469 tp->bufmgr_config.mbuf_read_dma_low_water =
16470 DEFAULT_MB_RDMA_LOW_WATER_5705;
16471 tp->bufmgr_config.mbuf_mac_rx_low_water =
16472 DEFAULT_MB_MACRX_LOW_WATER_5705;
16473 tp->bufmgr_config.mbuf_high_water =
16474 DEFAULT_MB_HIGH_WATER_5705;
16475 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16476 tp->bufmgr_config.mbuf_mac_rx_low_water =
16477 DEFAULT_MB_MACRX_LOW_WATER_5906;
16478 tp->bufmgr_config.mbuf_high_water =
16479 DEFAULT_MB_HIGH_WATER_5906;
16480 }
16481
16482 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16483 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16484 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16485 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16486 tp->bufmgr_config.mbuf_high_water_jumbo =
16487 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16488 } else {
16489 tp->bufmgr_config.mbuf_read_dma_low_water =
16490 DEFAULT_MB_RDMA_LOW_WATER;
16491 tp->bufmgr_config.mbuf_mac_rx_low_water =
16492 DEFAULT_MB_MACRX_LOW_WATER;
16493 tp->bufmgr_config.mbuf_high_water =
16494 DEFAULT_MB_HIGH_WATER;
16495
16496 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16497 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16498 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16499 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16500 tp->bufmgr_config.mbuf_high_water_jumbo =
16501 DEFAULT_MB_HIGH_WATER_JUMBO;
16502 }
16503
16504 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16505 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16506 }
16507
16508 static char *tg3_phy_string(struct tg3 *tp)
16509 {
16510 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16511 case TG3_PHY_ID_BCM5400: return "5400";
16512 case TG3_PHY_ID_BCM5401: return "5401";
16513 case TG3_PHY_ID_BCM5411: return "5411";
16514 case TG3_PHY_ID_BCM5701: return "5701";
16515 case TG3_PHY_ID_BCM5703: return "5703";
16516 case TG3_PHY_ID_BCM5704: return "5704";
16517 case TG3_PHY_ID_BCM5705: return "5705";
16518 case TG3_PHY_ID_BCM5750: return "5750";
16519 case TG3_PHY_ID_BCM5752: return "5752";
16520 case TG3_PHY_ID_BCM5714: return "5714";
16521 case TG3_PHY_ID_BCM5780: return "5780";
16522 case TG3_PHY_ID_BCM5755: return "5755";
16523 case TG3_PHY_ID_BCM5787: return "5787";
16524 case TG3_PHY_ID_BCM5784: return "5784";
16525 case TG3_PHY_ID_BCM5756: return "5722/5756";
16526 case TG3_PHY_ID_BCM5906: return "5906";
16527 case TG3_PHY_ID_BCM5761: return "5761";
16528 case TG3_PHY_ID_BCM5718C: return "5718C";
16529 case TG3_PHY_ID_BCM5718S: return "5718S";
16530 case TG3_PHY_ID_BCM57765: return "57765";
16531 case TG3_PHY_ID_BCM5719C: return "5719C";
16532 case TG3_PHY_ID_BCM5720C: return "5720C";
16533 case TG3_PHY_ID_BCM5762: return "5762C";
16534 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16535 case 0: return "serdes";
16536 default: return "unknown";
16537 }
16538 }
16539
16540 static char *tg3_bus_string(struct tg3 *tp, char *str)
16541 {
16542 if (tg3_flag(tp, PCI_EXPRESS)) {
16543 strcpy(str, "PCI Express");
16544 return str;
16545 } else if (tg3_flag(tp, PCIX_MODE)) {
16546 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16547
16548 strcpy(str, "PCIX:");
16549
16550 if ((clock_ctrl == 7) ||
16551 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16552 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16553 strcat(str, "133MHz");
16554 else if (clock_ctrl == 0)
16555 strcat(str, "33MHz");
16556 else if (clock_ctrl == 2)
16557 strcat(str, "50MHz");
16558 else if (clock_ctrl == 4)
16559 strcat(str, "66MHz");
16560 else if (clock_ctrl == 6)
16561 strcat(str, "100MHz");
16562 } else {
16563 strcpy(str, "PCI:");
16564 if (tg3_flag(tp, PCI_HIGH_SPEED))
16565 strcat(str, "66MHz");
16566 else
16567 strcat(str, "33MHz");
16568 }
16569 if (tg3_flag(tp, PCI_32BIT))
16570 strcat(str, ":32-bit");
16571 else
16572 strcat(str, ":64-bit");
16573 return str;
16574 }
16575
16576 static void tg3_init_coal(struct tg3 *tp)
16577 {
16578 struct ethtool_coalesce *ec = &tp->coal;
16579
16580 memset(ec, 0, sizeof(*ec));
16581 ec->cmd = ETHTOOL_GCOALESCE;
16582 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16583 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16584 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16585 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16586 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16587 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16588 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16589 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16590 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16591
16592 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16593 HOSTCC_MODE_CLRTICK_TXBD)) {
16594 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16595 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16596 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16597 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16598 }
16599
16600 if (tg3_flag(tp, 5705_PLUS)) {
16601 ec->rx_coalesce_usecs_irq = 0;
16602 ec->tx_coalesce_usecs_irq = 0;
16603 ec->stats_block_coalesce_usecs = 0;
16604 }
16605 }
16606
16607 static int tg3_init_one(struct pci_dev *pdev,
16608 const struct pci_device_id *ent)
16609 {
16610 struct net_device *dev;
16611 struct tg3 *tp;
16612 int i, err, pm_cap;
16613 u32 sndmbx, rcvmbx, intmbx;
16614 char str[40];
16615 u64 dma_mask, persist_dma_mask;
16616 netdev_features_t features = 0;
16617
16618 printk_once(KERN_INFO "%s\n", version);
16619
16620 err = pci_enable_device(pdev);
16621 if (err) {
16622 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16623 return err;
16624 }
16625
16626 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16627 if (err) {
16628 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16629 goto err_out_disable_pdev;
16630 }
16631
16632 pci_set_master(pdev);
16633
16634 /* Find power-management capability. */
16635 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16636 if (pm_cap == 0) {
16637 dev_err(&pdev->dev,
16638 "Cannot find Power Management capability, aborting\n");
16639 err = -EIO;
16640 goto err_out_free_res;
16641 }
16642
16643 err = pci_set_power_state(pdev, PCI_D0);
16644 if (err) {
16645 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16646 goto err_out_free_res;
16647 }
16648
16649 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16650 if (!dev) {
16651 err = -ENOMEM;
16652 goto err_out_power_down;
16653 }
16654
16655 SET_NETDEV_DEV(dev, &pdev->dev);
16656
16657 tp = netdev_priv(dev);
16658 tp->pdev = pdev;
16659 tp->dev = dev;
16660 tp->pm_cap = pm_cap;
16661 tp->rx_mode = TG3_DEF_RX_MODE;
16662 tp->tx_mode = TG3_DEF_TX_MODE;
16663 tp->irq_sync = 1;
16664
16665 if (tg3_debug > 0)
16666 tp->msg_enable = tg3_debug;
16667 else
16668 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16669
16670 if (pdev_is_ssb_gige_core(pdev)) {
16671 tg3_flag_set(tp, IS_SSB_CORE);
16672 if (ssb_gige_must_flush_posted_writes(pdev))
16673 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16674 if (ssb_gige_one_dma_at_once(pdev))
16675 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16676 if (ssb_gige_have_roboswitch(pdev))
16677 tg3_flag_set(tp, ROBOSWITCH);
16678 if (ssb_gige_is_rgmii(pdev))
16679 tg3_flag_set(tp, RGMII_MODE);
16680 }
16681
16682 /* The word/byte swap controls here govern register access byte
16683 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16684 * setting below.
16685 */
16686 tp->misc_host_ctrl =
16687 MISC_HOST_CTRL_MASK_PCI_INT |
16688 MISC_HOST_CTRL_WORD_SWAP |
16689 MISC_HOST_CTRL_INDIR_ACCESS |
16690 MISC_HOST_CTRL_PCISTATE_RW;
16691
16692 /* The NONFRM (non-frame) byte/word swap controls take effect
16693 * on descriptor entries, i.e. anything that isn't packet data.
16694 *
16695 * The StrongARM chips on the board (one for tx, one for rx)
16696 * are running in big-endian mode.
16697 */
16698 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16699 GRC_MODE_WSWAP_NONFRM_DATA);
16700 #ifdef __BIG_ENDIAN
16701 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16702 #endif
16703 spin_lock_init(&tp->lock);
16704 spin_lock_init(&tp->indirect_lock);
16705 INIT_WORK(&tp->reset_task, tg3_reset_task);
16706
16707 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16708 if (!tp->regs) {
16709 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16710 err = -ENOMEM;
16711 goto err_out_free_dev;
16712 }
16713
16714 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16715 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16716 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16717 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16718 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16719 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16720 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16721 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16722 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16723 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16724 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16725 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16726 tg3_flag_set(tp, ENABLE_APE);
16727 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16728 if (!tp->aperegs) {
16729 dev_err(&pdev->dev,
16730 "Cannot map APE registers, aborting\n");
16731 err = -ENOMEM;
16732 goto err_out_iounmap;
16733 }
16734 }
16735
16736 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16737 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16738
16739 dev->ethtool_ops = &tg3_ethtool_ops;
16740 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16741 dev->netdev_ops = &tg3_netdev_ops;
16742 dev->irq = pdev->irq;
16743
16744 err = tg3_get_invariants(tp, ent);
16745 if (err) {
16746 dev_err(&pdev->dev,
16747 "Problem fetching invariants of chip, aborting\n");
16748 goto err_out_apeunmap;
16749 }
16750
16751 /* The EPB bridge inside 5714, 5715, and 5780 and any
16752 * device behind the EPB cannot support DMA addresses > 40-bit.
16753 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16754 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16755 * do DMA address check in tg3_start_xmit().
16756 */
16757 if (tg3_flag(tp, IS_5788))
16758 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16759 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16760 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16761 #ifdef CONFIG_HIGHMEM
16762 dma_mask = DMA_BIT_MASK(64);
16763 #endif
16764 } else
16765 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16766
16767 /* Configure DMA attributes. */
16768 if (dma_mask > DMA_BIT_MASK(32)) {
16769 err = pci_set_dma_mask(pdev, dma_mask);
16770 if (!err) {
16771 features |= NETIF_F_HIGHDMA;
16772 err = pci_set_consistent_dma_mask(pdev,
16773 persist_dma_mask);
16774 if (err < 0) {
16775 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16776 "DMA for consistent allocations\n");
16777 goto err_out_apeunmap;
16778 }
16779 }
16780 }
16781 if (err || dma_mask == DMA_BIT_MASK(32)) {
16782 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16783 if (err) {
16784 dev_err(&pdev->dev,
16785 "No usable DMA configuration, aborting\n");
16786 goto err_out_apeunmap;
16787 }
16788 }
16789
16790 tg3_init_bufmgr_config(tp);
16791
16792 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16793
16794 /* 5700 B0 chips do not support checksumming correctly due
16795 * to hardware bugs.
16796 */
16797 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16798 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16799
16800 if (tg3_flag(tp, 5755_PLUS))
16801 features |= NETIF_F_IPV6_CSUM;
16802 }
16803
16804 /* TSO is on by default on chips that support hardware TSO.
16805 * Firmware TSO on older chips gives lower performance, so it
16806 * is off by default, but can be enabled using ethtool.
16807 */
16808 if ((tg3_flag(tp, HW_TSO_1) ||
16809 tg3_flag(tp, HW_TSO_2) ||
16810 tg3_flag(tp, HW_TSO_3)) &&
16811 (features & NETIF_F_IP_CSUM))
16812 features |= NETIF_F_TSO;
16813 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16814 if (features & NETIF_F_IPV6_CSUM)
16815 features |= NETIF_F_TSO6;
16816 if (tg3_flag(tp, HW_TSO_3) ||
16817 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16818 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16819 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16820 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16821 tg3_asic_rev(tp) == ASIC_REV_57780)
16822 features |= NETIF_F_TSO_ECN;
16823 }
16824
16825 dev->features |= features;
16826 dev->vlan_features |= features;
16827
16828 /*
16829 * Add loopback capability only for a subset of devices that support
16830 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16831 * loopback for the remaining devices.
16832 */
16833 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16834 !tg3_flag(tp, CPMU_PRESENT))
16835 /* Add the loopback capability */
16836 features |= NETIF_F_LOOPBACK;
16837
16838 dev->hw_features |= features;
16839
16840 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16841 !tg3_flag(tp, TSO_CAPABLE) &&
16842 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16843 tg3_flag_set(tp, MAX_RXPEND_64);
16844 tp->rx_pending = 63;
16845 }
16846
16847 err = tg3_get_device_address(tp);
16848 if (err) {
16849 dev_err(&pdev->dev,
16850 "Could not obtain valid ethernet address, aborting\n");
16851 goto err_out_apeunmap;
16852 }
16853
16854 /*
16855 * Reset the chip in case an UNDI or EFI driver did not shut down
16856 * cleanly. The DMA self test below will enable WDMAC and we would
16857 * otherwise see (spurious) pending DMA on the PCI bus at that point.
16858 */
16859 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16860 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16861 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16862 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16863 }
16864
16865 err = tg3_test_dma(tp);
16866 if (err) {
16867 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16868 goto err_out_apeunmap;
16869 }
16870
16871 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16872 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16873 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16874 for (i = 0; i < tp->irq_max; i++) {
16875 struct tg3_napi *tnapi = &tp->napi[i];
16876
16877 tnapi->tp = tp;
16878 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16879
16880 tnapi->int_mbox = intmbx;
16881 if (i <= 4)
16882 intmbx += 0x8;
16883 else
16884 intmbx += 0x4;
16885
16886 tnapi->consmbox = rcvmbx;
16887 tnapi->prodmbox = sndmbx;
16888
16889 if (i)
16890 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16891 else
16892 tnapi->coal_now = HOSTCC_MODE_NOW;
16893
16894 if (!tg3_flag(tp, SUPPORT_MSIX))
16895 break;
16896
16897 /*
16898 * If we support MSIX, we'll be using RSS. If we're using
16899 * RSS, the first vector only handles link interrupts and the
16900 * remaining vectors handle rx and tx interrupts. Reuse the
16901 * mailbox values for the next iteration. The values we set up
16902 * above are still useful for single-vector mode.
16903 */
16904 if (!i)
16905 continue;
16906
16907 rcvmbx += 0x8;
16908
16909 if (sndmbx & 0x4)
16910 sndmbx -= 0x4;
16911 else
16912 sndmbx += 0xc;
16913 }
16914
16915 tg3_init_coal(tp);
16916
16917 pci_set_drvdata(pdev, dev);
16918
16919 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16920 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16921 tg3_asic_rev(tp) == ASIC_REV_5762)
16922 tg3_flag_set(tp, PTP_CAPABLE);
16923
16924 if (tg3_flag(tp, 5717_PLUS)) {
16925 /* Resume a low-power mode */
16926 tg3_frob_aux_power(tp, false);
16927 }
16928
16929 tg3_timer_init(tp);
16930
16931 tg3_carrier_off(tp);
16932
16933 err = register_netdev(dev);
16934 if (err) {
16935 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16936 goto err_out_apeunmap;
16937 }
16938
16939 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16940 tp->board_part_number,
16941 tg3_chip_rev_id(tp),
16942 tg3_bus_string(tp, str),
16943 dev->dev_addr);
16944
16945 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16946 struct phy_device *phydev;
16947 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16948 netdev_info(dev,
16949 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16950 phydev->drv->name, dev_name(&phydev->dev));
16951 } else {
16952 char *ethtype;
16953
16954 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16955 ethtype = "10/100Base-TX";
16956 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16957 ethtype = "1000Base-SX";
16958 else
16959 ethtype = "10/100/1000Base-T";
16960
16961 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16962 "(WireSpeed[%d], EEE[%d])\n",
16963 tg3_phy_string(tp), ethtype,
16964 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16965 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16966 }
16967
16968 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16969 (dev->features & NETIF_F_RXCSUM) != 0,
16970 tg3_flag(tp, USE_LINKCHG_REG) != 0,
16971 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16972 tg3_flag(tp, ENABLE_ASF) != 0,
16973 tg3_flag(tp, TSO_CAPABLE) != 0);
16974 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16975 tp->dma_rwctrl,
16976 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16977 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16978
16979 pci_save_state(pdev);
16980
16981 return 0;
16982
16983 err_out_apeunmap:
16984 if (tp->aperegs) {
16985 iounmap(tp->aperegs);
16986 tp->aperegs = NULL;
16987 }
16988
16989 err_out_iounmap:
16990 if (tp->regs) {
16991 iounmap(tp->regs);
16992 tp->regs = NULL;
16993 }
16994
16995 err_out_free_dev:
16996 free_netdev(dev);
16997
16998 err_out_power_down:
16999 pci_set_power_state(pdev, PCI_D3hot);
17000
17001 err_out_free_res:
17002 pci_release_regions(pdev);
17003
17004 err_out_disable_pdev:
17005 pci_disable_device(pdev);
17006 pci_set_drvdata(pdev, NULL);
17007 return err;
17008 }
17009
17010 static void tg3_remove_one(struct pci_dev *pdev)
17011 {
17012 struct net_device *dev = pci_get_drvdata(pdev);
17013
17014 if (dev) {
17015 struct tg3 *tp = netdev_priv(dev);
17016
17017 release_firmware(tp->fw);
17018
17019 tg3_reset_task_cancel(tp);
17020
17021 if (tg3_flag(tp, USE_PHYLIB)) {
17022 tg3_phy_fini(tp);
17023 tg3_mdio_fini(tp);
17024 }
17025
17026 unregister_netdev(dev);
17027 if (tp->aperegs) {
17028 iounmap(tp->aperegs);
17029 tp->aperegs = NULL;
17030 }
17031 if (tp->regs) {
17032 iounmap(tp->regs);
17033 tp->regs = NULL;
17034 }
17035 free_netdev(dev);
17036 pci_release_regions(pdev);
17037 pci_disable_device(pdev);
17038 pci_set_drvdata(pdev, NULL);
17039 }
17040 }
17041
17042 #ifdef CONFIG_PM_SLEEP
17043 static int tg3_suspend(struct device *device)
17044 {
17045 struct pci_dev *pdev = to_pci_dev(device);
17046 struct net_device *dev = pci_get_drvdata(pdev);
17047 struct tg3 *tp = netdev_priv(dev);
17048 int err;
17049
17050 if (!netif_running(dev))
17051 return 0;
17052
17053 tg3_reset_task_cancel(tp);
17054 tg3_phy_stop(tp);
17055 tg3_netif_stop(tp);
17056
17057 tg3_timer_stop(tp);
17058
17059 tg3_full_lock(tp, 1);
17060 tg3_disable_ints(tp);
17061 tg3_full_unlock(tp);
17062
17063 netif_device_detach(dev);
17064
17065 tg3_full_lock(tp, 0);
17066 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17067 tg3_flag_clear(tp, INIT_COMPLETE);
17068 tg3_full_unlock(tp);
17069
17070 err = tg3_power_down_prepare(tp);
17071 if (err) {
17072 int err2;
17073
17074 tg3_full_lock(tp, 0);
17075
17076 tg3_flag_set(tp, INIT_COMPLETE);
17077 err2 = tg3_restart_hw(tp, 1);
17078 if (err2)
17079 goto out;
17080
17081 tg3_timer_start(tp);
17082
17083 netif_device_attach(dev);
17084 tg3_netif_start(tp);
17085
17086 out:
17087 tg3_full_unlock(tp);
17088
17089 if (!err2)
17090 tg3_phy_start(tp);
17091 }
17092
17093 return err;
17094 }
17095
17096 static int tg3_resume(struct device *device)
17097 {
17098 struct pci_dev *pdev = to_pci_dev(device);
17099 struct net_device *dev = pci_get_drvdata(pdev);
17100 struct tg3 *tp = netdev_priv(dev);
17101 int err;
17102
17103 if (!netif_running(dev))
17104 return 0;
17105
17106 netif_device_attach(dev);
17107
17108 tg3_full_lock(tp, 0);
17109
17110 tg3_flag_set(tp, INIT_COMPLETE);
17111 err = tg3_restart_hw(tp, 1);
17112 if (err)
17113 goto out;
17114
17115 tg3_timer_start(tp);
17116
17117 tg3_netif_start(tp);
17118
17119 out:
17120 tg3_full_unlock(tp);
17121
17122 if (!err)
17123 tg3_phy_start(tp);
17124
17125 return err;
17126 }
17127
17128 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17129 #define TG3_PM_OPS (&tg3_pm_ops)
17130
17131 #else
17132
17133 #define TG3_PM_OPS NULL
17134
17135 #endif /* CONFIG_PM_SLEEP */
17136
17137 /**
17138 * tg3_io_error_detected - called when PCI error is detected
17139 * @pdev: Pointer to PCI device
17140 * @state: The current pci connection state
17141 *
17142 * This function is called after a PCI bus error affecting
17143 * this device has been detected.
17144 */
17145 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17146 pci_channel_state_t state)
17147 {
17148 struct net_device *netdev = pci_get_drvdata(pdev);
17149 struct tg3 *tp = netdev_priv(netdev);
17150 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17151
17152 netdev_info(netdev, "PCI I/O error detected\n");
17153
17154 rtnl_lock();
17155
17156 if (!netif_running(netdev))
17157 goto done;
17158
17159 tg3_phy_stop(tp);
17160
17161 tg3_netif_stop(tp);
17162
17163 tg3_timer_stop(tp);
17164
17165 /* Want to make sure that the reset task doesn't run */
17166 tg3_reset_task_cancel(tp);
17167
17168 netif_device_detach(netdev);
17169
17170 /* Clean up software state, even if MMIO is blocked */
17171 tg3_full_lock(tp, 0);
17172 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17173 tg3_full_unlock(tp);
17174
17175 done:
17176 if (state == pci_channel_io_perm_failure)
17177 err = PCI_ERS_RESULT_DISCONNECT;
17178 else
17179 pci_disable_device(pdev);
17180
17181 rtnl_unlock();
17182
17183 return err;
17184 }
17185
17186 /**
17187 * tg3_io_slot_reset - called after the pci bus has been reset.
17188 * @pdev: Pointer to PCI device
17189 *
17190 * Restart the card from scratch, as if from a cold-boot.
17191 * At this point, the card has experienced a hard reset,
17192 * followed by fixups by the BIOS, and has its config space
17193 * set up identically to what it was at cold boot.
17194 */
17195 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17196 {
17197 struct net_device *netdev = pci_get_drvdata(pdev);
17198 struct tg3 *tp = netdev_priv(netdev);
17199 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17200 int err;
17201
17202 rtnl_lock();
17203
17204 if (pci_enable_device(pdev)) {
17205 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17206 goto done;
17207 }
17208
17209 pci_set_master(pdev);
17210 pci_restore_state(pdev);
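/* Restoring the config space consumes the saved state; save it
 * again so any later slot reset can be recovered from as well.
 */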
17211 pci_save_state(pdev);
17212
17213 if (!netif_running(netdev)) {
17214 rc = PCI_ERS_RESULT_RECOVERED;
17215 goto done;
17216 }
17217
17218 err = tg3_power_up(tp);
17219 if (err)
17220 goto done;
17221
17222 rc = PCI_ERS_RESULT_RECOVERED;
17223
17224 done:
17225 rtnl_unlock();
17226
17227 return rc;
17228 }
17229
17230 /**
17231 * tg3_io_resume - called when traffic can start flowing again.
17232 * @pdev: Pointer to PCI device
17233 *
17234 * This callback is called when the error recovery driver tells
17235 * us that it's OK to resume normal operation.
17236 */
17237 static void tg3_io_resume(struct pci_dev *pdev)
17238 {
17239 struct net_device *netdev = pci_get_drvdata(pdev);
17240 struct tg3 *tp = netdev_priv(netdev);
17241 int err;
17242
17243 rtnl_lock();
17244
17245 if (!netif_running(netdev))
17246 goto done;
17247
17248 tg3_full_lock(tp, 0);
17249 tg3_flag_set(tp, INIT_COMPLETE);
17250 err = tg3_restart_hw(tp, 1);
17251 if (err) {
17252 tg3_full_unlock(tp);
17253 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17254 goto done;
17255 }
17256
17257 netif_device_attach(netdev);
17258
17259 tg3_timer_start(tp);
17260
17261 tg3_netif_start(tp);
17262
17263 tg3_full_unlock(tp);
17264
17265 tg3_phy_start(tp);
17266
17267 done:
17268 rtnl_unlock();
17269 }
17270
17271 static const struct pci_error_handlers tg3_err_handler = {
17272 .error_detected = tg3_io_error_detected,
17273 .slot_reset = tg3_io_slot_reset,
17274 .resume = tg3_io_resume
17275 };
17276
17277 static struct pci_driver tg3_driver = {
17278 .name = DRV_MODULE_NAME,
17279 .id_table = tg3_pci_tbl,
17280 .probe = tg3_init_one,
17281 .remove = tg3_remove_one,
17282 .err_handler = &tg3_err_handler,
17283 .driver.pm = TG3_PM_OPS,
17284 };
17285
17286 static int __init tg3_init(void)
17287 {
17288 return pci_register_driver(&tg3_driver);
17289 }
17290
17291 static void __exit tg3_cleanup(void)
17292 {
17293 pci_unregister_driver(&tg3_driver);
17294 }
17295
17296 module_init(tg3_init);
17297 module_exit(tg3_cleanup);