tg3: Enhance firmware download code to support fragmented firmware
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

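/* Usage sketch (illustrative, not part of the original file): the macros
 * above take bare flag names and paste on the TG3_FLAG_ prefix themselves,
 * e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MBOX_WRITE_REORDER);
 *	else
 *		tg3_flag_clear(tp, MBOX_WRITE_REORDER);
 *
 * The particular flag pairing here is hypothetical; only the accessor
 * pattern is the point.
 */
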
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* Length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem.
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

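/* Worked example (illustrative, not in the original source): because
 * TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps the ring index
 * with a mask rather than a modulo:
 *
 *	NEXT_TX(511) == ((511 + 1) & 511) == 0
 *
 * This is exactly the '& (foo - 1)' rewrite the comment above refers to.
 */
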
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN	2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

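/* Usage note (illustrative): the mask can be set at load time, e.g.
 * "modprobe tg3 tg3_debug=0x7" to enable only the DRV, PROBE and LINK
 * messages, or left at the default of -1 to take TG3_DEF_MSG_ENABLE.
 */
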
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

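/* Access sketch (illustrative, not from the original source): everything
 * below funnels register I/O through these hooks, so one code path serves
 * both direct MMIO and the indirect config-space method chosen at probe
 * time, e.g.
 *
 *	tw32(GRC_MODE, val);		posted write
 *	val = tr32(GRC_MODE);		read
 *	tw32_f(GRC_MODE, val);		write, then read back to flush
 */
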
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

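/* Usage sketch (illustrative): these helpers move one 32-bit word through
 * the NIC SRAM memory window under indirect_lock, exactly as the firmware
 * mailbox code later in this file uses them:
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 */
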
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

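/* Pairing sketch (illustrative): callers bracket shared APE resources
 * with lock/unlock, as tg3_ape_event_lock() below does:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */
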
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	/* A nonzero return means the event stayed pending (timeout). */
	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * Similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

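/* Usage sketch (illustrative): most PHY traffic in this file goes through
 * these wrappers with the standard MII register constants, e.g.
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &reg))
 *		link_up = reg & BMSR_LSTATUS;	(link_up is hypothetical)
 *	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
 */
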
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
			     usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

1641 /* tp->lock is held. */
1642 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1643 {
1644 u32 reg, val;
1645
1646 val = 0;
1647 if (!tg3_readphy(tp, MII_BMCR, &reg))
1648 val = reg << 16;
1649 if (!tg3_readphy(tp, MII_BMSR, &reg))
1650 val |= (reg & 0xffff);
1651 *data++ = val;
1652
1653 val = 0;
1654 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1655 val = reg << 16;
1656 if (!tg3_readphy(tp, MII_LPA, &reg))
1657 val |= (reg & 0xffff);
1658 *data++ = val;
1659
1660 val = 0;
1661 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1662 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1663 val = reg << 16;
1664 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1665 val |= (reg & 0xffff);
1666 }
1667 *data++ = val;
1668
1669 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1670 val = reg << 16;
1671 else
1672 val = 0;
1673 *data++ = val;
1674 }
1675
1676 /* tp->lock is held. */
1677 static void tg3_ump_link_report(struct tg3 *tp)
1678 {
1679 u32 data[4];
1680
1681 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1682 return;
1683
1684 tg3_phy_gather_ump_data(tp, data);
1685
1686 tg3_wait_for_event_ack(tp);
1687
1688 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1694
1695 tg3_generate_fw_event(tp);
1696 }
1697
1698 /* tp->lock is held. */
1699 static void tg3_stop_fw(struct tg3 *tp)
1700 {
1701 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1702 /* Wait for RX cpu to ACK the previous event. */
1703 tg3_wait_for_event_ack(tp);
1704
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1706
1707 tg3_generate_fw_event(tp);
1708
1709 /* Wait for RX cpu to ACK this event. */
1710 tg3_wait_for_event_ack(tp);
1711 }
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1716 {
1717 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1718 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1719
1720 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1721 switch (kind) {
1722 case RESET_KIND_INIT:
1723 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1724 DRV_STATE_START);
1725 break;
1726
1727 case RESET_KIND_SHUTDOWN:
1728 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1729 DRV_STATE_UNLOAD);
1730 break;
1731
1732 case RESET_KIND_SUSPEND:
1733 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734 DRV_STATE_SUSPEND);
1735 break;
1736
1737 default:
1738 break;
1739 }
1740 }
1741
1742 if (kind == RESET_KIND_INIT ||
1743 kind == RESET_KIND_SUSPEND)
1744 tg3_ape_driver_state_change(tp, kind);
1745 }
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751 switch (kind) {
1752 case RESET_KIND_INIT:
1753 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754 DRV_STATE_START_DONE);
1755 break;
1756
1757 case RESET_KIND_SHUTDOWN:
1758 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 DRV_STATE_UNLOAD_DONE);
1760 break;
1761
1762 default:
1763 break;
1764 }
1765 }
1766
1767 if (kind == RESET_KIND_SHUTDOWN)
1768 tg3_ape_driver_state_change(tp, kind);
1769 }
1770
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1773 {
1774 if (tg3_flag(tp, ENABLE_ASF)) {
1775 switch (kind) {
1776 case RESET_KIND_INIT:
1777 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1778 DRV_STATE_START);
1779 break;
1780
1781 case RESET_KIND_SHUTDOWN:
1782 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783 DRV_STATE_UNLOAD);
1784 break;
1785
1786 case RESET_KIND_SUSPEND:
1787 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 DRV_STATE_SUSPEND);
1789 break;
1790
1791 default:
1792 break;
1793 }
1794 }
1795 }
1796
1797 static int tg3_poll_fw(struct tg3 *tp)
1798 {
1799 int i;
1800 u32 val;
1801
1802 if (tg3_flag(tp, IS_SSB_CORE)) {
1803 /* We don't use firmware. */
1804 return 0;
1805 }
1806
1807 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1808 /* Wait up to 20ms for init done. */
1809 for (i = 0; i < 200; i++) {
1810 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811 return 0;
1812 udelay(100);
1813 }
1814 return -ENODEV;
1815 }
1816
1817 /* Wait for firmware initialization to complete. */
1818 for (i = 0; i < 100000; i++) {
1819 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1820 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1821 break;
1822 udelay(10);
1823 }
1824
1825 /* Chip might not be fitted with firmware. Some Sun onboard
1826 * parts are configured like that. So don't signal the timeout
1827 * of the above loop as an error, but do report the lack of
1828 * running firmware once.
1829 */
1830 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1831 tg3_flag_set(tp, NO_FWARE_REPORTED);
1832
1833 netdev_info(tp->dev, "No firmware running\n");
1834 }
1835
1836 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1837 /* The 57765 A0 needs a little more
1838 * time to do some important work.
1839 */
1840 mdelay(10);
1841 }
1842
1843 return 0;
1844 }
1845
1846 static void tg3_link_report(struct tg3 *tp)
1847 {
1848 if (!netif_carrier_ok(tp->dev)) {
1849 netif_info(tp, link, tp->dev, "Link is down\n");
1850 tg3_ump_link_report(tp);
1851 } else if (netif_msg_link(tp)) {
1852 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1853 (tp->link_config.active_speed == SPEED_1000 ?
1854 1000 :
1855 (tp->link_config.active_speed == SPEED_100 ?
1856 100 : 10)),
1857 (tp->link_config.active_duplex == DUPLEX_FULL ?
1858 "full" : "half"));
1859
1860 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1861 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1862 "on" : "off",
1863 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1864 "on" : "off");
1865
1866 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1867 netdev_info(tp->dev, "EEE is %s\n",
1868 tp->setlpicnt ? "enabled" : "disabled");
1869
1870 tg3_ump_link_report(tp);
1871 }
1872 }
1873
1874 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1875 {
1876 u16 miireg;
1877
1878 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1879 miireg = ADVERTISE_1000XPAUSE;
1880 else if (flow_ctrl & FLOW_CTRL_TX)
1881 miireg = ADVERTISE_1000XPSE_ASYM;
1882 else if (flow_ctrl & FLOW_CTRL_RX)
1883 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1884 else
1885 miireg = 0;
1886
1887 return miireg;
1888 }
1889
1890 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1891 {
1892 u8 cap = 0;
1893
1894 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1895 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1896 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1897 if (lcladv & ADVERTISE_1000XPAUSE)
1898 cap = FLOW_CTRL_RX;
1899 if (rmtadv & ADVERTISE_1000XPAUSE)
1900 cap = FLOW_CTRL_TX;
1901 }
1902
1903 return cap;
1904 }
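
/* For reference, the resolution above implements the IEEE 802.3
 * symmetric/asymmetric pause rules. A truth-table view of the code
 * (illustration only, not used by the driver):
 *
 *	lcl PAUSE  lcl ASYM  rmt PAUSE  rmt ASYM   resolved cap
 *	---------  --------  ---------  --------   ------------------
 *	    1          x         1          x      FLOW_CTRL_TX | _RX
 *	    1          1         0          1      FLOW_CTRL_RX
 *	    0          1         1          1      FLOW_CTRL_TX
 *	         anything else                     0 (no pause)
 */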
1905
1906 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1907 {
1908 u8 autoneg;
1909 u8 flowctrl = 0;
1910 u32 old_rx_mode = tp->rx_mode;
1911 u32 old_tx_mode = tp->tx_mode;
1912
1913 if (tg3_flag(tp, USE_PHYLIB))
1914 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1915 else
1916 autoneg = tp->link_config.autoneg;
1917
1918 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1919 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1920 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1921 else
1922 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1923 } else
1924 flowctrl = tp->link_config.flowctrl;
1925
1926 tp->link_config.active_flowctrl = flowctrl;
1927
1928 if (flowctrl & FLOW_CTRL_RX)
1929 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1930 else
1931 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1932
1933 if (old_rx_mode != tp->rx_mode)
1934 tw32_f(MAC_RX_MODE, tp->rx_mode);
1935
1936 if (flowctrl & FLOW_CTRL_TX)
1937 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1938 else
1939 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1940
1941 if (old_tx_mode != tp->tx_mode)
1942 tw32_f(MAC_TX_MODE, tp->tx_mode);
1943 }
1944
1945 static void tg3_adjust_link(struct net_device *dev)
1946 {
1947 u8 oldflowctrl, linkmesg = 0;
1948 u32 mac_mode, lcl_adv, rmt_adv;
1949 struct tg3 *tp = netdev_priv(dev);
1950 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1951
1952 spin_lock_bh(&tp->lock);
1953
1954 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1955 MAC_MODE_HALF_DUPLEX);
1956
1957 oldflowctrl = tp->link_config.active_flowctrl;
1958
1959 if (phydev->link) {
1960 lcl_adv = 0;
1961 rmt_adv = 0;
1962
1963 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1964 mac_mode |= MAC_MODE_PORT_MODE_MII;
1965 else if (phydev->speed == SPEED_1000 ||
1966 tg3_asic_rev(tp) != ASIC_REV_5785)
1967 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1968 else
1969 mac_mode |= MAC_MODE_PORT_MODE_MII;
1970
1971 if (phydev->duplex == DUPLEX_HALF)
1972 mac_mode |= MAC_MODE_HALF_DUPLEX;
1973 else {
1974 lcl_adv = mii_advertise_flowctrl(
1975 tp->link_config.flowctrl);
1976
1977 if (phydev->pause)
1978 rmt_adv = LPA_PAUSE_CAP;
1979 if (phydev->asym_pause)
1980 rmt_adv |= LPA_PAUSE_ASYM;
1981 }
1982
1983 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1984 } else
1985 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1986
1987 if (mac_mode != tp->mac_mode) {
1988 tp->mac_mode = mac_mode;
1989 tw32_f(MAC_MODE, tp->mac_mode);
1990 udelay(40);
1991 }
1992
1993 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1994 if (phydev->speed == SPEED_10)
1995 tw32(MAC_MI_STAT,
1996 MAC_MI_STAT_10MBPS_MODE |
1997 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1998 else
1999 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2000 }
2001
2002 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2003 tw32(MAC_TX_LENGTHS,
2004 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2005 (6 << TX_LENGTHS_IPG_SHIFT) |
2006 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2007 else
2008 tw32(MAC_TX_LENGTHS,
2009 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2010 (6 << TX_LENGTHS_IPG_SHIFT) |
2011 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2012
2013 if (phydev->link != tp->old_link ||
2014 phydev->speed != tp->link_config.active_speed ||
2015 phydev->duplex != tp->link_config.active_duplex ||
2016 oldflowctrl != tp->link_config.active_flowctrl)
2017 linkmesg = 1;
2018
2019 tp->old_link = phydev->link;
2020 tp->link_config.active_speed = phydev->speed;
2021 tp->link_config.active_duplex = phydev->duplex;
2022
2023 spin_unlock_bh(&tp->lock);
2024
2025 if (linkmesg)
2026 tg3_link_report(tp);
2027 }
2028
2029 static int tg3_phy_init(struct tg3 *tp)
2030 {
2031 struct phy_device *phydev;
2032
2033 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2034 return 0;
2035
2036 /* Bring the PHY back to a known state. */
2037 tg3_bmcr_reset(tp);
2038
2039 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2040
2041 /* Attach the MAC to the PHY. */
2042 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2043 tg3_adjust_link, phydev->interface);
2044 if (IS_ERR(phydev)) {
2045 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2046 return PTR_ERR(phydev);
2047 }
2048
2049 /* Mask with MAC supported features. */
2050 switch (phydev->interface) {
2051 case PHY_INTERFACE_MODE_GMII:
2052 case PHY_INTERFACE_MODE_RGMII:
2053 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2054 phydev->supported &= (PHY_GBIT_FEATURES |
2055 SUPPORTED_Pause |
2056 SUPPORTED_Asym_Pause);
2057 break;
2058 }
2059 /* fallthru */
2060 case PHY_INTERFACE_MODE_MII:
2061 phydev->supported &= (PHY_BASIC_FEATURES |
2062 SUPPORTED_Pause |
2063 SUPPORTED_Asym_Pause);
2064 break;
2065 default:
2066 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2067 return -EINVAL;
2068 }
2069
2070 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2071
2072 phydev->advertising = phydev->supported;
2073
2074 return 0;
2075 }
2076
2077 static void tg3_phy_start(struct tg3 *tp)
2078 {
2079 struct phy_device *phydev;
2080
2081 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2082 return;
2083
2084 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2085
2086 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2087 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2088 phydev->speed = tp->link_config.speed;
2089 phydev->duplex = tp->link_config.duplex;
2090 phydev->autoneg = tp->link_config.autoneg;
2091 phydev->advertising = tp->link_config.advertising;
2092 }
2093
2094 phy_start(phydev);
2095
2096 phy_start_aneg(phydev);
2097 }
2098
2099 static void tg3_phy_stop(struct tg3 *tp)
2100 {
2101 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2102 return;
2103
2104 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2105 }
2106
2107 static void tg3_phy_fini(struct tg3 *tp)
2108 {
2109 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2110 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2111 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2112 }
2113 }
2114
2115 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2116 {
2117 int err;
2118 u32 val;
2119
2120 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2121 return 0;
2122
2123 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2124 /* Cannot do read-modify-write on 5401 */
2125 err = tg3_phy_auxctl_write(tp,
2126 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2127 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2128 0x4c20);
2129 goto done;
2130 }
2131
2132 err = tg3_phy_auxctl_read(tp,
2133 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2134 if (err)
2135 return err;
2136
2137 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2138 err = tg3_phy_auxctl_write(tp,
2139 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2140
2141 done:
2142 return err;
2143 }
2144
2145 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2146 {
2147 u32 phytest;
2148
2149 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2150 u32 phy;
2151
2152 tg3_writephy(tp, MII_TG3_FET_TEST,
2153 phytest | MII_TG3_FET_SHADOW_EN);
2154 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2155 if (enable)
2156 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2157 else
2158 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2159 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2160 }
2161 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2162 }
2163 }
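
/* The sequence above is the generic FET shadow-register access
 * pattern used throughout this file: enable shadow mode via
 * MII_TG3_FET_TEST, read-modify-write the shadow register, then
 * restore MII_TG3_FET_TEST to leave shadow mode. A minimal sketch
 * of the same idiom (illustration only; error checks elided):
 *
 *	u32 test, shdw;
 *
 *	tg3_readphy(tp, MII_TG3_FET_TEST, &test);
 *	tg3_writephy(tp, MII_TG3_FET_TEST, test | MII_TG3_FET_SHADOW_EN);
 *	tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &shdw);
 *	tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2,
 *		     shdw | MII_TG3_FET_SHDW_AUXSTAT2_APD);
 *	tg3_writephy(tp, MII_TG3_FET_TEST, test);
 */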
2164
2165 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2166 {
2167 u32 reg;
2168
2169 if (!tg3_flag(tp, 5705_PLUS) ||
2170 (tg3_flag(tp, 5717_PLUS) &&
2171 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2172 return;
2173
2174 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2175 tg3_phy_fet_toggle_apd(tp, enable);
2176 return;
2177 }
2178
2179 reg = MII_TG3_MISC_SHDW_WREN |
2180 MII_TG3_MISC_SHDW_SCR5_SEL |
2181 MII_TG3_MISC_SHDW_SCR5_LPED |
2182 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2183 MII_TG3_MISC_SHDW_SCR5_SDTL |
2184 MII_TG3_MISC_SHDW_SCR5_C125OE;
2185 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2186 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2187
2188 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2189
2190
2191 reg = MII_TG3_MISC_SHDW_WREN |
2192 MII_TG3_MISC_SHDW_APD_SEL |
2193 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2194 if (enable)
2195 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2196
2197 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2198 }
2199
2200 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2201 {
2202 u32 phy;
2203
2204 if (!tg3_flag(tp, 5705_PLUS) ||
2205 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2206 return;
2207
2208 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2209 u32 ephy;
2210
2211 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2212 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2213
2214 tg3_writephy(tp, MII_TG3_FET_TEST,
2215 ephy | MII_TG3_FET_SHADOW_EN);
2216 if (!tg3_readphy(tp, reg, &phy)) {
2217 if (enable)
2218 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2219 else
2220 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2221 tg3_writephy(tp, reg, phy);
2222 }
2223 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2224 }
2225 } else {
2226 int ret;
2227
2228 ret = tg3_phy_auxctl_read(tp,
2229 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2230 if (!ret) {
2231 if (enable)
2232 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2233 else
2234 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2235 tg3_phy_auxctl_write(tp,
2236 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2237 }
2238 }
2239 }
2240
2241 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2242 {
2243 int ret;
2244 u32 val;
2245
2246 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2247 return;
2248
2249 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2250 if (!ret)
2251 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2252 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2253 }
2254
2255 static void tg3_phy_apply_otp(struct tg3 *tp)
2256 {
2257 u32 otp, phy;
2258
2259 if (!tp->phy_otp)
2260 return;
2261
2262 otp = tp->phy_otp;
2263
2264 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2265 return;
2266
2267 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2268 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2269 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2270
2271 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2272 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2273 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2274
2275 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2276 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2277 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2278
2279 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2280 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2281
2282 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2283 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2284
2285 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2286 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2287 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2288
2289 tg3_phy_toggle_auxctl_smdsp(tp, false);
2290 }
2291
2292 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2293 {
2294 u32 val;
2295
2296 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2297 return;
2298
2299 tp->setlpicnt = 0;
2300
2301 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2302 current_link_up == 1 &&
2303 tp->link_config.active_duplex == DUPLEX_FULL &&
2304 (tp->link_config.active_speed == SPEED_100 ||
2305 tp->link_config.active_speed == SPEED_1000)) {
2306 u32 eeectl;
2307
2308 if (tp->link_config.active_speed == SPEED_1000)
2309 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2310 else
2311 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2312
2313 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2314
2315 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2316 TG3_CL45_D7_EEERES_STAT, &val);
2317
2318 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2319 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2320 tp->setlpicnt = 2;
2321 }
2322
2323 if (!tp->setlpicnt) {
2324 if (current_link_up == 1 &&
2325 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2326 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2327 tg3_phy_toggle_auxctl_smdsp(tp, false);
2328 }
2329
2330 val = tr32(TG3_CPMU_EEE_MODE);
2331 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2332 }
2333 }
2334
2335 static void tg3_phy_eee_enable(struct tg3 *tp)
2336 {
2337 u32 val;
2338
2339 if (tp->link_config.active_speed == SPEED_1000 &&
2340 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2341 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2342 tg3_flag(tp, 57765_CLASS)) &&
2343 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2344 val = MII_TG3_DSP_TAP26_ALNOKO |
2345 MII_TG3_DSP_TAP26_RMRXSTO;
2346 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2347 tg3_phy_toggle_auxctl_smdsp(tp, false);
2348 }
2349
2350 val = tr32(TG3_CPMU_EEE_MODE);
2351 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2352 }
2353
2354 static int tg3_wait_macro_done(struct tg3 *tp)
2355 {
2356 int limit = 100;
2357
2358 while (limit--) {
2359 u32 tmp32;
2360
2361 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2362 if ((tmp32 & 0x1000) == 0)
2363 break;
2364 }
2365 }
2366 if (limit < 0)
2367 return -EBUSY;
2368
2369 return 0;
2370 }
2371
2372 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2373 {
2374 static const u32 test_pat[4][6] = {
2375 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2376 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2377 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2378 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2379 };
2380 int chan;
2381
2382 for (chan = 0; chan < 4; chan++) {
2383 int i;
2384
2385 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2386 (chan * 0x2000) | 0x0200);
2387 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2388
2389 for (i = 0; i < 6; i++)
2390 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2391 test_pat[chan][i]);
2392
2393 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2394 if (tg3_wait_macro_done(tp)) {
2395 *resetp = 1;
2396 return -EBUSY;
2397 }
2398
2399 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2400 (chan * 0x2000) | 0x0200);
2401 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2402 if (tg3_wait_macro_done(tp)) {
2403 *resetp = 1;
2404 return -EBUSY;
2405 }
2406
2407 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2408 if (tg3_wait_macro_done(tp)) {
2409 *resetp = 1;
2410 return -EBUSY;
2411 }
2412
2413 for (i = 0; i < 6; i += 2) {
2414 u32 low, high;
2415
2416 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2417 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2418 tg3_wait_macro_done(tp)) {
2419 *resetp = 1;
2420 return -EBUSY;
2421 }
2422 low &= 0x7fff;
2423 high &= 0x000f;
2424 if (low != test_pat[chan][i] ||
2425 high != test_pat[chan][i+1]) {
2426 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2427 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2428 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2429
2430 return -EBUSY;
2431 }
2432 }
2433 }
2434
2435 return 0;
2436 }
2437
2438 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2439 {
2440 int chan;
2441
2442 for (chan = 0; chan < 4; chan++) {
2443 int i;
2444
2445 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2446 (chan * 0x2000) | 0x0200);
2447 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2448 for (i = 0; i < 6; i++)
2449 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2450 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2451 if (tg3_wait_macro_done(tp))
2452 return -EBUSY;
2453 }
2454
2455 return 0;
2456 }
2457
2458 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2459 {
2460 u32 reg32, phy9_orig;
2461 int retries, do_phy_reset, err;
2462
2463 retries = 10;
2464 do_phy_reset = 1;
2465 do {
2466 if (do_phy_reset) {
2467 err = tg3_bmcr_reset(tp);
2468 if (err)
2469 return err;
2470 do_phy_reset = 0;
2471 }
2472
2473 /* Disable transmitter and interrupt. */
2474 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2475 continue;
2476
2477 reg32 |= 0x3000;
2478 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2479
2480 /* Set full-duplex, 1000 mbps. */
2481 tg3_writephy(tp, MII_BMCR,
2482 BMCR_FULLDPLX | BMCR_SPEED1000);
2483
2484 /* Set to master mode. */
2485 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2486 continue;
2487
2488 tg3_writephy(tp, MII_CTRL1000,
2489 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2490
2491 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2492 if (err)
2493 return err;
2494
2495 /* Block the PHY control access. */
2496 tg3_phydsp_write(tp, 0x8005, 0x0800);
2497
2498 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2499 if (!err)
2500 break;
2501 } while (--retries);
2502
2503 err = tg3_phy_reset_chanpat(tp);
2504 if (err)
2505 return err;
2506
2507 tg3_phydsp_write(tp, 0x8005, 0x0000);
2508
2509 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2510 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2511
2512 tg3_phy_toggle_auxctl_smdsp(tp, false);
2513
2514 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2515
2516 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2517 reg32 &= ~0x3000;
2518 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2519 } else if (!err)
2520 err = -EBUSY;
2521
2522 return err;
2523 }
2524
2525 static void tg3_carrier_on(struct tg3 *tp)
2526 {
2527 netif_carrier_on(tp->dev);
2528 tp->link_up = true;
2529 }
2530
2531 static void tg3_carrier_off(struct tg3 *tp)
2532 {
2533 netif_carrier_off(tp->dev);
2534 tp->link_up = false;
2535 }
2536
2537 /* This will always reset the tigon3 PHY and reapply the
2538 * chip- and PHY-specific workarounds that a reset clears.
2539 */
2540 static int tg3_phy_reset(struct tg3 *tp)
2541 {
2542 u32 val, cpmuctrl;
2543 int err;
2544
2545 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2546 val = tr32(GRC_MISC_CFG);
2547 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2548 udelay(40);
2549 }
2550 err = tg3_readphy(tp, MII_BMSR, &val);
2551 err |= tg3_readphy(tp, MII_BMSR, &val);
2552 if (err != 0)
2553 return -EBUSY;
2554
2555 if (netif_running(tp->dev) && tp->link_up) {
2556 tg3_carrier_off(tp);
2557 tg3_link_report(tp);
2558 }
2559
2560 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2561 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2562 tg3_asic_rev(tp) == ASIC_REV_5705) {
2563 err = tg3_phy_reset_5703_4_5(tp);
2564 if (err)
2565 return err;
2566 goto out;
2567 }
2568
2569 cpmuctrl = 0;
2570 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2571 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2572 cpmuctrl = tr32(TG3_CPMU_CTRL);
2573 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2574 tw32(TG3_CPMU_CTRL,
2575 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2576 }
2577
2578 err = tg3_bmcr_reset(tp);
2579 if (err)
2580 return err;
2581
2582 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2583 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2584 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2585
2586 tw32(TG3_CPMU_CTRL, cpmuctrl);
2587 }
2588
2589 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2590 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2591 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2592 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2593 CPMU_LSPD_1000MB_MACCLK_12_5) {
2594 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2595 udelay(40);
2596 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2597 }
2598 }
2599
2600 if (tg3_flag(tp, 5717_PLUS) &&
2601 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2602 return 0;
2603
2604 tg3_phy_apply_otp(tp);
2605
2606 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2607 tg3_phy_toggle_apd(tp, true);
2608 else
2609 tg3_phy_toggle_apd(tp, false);
2610
2611 out:
2612 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2613 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2614 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2615 tg3_phydsp_write(tp, 0x000a, 0x0323);
2616 tg3_phy_toggle_auxctl_smdsp(tp, false);
2617 }
2618
2619 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2620 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2621 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2622 }
2623
2624 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2625 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2626 tg3_phydsp_write(tp, 0x000a, 0x310b);
2627 tg3_phydsp_write(tp, 0x201f, 0x9506);
2628 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2629 tg3_phy_toggle_auxctl_smdsp(tp, false);
2630 }
2631 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2632 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2633 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2634 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2635 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2636 tg3_writephy(tp, MII_TG3_TEST1,
2637 MII_TG3_TEST1_TRIM_EN | 0x4);
2638 } else
2639 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2640
2641 tg3_phy_toggle_auxctl_smdsp(tp, false);
2642 }
2643 }
2644
2645 /* Set the extended packet length bit (bit 14) on all
2646 * chips that support jumbo frames. */
2647 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2648 /* Cannot do read-modify-write on 5401 */
2649 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2650 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2651 /* Set bit 14 with read-modify-write to preserve other bits */
2652 err = tg3_phy_auxctl_read(tp,
2653 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2654 if (!err)
2655 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2656 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2657 }
2658
2659 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2660 * jumbo frames transmission.
2661 */
2662 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2663 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2664 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2665 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2666 }
2667
2668 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2669 /* adjust output voltage */
2670 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2671 }
2672
2673 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2674 tg3_phydsp_write(tp, 0xffb, 0x4000);
2675
2676 tg3_phy_toggle_automdix(tp, 1);
2677 tg3_phy_set_wirespeed(tp);
2678 return 0;
2679 }
2680
2681 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2682 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2683 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2684 TG3_GPIO_MSG_NEED_VAUX)
2685 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2686 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2687 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2688 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2689 (TG3_GPIO_MSG_DRVR_PRES << 12))
2690
2691 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2692 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2693 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2694 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2695 (TG3_GPIO_MSG_NEED_VAUX << 12))
2696
2697 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2698 {
2699 u32 status, shift;
2700
2701 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2702 tg3_asic_rev(tp) == ASIC_REV_5719)
2703 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2704 else
2705 status = tr32(TG3_CPMU_DRV_STATUS);
2706
2707 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2708 status &= ~(TG3_GPIO_MSG_MASK << shift);
2709 status |= (newstat << shift);
2710
2711 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2712 tg3_asic_rev(tp) == ASIC_REV_5719)
2713 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2714 else
2715 tw32(TG3_CPMU_DRV_STATUS, status);
2716
2717 return status >> TG3_APE_GPIO_MSG_SHIFT;
2718 }
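
/* Each PCI function owns a 4-bit field in the shared status word at
 * bit offset TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn, holding the
 * TG3_GPIO_MSG_* bits for that port. A worked example (illustration
 * only): for pci_fn == 2 and newstat == TG3_GPIO_MSG_NEED_VAUX (0x2),
 * the code clears the four bits at shift = TG3_APE_GPIO_MSG_SHIFT + 8
 * and ORs in 0x2 << shift. After the final right shift, function 2's
 * nibble lands in bits 8-11, which is where the 0x2222
 * TG3_GPIO_MSG_ALL_NEED_VAUX_MASK test in tg3_frob_aux_power_5717()
 * expects to find it.
 */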
2719
2720 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2721 {
2722 if (!tg3_flag(tp, IS_NIC))
2723 return 0;
2724
2725 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2726 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2727 tg3_asic_rev(tp) == ASIC_REV_5720) {
2728 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2729 return -EIO;
2730
2731 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2732
2733 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2734 TG3_GRC_LCLCTL_PWRSW_DELAY);
2735
2736 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2737 } else {
2738 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2739 TG3_GRC_LCLCTL_PWRSW_DELAY);
2740 }
2741
2742 return 0;
2743 }
2744
2745 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2746 {
2747 u32 grc_local_ctrl;
2748
2749 if (!tg3_flag(tp, IS_NIC) ||
2750 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2751 tg3_asic_rev(tp) == ASIC_REV_5701)
2752 return;
2753
2754 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2755
2756 tw32_wait_f(GRC_LOCAL_CTRL,
2757 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2758 TG3_GRC_LCLCTL_PWRSW_DELAY);
2759
2760 tw32_wait_f(GRC_LOCAL_CTRL,
2761 grc_local_ctrl,
2762 TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764 tw32_wait_f(GRC_LOCAL_CTRL,
2765 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2767 }
2768
2769 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2770 {
2771 if (!tg3_flag(tp, IS_NIC))
2772 return;
2773
2774 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2775 tg3_asic_rev(tp) == ASIC_REV_5701) {
2776 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2777 (GRC_LCLCTRL_GPIO_OE0 |
2778 GRC_LCLCTRL_GPIO_OE1 |
2779 GRC_LCLCTRL_GPIO_OE2 |
2780 GRC_LCLCTRL_GPIO_OUTPUT0 |
2781 GRC_LCLCTRL_GPIO_OUTPUT1),
2782 TG3_GRC_LCLCTL_PWRSW_DELAY);
2783 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2784 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2785 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2786 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2787 GRC_LCLCTRL_GPIO_OE1 |
2788 GRC_LCLCTRL_GPIO_OE2 |
2789 GRC_LCLCTRL_GPIO_OUTPUT0 |
2790 GRC_LCLCTRL_GPIO_OUTPUT1 |
2791 tp->grc_local_ctrl;
2792 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2793 TG3_GRC_LCLCTL_PWRSW_DELAY);
2794
2795 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2796 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY);
2798
2799 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2800 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801 TG3_GRC_LCLCTL_PWRSW_DELAY);
2802 } else {
2803 u32 no_gpio2;
2804 u32 grc_local_ctrl = 0;
2805
2806 /* Workaround to prevent excessive current draw. */
2807 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2808 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2809 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2810 grc_local_ctrl,
2811 TG3_GRC_LCLCTL_PWRSW_DELAY);
2812 }
2813
2814 /* On 5753 and variants, GPIO2 cannot be used. */
2815 no_gpio2 = tp->nic_sram_data_cfg &
2816 NIC_SRAM_DATA_CFG_NO_GPIO2;
2817
2818 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT1 |
2822 GRC_LCLCTRL_GPIO_OUTPUT2;
2823 if (no_gpio2) {
2824 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2825 GRC_LCLCTRL_GPIO_OUTPUT2);
2826 }
2827 tw32_wait_f(GRC_LOCAL_CTRL,
2828 tp->grc_local_ctrl | grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830
2831 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2832
2833 tw32_wait_f(GRC_LOCAL_CTRL,
2834 tp->grc_local_ctrl | grc_local_ctrl,
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836
2837 if (!no_gpio2) {
2838 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2839 tw32_wait_f(GRC_LOCAL_CTRL,
2840 tp->grc_local_ctrl | grc_local_ctrl,
2841 TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 }
2843 }
2844 }
2845
2846 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2847 {
2848 u32 msg = 0;
2849
2850 /* Serialize power state transitions */
2851 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2852 return;
2853
2854 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2855 msg = TG3_GPIO_MSG_NEED_VAUX;
2856
2857 msg = tg3_set_function_status(tp, msg);
2858
2859 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2860 goto done;
2861
2862 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2863 tg3_pwrsrc_switch_to_vaux(tp);
2864 else
2865 tg3_pwrsrc_die_with_vmain(tp);
2866
2867 done:
2868 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2869 }
2870
2871 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2872 {
2873 bool need_vaux = false;
2874
2875 /* The GPIOs do something completely different on 57765. */
2876 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2877 return;
2878
2879 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2880 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2881 tg3_asic_rev(tp) == ASIC_REV_5720) {
2882 tg3_frob_aux_power_5717(tp, include_wol ?
2883 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2884 return;
2885 }
2886
2887 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2888 struct net_device *dev_peer;
2889
2890 dev_peer = pci_get_drvdata(tp->pdev_peer);
2891
2892 /* remove_one() may have been run on the peer. */
2893 if (dev_peer) {
2894 struct tg3 *tp_peer = netdev_priv(dev_peer);
2895
2896 if (tg3_flag(tp_peer, INIT_COMPLETE))
2897 return;
2898
2899 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2900 tg3_flag(tp_peer, ENABLE_ASF))
2901 need_vaux = true;
2902 }
2903 }
2904
2905 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2906 tg3_flag(tp, ENABLE_ASF))
2907 need_vaux = true;
2908
2909 if (need_vaux)
2910 tg3_pwrsrc_switch_to_vaux(tp);
2911 else
2912 tg3_pwrsrc_die_with_vmain(tp);
2913 }
2914
2915 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2916 {
2917 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2918 return 1;
2919 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2920 if (speed != SPEED_10)
2921 return 1;
2922 } else if (speed == SPEED_10)
2923 return 1;
2924
2925 return 0;
2926 }
2927
2928 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2929 {
2930 u32 val;
2931
2932 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2933 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2934 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2935 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2936
2937 sg_dig_ctrl |=
2938 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2939 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2940 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2941 }
2942 return;
2943 }
2944
2945 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2946 tg3_bmcr_reset(tp);
2947 val = tr32(GRC_MISC_CFG);
2948 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2949 udelay(40);
2950 return;
2951 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2952 u32 phytest;
2953 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2954 u32 phy;
2955
2956 tg3_writephy(tp, MII_ADVERTISE, 0);
2957 tg3_writephy(tp, MII_BMCR,
2958 BMCR_ANENABLE | BMCR_ANRESTART);
2959
2960 tg3_writephy(tp, MII_TG3_FET_TEST,
2961 phytest | MII_TG3_FET_SHADOW_EN);
2962 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2963 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2964 tg3_writephy(tp,
2965 MII_TG3_FET_SHDW_AUXMODE4,
2966 phy);
2967 }
2968 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2969 }
2970 return;
2971 } else if (do_low_power) {
2972 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2973 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2974
2975 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2976 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2977 MII_TG3_AUXCTL_PCTL_VREG_11V;
2978 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2979 }
2980
2981 /* The PHY should not be powered down on some chips because
2982 * of bugs.
2983 */
2984 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2985 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2986 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2987 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2988 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2989 !tp->pci_fn))
2990 return;
2991
2992 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2993 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2994 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2995 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2996 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2997 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2998 }
2999
3000 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3001 }
3002
3003 /* tp->lock is held. */
3004 static int tg3_nvram_lock(struct tg3 *tp)
3005 {
3006 if (tg3_flag(tp, NVRAM)) {
3007 int i;
3008
3009 if (tp->nvram_lock_cnt == 0) {
3010 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3011 for (i = 0; i < 8000; i++) {
3012 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3013 break;
3014 udelay(20);
3015 }
3016 if (i == 8000) {
3017 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3018 return -ENODEV;
3019 }
3020 }
3021 tp->nvram_lock_cnt++;
3022 }
3023 return 0;
3024 }
3025
3026 /* tp->lock is held. */
3027 static void tg3_nvram_unlock(struct tg3 *tp)
3028 {
3029 if (tg3_flag(tp, NVRAM)) {
3030 if (tp->nvram_lock_cnt > 0)
3031 tp->nvram_lock_cnt--;
3032 if (tp->nvram_lock_cnt == 0)
3033 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3034 }
3035 }
3036
3037 /* tp->lock is held. */
3038 static void tg3_enable_nvram_access(struct tg3 *tp)
3039 {
3040 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3041 u32 nvaccess = tr32(NVRAM_ACCESS);
3042
3043 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3044 }
3045 }
3046
3047 /* tp->lock is held. */
3048 static void tg3_disable_nvram_access(struct tg3 *tp)
3049 {
3050 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3051 u32 nvaccess = tr32(NVRAM_ACCESS);
3052
3053 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3054 }
3055 }
3056
3057 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3058 u32 offset, u32 *val)
3059 {
3060 u32 tmp;
3061 int i;
3062
3063 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3064 return -EINVAL;
3065
3066 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3067 EEPROM_ADDR_DEVID_MASK |
3068 EEPROM_ADDR_READ);
3069 tw32(GRC_EEPROM_ADDR,
3070 tmp |
3071 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3072 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3073 EEPROM_ADDR_ADDR_MASK) |
3074 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3075
3076 for (i = 0; i < 1000; i++) {
3077 tmp = tr32(GRC_EEPROM_ADDR);
3078
3079 if (tmp & EEPROM_ADDR_COMPLETE)
3080 break;
3081 msleep(1);
3082 }
3083 if (!(tmp & EEPROM_ADDR_COMPLETE))
3084 return -EBUSY;
3085
3086 tmp = tr32(GRC_EEPROM_DATA);
3087
3088 /*
3089 * The data will always be opposite the native endian
3090 * format. Perform a blind byteswap to compensate.
3091 */
3092 *val = swab32(tmp);
3093
3094 return 0;
3095 }
3096
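/* Each poll in tg3_nvram_exec_cmd() below waits 10 us, so this
 * bounds a single NVRAM command at roughly 10000 * 10 us = 100 ms
 * before giving up with -EBUSY.
 */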
3097 #define NVRAM_CMD_TIMEOUT 10000
3098
3099 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3100 {
3101 int i;
3102
3103 tw32(NVRAM_CMD, nvram_cmd);
3104 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3105 udelay(10);
3106 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3107 udelay(10);
3108 break;
3109 }
3110 }
3111
3112 if (i == NVRAM_CMD_TIMEOUT)
3113 return -EBUSY;
3114
3115 return 0;
3116 }
3117
3118 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3119 {
3120 if (tg3_flag(tp, NVRAM) &&
3121 tg3_flag(tp, NVRAM_BUFFERED) &&
3122 tg3_flag(tp, FLASH) &&
3123 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3124 (tp->nvram_jedecnum == JEDEC_ATMEL))
3125
3126 addr = ((addr / tp->nvram_pagesize) <<
3127 ATMEL_AT45DB0X1B_PAGE_POS) +
3128 (addr % tp->nvram_pagesize);
3129
3130 return addr;
3131 }
3132
3133 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3134 {
3135 if (tg3_flag(tp, NVRAM) &&
3136 tg3_flag(tp, NVRAM_BUFFERED) &&
3137 tg3_flag(tp, FLASH) &&
3138 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3139 (tp->nvram_jedecnum == JEDEC_ATMEL))
3140
3141 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3142 tp->nvram_pagesize) +
3143 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3144
3145 return addr;
3146 }
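
/* Worked example of the two translations above (illustration only,
 * assuming the 264-byte page size this driver uses for the Atmel
 * AT45DB0X1B parts, with ATMEL_AT45DB0X1B_PAGE_POS == 9 so pages
 * sit on 512-byte physical boundaries):
 *
 *	logical addr 1000  ->  page 1000 / 264 = 3, offset 208
 *	physical addr      =   (3 << 9) + 208 = 1744
 *	and back:              (1744 >> 9) * 264 + (1744 & 511) = 1000
 */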
3147
3148 /* NOTE: Data read in from NVRAM is byteswapped according to
3149 * the byteswapping settings for all other register accesses.
3150 * tg3 devices are BE devices, so on a BE machine, the data
3151 * returned will be exactly as it is seen in NVRAM. On a LE
3152 * machine, the 32-bit value will be byteswapped.
3153 */
3154 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3155 {
3156 int ret;
3157
3158 if (!tg3_flag(tp, NVRAM))
3159 return tg3_nvram_read_using_eeprom(tp, offset, val);
3160
3161 offset = tg3_nvram_phys_addr(tp, offset);
3162
3163 if (offset > NVRAM_ADDR_MSK)
3164 return -EINVAL;
3165
3166 ret = tg3_nvram_lock(tp);
3167 if (ret)
3168 return ret;
3169
3170 tg3_enable_nvram_access(tp);
3171
3172 tw32(NVRAM_ADDR, offset);
3173 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3174 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3175
3176 if (ret == 0)
3177 *val = tr32(NVRAM_RDDATA);
3178
3179 tg3_disable_nvram_access(tp);
3180
3181 tg3_nvram_unlock(tp);
3182
3183 return ret;
3184 }
3185
3186 /* Ensures NVRAM data is in bytestream format. */
3187 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3188 {
3189 u32 v;
3190 int res = tg3_nvram_read(tp, offset, &v);
3191 if (!res)
3192 *val = cpu_to_be32(v);
3193 return res;
3194 }
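
/* Concretely (illustration only): if NVRAM holds the bytes
 * aa bb cc dd at 'offset', tg3_nvram_read() returns the host value
 * 0xaabbccdd on both big- and little-endian machines, and the
 * cpu_to_be32() above therefore leaves *val with the bytes
 * aa bb cc dd in memory order on either one, i.e. a bytestream.
 */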
3195
3196 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3197 u32 offset, u32 len, u8 *buf)
3198 {
3199 int i, j, rc = 0;
3200 u32 val;
3201
3202 for (i = 0; i < len; i += 4) {
3203 u32 addr;
3204 __be32 data;
3205
3206 addr = offset + i;
3207
3208 memcpy(&data, buf + i, 4);
3209
3210 /*
3211 * The SEEPROM interface expects the data to always be opposite
3212 * the native endian format. We accomplish this by reversing
3213 * all the operations that would have been performed on the
3214 * data from a call to tg3_nvram_read_be32().
3215 */
3216 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3217
3218 val = tr32(GRC_EEPROM_ADDR);
3219 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3220
3221 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3222 EEPROM_ADDR_READ);
3223 tw32(GRC_EEPROM_ADDR, val |
3224 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3225 (addr & EEPROM_ADDR_ADDR_MASK) |
3226 EEPROM_ADDR_START |
3227 EEPROM_ADDR_WRITE);
3228
3229 for (j = 0; j < 1000; j++) {
3230 val = tr32(GRC_EEPROM_ADDR);
3231
3232 if (val & EEPROM_ADDR_COMPLETE)
3233 break;
3234 msleep(1);
3235 }
3236 if (!(val & EEPROM_ADDR_COMPLETE)) {
3237 rc = -EBUSY;
3238 break;
3239 }
3240 }
3241
3242 return rc;
3243 }
3244
3245 /* offset and length are dword aligned */
3246 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3247 u8 *buf)
3248 {
3249 int ret = 0;
3250 u32 pagesize = tp->nvram_pagesize;
3251 u32 pagemask = pagesize - 1;
3252 u32 nvram_cmd;
3253 u8 *tmp;
3254
3255 tmp = kmalloc(pagesize, GFP_KERNEL);
3256 if (tmp == NULL)
3257 return -ENOMEM;
3258
3259 while (len) {
3260 int j;
3261 u32 phy_addr, page_off, size;
3262
3263 phy_addr = offset & ~pagemask;
3264
3265 for (j = 0; j < pagesize; j += 4) {
3266 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3267 (__be32 *) (tmp + j));
3268 if (ret)
3269 break;
3270 }
3271 if (ret)
3272 break;
3273
3274 page_off = offset & pagemask;
3275 size = pagesize;
3276 if (len < size)
3277 size = len;
3278
3279 len -= size;
3280
3281 memcpy(tmp + page_off, buf, size);
3282
3283 offset = offset + (pagesize - page_off);
3284
3285 tg3_enable_nvram_access(tp);
3286
3287 /*
3288 * Before we can erase the flash page, we need
3289 * to issue a special "write enable" command.
3290 */
3291 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3292
3293 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3294 break;
3295
3296 /* Erase the target page */
3297 tw32(NVRAM_ADDR, phy_addr);
3298
3299 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3301
3302 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3303 break;
3304
3305 /* Issue another write enable to start the write. */
3306 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3307
3308 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3309 break;
3310
3311 for (j = 0; j < pagesize; j += 4) {
3312 __be32 data;
3313
3314 data = *((__be32 *) (tmp + j));
3315
3316 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3317
3318 tw32(NVRAM_ADDR, phy_addr + j);
3319
3320 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3321 NVRAM_CMD_WR;
3322
3323 if (j == 0)
3324 nvram_cmd |= NVRAM_CMD_FIRST;
3325 else if (j == (pagesize - 4))
3326 nvram_cmd |= NVRAM_CMD_LAST;
3327
3328 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3329 if (ret)
3330 break;
3331 }
3332 if (ret)
3333 break;
3334 }
3335
3336 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3337 tg3_nvram_exec_cmd(tp, nvram_cmd);
3338
3339 kfree(tmp);
3340
3341 return ret;
3342 }
3343
3344 /* offset and length are dword aligned */
3345 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3346 u8 *buf)
3347 {
3348 int i, ret = 0;
3349
3350 for (i = 0; i < len; i += 4, offset += 4) {
3351 u32 page_off, phy_addr, nvram_cmd;
3352 __be32 data;
3353
3354 memcpy(&data, buf + i, 4);
3355 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3356
3357 page_off = offset % tp->nvram_pagesize;
3358
3359 phy_addr = tg3_nvram_phys_addr(tp, offset);
3360
3361 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3362
3363 if (page_off == 0 || i == 0)
3364 nvram_cmd |= NVRAM_CMD_FIRST;
3365 if (page_off == (tp->nvram_pagesize - 4))
3366 nvram_cmd |= NVRAM_CMD_LAST;
3367
3368 if (i == (len - 4))
3369 nvram_cmd |= NVRAM_CMD_LAST;
3370
3371 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3372 !tg3_flag(tp, FLASH) ||
3373 !tg3_flag(tp, 57765_PLUS))
3374 tw32(NVRAM_ADDR, phy_addr);
3375
3376 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3377 !tg3_flag(tp, 5755_PLUS) &&
3378 (tp->nvram_jedecnum == JEDEC_ST) &&
3379 (nvram_cmd & NVRAM_CMD_FIRST)) {
3380 u32 cmd;
3381
3382 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3383 ret = tg3_nvram_exec_cmd(tp, cmd);
3384 if (ret)
3385 break;
3386 }
3387 if (!tg3_flag(tp, FLASH)) {
3388 /* We always do complete word writes to eeprom. */
3389 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3390 }
3391
3392 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3393 if (ret)
3394 break;
3395 }
3396 return ret;
3397 }
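
/* A note on the command flags above: NVRAM_CMD_FIRST is issued on
 * the first word of the transfer or of each flash page, and
 * NVRAM_CMD_LAST on the final word of the transfer or of each page,
 * so every page-sized burst is bracketed the way the controller
 * expects; words in between go out as plain NVRAM_CMD_WR.
 */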
3398
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3401 {
3402 int ret;
3403
3404 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3405 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3406 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3407 udelay(40);
3408 }
3409
3410 if (!tg3_flag(tp, NVRAM)) {
3411 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3412 } else {
3413 u32 grc_mode;
3414
3415 ret = tg3_nvram_lock(tp);
3416 if (ret)
3417 return ret;
3418
3419 tg3_enable_nvram_access(tp);
3420 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3421 tw32(NVRAM_WRITE1, 0x406);
3422
3423 grc_mode = tr32(GRC_MODE);
3424 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3425
3426 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3427 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3428 buf);
3429 } else {
3430 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3431 buf);
3432 }
3433
3434 grc_mode = tr32(GRC_MODE);
3435 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3436
3437 tg3_disable_nvram_access(tp);
3438 tg3_nvram_unlock(tp);
3439 }
3440
3441 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3442 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3443 udelay(40);
3444 }
3445
3446 return ret;
3447 }
3448
3449 #define RX_CPU_SCRATCH_BASE 0x30000
3450 #define RX_CPU_SCRATCH_SIZE 0x04000
3451 #define TX_CPU_SCRATCH_BASE 0x34000
3452 #define TX_CPU_SCRATCH_SIZE 0x04000
3453
3454 /* tp->lock is held. */
3455 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3456 {
3457 int i;
3458 const int iters = 10000;
3459
3460 for (i = 0; i < iters; i++) {
3461 tw32(cpu_base + CPU_STATE, 0xffffffff);
3462 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3463 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3464 break;
3465 }
3466
3467 return (i == iters) ? -EBUSY : 0;
3468 }
3469
3470 /* tp->lock is held. */
3471 static int tg3_rxcpu_pause(struct tg3 *tp)
3472 {
3473 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3474
3475 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3476 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3477 udelay(10);
3478
3479 return rc;
3480 }
3481
3482 /* tp->lock is held. */
3483 static int tg3_txcpu_pause(struct tg3 *tp)
3484 {
3485 return tg3_pause_cpu(tp, TX_CPU_BASE);
3486 }
3487
3488 /* tp->lock is held. */
3489 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3490 {
3491 tw32(cpu_base + CPU_STATE, 0xffffffff);
3492 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3493 }
3494
3495 /* tp->lock is held. */
3496 static void tg3_rxcpu_resume(struct tg3 *tp)
3497 {
3498 tg3_resume_cpu(tp, RX_CPU_BASE);
3499 }
3500
3501 /* tp->lock is held. */
3502 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3503 {
3504 int rc;
3505
3506 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3507
3508 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3509 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3510
3511 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3512 return 0;
3513 }
3514 if (cpu_base == RX_CPU_BASE) {
3515 rc = tg3_rxcpu_pause(tp);
3516 } else {
3517 /*
3518 * There is only an Rx CPU for the 5750 derivative in the
3519 * BCM4785.
3520 */
3521 if (tg3_flag(tp, IS_SSB_CORE))
3522 return 0;
3523
3524 rc = tg3_txcpu_pause(tp);
3525 }
3526
3527 if (rc) {
3528 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3529 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3530 return -ENODEV;
3531 }
3532
3533 /* Clear firmware's nvram arbitration. */
3534 if (tg3_flag(tp, NVRAM))
3535 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3536 return 0;
3537 }
3538
3539 static int tg3_fw_data_len(struct tg3 *tp,
3540 const struct tg3_firmware_hdr *fw_hdr)
3541 {
3542 int fw_len;
3543
3544 /* Non-fragmented firmware has one firmware header followed by a
3545 * contiguous chunk of data to be written. The length field in that
3546 * header is not the length of the data to be written but the
3547 * complete length of the bss. The data length is determined from
3548 * tp->fw->size minus the headers.
3549 *
3550 * Fragmented firmware has a main header followed by multiple
3551 * fragments. Each fragment is identical to non-fragmented firmware:
3552 * a firmware header followed by a contiguous chunk of data. In the
3553 * main header, the length field is unused and set to 0xffffffff.
3554 * In each fragment header the length is the entire size of that
3555 * fragment, i.e. fragment data plus header length. The data length
3556 * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3557 */
3558 if (tp->fw_len == 0xffffffff)
3559 fw_len = be32_to_cpu(fw_hdr->len);
3560 else
3561 fw_len = tp->fw->size;
3562
3563 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3564 }
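
/* The two layouts described above, pictorially (illustration only;
 * N1 and N2 are hypothetical fragment lengths):
 *
 *  non-fragmented: | hdr, len = bss size | data .................. |
 *                   <-------------- tp->fw->size ---------------->
 *
 *  fragmented:     | main hdr, len = 0xffffffff                    |
 *                  | frag hdr, len = N1 | N1 - TG3_FW_HDR_LEN data |
 *                  | frag hdr, len = N2 | N2 - TG3_FW_HDR_LEN data |
 *                  | ...                                           |
 */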
3565
3566 /* tp->lock is held. */
3567 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3568 u32 cpu_scratch_base, int cpu_scratch_size,
3569 const struct tg3_firmware_hdr *fw_hdr)
3570 {
3571 int err, lock_err, i;
3572 void (*write_op)(struct tg3 *, u32, u32);
3573 int total_len = tp->fw->size;
3574
3575 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3576 netdev_err(tp->dev,
3577 "%s: Trying to load TX cpu firmware on a 5705-class device\n",
3578 __func__);
3579 return -EINVAL;
3580 }
3581
3582 if (tg3_flag(tp, 5705_PLUS))
3583 write_op = tg3_write_mem;
3584 else
3585 write_op = tg3_write_indirect_reg32;
3586
3587 /* It is possible that bootcode is still loading at this point.
3588 * Acquire the nvram lock before halting the cpu.
3589 */
3590 lock_err = tg3_nvram_lock(tp);
3591 err = tg3_halt_cpu(tp, cpu_base);
3592 if (!lock_err)
3593 tg3_nvram_unlock(tp);
3594 if (err)
3595 goto out;
3596
3597 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3598 write_op(tp, cpu_scratch_base + i, 0);
3599 tw32(cpu_base + CPU_STATE, 0xffffffff);
3600 tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3601
3602 do {
3603 u32 *fw_data = (u32 *)(fw_hdr + 1);
3604 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3605 write_op(tp, cpu_scratch_base +
3606 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3607 (i * sizeof(u32)),
3608 be32_to_cpu(fw_data[i]));
3609
3610 total_len -= be32_to_cpu(fw_hdr->len);
3611
3612 /* Advance to next fragment */
3613 fw_hdr = (struct tg3_firmware_hdr *)
3614 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3615 } while (total_len > 0);
3616
3617 err = 0;
3618
3619 out:
3620 return err;
3621 }
3622
3623 /* tp->lock is held. */
3624 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3625 {
3626 int i;
3627 const int iters = 5;
3628
3629 tw32(cpu_base + CPU_STATE, 0xffffffff);
3630 tw32_f(cpu_base + CPU_PC, pc);
3631
3632 for (i = 0; i < iters; i++) {
3633 if (tr32(cpu_base + CPU_PC) == pc)
3634 break;
3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3637 tw32_f(cpu_base + CPU_PC, pc);
3638 udelay(1000);
3639 }
3640
3641 return (i == iters) ? -EBUSY : 0;
3642 }
3643
3644 /* tp->lock is held. */
3645 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3646 {
3647 const struct tg3_firmware_hdr *fw_hdr;
3648 int err;
3649
3650 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3651
3652 /* Firmware blob starts with version numbers, followed by
3653 * start address and length. We are setting complete length.
3654 * length = end_address_of_bss - start_address_of_text.
3655 * Remainder is the blob to be loaded contiguously
3656 * from start address. */
3657
3658 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3659 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3660 fw_hdr);
3661 if (err)
3662 return err;
3663
3664 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3665 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3666 fw_hdr);
3667 if (err)
3668 return err;
3669
3670 /* Now startup only the RX cpu. */
3671 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3672 be32_to_cpu(fw_hdr->base_addr));
3673 if (err) {
3674 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3675 "should be %08x\n", __func__,
3676 tr32(RX_CPU_BASE + CPU_PC),
3677 be32_to_cpu(fw_hdr->base_addr));
3678 return -ENODEV;
3679 }
3680
3681 tg3_rxcpu_resume(tp);
3682
3683 return 0;
3684 }
3685
3686 /* tp->lock is held. */
3687 static int tg3_load_tso_firmware(struct tg3 *tp)
3688 {
3689 const struct tg3_firmware_hdr *fw_hdr;
3690 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3691 int err;
3692
3693 if (!tg3_flag(tp, FW_TSO))
3694 return 0;
3695
3696 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3697
3698 /* Firmware blob starts with version numbers, followed by
3699 * start address and length. We are setting complete length.
3700 * length = end_address_of_bss - start_address_of_text.
3701 * Remainder is the blob to be loaded contiguously
3702 * from start address. */
3703
3704 cpu_scratch_size = tp->fw_len;
3705
3706 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3707 cpu_base = RX_CPU_BASE;
3708 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3709 } else {
3710 cpu_base = TX_CPU_BASE;
3711 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3712 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3713 }
3714
3715 err = tg3_load_firmware_cpu(tp, cpu_base,
3716 cpu_scratch_base, cpu_scratch_size,
3717 fw_hdr);
3718 if (err)
3719 return err;
3720
3721 /* Now startup the cpu. */
3722 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3723 be32_to_cpu(fw_hdr->base_addr));
3724 if (err) {
3725 netdev_err(tp->dev,
3726 "%s fails to set CPU PC, is %08x should be %08x\n",
3727 __func__, tr32(cpu_base + CPU_PC),
3728 be32_to_cpu(fw_hdr->base_addr));
3729 return -ENODEV;
3730 }
3731
3732 tg3_resume_cpu(tp, cpu_base);
3733 return 0;
3734 }
3735
3736
3737 /* tp->lock is held. */
3738 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3739 {
3740 u32 addr_high, addr_low;
3741 int i;
3742
3743 addr_high = ((tp->dev->dev_addr[0] << 8) |
3744 tp->dev->dev_addr[1]);
3745 addr_low = ((tp->dev->dev_addr[2] << 24) |
3746 (tp->dev->dev_addr[3] << 16) |
3747 (tp->dev->dev_addr[4] << 8) |
3748 (tp->dev->dev_addr[5] << 0));
3749 for (i = 0; i < 4; i++) {
3750 if (i == 1 && skip_mac_1)
3751 continue;
3752 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3753 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3754 }
3755
3756 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3757 tg3_asic_rev(tp) == ASIC_REV_5704) {
3758 for (i = 0; i < 12; i++) {
3759 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3760 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3761 }
3762 }
3763
3764 addr_high = (tp->dev->dev_addr[0] +
3765 tp->dev->dev_addr[1] +
3766 tp->dev->dev_addr[2] +
3767 tp->dev->dev_addr[3] +
3768 tp->dev->dev_addr[4] +
3769 tp->dev->dev_addr[5]) &
3770 TX_BACKOFF_SEED_MASK;
3771 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3772 }
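
/* Example of the register packing above (hypothetical MAC address,
 * illustration only): for dev_addr 00:10:18:aa:bb:cc,
 *
 *	addr_high = 0x00000010   (bytes 0-1)
 *	addr_low  = 0x18aabbcc   (bytes 2-5)
 *
 * and the backoff seed is the byte sum
 * (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK.
 */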
3773
3774 static void tg3_enable_register_access(struct tg3 *tp)
3775 {
3776 /*
3777 * Make sure register accesses (indirect or otherwise) will function
3778 * correctly.
3779 */
3780 pci_write_config_dword(tp->pdev,
3781 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3782 }
3783
3784 static int tg3_power_up(struct tg3 *tp)
3785 {
3786 int err;
3787
3788 tg3_enable_register_access(tp);
3789
3790 err = pci_set_power_state(tp->pdev, PCI_D0);
3791 if (!err) {
3792 /* Switch out of Vaux if it is a NIC */
3793 tg3_pwrsrc_switch_to_vmain(tp);
3794 } else {
3795 netdev_err(tp->dev, "Transition to D0 failed\n");
3796 }
3797
3798 return err;
3799 }
3800
3801 static int tg3_setup_phy(struct tg3 *, int);
3802
3803 static int tg3_power_down_prepare(struct tg3 *tp)
3804 {
3805 u32 misc_host_ctrl;
3806 bool device_should_wake, do_low_power;
3807
3808 tg3_enable_register_access(tp);
3809
3810 /* Restore the CLKREQ setting. */
3811 if (tg3_flag(tp, CLKREQ_BUG))
3812 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3813 PCI_EXP_LNKCTL_CLKREQ_EN);
3814
3815 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3816 tw32(TG3PCI_MISC_HOST_CTRL,
3817 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3818
3819 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3820 tg3_flag(tp, WOL_ENABLE);
3821
3822 if (tg3_flag(tp, USE_PHYLIB)) {
3823 do_low_power = false;
3824 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3825 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3826 struct phy_device *phydev;
3827 u32 phyid, advertising;
3828
3829 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3830
3831 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3832
3833 tp->link_config.speed = phydev->speed;
3834 tp->link_config.duplex = phydev->duplex;
3835 tp->link_config.autoneg = phydev->autoneg;
3836 tp->link_config.advertising = phydev->advertising;
3837
3838 advertising = ADVERTISED_TP |
3839 ADVERTISED_Pause |
3840 ADVERTISED_Autoneg |
3841 ADVERTISED_10baseT_Half;
3842
3843 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3844 if (tg3_flag(tp, WOL_SPEED_100MB))
3845 advertising |=
3846 ADVERTISED_100baseT_Half |
3847 ADVERTISED_100baseT_Full |
3848 ADVERTISED_10baseT_Full;
3849 else
3850 advertising |= ADVERTISED_10baseT_Full;
3851 }
3852
3853 phydev->advertising = advertising;
3854
3855 phy_start_aneg(phydev);
3856
3857 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3858 if (phyid != PHY_ID_BCMAC131) {
3859 phyid &= PHY_BCM_OUI_MASK;
3860 if (phyid == PHY_BCM_OUI_1 ||
3861 phyid == PHY_BCM_OUI_2 ||
3862 phyid == PHY_BCM_OUI_3)
3863 do_low_power = true;
3864 }
3865 }
3866 } else {
3867 do_low_power = true;
3868
3869 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3870 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3871
3872 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3873 tg3_setup_phy(tp, 0);
3874 }
3875
3876 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3877 u32 val;
3878
3879 val = tr32(GRC_VCPU_EXT_CTRL);
3880 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3881 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3882 int i;
3883 u32 val;
3884
3885 for (i = 0; i < 200; i++) {
3886 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3887 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3888 break;
3889 msleep(1);
3890 }
3891 }
3892 if (tg3_flag(tp, WOL_CAP))
3893 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3894 WOL_DRV_STATE_SHUTDOWN |
3895 WOL_DRV_WOL |
3896 WOL_SET_MAGIC_PKT);
3897
3898 if (device_should_wake) {
3899 u32 mac_mode;
3900
3901 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3902 if (do_low_power &&
3903 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3904 tg3_phy_auxctl_write(tp,
3905 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3906 MII_TG3_AUXCTL_PCTL_WOL_EN |
3907 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3908 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3909 udelay(40);
3910 }
3911
3912 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3913 mac_mode = MAC_MODE_PORT_MODE_GMII;
3914 else
3915 mac_mode = MAC_MODE_PORT_MODE_MII;
3916
3917 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3918 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3919 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3920 SPEED_100 : SPEED_10;
3921 if (tg3_5700_link_polarity(tp, speed))
3922 mac_mode |= MAC_MODE_LINK_POLARITY;
3923 else
3924 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3925 }
3926 } else {
3927 mac_mode = MAC_MODE_PORT_MODE_TBI;
3928 }
3929
3930 if (!tg3_flag(tp, 5750_PLUS))
3931 tw32(MAC_LED_CTRL, tp->led_ctrl);
3932
3933 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3934 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3935 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3936 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3937
3938 if (tg3_flag(tp, ENABLE_APE))
3939 mac_mode |= MAC_MODE_APE_TX_EN |
3940 MAC_MODE_APE_RX_EN |
3941 MAC_MODE_TDE_ENABLE;
3942
3943 tw32_f(MAC_MODE, mac_mode);
3944 udelay(100);
3945
3946 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3947 udelay(10);
3948 }
3949
3950 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3951 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3952 tg3_asic_rev(tp) == ASIC_REV_5701)) {
3953 u32 base_val;
3954
3955 base_val = tp->pci_clock_ctrl;
3956 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3957 CLOCK_CTRL_TXCLK_DISABLE);
3958
3959 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3960 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3961 } else if (tg3_flag(tp, 5780_CLASS) ||
3962 tg3_flag(tp, CPMU_PRESENT) ||
3963 tg3_asic_rev(tp) == ASIC_REV_5906) {
3964 /* do nothing */
3965 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3966 u32 newbits1, newbits2;
3967
3968 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3969 tg3_asic_rev(tp) == ASIC_REV_5701) {
3970 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3971 CLOCK_CTRL_TXCLK_DISABLE |
3972 CLOCK_CTRL_ALTCLK);
3973 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3974 } else if (tg3_flag(tp, 5705_PLUS)) {
3975 newbits1 = CLOCK_CTRL_625_CORE;
3976 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3977 } else {
3978 newbits1 = CLOCK_CTRL_ALTCLK;
3979 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3980 }
3981
3982 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3983 40);
3984
3985 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3986 40);
3987
3988 if (!tg3_flag(tp, 5705_PLUS)) {
3989 u32 newbits3;
3990
3991 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3992 tg3_asic_rev(tp) == ASIC_REV_5701) {
3993 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3994 CLOCK_CTRL_TXCLK_DISABLE |
3995 CLOCK_CTRL_44MHZ_CORE);
3996 } else {
3997 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3998 }
3999
4000 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4001 tp->pci_clock_ctrl | newbits3, 40);
4002 }
4003 }
4004
4005 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4006 tg3_power_down_phy(tp, do_low_power);
4007
4008 tg3_frob_aux_power(tp, true);
4009
4010 /* Workaround for unstable PLL clock */
4011 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4012 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4013 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4014 u32 val = tr32(0x7d00);
4015
4016 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4017 tw32(0x7d00, val);
4018 if (!tg3_flag(tp, ENABLE_ASF)) {
4019 int err;
4020
4021 err = tg3_nvram_lock(tp);
4022 tg3_halt_cpu(tp, RX_CPU_BASE);
4023 if (!err)
4024 tg3_nvram_unlock(tp);
4025 }
4026 }
4027
4028 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4029
4030 return 0;
4031 }
4032
4033 static void tg3_power_down(struct tg3 *tp)
4034 {
4035 tg3_power_down_prepare(tp);
4036
4037 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4038 pci_set_power_state(tp->pdev, PCI_D3hot);
4039 }
4040
4041 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4042 {
4043 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4044 case MII_TG3_AUX_STAT_10HALF:
4045 *speed = SPEED_10;
4046 *duplex = DUPLEX_HALF;
4047 break;
4048
4049 case MII_TG3_AUX_STAT_10FULL:
4050 *speed = SPEED_10;
4051 *duplex = DUPLEX_FULL;
4052 break;
4053
4054 case MII_TG3_AUX_STAT_100HALF:
4055 *speed = SPEED_100;
4056 *duplex = DUPLEX_HALF;
4057 break;
4058
4059 case MII_TG3_AUX_STAT_100FULL:
4060 *speed = SPEED_100;
4061 *duplex = DUPLEX_FULL;
4062 break;
4063
4064 case MII_TG3_AUX_STAT_1000HALF:
4065 *speed = SPEED_1000;
4066 *duplex = DUPLEX_HALF;
4067 break;
4068
4069 case MII_TG3_AUX_STAT_1000FULL:
4070 *speed = SPEED_1000;
4071 *duplex = DUPLEX_FULL;
4072 break;
4073
4074 default:
4075 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4076 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4077 SPEED_10;
4078 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4079 DUPLEX_HALF;
4080 break;
4081 }
4082 *speed = SPEED_UNKNOWN;
4083 *duplex = DUPLEX_UNKNOWN;
4084 break;
4085 }
4086 }
4087
4088 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4089 {
4090 int err = 0;
4091 u32 val, new_adv;
4092
4093 new_adv = ADVERTISE_CSMA;
4094 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4095 new_adv |= mii_advertise_flowctrl(flowctrl);
4096
4097 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4098 if (err)
4099 goto done;
4100
4101 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4102 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4103
4104 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4105 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4106 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4107
4108 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4109 if (err)
4110 goto done;
4111 }
4112
4113 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4114 goto done;
4115
4116 tw32(TG3_CPMU_EEE_MODE,
4117 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4118
4119 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4120 if (!err) {
4121 u32 err2;
4122
4123 val = 0;
4124 /* Advertise 100-BaseTX EEE ability */
4125 if (advertise & ADVERTISED_100baseT_Full)
4126 val |= MDIO_AN_EEE_ADV_100TX;
4127 /* Advertise 1000-BaseT EEE ability */
4128 if (advertise & ADVERTISED_1000baseT_Full)
4129 val |= MDIO_AN_EEE_ADV_1000T;
4130 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4131 if (err)
4132 val = 0;
4133
4134 switch (tg3_asic_rev(tp)) {
4135 case ASIC_REV_5717:
4136 case ASIC_REV_57765:
4137 case ASIC_REV_57766:
4138 case ASIC_REV_5719:
4139 /* If we advertised any EEE abilities above... */
4140 if (val)
4141 val = MII_TG3_DSP_TAP26_ALNOKO |
4142 MII_TG3_DSP_TAP26_RMRXSTO |
4143 MII_TG3_DSP_TAP26_OPCSINPT;
4144 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4145 /* Fall through */
4146 case ASIC_REV_5720:
4147 case ASIC_REV_5762:
4148 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4149 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4150 MII_TG3_DSP_CH34TP2_HIBW01);
4151 }
4152
4153 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4154 if (!err)
4155 err = err2;
4156 }
4157
4158 done:
4159 return err;
4160 }
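
/* Illustrative example (not part of the driver): with
 * advertise == ADVERTISED_100baseT_Full and
 * flowctrl == (FLOW_CTRL_TX | FLOW_CTRL_RX), the MII_ADVERTISE value
 * built above is ADVERTISE_CSMA | ADVERTISE_100FULL |
 * ADVERTISE_PAUSE_CAP == 0x0001 | 0x0100 | 0x0400 == 0x0501.
 */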
4161
4162 static void tg3_phy_copper_begin(struct tg3 *tp)
4163 {
4164 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4165 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4166 u32 adv, fc;
4167
4168 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4169 adv = ADVERTISED_10baseT_Half |
4170 ADVERTISED_10baseT_Full;
4171 if (tg3_flag(tp, WOL_SPEED_100MB))
4172 adv |= ADVERTISED_100baseT_Half |
4173 ADVERTISED_100baseT_Full;
4174
4175 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4176 } else {
4177 adv = tp->link_config.advertising;
4178 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4179 adv &= ~(ADVERTISED_1000baseT_Half |
4180 ADVERTISED_1000baseT_Full);
4181
4182 fc = tp->link_config.flowctrl;
4183 }
4184
4185 tg3_phy_autoneg_cfg(tp, adv, fc);
4186
4187 tg3_writephy(tp, MII_BMCR,
4188 BMCR_ANENABLE | BMCR_ANRESTART);
4189 } else {
4190 int i;
4191 u32 bmcr, orig_bmcr;
4192
4193 tp->link_config.active_speed = tp->link_config.speed;
4194 tp->link_config.active_duplex = tp->link_config.duplex;
4195
4196 bmcr = 0;
4197 switch (tp->link_config.speed) {
4198 default:
4199 case SPEED_10:
4200 break;
4201
4202 case SPEED_100:
4203 bmcr |= BMCR_SPEED100;
4204 break;
4205
4206 case SPEED_1000:
4207 bmcr |= BMCR_SPEED1000;
4208 break;
4209 }
4210
4211 if (tp->link_config.duplex == DUPLEX_FULL)
4212 bmcr |= BMCR_FULLDPLX;
4213
4214 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4215 (bmcr != orig_bmcr)) {
4216 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4217 for (i = 0; i < 1500; i++) {
4218 u32 tmp;
4219
4220 udelay(10);
4221 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4222 tg3_readphy(tp, MII_BMSR, &tmp))
4223 continue;
4224 if (!(tmp & BMSR_LSTATUS)) {
4225 udelay(40);
4226 break;
4227 }
4228 }
4229 tg3_writephy(tp, MII_BMCR, bmcr);
4230 udelay(40);
4231 }
4232 }
4233 }
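
/* Illustrative example: for a forced link_config of SPEED_1000 and
 * DUPLEX_FULL, the bmcr value written above is
 * BMCR_SPEED1000 | BMCR_FULLDPLX == 0x0040 | 0x0100 == 0x0140,
 * with autonegotiation left disabled.
 */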
4234
4235 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4236 {
4237 int err;
4238
4239 /* Turn off tap power management. */
4240 /* Set Extended packet length bit */
4241 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4242
4243 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4244 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4245 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4246 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4247 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4248
4249 udelay(40);
4250
4251 return err;
4252 }
4253
4254 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4255 {
4256 u32 advmsk, tgtadv, advertising;
4257
4258 advertising = tp->link_config.advertising;
4259 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4260
4261 advmsk = ADVERTISE_ALL;
4262 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4263 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4264 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4265 }
4266
4267 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4268 return false;
4269
4270 if ((*lcladv & advmsk) != tgtadv)
4271 return false;
4272
4273 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4274 u32 tg3_ctrl;
4275
4276 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4277
4278 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4279 return false;
4280
4281 if (tgtadv &&
4282 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4283 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4284 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4285 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4286 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4287 } else {
4288 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4289 }
4290
4291 if (tg3_ctrl != tgtadv)
4292 return false;
4293 }
4294
4295 return true;
4296 }
4297
4298 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4299 {
4300 u32 lpeth = 0;
4301
4302 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4303 u32 val;
4304
4305 if (tg3_readphy(tp, MII_STAT1000, &val))
4306 return false;
4307
4308 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4309 }
4310
4311 if (tg3_readphy(tp, MII_LPA, rmtadv))
4312 return false;
4313
4314 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4315 tp->link_config.rmt_adv = lpeth;
4316
4317 return true;
4318 }
4319
4320 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4321 {
4322 if (curr_link_up != tp->link_up) {
4323 if (curr_link_up) {
4324 tg3_carrier_on(tp);
4325 } else {
4326 tg3_carrier_off(tp);
4327 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4328 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4329 }
4330
4331 tg3_link_report(tp);
4332 return true;
4333 }
4334
4335 return false;
4336 }
4337
4338 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4339 {
4340 int current_link_up;
4341 u32 bmsr, val;
4342 u32 lcl_adv, rmt_adv;
4343 u16 current_speed;
4344 u8 current_duplex;
4345 int i, err;
4346
4347 tw32(MAC_EVENT, 0);
4348
4349 tw32_f(MAC_STATUS,
4350 (MAC_STATUS_SYNC_CHANGED |
4351 MAC_STATUS_CFG_CHANGED |
4352 MAC_STATUS_MI_COMPLETION |
4353 MAC_STATUS_LNKSTATE_CHANGED));
4354 udelay(40);
4355
4356 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4357 tw32_f(MAC_MI_MODE,
4358 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4359 udelay(80);
4360 }
4361
4362 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4363
4364 /* Some third-party PHYs need to be reset on link going
4365 * down.
4366 */
4367 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4368 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4369 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4370 tp->link_up) {
4371 tg3_readphy(tp, MII_BMSR, &bmsr);
4372 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4373 !(bmsr & BMSR_LSTATUS))
4374 force_reset = 1;
4375 }
4376 if (force_reset)
4377 tg3_phy_reset(tp);
4378
4379 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4380 tg3_readphy(tp, MII_BMSR, &bmsr);
4381 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4382 !tg3_flag(tp, INIT_COMPLETE))
4383 bmsr = 0;
4384
4385 if (!(bmsr & BMSR_LSTATUS)) {
4386 err = tg3_init_5401phy_dsp(tp);
4387 if (err)
4388 return err;
4389
4390 tg3_readphy(tp, MII_BMSR, &bmsr);
4391 for (i = 0; i < 1000; i++) {
4392 udelay(10);
4393 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4394 (bmsr & BMSR_LSTATUS)) {
4395 udelay(40);
4396 break;
4397 }
4398 }
4399
4400 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4401 TG3_PHY_REV_BCM5401_B0 &&
4402 !(bmsr & BMSR_LSTATUS) &&
4403 tp->link_config.active_speed == SPEED_1000) {
4404 err = tg3_phy_reset(tp);
4405 if (!err)
4406 err = tg3_init_5401phy_dsp(tp);
4407 if (err)
4408 return err;
4409 }
4410 }
4411 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4412 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4413 /* 5701 {A0,B0} CRC bug workaround */
4414 tg3_writephy(tp, 0x15, 0x0a75);
4415 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4416 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4417 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4418 }
4419
4420 /* Clear pending interrupts... */
4421 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4422 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4423
4424 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4425 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4426 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4427 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4428
4429 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4430 tg3_asic_rev(tp) == ASIC_REV_5701) {
4431 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4432 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4433 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4434 else
4435 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4436 }
4437
4438 current_link_up = 0;
4439 current_speed = SPEED_UNKNOWN;
4440 current_duplex = DUPLEX_UNKNOWN;
4441 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4442 tp->link_config.rmt_adv = 0;
4443
4444 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4445 err = tg3_phy_auxctl_read(tp,
4446 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4447 &val);
4448 if (!err && !(val & (1 << 10))) {
4449 tg3_phy_auxctl_write(tp,
4450 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4451 val | (1 << 10));
4452 goto relink;
4453 }
4454 }
4455
4456 bmsr = 0;
4457 for (i = 0; i < 100; i++) {
4458 tg3_readphy(tp, MII_BMSR, &bmsr);
4459 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4460 (bmsr & BMSR_LSTATUS))
4461 break;
4462 udelay(40);
4463 }
4464
4465 if (bmsr & BMSR_LSTATUS) {
4466 u32 aux_stat, bmcr;
4467
4468 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4469 for (i = 0; i < 2000; i++) {
4470 udelay(10);
4471 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4472 aux_stat)
4473 break;
4474 }
4475
4476 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4477 &current_speed,
4478 &current_duplex);
4479
4480 bmcr = 0;
4481 for (i = 0; i < 200; i++) {
4482 tg3_readphy(tp, MII_BMCR, &bmcr);
4483 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4484 continue;
4485 if (bmcr && bmcr != 0x7fff)
4486 break;
4487 udelay(10);
4488 }
4489
4490 lcl_adv = 0;
4491 rmt_adv = 0;
4492
4493 tp->link_config.active_speed = current_speed;
4494 tp->link_config.active_duplex = current_duplex;
4495
4496 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4497 if ((bmcr & BMCR_ANENABLE) &&
4498 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4499 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4500 current_link_up = 1;
4501 } else {
4502 if (!(bmcr & BMCR_ANENABLE) &&
4503 tp->link_config.speed == current_speed &&
4504 tp->link_config.duplex == current_duplex &&
4505 tp->link_config.flowctrl ==
4506 tp->link_config.active_flowctrl) {
4507 current_link_up = 1;
4508 }
4509 }
4510
4511 if (current_link_up == 1 &&
4512 tp->link_config.active_duplex == DUPLEX_FULL) {
4513 u32 reg, bit;
4514
4515 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4516 reg = MII_TG3_FET_GEN_STAT;
4517 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4518 } else {
4519 reg = MII_TG3_EXT_STAT;
4520 bit = MII_TG3_EXT_STAT_MDIX;
4521 }
4522
4523 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4524 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4525
4526 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4527 }
4528 }
4529
4530 relink:
4531 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4532 tg3_phy_copper_begin(tp);
4533
4534 if (tg3_flag(tp, ROBOSWITCH)) {
4535 current_link_up = 1;
4536 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4537 current_speed = SPEED_1000;
4538 current_duplex = DUPLEX_FULL;
4539 tp->link_config.active_speed = current_speed;
4540 tp->link_config.active_duplex = current_duplex;
4541 }
4542
4543 tg3_readphy(tp, MII_BMSR, &bmsr);
4544 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4545 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4546 current_link_up = 1;
4547 }
4548
4549 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4550 if (current_link_up == 1) {
4551 if (tp->link_config.active_speed == SPEED_100 ||
4552 tp->link_config.active_speed == SPEED_10)
4553 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4554 else
4555 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4556 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4557 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4558 else
4559 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4560
4561 /* In order for the 5750 core in the BCM4785 chip to work properly
4562 * in RGMII mode, the LED Control Register must be set up.
4563 */
4564 if (tg3_flag(tp, RGMII_MODE)) {
4565 u32 led_ctrl = tr32(MAC_LED_CTRL);
4566 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4567
4568 if (tp->link_config.active_speed == SPEED_10)
4569 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4570 else if (tp->link_config.active_speed == SPEED_100)
4571 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4572 LED_CTRL_100MBPS_ON);
4573 else if (tp->link_config.active_speed == SPEED_1000)
4574 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4575 LED_CTRL_1000MBPS_ON);
4576
4577 tw32(MAC_LED_CTRL, led_ctrl);
4578 udelay(40);
4579 }
4580
4581 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4582 if (tp->link_config.active_duplex == DUPLEX_HALF)
4583 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4584
4585 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4586 if (current_link_up == 1 &&
4587 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4588 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4589 else
4590 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4591 }
4592
4593 /* ??? Without this setting Netgear GA302T PHY does not
4594 * ??? send/receive packets...
4595 */
4596 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4597 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4598 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4599 tw32_f(MAC_MI_MODE, tp->mi_mode);
4600 udelay(80);
4601 }
4602
4603 tw32_f(MAC_MODE, tp->mac_mode);
4604 udelay(40);
4605
4606 tg3_phy_eee_adjust(tp, current_link_up);
4607
4608 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4609 /* Polled via timer. */
4610 tw32_f(MAC_EVENT, 0);
4611 } else {
4612 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4613 }
4614 udelay(40);
4615
4616 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4617 current_link_up == 1 &&
4618 tp->link_config.active_speed == SPEED_1000 &&
4619 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4620 udelay(120);
4621 tw32_f(MAC_STATUS,
4622 (MAC_STATUS_SYNC_CHANGED |
4623 MAC_STATUS_CFG_CHANGED));
4624 udelay(40);
4625 tg3_write_mem(tp,
4626 NIC_SRAM_FIRMWARE_MBOX,
4627 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4628 }
4629
4630 /* Prevent send BD corruption. */
4631 if (tg3_flag(tp, CLKREQ_BUG)) {
4632 if (tp->link_config.active_speed == SPEED_100 ||
4633 tp->link_config.active_speed == SPEED_10)
4634 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4635 PCI_EXP_LNKCTL_CLKREQ_EN);
4636 else
4637 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4638 PCI_EXP_LNKCTL_CLKREQ_EN);
4639 }
4640
4641 tg3_test_and_report_link_chg(tp, current_link_up);
4642
4643 return 0;
4644 }
4645
4646 struct tg3_fiber_aneginfo {
4647 int state;
4648 #define ANEG_STATE_UNKNOWN 0
4649 #define ANEG_STATE_AN_ENABLE 1
4650 #define ANEG_STATE_RESTART_INIT 2
4651 #define ANEG_STATE_RESTART 3
4652 #define ANEG_STATE_DISABLE_LINK_OK 4
4653 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4654 #define ANEG_STATE_ABILITY_DETECT 6
4655 #define ANEG_STATE_ACK_DETECT_INIT 7
4656 #define ANEG_STATE_ACK_DETECT 8
4657 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4658 #define ANEG_STATE_COMPLETE_ACK 10
4659 #define ANEG_STATE_IDLE_DETECT_INIT 11
4660 #define ANEG_STATE_IDLE_DETECT 12
4661 #define ANEG_STATE_LINK_OK 13
4662 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4663 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4664
4665 u32 flags;
4666 #define MR_AN_ENABLE 0x00000001
4667 #define MR_RESTART_AN 0x00000002
4668 #define MR_AN_COMPLETE 0x00000004
4669 #define MR_PAGE_RX 0x00000008
4670 #define MR_NP_LOADED 0x00000010
4671 #define MR_TOGGLE_TX 0x00000020
4672 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4673 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4674 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4675 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4676 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4677 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4678 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4679 #define MR_TOGGLE_RX 0x00002000
4680 #define MR_NP_RX 0x00004000
4681
4682 #define MR_LINK_OK 0x80000000
4683
4684 unsigned long link_time, cur_time;
4685
4686 u32 ability_match_cfg;
4687 int ability_match_count;
4688
4689 char ability_match, idle_match, ack_match;
4690
4691 u32 txconfig, rxconfig;
4692 #define ANEG_CFG_NP 0x00000080
4693 #define ANEG_CFG_ACK 0x00000040
4694 #define ANEG_CFG_RF2 0x00000020
4695 #define ANEG_CFG_RF1 0x00000010
4696 #define ANEG_CFG_PS2 0x00000001
4697 #define ANEG_CFG_PS1 0x00008000
4698 #define ANEG_CFG_HD 0x00004000
4699 #define ANEG_CFG_FD 0x00002000
4700 #define ANEG_CFG_INVAL 0x00001f06
4701
4702 };
4703 #define ANEG_OK 0
4704 #define ANEG_DONE 1
4705 #define ANEG_TIMER_ENAB 2
4706 #define ANEG_FAILED -1
4707
4708 #define ANEG_STATE_SETTLE_TIME 10000
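/* The state machine below is ticked roughly once per microsecond by
 * fiber_autoneg() (one udelay(1) per iteration), so a settle time of
 * 10000 ticks corresponds to about 10 ms, ignoring loop overhead.
 */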
4709
4710 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4711 struct tg3_fiber_aneginfo *ap)
4712 {
4713 u16 flowctrl;
4714 unsigned long delta;
4715 u32 rx_cfg_reg;
4716 int ret;
4717
4718 if (ap->state == ANEG_STATE_UNKNOWN) {
4719 ap->rxconfig = 0;
4720 ap->link_time = 0;
4721 ap->cur_time = 0;
4722 ap->ability_match_cfg = 0;
4723 ap->ability_match_count = 0;
4724 ap->ability_match = 0;
4725 ap->idle_match = 0;
4726 ap->ack_match = 0;
4727 }
4728 ap->cur_time++;
4729
4730 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4731 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4732
4733 if (rx_cfg_reg != ap->ability_match_cfg) {
4734 ap->ability_match_cfg = rx_cfg_reg;
4735 ap->ability_match = 0;
4736 ap->ability_match_count = 0;
4737 } else {
4738 if (++ap->ability_match_count > 1) {
4739 ap->ability_match = 1;
4740 ap->ability_match_cfg = rx_cfg_reg;
4741 }
4742 }
4743 if (rx_cfg_reg & ANEG_CFG_ACK)
4744 ap->ack_match = 1;
4745 else
4746 ap->ack_match = 0;
4747
4748 ap->idle_match = 0;
4749 } else {
4750 ap->idle_match = 1;
4751 ap->ability_match_cfg = 0;
4752 ap->ability_match_count = 0;
4753 ap->ability_match = 0;
4754 ap->ack_match = 0;
4755
4756 rx_cfg_reg = 0;
4757 }
4758
4759 ap->rxconfig = rx_cfg_reg;
4760 ret = ANEG_OK;
4761
4762 switch (ap->state) {
4763 case ANEG_STATE_UNKNOWN:
4764 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4765 ap->state = ANEG_STATE_AN_ENABLE;
4766
4767 /* fallthru */
4768 case ANEG_STATE_AN_ENABLE:
4769 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4770 if (ap->flags & MR_AN_ENABLE) {
4771 ap->link_time = 0;
4772 ap->cur_time = 0;
4773 ap->ability_match_cfg = 0;
4774 ap->ability_match_count = 0;
4775 ap->ability_match = 0;
4776 ap->idle_match = 0;
4777 ap->ack_match = 0;
4778
4779 ap->state = ANEG_STATE_RESTART_INIT;
4780 } else {
4781 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4782 }
4783 break;
4784
4785 case ANEG_STATE_RESTART_INIT:
4786 ap->link_time = ap->cur_time;
4787 ap->flags &= ~(MR_NP_LOADED);
4788 ap->txconfig = 0;
4789 tw32(MAC_TX_AUTO_NEG, 0);
4790 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4791 tw32_f(MAC_MODE, tp->mac_mode);
4792 udelay(40);
4793
4794 ret = ANEG_TIMER_ENAB;
4795 ap->state = ANEG_STATE_RESTART;
4796
4797 /* fallthru */
4798 case ANEG_STATE_RESTART:
4799 delta = ap->cur_time - ap->link_time;
4800 if (delta > ANEG_STATE_SETTLE_TIME)
4801 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4802 else
4803 ret = ANEG_TIMER_ENAB;
4804 break;
4805
4806 case ANEG_STATE_DISABLE_LINK_OK:
4807 ret = ANEG_DONE;
4808 break;
4809
4810 case ANEG_STATE_ABILITY_DETECT_INIT:
4811 ap->flags &= ~(MR_TOGGLE_TX);
4812 ap->txconfig = ANEG_CFG_FD;
4813 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4814 if (flowctrl & ADVERTISE_1000XPAUSE)
4815 ap->txconfig |= ANEG_CFG_PS1;
4816 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4817 ap->txconfig |= ANEG_CFG_PS2;
4818 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4819 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4820 tw32_f(MAC_MODE, tp->mac_mode);
4821 udelay(40);
4822
4823 ap->state = ANEG_STATE_ABILITY_DETECT;
4824 break;
4825
4826 case ANEG_STATE_ABILITY_DETECT:
4827 if (ap->ability_match != 0 && ap->rxconfig != 0)
4828 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4829 break;
4830
4831 case ANEG_STATE_ACK_DETECT_INIT:
4832 ap->txconfig |= ANEG_CFG_ACK;
4833 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4834 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4835 tw32_f(MAC_MODE, tp->mac_mode);
4836 udelay(40);
4837
4838 ap->state = ANEG_STATE_ACK_DETECT;
4839
4840 /* fallthru */
4841 case ANEG_STATE_ACK_DETECT:
4842 if (ap->ack_match != 0) {
4843 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4844 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4845 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4846 } else {
4847 ap->state = ANEG_STATE_AN_ENABLE;
4848 }
4849 } else if (ap->ability_match != 0 &&
4850 ap->rxconfig == 0) {
4851 ap->state = ANEG_STATE_AN_ENABLE;
4852 }
4853 break;
4854
4855 case ANEG_STATE_COMPLETE_ACK_INIT:
4856 if (ap->rxconfig & ANEG_CFG_INVAL) {
4857 ret = ANEG_FAILED;
4858 break;
4859 }
4860 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4861 MR_LP_ADV_HALF_DUPLEX |
4862 MR_LP_ADV_SYM_PAUSE |
4863 MR_LP_ADV_ASYM_PAUSE |
4864 MR_LP_ADV_REMOTE_FAULT1 |
4865 MR_LP_ADV_REMOTE_FAULT2 |
4866 MR_LP_ADV_NEXT_PAGE |
4867 MR_TOGGLE_RX |
4868 MR_NP_RX);
4869 if (ap->rxconfig & ANEG_CFG_FD)
4870 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4871 if (ap->rxconfig & ANEG_CFG_HD)
4872 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4873 if (ap->rxconfig & ANEG_CFG_PS1)
4874 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4875 if (ap->rxconfig & ANEG_CFG_PS2)
4876 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4877 if (ap->rxconfig & ANEG_CFG_RF1)
4878 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4879 if (ap->rxconfig & ANEG_CFG_RF2)
4880 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4881 if (ap->rxconfig & ANEG_CFG_NP)
4882 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4883
4884 ap->link_time = ap->cur_time;
4885
4886 ap->flags ^= (MR_TOGGLE_TX);
4887 if (ap->rxconfig & 0x0008)
4888 ap->flags |= MR_TOGGLE_RX;
4889 if (ap->rxconfig & ANEG_CFG_NP)
4890 ap->flags |= MR_NP_RX;
4891 ap->flags |= MR_PAGE_RX;
4892
4893 ap->state = ANEG_STATE_COMPLETE_ACK;
4894 ret = ANEG_TIMER_ENAB;
4895 break;
4896
4897 case ANEG_STATE_COMPLETE_ACK:
4898 if (ap->ability_match != 0 &&
4899 ap->rxconfig == 0) {
4900 ap->state = ANEG_STATE_AN_ENABLE;
4901 break;
4902 }
4903 delta = ap->cur_time - ap->link_time;
4904 if (delta > ANEG_STATE_SETTLE_TIME) {
4905 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4906 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4907 } else {
4908 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4909 !(ap->flags & MR_NP_RX)) {
4910 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4911 } else {
4912 ret = ANEG_FAILED;
4913 }
4914 }
4915 }
4916 break;
4917
4918 case ANEG_STATE_IDLE_DETECT_INIT:
4919 ap->link_time = ap->cur_time;
4920 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4921 tw32_f(MAC_MODE, tp->mac_mode);
4922 udelay(40);
4923
4924 ap->state = ANEG_STATE_IDLE_DETECT;
4925 ret = ANEG_TIMER_ENAB;
4926 break;
4927
4928 case ANEG_STATE_IDLE_DETECT:
4929 if (ap->ability_match != 0 &&
4930 ap->rxconfig == 0) {
4931 ap->state = ANEG_STATE_AN_ENABLE;
4932 break;
4933 }
4934 delta = ap->cur_time - ap->link_time;
4935 if (delta > ANEG_STATE_SETTLE_TIME) {
4936 /* XXX another gem from the Broadcom driver :( */
4937 ap->state = ANEG_STATE_LINK_OK;
4938 }
4939 break;
4940
4941 case ANEG_STATE_LINK_OK:
4942 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4943 ret = ANEG_DONE;
4944 break;
4945
4946 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4947 /* ??? unimplemented */
4948 break;
4949
4950 case ANEG_STATE_NEXT_PAGE_WAIT:
4951 /* ??? unimplemented */
4952 break;
4953
4954 default:
4955 ret = ANEG_FAILED;
4956 break;
4957 }
4958
4959 return ret;
4960 }
4961
4962 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4963 {
4964 int res = 0;
4965 struct tg3_fiber_aneginfo aninfo;
4966 int status = ANEG_FAILED;
4967 unsigned int tick;
4968 u32 tmp;
4969
4970 tw32_f(MAC_TX_AUTO_NEG, 0);
4971
4972 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4973 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4974 udelay(40);
4975
4976 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4977 udelay(40);
4978
4979 memset(&aninfo, 0, sizeof(aninfo));
4980 aninfo.flags |= MR_AN_ENABLE;
4981 aninfo.state = ANEG_STATE_UNKNOWN;
4982 aninfo.cur_time = 0;
4983 tick = 0;
4984 while (++tick < 195000) {
4985 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4986 if (status == ANEG_DONE || status == ANEG_FAILED)
4987 break;
4988
4989 udelay(1);
4990 }
4991
4992 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4993 tw32_f(MAC_MODE, tp->mac_mode);
4994 udelay(40);
4995
4996 *txflags = aninfo.txconfig;
4997 *rxflags = aninfo.flags;
4998
4999 if (status == ANEG_DONE &&
5000 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5001 MR_LP_ADV_FULL_DUPLEX)))
5002 res = 1;
5003
5004 return res;
5005 }
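
/* The polling loop above gives the autoneg state machine a budget of
 * 195000 ticks of roughly 1 us each, i.e. about 195 ms (ignoring loop
 * overhead) to reach ANEG_DONE or ANEG_FAILED.
 */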
5006
5007 static void tg3_init_bcm8002(struct tg3 *tp)
5008 {
5009 u32 mac_status = tr32(MAC_STATUS);
5010 int i;
5011
5012 /* Reset when initializing for the first time or when we have a link. */
5013 if (tg3_flag(tp, INIT_COMPLETE) &&
5014 !(mac_status & MAC_STATUS_PCS_SYNCED))
5015 return;
5016
5017 /* Set PLL lock range. */
5018 tg3_writephy(tp, 0x16, 0x8007);
5019
5020 /* SW reset */
5021 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5022
5023 /* Wait for reset to complete. */
5024 /* XXX schedule_timeout() ... */
5025 for (i = 0; i < 500; i++)
5026 udelay(10);
5027
5028 /* Config mode; select PMA/Ch 1 regs. */
5029 tg3_writephy(tp, 0x10, 0x8411);
5030
5031 /* Enable auto-lock and comdet, select txclk for tx. */
5032 tg3_writephy(tp, 0x11, 0x0a10);
5033
5034 tg3_writephy(tp, 0x18, 0x00a0);
5035 tg3_writephy(tp, 0x16, 0x41ff);
5036
5037 /* Assert and deassert POR. */
5038 tg3_writephy(tp, 0x13, 0x0400);
5039 udelay(40);
5040 tg3_writephy(tp, 0x13, 0x0000);
5041
5042 tg3_writephy(tp, 0x11, 0x0a50);
5043 udelay(40);
5044 tg3_writephy(tp, 0x11, 0x0a10);
5045
5046 /* Wait for signal to stabilize */
5047 /* XXX schedule_timeout() ... */
5048 for (i = 0; i < 15000; i++)
5049 udelay(10);
5050
5051 /* Deselect the channel register so we can read the PHYID
5052 * later.
5053 */
5054 tg3_writephy(tp, 0x10, 0x8011);
5055 }
5056
5057 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5058 {
5059 u16 flowctrl;
5060 u32 sg_dig_ctrl, sg_dig_status;
5061 u32 serdes_cfg, expected_sg_dig_ctrl;
5062 int workaround, port_a;
5063 int current_link_up;
5064
5065 serdes_cfg = 0;
5066 expected_sg_dig_ctrl = 0;
5067 workaround = 0;
5068 port_a = 1;
5069 current_link_up = 0;
5070
5071 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5072 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5073 workaround = 1;
5074 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5075 port_a = 0;
5076
5077 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5078 /* preserve bits 20-23 for voltage regulator */
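/* i.e. 0x00000fff | 0x00006000 | 0x00f00000 == 0x00f06fff */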
5079 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5080 }
5081
5082 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5083
5084 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5085 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5086 if (workaround) {
5087 u32 val = serdes_cfg;
5088
5089 if (port_a)
5090 val |= 0xc010000;
5091 else
5092 val |= 0x4010000;
5093 tw32_f(MAC_SERDES_CFG, val);
5094 }
5095
5096 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5097 }
5098 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5099 tg3_setup_flow_control(tp, 0, 0);
5100 current_link_up = 1;
5101 }
5102 goto out;
5103 }
5104
5105 /* Want auto-negotiation. */
5106 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5107
5108 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5109 if (flowctrl & ADVERTISE_1000XPAUSE)
5110 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5111 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5112 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5113
5114 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5115 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5116 tp->serdes_counter &&
5117 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5118 MAC_STATUS_RCVD_CFG)) ==
5119 MAC_STATUS_PCS_SYNCED)) {
5120 tp->serdes_counter--;
5121 current_link_up = 1;
5122 goto out;
5123 }
5124 restart_autoneg:
5125 if (workaround)
5126 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5127 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5128 udelay(5);
5129 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5130
5131 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5132 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5133 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5134 MAC_STATUS_SIGNAL_DET)) {
5135 sg_dig_status = tr32(SG_DIG_STATUS);
5136 mac_status = tr32(MAC_STATUS);
5137
5138 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5139 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5140 u32 local_adv = 0, remote_adv = 0;
5141
5142 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5143 local_adv |= ADVERTISE_1000XPAUSE;
5144 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5145 local_adv |= ADVERTISE_1000XPSE_ASYM;
5146
5147 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5148 remote_adv |= LPA_1000XPAUSE;
5149 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5150 remote_adv |= LPA_1000XPAUSE_ASYM;
5151
5152 tp->link_config.rmt_adv =
5153 mii_adv_to_ethtool_adv_x(remote_adv);
5154
5155 tg3_setup_flow_control(tp, local_adv, remote_adv);
5156 current_link_up = 1;
5157 tp->serdes_counter = 0;
5158 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5159 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5160 if (tp->serdes_counter)
5161 tp->serdes_counter--;
5162 else {
5163 if (workaround) {
5164 u32 val = serdes_cfg;
5165
5166 if (port_a)
5167 val |= 0xc010000;
5168 else
5169 val |= 0x4010000;
5170
5171 tw32_f(MAC_SERDES_CFG, val);
5172 }
5173
5174 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5175 udelay(40);
5176
5177 /* Link parallel detection - link is up
5178  * only if we have PCS_SYNC and not
5179  * receiving config code words. */
5180 mac_status = tr32(MAC_STATUS);
5181 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5182 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5183 tg3_setup_flow_control(tp, 0, 0);
5184 current_link_up = 1;
5185 tp->phy_flags |=
5186 TG3_PHYFLG_PARALLEL_DETECT;
5187 tp->serdes_counter =
5188 SERDES_PARALLEL_DET_TIMEOUT;
5189 } else
5190 goto restart_autoneg;
5191 }
5192 }
5193 } else {
5194 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5195 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5196 }
5197
5198 out:
5199 return current_link_up;
5200 }
5201
5202 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5203 {
5204 int current_link_up = 0;
5205
5206 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5207 goto out;
5208
5209 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5210 u32 txflags, rxflags;
5211 int i;
5212
5213 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5214 u32 local_adv = 0, remote_adv = 0;
5215
5216 if (txflags & ANEG_CFG_PS1)
5217 local_adv |= ADVERTISE_1000XPAUSE;
5218 if (txflags & ANEG_CFG_PS2)
5219 local_adv |= ADVERTISE_1000XPSE_ASYM;
5220
5221 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5222 remote_adv |= LPA_1000XPAUSE;
5223 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5224 remote_adv |= LPA_1000XPAUSE_ASYM;
5225
5226 tp->link_config.rmt_adv =
5227 mii_adv_to_ethtool_adv_x(remote_adv);
5228
5229 tg3_setup_flow_control(tp, local_adv, remote_adv);
5230
5231 current_link_up = 1;
5232 }
5233 for (i = 0; i < 30; i++) {
5234 udelay(20);
5235 tw32_f(MAC_STATUS,
5236 (MAC_STATUS_SYNC_CHANGED |
5237 MAC_STATUS_CFG_CHANGED));
5238 udelay(40);
5239 if ((tr32(MAC_STATUS) &
5240 (MAC_STATUS_SYNC_CHANGED |
5241 MAC_STATUS_CFG_CHANGED)) == 0)
5242 break;
5243 }
5244
5245 mac_status = tr32(MAC_STATUS);
5246 if (current_link_up == 0 &&
5247 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5248 !(mac_status & MAC_STATUS_RCVD_CFG))
5249 current_link_up = 1;
5250 } else {
5251 tg3_setup_flow_control(tp, 0, 0);
5252
5253 /* Forcing 1000FD link up. */
5254 current_link_up = 1;
5255
5256 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5257 udelay(40);
5258
5259 tw32_f(MAC_MODE, tp->mac_mode);
5260 udelay(40);
5261 }
5262
5263 out:
5264 return current_link_up;
5265 }
5266
5267 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5268 {
5269 u32 orig_pause_cfg;
5270 u16 orig_active_speed;
5271 u8 orig_active_duplex;
5272 u32 mac_status;
5273 int current_link_up;
5274 int i;
5275
5276 orig_pause_cfg = tp->link_config.active_flowctrl;
5277 orig_active_speed = tp->link_config.active_speed;
5278 orig_active_duplex = tp->link_config.active_duplex;
5279
5280 if (!tg3_flag(tp, HW_AUTONEG) &&
5281 tp->link_up &&
5282 tg3_flag(tp, INIT_COMPLETE)) {
5283 mac_status = tr32(MAC_STATUS);
5284 mac_status &= (MAC_STATUS_PCS_SYNCED |
5285 MAC_STATUS_SIGNAL_DET |
5286 MAC_STATUS_CFG_CHANGED |
5287 MAC_STATUS_RCVD_CFG);
5288 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5289 MAC_STATUS_SIGNAL_DET)) {
5290 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5291 MAC_STATUS_CFG_CHANGED));
5292 return 0;
5293 }
5294 }
5295
5296 tw32_f(MAC_TX_AUTO_NEG, 0);
5297
5298 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5299 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5300 tw32_f(MAC_MODE, tp->mac_mode);
5301 udelay(40);
5302
5303 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5304 tg3_init_bcm8002(tp);
5305
5306 /* Enable link change event even when serdes polling. */
5307 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5308 udelay(40);
5309
5310 current_link_up = 0;
5311 tp->link_config.rmt_adv = 0;
5312 mac_status = tr32(MAC_STATUS);
5313
5314 if (tg3_flag(tp, HW_AUTONEG))
5315 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5316 else
5317 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5318
5319 tp->napi[0].hw_status->status =
5320 (SD_STATUS_UPDATED |
5321 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5322
5323 for (i = 0; i < 100; i++) {
5324 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5325 MAC_STATUS_CFG_CHANGED));
5326 udelay(5);
5327 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5328 MAC_STATUS_CFG_CHANGED |
5329 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5330 break;
5331 }
5332
5333 mac_status = tr32(MAC_STATUS);
5334 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5335 current_link_up = 0;
5336 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5337 tp->serdes_counter == 0) {
5338 tw32_f(MAC_MODE, (tp->mac_mode |
5339 MAC_MODE_SEND_CONFIGS));
5340 udelay(1);
5341 tw32_f(MAC_MODE, tp->mac_mode);
5342 }
5343 }
5344
5345 if (current_link_up == 1) {
5346 tp->link_config.active_speed = SPEED_1000;
5347 tp->link_config.active_duplex = DUPLEX_FULL;
5348 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5349 LED_CTRL_LNKLED_OVERRIDE |
5350 LED_CTRL_1000MBPS_ON));
5351 } else {
5352 tp->link_config.active_speed = SPEED_UNKNOWN;
5353 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5354 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5355 LED_CTRL_LNKLED_OVERRIDE |
5356 LED_CTRL_TRAFFIC_OVERRIDE));
5357 }
5358
5359 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5360 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5361 if (orig_pause_cfg != now_pause_cfg ||
5362 orig_active_speed != tp->link_config.active_speed ||
5363 orig_active_duplex != tp->link_config.active_duplex)
5364 tg3_link_report(tp);
5365 }
5366
5367 return 0;
5368 }
5369
5370 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5371 {
5372 int current_link_up, err = 0;
5373 u32 bmsr, bmcr;
5374 u16 current_speed;
5375 u8 current_duplex;
5376 u32 local_adv, remote_adv;
5377
5378 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5379 tw32_f(MAC_MODE, tp->mac_mode);
5380 udelay(40);
5381
5382 tw32(MAC_EVENT, 0);
5383
5384 tw32_f(MAC_STATUS,
5385 (MAC_STATUS_SYNC_CHANGED |
5386 MAC_STATUS_CFG_CHANGED |
5387 MAC_STATUS_MI_COMPLETION |
5388 MAC_STATUS_LNKSTATE_CHANGED));
5389 udelay(40);
5390
5391 if (force_reset)
5392 tg3_phy_reset(tp);
5393
5394 current_link_up = 0;
5395 current_speed = SPEED_UNKNOWN;
5396 current_duplex = DUPLEX_UNKNOWN;
5397 tp->link_config.rmt_adv = 0;
5398
5399 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5400 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5401 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5402 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5403 bmsr |= BMSR_LSTATUS;
5404 else
5405 bmsr &= ~BMSR_LSTATUS;
5406 }
5407
5408 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5409
5410 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5411 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5412 /* do nothing, just check for link up at the end */
5413 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5414 u32 adv, newadv;
5415
5416 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5417 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5418 ADVERTISE_1000XPAUSE |
5419 ADVERTISE_1000XPSE_ASYM |
5420 ADVERTISE_SLCT);
5421
5422 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5423 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5424
5425 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5426 tg3_writephy(tp, MII_ADVERTISE, newadv);
5427 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5428 tg3_writephy(tp, MII_BMCR, bmcr);
5429
5430 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5431 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5432 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5433
5434 return err;
5435 }
5436 } else {
5437 u32 new_bmcr;
5438
5439 bmcr &= ~BMCR_SPEED1000;
5440 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5441
5442 if (tp->link_config.duplex == DUPLEX_FULL)
5443 new_bmcr |= BMCR_FULLDPLX;
5444
5445 if (new_bmcr != bmcr) {
5446 /* BMCR_SPEED1000 is a reserved bit that needs
5447 * to be set on write.
5448 */
5449 new_bmcr |= BMCR_SPEED1000;
5450
5451 /* Force a linkdown */
5452 if (tp->link_up) {
5453 u32 adv;
5454
5455 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5456 adv &= ~(ADVERTISE_1000XFULL |
5457 ADVERTISE_1000XHALF |
5458 ADVERTISE_SLCT);
5459 tg3_writephy(tp, MII_ADVERTISE, adv);
5460 tg3_writephy(tp, MII_BMCR, bmcr |
5461 BMCR_ANRESTART |
5462 BMCR_ANENABLE);
5463 udelay(10);
5464 tg3_carrier_off(tp);
5465 }
5466 tg3_writephy(tp, MII_BMCR, new_bmcr);
5467 bmcr = new_bmcr;
5468 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5469 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5470 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5471 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5472 bmsr |= BMSR_LSTATUS;
5473 else
5474 bmsr &= ~BMSR_LSTATUS;
5475 }
5476 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5477 }
5478 }
5479
5480 if (bmsr & BMSR_LSTATUS) {
5481 current_speed = SPEED_1000;
5482 current_link_up = 1;
5483 if (bmcr & BMCR_FULLDPLX)
5484 current_duplex = DUPLEX_FULL;
5485 else
5486 current_duplex = DUPLEX_HALF;
5487
5488 local_adv = 0;
5489 remote_adv = 0;
5490
5491 if (bmcr & BMCR_ANENABLE) {
5492 u32 common;
5493
5494 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5495 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5496 common = local_adv & remote_adv;
5497 if (common & (ADVERTISE_1000XHALF |
5498 ADVERTISE_1000XFULL)) {
5499 if (common & ADVERTISE_1000XFULL)
5500 current_duplex = DUPLEX_FULL;
5501 else
5502 current_duplex = DUPLEX_HALF;
5503
5504 tp->link_config.rmt_adv =
5505 mii_adv_to_ethtool_adv_x(remote_adv);
5506 } else if (!tg3_flag(tp, 5780_CLASS)) {
5507 /* Link is up via parallel detect */
5508 } else {
5509 current_link_up = 0;
5510 }
5511 }
5512 }
5513
5514 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5515 tg3_setup_flow_control(tp, local_adv, remote_adv);
5516
5517 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5518 if (tp->link_config.active_duplex == DUPLEX_HALF)
5519 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5520
5521 tw32_f(MAC_MODE, tp->mac_mode);
5522 udelay(40);
5523
5524 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5525
5526 tp->link_config.active_speed = current_speed;
5527 tp->link_config.active_duplex = current_duplex;
5528
5529 tg3_test_and_report_link_chg(tp, current_link_up);
5530 return err;
5531 }
5532
5533 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5534 {
5535 if (tp->serdes_counter) {
5536 /* Give autoneg time to complete. */
5537 tp->serdes_counter--;
5538 return;
5539 }
5540
5541 if (!tp->link_up &&
5542 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5543 u32 bmcr;
5544
5545 tg3_readphy(tp, MII_BMCR, &bmcr);
5546 if (bmcr & BMCR_ANENABLE) {
5547 u32 phy1, phy2;
5548
5549 /* Select shadow register 0x1f */
5550 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5551 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5552
5553 /* Select expansion interrupt status register */
5554 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5555 MII_TG3_DSP_EXP1_INT_STAT);
5556 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5557 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5558
5559 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5560 /* We have signal detect and not receiving
5561 * config code words, link is up by parallel
5562 * detection.
5563 */
5564
5565 bmcr &= ~BMCR_ANENABLE;
5566 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5567 tg3_writephy(tp, MII_BMCR, bmcr);
5568 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5569 }
5570 }
5571 } else if (tp->link_up &&
5572 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5573 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5574 u32 phy2;
5575
5576 /* Select expansion interrupt status register */
5577 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5578 MII_TG3_DSP_EXP1_INT_STAT);
5579 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5580 if (phy2 & 0x20) {
5581 u32 bmcr;
5582
5583 /* Config code words received, turn on autoneg. */
5584 tg3_readphy(tp, MII_BMCR, &bmcr);
5585 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5586
5587 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5588
5589 }
5590 }
5591 }
5592
5593 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5594 {
5595 u32 val;
5596 int err;
5597
5598 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5599 err = tg3_setup_fiber_phy(tp, force_reset);
5600 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5601 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5602 else
5603 err = tg3_setup_copper_phy(tp, force_reset);
5604
5605 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5606 u32 scale;
5607
5608 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5609 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5610 scale = 65;
5611 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5612 scale = 6;
5613 else
5614 scale = 12;
5615
5616 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5617 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5618 tw32(GRC_MISC_CFG, val);
5619 }
5620
5621 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5622 (6 << TX_LENGTHS_IPG_SHIFT);
5623 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5624 tg3_asic_rev(tp) == ASIC_REV_5762)
5625 val |= tr32(MAC_TX_LENGTHS) &
5626 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5627 TX_LENGTHS_CNT_DWN_VAL_MSK);
5628
5629 if (tp->link_config.active_speed == SPEED_1000 &&
5630 tp->link_config.active_duplex == DUPLEX_HALF)
5631 tw32(MAC_TX_LENGTHS, val |
5632 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5633 else
5634 tw32(MAC_TX_LENGTHS, val |
5635 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5636
5637 if (!tg3_flag(tp, 5705_PLUS)) {
5638 if (tp->link_up) {
5639 tw32(HOSTCC_STAT_COAL_TICKS,
5640 tp->coal.stats_block_coalesce_usecs);
5641 } else {
5642 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5643 }
5644 }
5645
5646 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5647 val = tr32(PCIE_PWR_MGMT_THRESH);
5648 if (!tp->link_up)
5649 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5650 tp->pwrmgmt_thresh;
5651 else
5652 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5653 tw32(PCIE_PWR_MGMT_THRESH, val);
5654 }
5655
5656 return err;
5657 }
5658
5659 /* tp->lock must be held */
5660 static u64 tg3_refclk_read(struct tg3 *tp)
5661 {
5662 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5663 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5664 }
5665
5666 /* tp->lock must be held */
5667 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5668 {
5669 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5670 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5671 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5672 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5673 }
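
/* Illustrative example: for newval == 0x0000000123456789ULL the writes
 * above are 0x23456789 to TG3_EAV_REF_CLCK_LSB and 0x00000001 to
 * TG3_EAV_REF_CLCK_MSB.
 */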
5674
5675 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5676 static inline void tg3_full_unlock(struct tg3 *tp);
5677 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5678 {
5679 struct tg3 *tp = netdev_priv(dev);
5680
5681 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5682 SOF_TIMESTAMPING_RX_SOFTWARE |
5683 SOF_TIMESTAMPING_SOFTWARE |
5684 SOF_TIMESTAMPING_TX_HARDWARE |
5685 SOF_TIMESTAMPING_RX_HARDWARE |
5686 SOF_TIMESTAMPING_RAW_HARDWARE;
5687
5688 if (tp->ptp_clock)
5689 info->phc_index = ptp_clock_index(tp->ptp_clock);
5690 else
5691 info->phc_index = -1;
5692
5693 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5694
5695 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5696 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5697 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5698 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5699 return 0;
5700 }
5701
5702 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5703 {
5704 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5705 bool neg_adj = false;
5706 u32 correction = 0;
5707
5708 if (ppb < 0) {
5709 neg_adj = true;
5710 ppb = -ppb;
5711 }
5712
5713 /* Frequency adjustment is performed using hardware with a 24 bit
5714 * accumulator and a programmable correction value. On each clock
5715 * cycle, the correction value is added to the accumulator and when it
5716 * overflows, the time counter is incremented/decremented.
5717 *
5718 * So conversion from ppb to correction value is
5719 * ppb * (1 << 24) / 1000000000
5720 */
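/* For example, ppb == 1000 (1 ppm) gives
 * 1000 * 16777216 / 1000000000 == 16 after the integer division.
 */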
5721 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5722 TG3_EAV_REF_CLK_CORRECT_MASK;
5723
5724 tg3_full_lock(tp, 0);
5725
5726 if (correction)
5727 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5728 TG3_EAV_REF_CLK_CORRECT_EN |
5729 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5730 else
5731 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5732
5733 tg3_full_unlock(tp);
5734
5735 return 0;
5736 }
5737
5738 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5739 {
5740 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5741
5742 tg3_full_lock(tp, 0);
5743 tp->ptp_adjust += delta;
5744 tg3_full_unlock(tp);
5745
5746 return 0;
5747 }
5748
5749 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5750 {
5751 u64 ns;
5752 u32 remainder;
5753 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5754
5755 tg3_full_lock(tp, 0);
5756 ns = tg3_refclk_read(tp);
5757 ns += tp->ptp_adjust;
5758 tg3_full_unlock(tp);
5759
5760 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5761 ts->tv_nsec = remainder;
5762
5763 return 0;
5764 }
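
/* Illustrative example: ns == 1234567890 splits into tv_sec == 1 and
 * tv_nsec == 234567890.
 */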
5765
5766 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5767 const struct timespec *ts)
5768 {
5769 u64 ns;
5770 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5771
5772 ns = timespec_to_ns(ts);
5773
5774 tg3_full_lock(tp, 0);
5775 tg3_refclk_write(tp, ns);
5776 tp->ptp_adjust = 0;
5777 tg3_full_unlock(tp);
5778
5779 return 0;
5780 }
5781
5782 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5783 struct ptp_clock_request *rq, int on)
5784 {
5785 return -EOPNOTSUPP;
5786 }
5787
5788 static const struct ptp_clock_info tg3_ptp_caps = {
5789 .owner = THIS_MODULE,
5790 .name = "tg3 clock",
5791 .max_adj = 250000000,
5792 .n_alarm = 0,
5793 .n_ext_ts = 0,
5794 .n_per_out = 0,
5795 .pps = 0,
5796 .adjfreq = tg3_ptp_adjfreq,
5797 .adjtime = tg3_ptp_adjtime,
5798 .gettime = tg3_ptp_gettime,
5799 .settime = tg3_ptp_settime,
5800 .enable = tg3_ptp_enable,
5801 };
5802
5803 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5804 struct skb_shared_hwtstamps *timestamp)
5805 {
5806 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5807 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5808 tp->ptp_adjust);
5809 }
5810
5811 /* tp->lock must be held */
5812 static void tg3_ptp_init(struct tg3 *tp)
5813 {
5814 if (!tg3_flag(tp, PTP_CAPABLE))
5815 return;
5816
5817 /* Initialize the hardware clock to the system time. */
5818 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5819 tp->ptp_adjust = 0;
5820 tp->ptp_info = tg3_ptp_caps;
5821 }
5822
5823 /* tp->lock must be held */
5824 static void tg3_ptp_resume(struct tg3 *tp)
5825 {
5826 if (!tg3_flag(tp, PTP_CAPABLE))
5827 return;
5828
5829 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5830 tp->ptp_adjust = 0;
5831 }
5832
5833 static void tg3_ptp_fini(struct tg3 *tp)
5834 {
5835 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5836 return;
5837
5838 ptp_clock_unregister(tp->ptp_clock);
5839 tp->ptp_clock = NULL;
5840 tp->ptp_adjust = 0;
5841 }
5842
5843 static inline int tg3_irq_sync(struct tg3 *tp)
5844 {
5845 return tp->irq_sync;
5846 }
5847
5848 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5849 {
5850 int i;
5851
5852 dst = (u32 *)((u8 *)dst + off);
5853 for (i = 0; i < len; i += sizeof(u32))
5854 *dst++ = tr32(off + i);
5855 }
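
/* Illustrative example: tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0) copies
 * the 0x4f0 bytes of register space starting at MAC_MODE into the dump
 * buffer at the same byte offset, one u32 at a time.
 */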
5856
5857 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5858 {
5859 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5860 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5861 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5862 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5863 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5864 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5865 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5866 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5867 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5868 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5869 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5870 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5871 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5872 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5873 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5874 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5875 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5876 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5877 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5878
5879 if (tg3_flag(tp, SUPPORT_MSIX))
5880 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5881
5882 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5883 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5884 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5885 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5886 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5887 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5888 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5889 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5890
5891 if (!tg3_flag(tp, 5705_PLUS)) {
5892 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5893 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5894 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5895 }
5896
5897 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5898 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5899 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5900 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5901 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5902
5903 if (tg3_flag(tp, NVRAM))
5904 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5905 }
5906
5907 static void tg3_dump_state(struct tg3 *tp)
5908 {
5909 int i;
5910 u32 *regs;
5911
5912 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5913 if (!regs)
5914 return;
5915
5916 if (tg3_flag(tp, PCI_EXPRESS)) {
5917 /* Read up to but not including private PCI registers */
5918 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5919 regs[i / sizeof(u32)] = tr32(i);
5920 } else
5921 tg3_dump_legacy_regs(tp, regs);
5922
5923 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5924 if (!regs[i + 0] && !regs[i + 1] &&
5925 !regs[i + 2] && !regs[i + 3])
5926 continue;
5927
5928 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5929 i * 4,
5930 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5931 }
5932
5933 kfree(regs);
5934
5935 for (i = 0; i < tp->irq_cnt; i++) {
5936 struct tg3_napi *tnapi = &tp->napi[i];
5937
5938 /* SW status block */
5939 netdev_err(tp->dev,
5940 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5941 i,
5942 tnapi->hw_status->status,
5943 tnapi->hw_status->status_tag,
5944 tnapi->hw_status->rx_jumbo_consumer,
5945 tnapi->hw_status->rx_consumer,
5946 tnapi->hw_status->rx_mini_consumer,
5947 tnapi->hw_status->idx[0].rx_producer,
5948 tnapi->hw_status->idx[0].tx_consumer);
5949
5950 netdev_err(tp->dev,
5951 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5952 i,
5953 tnapi->last_tag, tnapi->last_irq_tag,
5954 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5955 tnapi->rx_rcb_ptr,
5956 tnapi->prodring.rx_std_prod_idx,
5957 tnapi->prodring.rx_std_cons_idx,
5958 tnapi->prodring.rx_jmb_prod_idx,
5959 tnapi->prodring.rx_jmb_cons_idx);
5960 }
5961 }
5962
5963 /* This is called whenever we suspect that the system chipset is re-
5964 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5965 * is bogus tx completions. We try to recover by setting the
5966 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5967 * in the workqueue.
5968 */
5969 static void tg3_tx_recover(struct tg3 *tp)
5970 {
5971 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5972 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5973
5974 netdev_warn(tp->dev,
5975 "The system may be re-ordering memory-mapped I/O "
5976 "cycles to the network device, attempting to recover. "
5977 "Please report the problem to the driver maintainer "
5978 "and include system chipset information.\n");
5979
5980 spin_lock(&tp->lock);
5981 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5982 spin_unlock(&tp->lock);
5983 }
5984
5985 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5986 {
5987 /* Tell compiler to fetch tx indices from memory. */
5988 barrier();
5989 return tnapi->tx_pending -
5990 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5991 }
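
/* An illustrative example of the arithmetic above, assuming the default
 * 512-entry tx ring (mask 511): with tx_prod = 5 and tx_cons = 510, the
 * in-flight count is (5 - 510) & 511 = 7, so a tx_pending of 511 leaves
 * 504 free descriptors. The mask makes producer wraparound transparent.
 */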
5992
5993 /* Tigon3 never reports partial packet sends. So we do not
5994 * need special logic to handle SKBs that have not had all
5995 * of their frags sent yet, like SunGEM does.
5996 */
5997 static void tg3_tx(struct tg3_napi *tnapi)
5998 {
5999 struct tg3 *tp = tnapi->tp;
6000 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6001 u32 sw_idx = tnapi->tx_cons;
6002 struct netdev_queue *txq;
6003 int index = tnapi - tp->napi;
6004 unsigned int pkts_compl = 0, bytes_compl = 0;
6005
6006 if (tg3_flag(tp, ENABLE_TSS))
6007 index--;
6008
6009 txq = netdev_get_tx_queue(tp->dev, index);
6010
6011 while (sw_idx != hw_idx) {
6012 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6013 struct sk_buff *skb = ri->skb;
6014 int i, tx_bug = 0;
6015
6016 if (unlikely(skb == NULL)) {
6017 tg3_tx_recover(tp);
6018 return;
6019 }
6020
6021 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6022 struct skb_shared_hwtstamps timestamp;
6023 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6024 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6025
6026 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6027
6028 skb_tstamp_tx(skb, &timestamp);
6029 }
6030
6031 pci_unmap_single(tp->pdev,
6032 dma_unmap_addr(ri, mapping),
6033 skb_headlen(skb),
6034 PCI_DMA_TODEVICE);
6035
6036 ri->skb = NULL;
6037
6038 while (ri->fragmented) {
6039 ri->fragmented = false;
6040 sw_idx = NEXT_TX(sw_idx);
6041 ri = &tnapi->tx_buffers[sw_idx];
6042 }
6043
6044 sw_idx = NEXT_TX(sw_idx);
6045
6046 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6047 ri = &tnapi->tx_buffers[sw_idx];
6048 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6049 tx_bug = 1;
6050
6051 pci_unmap_page(tp->pdev,
6052 dma_unmap_addr(ri, mapping),
6053 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6054 PCI_DMA_TODEVICE);
6055
6056 while (ri->fragmented) {
6057 ri->fragmented = false;
6058 sw_idx = NEXT_TX(sw_idx);
6059 ri = &tnapi->tx_buffers[sw_idx];
6060 }
6061
6062 sw_idx = NEXT_TX(sw_idx);
6063 }
6064
6065 pkts_compl++;
6066 bytes_compl += skb->len;
6067
6068 dev_kfree_skb(skb);
6069
6070 if (unlikely(tx_bug)) {
6071 tg3_tx_recover(tp);
6072 return;
6073 }
6074 }
6075
6076 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6077
6078 tnapi->tx_cons = sw_idx;
6079
6080 /* Need to make the tx_cons update visible to tg3_start_xmit()
6081 * before checking for netif_queue_stopped(). Without the
6082 * memory barrier, there is a small possibility that tg3_start_xmit()
6083 * will miss it and cause the queue to be stopped forever.
6084 */
6085 smp_mb();
6086
6087 if (unlikely(netif_tx_queue_stopped(txq) &&
6088 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6089 __netif_tx_lock(txq, smp_processor_id());
6090 if (netif_tx_queue_stopped(txq) &&
6091 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6092 netif_tx_wake_queue(txq);
6093 __netif_tx_unlock(txq);
6094 }
6095 }
6096
6097 static void tg3_frag_free(bool is_frag, void *data)
6098 {
6099 if (is_frag)
6100 put_page(virt_to_head_page(data));
6101 else
6102 kfree(data);
6103 }
6104
6105 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6106 {
6107 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6108 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6109
6110 if (!ri->data)
6111 return;
6112
6113 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6114 map_sz, PCI_DMA_FROMDEVICE);
6115 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6116 ri->data = NULL;
6117 }
6118
6119
6120 /* Returns size of skb allocated or < 0 on error.
6121 *
6122 * We only need to fill in the address because the other members
6123 * of the RX descriptor are invariant, see tg3_init_rings.
6124 *
6125 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6126 * posting buffers we only dirty the first cache line of the RX
6127 * descriptor (containing the address). Whereas for the RX status
6128 * buffers the cpu only reads the last cacheline of the RX descriptor
6129 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6130 */
6131 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6132 u32 opaque_key, u32 dest_idx_unmasked,
6133 unsigned int *frag_size)
6134 {
6135 struct tg3_rx_buffer_desc *desc;
6136 struct ring_info *map;
6137 u8 *data;
6138 dma_addr_t mapping;
6139 int skb_size, data_size, dest_idx;
6140
6141 switch (opaque_key) {
6142 case RXD_OPAQUE_RING_STD:
6143 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6144 desc = &tpr->rx_std[dest_idx];
6145 map = &tpr->rx_std_buffers[dest_idx];
6146 data_size = tp->rx_pkt_map_sz;
6147 break;
6148
6149 case RXD_OPAQUE_RING_JUMBO:
6150 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6151 desc = &tpr->rx_jmb[dest_idx].std;
6152 map = &tpr->rx_jmb_buffers[dest_idx];
6153 data_size = TG3_RX_JMB_MAP_SZ;
6154 break;
6155
6156 default:
6157 return -EINVAL;
6158 }
6159
6160 /* Do not overwrite any of the map or rp information
6161 * until we are sure we can commit to a new buffer.
6162 *
6163 * Callers depend upon this behavior and assume that
6164 * we leave everything unchanged if we fail.
6165 */
6166 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6167 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6168 if (skb_size <= PAGE_SIZE) {
6169 data = netdev_alloc_frag(skb_size);
6170 *frag_size = skb_size;
6171 } else {
6172 data = kmalloc(skb_size, GFP_ATOMIC);
6173 *frag_size = 0;
6174 }
6175 if (!data)
6176 return -ENOMEM;
6177
6178 mapping = pci_map_single(tp->pdev,
6179 data + TG3_RX_OFFSET(tp),
6180 data_size,
6181 PCI_DMA_FROMDEVICE);
6182 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6183 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6184 return -EIO;
6185 }
6186
6187 map->data = data;
6188 dma_unmap_addr_set(map, mapping, mapping);
6189
6190 desc->addr_hi = ((u64)mapping >> 32);
6191 desc->addr_lo = ((u64)mapping & 0xffffffff);
6192
6193 return data_size;
6194 }
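
/* A sizing sketch for the allocator above (illustrative; the exact
 * numbers depend on TG3_RX_OFFSET() and the architecture): a standard
 * ring buffer with a data_size of around 1.5KB aligns to well under one
 * page, so the cheap netdev_alloc_frag() path is taken, while a jumbo
 * buffer (TG3_RX_JMB_MAP_SZ, roughly 9KB) exceeds PAGE_SIZE on 4KB-page
 * systems and falls back to kmalloc(). tg3_frag_free() keys off the
 * same skb_size <= PAGE_SIZE test to pick the matching free routine.
 */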
6195
6196 /* We only need to copy over the address because the other
6197 * members of the RX descriptor are invariant. See notes above
6198 * tg3_alloc_rx_data for full details.
6199 */
6200 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6201 struct tg3_rx_prodring_set *dpr,
6202 u32 opaque_key, int src_idx,
6203 u32 dest_idx_unmasked)
6204 {
6205 struct tg3 *tp = tnapi->tp;
6206 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6207 struct ring_info *src_map, *dest_map;
6208 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6209 int dest_idx;
6210
6211 switch (opaque_key) {
6212 case RXD_OPAQUE_RING_STD:
6213 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6214 dest_desc = &dpr->rx_std[dest_idx];
6215 dest_map = &dpr->rx_std_buffers[dest_idx];
6216 src_desc = &spr->rx_std[src_idx];
6217 src_map = &spr->rx_std_buffers[src_idx];
6218 break;
6219
6220 case RXD_OPAQUE_RING_JUMBO:
6221 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6222 dest_desc = &dpr->rx_jmb[dest_idx].std;
6223 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6224 src_desc = &spr->rx_jmb[src_idx].std;
6225 src_map = &spr->rx_jmb_buffers[src_idx];
6226 break;
6227
6228 default:
6229 return;
6230 }
6231
6232 dest_map->data = src_map->data;
6233 dma_unmap_addr_set(dest_map, mapping,
6234 dma_unmap_addr(src_map, mapping));
6235 dest_desc->addr_hi = src_desc->addr_hi;
6236 dest_desc->addr_lo = src_desc->addr_lo;
6237
6238 /* Ensure that the update to the skb happens after the physical
6239 * addresses have been transferred to the new BD location.
6240 */
6241 smp_wmb();
6242
6243 src_map->data = NULL;
6244 }
6245
6246 /* The RX ring scheme is composed of multiple rings which post fresh
6247 * buffers to the chip, and one special ring the chip uses to report
6248 * status back to the host.
6249 *
6250 * The special ring reports the status of received packets to the
6251 * host. The chip does not write into the original descriptor the
6252 * RX buffer was obtained from. The chip simply takes the original
6253 * descriptor as provided by the host, updates the status and length
6254 * field, then writes this into the next status ring entry.
6255 *
6256 * Each ring the host uses to post buffers to the chip is described
6257 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6258 * it is first placed into the on-chip RAM. When the packet's length
6259 * is known, the chip walks down the TG3_BDINFO entries to select the
6260 * ring. Each TG3_BDINFO specifies a MAXLEN field, and the first
6261 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6262 *
6263 * The "separate ring for rx status" scheme may sound queer, but it makes
6264 * sense from a cache coherency perspective. If only the host writes
6265 * to the buffer post rings, and only the chip writes to the rx status
6266 * rings, then cache lines never move beyond shared-modified state.
6267 * If both the host and chip were to write into the same ring, cache line
6268 * eviction could occur since both entities want it in an exclusive state.
6269 */
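/* A rough sketch of the flow described above:
 *
 *   host  --posts buffers-->  std/jumbo producer rings  -->  chip
 *   chip  --posts status -->  rx return (status) ring   -->  host
 *
 * Each ring has exactly one writer, so no cache line is ever dirtied
 * from both sides.
 */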
6270 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6271 {
6272 struct tg3 *tp = tnapi->tp;
6273 u32 work_mask, rx_std_posted = 0;
6274 u32 std_prod_idx, jmb_prod_idx;
6275 u32 sw_idx = tnapi->rx_rcb_ptr;
6276 u16 hw_idx;
6277 int received;
6278 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6279
6280 hw_idx = *(tnapi->rx_rcb_prod_idx);
6281 /*
6282 * We need to order the read of hw_idx and the read of
6283 * the opaque cookie.
6284 */
6285 rmb();
6286 work_mask = 0;
6287 received = 0;
6288 std_prod_idx = tpr->rx_std_prod_idx;
6289 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6290 while (sw_idx != hw_idx && budget > 0) {
6291 struct ring_info *ri;
6292 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6293 unsigned int len;
6294 struct sk_buff *skb;
6295 dma_addr_t dma_addr;
6296 u32 opaque_key, desc_idx, *post_ptr;
6297 u8 *data;
6298 u64 tstamp = 0;
6299
6300 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6301 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6302 if (opaque_key == RXD_OPAQUE_RING_STD) {
6303 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6304 dma_addr = dma_unmap_addr(ri, mapping);
6305 data = ri->data;
6306 post_ptr = &std_prod_idx;
6307 rx_std_posted++;
6308 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6309 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6310 dma_addr = dma_unmap_addr(ri, mapping);
6311 data = ri->data;
6312 post_ptr = &jmb_prod_idx;
6313 } else
6314 goto next_pkt_nopost;
6315
6316 work_mask |= opaque_key;
6317
6318 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6319 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6320 drop_it:
6321 tg3_recycle_rx(tnapi, tpr, opaque_key,
6322 desc_idx, *post_ptr);
6323 drop_it_no_recycle:
6324 /* Other statistics are tracked by the card. */
6325 tp->rx_dropped++;
6326 goto next_pkt;
6327 }
6328
6329 prefetch(data + TG3_RX_OFFSET(tp));
6330 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6331 ETH_FCS_LEN;
6332
6333 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6334 RXD_FLAG_PTPSTAT_PTPV1 ||
6335 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6336 RXD_FLAG_PTPSTAT_PTPV2) {
6337 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6338 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6339 }
6340
6341 if (len > TG3_RX_COPY_THRESH(tp)) {
6342 int skb_size;
6343 unsigned int frag_size;
6344
6345 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6346 *post_ptr, &frag_size);
6347 if (skb_size < 0)
6348 goto drop_it;
6349
6350 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6351 PCI_DMA_FROMDEVICE);
6352
6353 skb = build_skb(data, frag_size);
6354 if (!skb) {
6355 tg3_frag_free(frag_size != 0, data);
6356 goto drop_it_no_recycle;
6357 }
6358 skb_reserve(skb, TG3_RX_OFFSET(tp));
6359 /* Ensure that the update to the data happens
6360 * after the usage of the old DMA mapping.
6361 */
6362 smp_wmb();
6363
6364 ri->data = NULL;
6365
6366 } else {
6367 tg3_recycle_rx(tnapi, tpr, opaque_key,
6368 desc_idx, *post_ptr);
6369
6370 skb = netdev_alloc_skb(tp->dev,
6371 len + TG3_RAW_IP_ALIGN);
6372 if (skb == NULL)
6373 goto drop_it_no_recycle;
6374
6375 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6376 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6377 memcpy(skb->data,
6378 data + TG3_RX_OFFSET(tp),
6379 len);
6380 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6381 }
6382
6383 skb_put(skb, len);
6384 if (tstamp)
6385 tg3_hwclock_to_timestamp(tp, tstamp,
6386 skb_hwtstamps(skb));
6387
6388 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6389 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6390 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6391 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6392 skb->ip_summed = CHECKSUM_UNNECESSARY;
6393 else
6394 skb_checksum_none_assert(skb);
6395
6396 skb->protocol = eth_type_trans(skb, tp->dev);
6397
6398 if (len > (tp->dev->mtu + ETH_HLEN) &&
6399 skb->protocol != htons(ETH_P_8021Q)) {
6400 dev_kfree_skb(skb);
6401 goto drop_it_no_recycle;
6402 }
6403
6404 if (desc->type_flags & RXD_FLAG_VLAN &&
6405 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6406 __vlan_hwaccel_put_tag(skb,
6407 desc->err_vlan & RXD_VLAN_MASK);
6408
6409 napi_gro_receive(&tnapi->napi, skb);
6410
6411 received++;
6412 budget--;
6413
6414 next_pkt:
6415 (*post_ptr)++;
6416
6417 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6418 tpr->rx_std_prod_idx = std_prod_idx &
6419 tp->rx_std_ring_mask;
6420 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6421 tpr->rx_std_prod_idx);
6422 work_mask &= ~RXD_OPAQUE_RING_STD;
6423 rx_std_posted = 0;
6424 }
6425 next_pkt_nopost:
6426 sw_idx++;
6427 sw_idx &= tp->rx_ret_ring_mask;
6428
6429 /* Refresh hw_idx to see if there is new work */
6430 if (sw_idx == hw_idx) {
6431 hw_idx = *(tnapi->rx_rcb_prod_idx);
6432 rmb();
6433 }
6434 }
6435
6436 /* ACK the status ring. */
6437 tnapi->rx_rcb_ptr = sw_idx;
6438 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6439
6440 /* Refill RX ring(s). */
6441 if (!tg3_flag(tp, ENABLE_RSS)) {
6442 /* Sync BD data before updating mailbox */
6443 wmb();
6444
6445 if (work_mask & RXD_OPAQUE_RING_STD) {
6446 tpr->rx_std_prod_idx = std_prod_idx &
6447 tp->rx_std_ring_mask;
6448 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6449 tpr->rx_std_prod_idx);
6450 }
6451 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6452 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6453 tp->rx_jmb_ring_mask;
6454 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6455 tpr->rx_jmb_prod_idx);
6456 }
6457 mmiowb();
6458 } else if (work_mask) {
6459 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6460 * updated before the producer indices can be updated.
6461 */
6462 smp_wmb();
6463
6464 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6465 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6466
6467 if (tnapi != &tp->napi[1]) {
6468 tp->rx_refill = true;
6469 napi_schedule(&tp->napi[1].napi);
6470 }
6471 }
6472
6473 return received;
6474 }
6475
6476 static void tg3_poll_link(struct tg3 *tp)
6477 {
6478 /* handle link change and other phy events */
6479 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6480 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6481
6482 if (sblk->status & SD_STATUS_LINK_CHG) {
6483 sblk->status = SD_STATUS_UPDATED |
6484 (sblk->status & ~SD_STATUS_LINK_CHG);
6485 spin_lock(&tp->lock);
6486 if (tg3_flag(tp, USE_PHYLIB)) {
6487 tw32_f(MAC_STATUS,
6488 (MAC_STATUS_SYNC_CHANGED |
6489 MAC_STATUS_CFG_CHANGED |
6490 MAC_STATUS_MI_COMPLETION |
6491 MAC_STATUS_LNKSTATE_CHANGED));
6492 udelay(40);
6493 } else
6494 tg3_setup_phy(tp, 0);
6495 spin_unlock(&tp->lock);
6496 }
6497 }
6498 }
6499
6500 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6501 struct tg3_rx_prodring_set *dpr,
6502 struct tg3_rx_prodring_set *spr)
6503 {
6504 u32 si, di, cpycnt, src_prod_idx;
6505 int i, err = 0;
6506
6507 while (1) {
6508 src_prod_idx = spr->rx_std_prod_idx;
6509
6510 /* Make sure updates to the rx_std_buffers[] entries and the
6511 * standard producer index are seen in the correct order.
6512 */
6513 smp_rmb();
6514
6515 if (spr->rx_std_cons_idx == src_prod_idx)
6516 break;
6517
6518 if (spr->rx_std_cons_idx < src_prod_idx)
6519 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6520 else
6521 cpycnt = tp->rx_std_ring_mask + 1 -
6522 spr->rx_std_cons_idx;
6523
6524 cpycnt = min(cpycnt,
6525 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6526
6527 si = spr->rx_std_cons_idx;
6528 di = dpr->rx_std_prod_idx;
6529
6530 for (i = di; i < di + cpycnt; i++) {
6531 if (dpr->rx_std_buffers[i].data) {
6532 cpycnt = i - di;
6533 err = -ENOSPC;
6534 break;
6535 }
6536 }
6537
6538 if (!cpycnt)
6539 break;
6540
6541 /* Ensure that updates to the rx_std_buffers ring and the
6542 * shadowed hardware producer ring from tg3_recycle_skb() are
6543 * ordered correctly WRT the skb check above.
6544 */
6545 smp_rmb();
6546
6547 memcpy(&dpr->rx_std_buffers[di],
6548 &spr->rx_std_buffers[si],
6549 cpycnt * sizeof(struct ring_info));
6550
6551 for (i = 0; i < cpycnt; i++, di++, si++) {
6552 struct tg3_rx_buffer_desc *sbd, *dbd;
6553 sbd = &spr->rx_std[si];
6554 dbd = &dpr->rx_std[di];
6555 dbd->addr_hi = sbd->addr_hi;
6556 dbd->addr_lo = sbd->addr_lo;
6557 }
6558
6559 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6560 tp->rx_std_ring_mask;
6561 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6562 tp->rx_std_ring_mask;
6563 }
6564
6565 while (1) {
6566 src_prod_idx = spr->rx_jmb_prod_idx;
6567
6568 /* Make sure updates to the rx_jmb_buffers[] entries and
6569 * the jumbo producer index are seen in the correct order.
6570 */
6571 smp_rmb();
6572
6573 if (spr->rx_jmb_cons_idx == src_prod_idx)
6574 break;
6575
6576 if (spr->rx_jmb_cons_idx < src_prod_idx)
6577 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6578 else
6579 cpycnt = tp->rx_jmb_ring_mask + 1 -
6580 spr->rx_jmb_cons_idx;
6581
6582 cpycnt = min(cpycnt,
6583 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6584
6585 si = spr->rx_jmb_cons_idx;
6586 di = dpr->rx_jmb_prod_idx;
6587
6588 for (i = di; i < di + cpycnt; i++) {
6589 if (dpr->rx_jmb_buffers[i].data) {
6590 cpycnt = i - di;
6591 err = -ENOSPC;
6592 break;
6593 }
6594 }
6595
6596 if (!cpycnt)
6597 break;
6598
6599 /* Ensure that updates to the rx_jmb_buffers ring and the
6600 * shadowed hardware producer ring from tg3_recycle_skb() are
6601 * ordered correctly WRT the skb check above.
6602 */
6603 smp_rmb();
6604
6605 memcpy(&dpr->rx_jmb_buffers[di],
6606 &spr->rx_jmb_buffers[si],
6607 cpycnt * sizeof(struct ring_info));
6608
6609 for (i = 0; i < cpycnt; i++, di++, si++) {
6610 struct tg3_rx_buffer_desc *sbd, *dbd;
6611 sbd = &spr->rx_jmb[si].std;
6612 dbd = &dpr->rx_jmb[di].std;
6613 dbd->addr_hi = sbd->addr_hi;
6614 dbd->addr_lo = sbd->addr_lo;
6615 }
6616
6617 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6618 tp->rx_jmb_ring_mask;
6619 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6620 tp->rx_jmb_ring_mask;
6621 }
6622
6623 return err;
6624 }
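
/* A wraparound example for the cpycnt math above (illustrative, with a
 * 512-entry ring): if rx_std_cons_idx = 500 and rx_std_prod_idx = 10,
 * the producer has wrapped, so the first pass copies the 12 entries up
 * to the end of the ring (512 - 500) and the next pass of the while
 * loop picks up the remaining 10 from index 0.
 */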
6625
6626 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6627 {
6628 struct tg3 *tp = tnapi->tp;
6629
6630 /* run TX completion thread */
6631 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6632 tg3_tx(tnapi);
6633 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6634 return work_done;
6635 }
6636
6637 if (!tnapi->rx_rcb_prod_idx)
6638 return work_done;
6639
6640 /* run RX thread, within the bounds set by NAPI.
6641 * All RX "locking" is done by ensuring outside
6642 * code synchronizes with tg3->napi.poll()
6643 */
6644 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6645 work_done += tg3_rx(tnapi, budget - work_done);
6646
6647 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6648 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6649 int i, err = 0;
6650 u32 std_prod_idx = dpr->rx_std_prod_idx;
6651 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6652
6653 tp->rx_refill = false;
6654 for (i = 1; i <= tp->rxq_cnt; i++)
6655 err |= tg3_rx_prodring_xfer(tp, dpr,
6656 &tp->napi[i].prodring);
6657
6658 wmb();
6659
6660 if (std_prod_idx != dpr->rx_std_prod_idx)
6661 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6662 dpr->rx_std_prod_idx);
6663
6664 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6665 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6666 dpr->rx_jmb_prod_idx);
6667
6668 mmiowb();
6669
6670 if (err)
6671 tw32_f(HOSTCC_MODE, tp->coal_now);
6672 }
6673
6674 return work_done;
6675 }
6676
6677 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6678 {
6679 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6680 schedule_work(&tp->reset_task);
6681 }
6682
6683 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6684 {
6685 cancel_work_sync(&tp->reset_task);
6686 tg3_flag_clear(tp, RESET_TASK_PENDING);
6687 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6688 }
6689
6690 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6691 {
6692 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6693 struct tg3 *tp = tnapi->tp;
6694 int work_done = 0;
6695 struct tg3_hw_status *sblk = tnapi->hw_status;
6696
6697 while (1) {
6698 work_done = tg3_poll_work(tnapi, work_done, budget);
6699
6700 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6701 goto tx_recovery;
6702
6703 if (unlikely(work_done >= budget))
6704 break;
6705
6706 /* tp->last_tag is used in tg3_int_reenable() below
6707 * to tell the hw how much work has been processed,
6708 * so we must read it before checking for more work.
6709 */
6710 tnapi->last_tag = sblk->status_tag;
6711 tnapi->last_irq_tag = tnapi->last_tag;
6712 rmb();
6713
6714 /* check for RX/TX work to do */
6715 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6716 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6717
6718 /* This test here is not race-free, but it will reduce
6719 * the number of interrupts by looping again.
6720 */
6721 if (tnapi == &tp->napi[1] && tp->rx_refill)
6722 continue;
6723
6724 napi_complete(napi);
6725 /* Reenable interrupts. */
6726 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6727
6728 /* This test here is synchronized by napi_schedule()
6729 * and napi_complete() to close the race condition.
6730 */
6731 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6732 tw32(HOSTCC_MODE, tp->coalesce_mode |
6733 HOSTCC_MODE_ENABLE |
6734 tnapi->coal_now);
6735 }
6736 mmiowb();
6737 break;
6738 }
6739 }
6740
6741 return work_done;
6742
6743 tx_recovery:
6744 /* work_done is guaranteed to be less than budget. */
6745 napi_complete(napi);
6746 tg3_reset_task_schedule(tp);
6747 return work_done;
6748 }
6749
6750 static void tg3_process_error(struct tg3 *tp)
6751 {
6752 u32 val;
6753 bool real_error = false;
6754
6755 if (tg3_flag(tp, ERROR_PROCESSED))
6756 return;
6757
6758 /* Check Flow Attention register */
6759 val = tr32(HOSTCC_FLOW_ATTN);
6760 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6761 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6762 real_error = true;
6763 }
6764
6765 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6766 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6767 real_error = true;
6768 }
6769
6770 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6771 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6772 real_error = true;
6773 }
6774
6775 if (!real_error)
6776 return;
6777
6778 tg3_dump_state(tp);
6779
6780 tg3_flag_set(tp, ERROR_PROCESSED);
6781 tg3_reset_task_schedule(tp);
6782 }
6783
6784 static int tg3_poll(struct napi_struct *napi, int budget)
6785 {
6786 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6787 struct tg3 *tp = tnapi->tp;
6788 int work_done = 0;
6789 struct tg3_hw_status *sblk = tnapi->hw_status;
6790
6791 while (1) {
6792 if (sblk->status & SD_STATUS_ERROR)
6793 tg3_process_error(tp);
6794
6795 tg3_poll_link(tp);
6796
6797 work_done = tg3_poll_work(tnapi, work_done, budget);
6798
6799 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6800 goto tx_recovery;
6801
6802 if (unlikely(work_done >= budget))
6803 break;
6804
6805 if (tg3_flag(tp, TAGGED_STATUS)) {
6806 /* tp->last_tag is used in tg3_int_reenable() below
6807 * to tell the hw how much work has been processed,
6808 * so we must read it before checking for more work.
6809 */
6810 tnapi->last_tag = sblk->status_tag;
6811 tnapi->last_irq_tag = tnapi->last_tag;
6812 rmb();
6813 } else
6814 sblk->status &= ~SD_STATUS_UPDATED;
6815
6816 if (likely(!tg3_has_work(tnapi))) {
6817 napi_complete(napi);
6818 tg3_int_reenable(tnapi);
6819 break;
6820 }
6821 }
6822
6823 return work_done;
6824
6825 tx_recovery:
6826 /* work_done is guaranteed to be less than budget. */
6827 napi_complete(napi);
6828 tg3_reset_task_schedule(tp);
6829 return work_done;
6830 }
6831
6832 static void tg3_napi_disable(struct tg3 *tp)
6833 {
6834 int i;
6835
6836 for (i = tp->irq_cnt - 1; i >= 0; i--)
6837 napi_disable(&tp->napi[i].napi);
6838 }
6839
6840 static void tg3_napi_enable(struct tg3 *tp)
6841 {
6842 int i;
6843
6844 for (i = 0; i < tp->irq_cnt; i++)
6845 napi_enable(&tp->napi[i].napi);
6846 }
6847
6848 static void tg3_napi_init(struct tg3 *tp)
6849 {
6850 int i;
6851
6852 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6853 for (i = 1; i < tp->irq_cnt; i++)
6854 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6855 }
6856
6857 static void tg3_napi_fini(struct tg3 *tp)
6858 {
6859 int i;
6860
6861 for (i = 0; i < tp->irq_cnt; i++)
6862 netif_napi_del(&tp->napi[i].napi);
6863 }
6864
6865 static inline void tg3_netif_stop(struct tg3 *tp)
6866 {
6867 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6868 tg3_napi_disable(tp);
6869 netif_carrier_off(tp->dev);
6870 netif_tx_disable(tp->dev);
6871 }
6872
6873 /* tp->lock must be held */
6874 static inline void tg3_netif_start(struct tg3 *tp)
6875 {
6876 tg3_ptp_resume(tp);
6877
6878 /* NOTE: unconditional netif_tx_wake_all_queues is only
6879 * appropriate so long as all callers are assured to
6880 * have free tx slots (such as after tg3_init_hw)
6881 */
6882 netif_tx_wake_all_queues(tp->dev);
6883
6884 if (tp->link_up)
6885 netif_carrier_on(tp->dev);
6886
6887 tg3_napi_enable(tp);
6888 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6889 tg3_enable_ints(tp);
6890 }
6891
6892 static void tg3_irq_quiesce(struct tg3 *tp)
6893 {
6894 int i;
6895
6896 BUG_ON(tp->irq_sync);
6897
6898 tp->irq_sync = 1;
6899 smp_mb();
6900
6901 for (i = 0; i < tp->irq_cnt; i++)
6902 synchronize_irq(tp->napi[i].irq_vec);
6903 }
6904
6905 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6906 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6907 * with as well. Most of the time, this is not necessary except when
6908 * shutting down the device.
6909 */
6910 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6911 {
6912 spin_lock_bh(&tp->lock);
6913 if (irq_sync)
6914 tg3_irq_quiesce(tp);
6915 }
6916
6917 static inline void tg3_full_unlock(struct tg3 *tp)
6918 {
6919 spin_unlock_bh(&tp->lock);
6920 }
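
/* A typical usage sketch of the pair above:
 *
 *	tg3_full_lock(tp, 1);	(also quiesces in-flight IRQ handlers)
 *	...reset or reconfigure the chip...
 *	tg3_full_unlock(tp);
 *
 * irq_sync = 0 takes only tp->lock and suffices when the caller does
 * not touch state shared with the interrupt handlers.
 */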
6921
6922 /* One-shot MSI handler - Chip automatically disables interrupt
6923 * after sending MSI so driver doesn't have to do it.
6924 */
6925 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6926 {
6927 struct tg3_napi *tnapi = dev_id;
6928 struct tg3 *tp = tnapi->tp;
6929
6930 prefetch(tnapi->hw_status);
6931 if (tnapi->rx_rcb)
6932 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6933
6934 if (likely(!tg3_irq_sync(tp)))
6935 napi_schedule(&tnapi->napi);
6936
6937 return IRQ_HANDLED;
6938 }
6939
6940 /* MSI ISR - No need to check for interrupt sharing and no need to
6941 * flush status block and interrupt mailbox. PCI ordering rules
6942 * guarantee that MSI will arrive after the status block.
6943 */
6944 static irqreturn_t tg3_msi(int irq, void *dev_id)
6945 {
6946 struct tg3_napi *tnapi = dev_id;
6947 struct tg3 *tp = tnapi->tp;
6948
6949 prefetch(tnapi->hw_status);
6950 if (tnapi->rx_rcb)
6951 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6952 /*
6953 * Writing any value to intr-mbox-0 clears PCI INTA# and
6954 * chip-internal interrupt pending events.
6955 * Writing non-zero to intr-mbox-0 additionally tells the
6956 * NIC to stop sending us irqs, engaging "in-intr-handler"
6957 * event coalescing.
6958 */
6959 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6960 if (likely(!tg3_irq_sync(tp)))
6961 napi_schedule(&tnapi->napi);
6962
6963 return IRQ_RETVAL(1);
6964 }
6965
6966 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6967 {
6968 struct tg3_napi *tnapi = dev_id;
6969 struct tg3 *tp = tnapi->tp;
6970 struct tg3_hw_status *sblk = tnapi->hw_status;
6971 unsigned int handled = 1;
6972
6973 /* In INTx mode, it is possible for the interrupt to arrive at
6974 * the CPU before the status block write that preceded it has landed.
6975 * Reading the PCI State register will confirm whether the
6976 * interrupt is ours and will flush the status block.
6977 */
6978 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6979 if (tg3_flag(tp, CHIP_RESETTING) ||
6980 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6981 handled = 0;
6982 goto out;
6983 }
6984 }
6985
6986 /*
6987 * Writing any value to intr-mbox-0 clears PCI INTA# and
6988 * chip-internal interrupt pending events.
6989 * Writing non-zero to intr-mbox-0 additionally tells the
6990 * NIC to stop sending us irqs, engaging "in-intr-handler"
6991 * event coalescing.
6992 *
6993 * Flush the mailbox to de-assert the IRQ immediately to prevent
6994 * spurious interrupts. The flush impacts performance but
6995 * excessive spurious interrupts can be worse in some cases.
6996 */
6997 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6998 if (tg3_irq_sync(tp))
6999 goto out;
7000 sblk->status &= ~SD_STATUS_UPDATED;
7001 if (likely(tg3_has_work(tnapi))) {
7002 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7003 napi_schedule(&tnapi->napi);
7004 } else {
7005 /* No work, shared interrupt perhaps? re-enable
7006 * interrupts, and flush that PCI write
7007 */
7008 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7009 0x00000000);
7010 }
7011 out:
7012 return IRQ_RETVAL(handled);
7013 }
7014
7015 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7016 {
7017 struct tg3_napi *tnapi = dev_id;
7018 struct tg3 *tp = tnapi->tp;
7019 struct tg3_hw_status *sblk = tnapi->hw_status;
7020 unsigned int handled = 1;
7021
7022 /* In INTx mode, it is possible for the interrupt to arrive at
7023 * the CPU before the status block write that preceded it has landed.
7024 * Reading the PCI State register will confirm whether the
7025 * interrupt is ours and will flush the status block.
7026 */
7027 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7028 if (tg3_flag(tp, CHIP_RESETTING) ||
7029 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7030 handled = 0;
7031 goto out;
7032 }
7033 }
7034
7035 /*
7036 * Writing any value to intr-mbox-0 clears PCI INTA# and
7037 * chip-internal interrupt pending events.
7038 * Writing non-zero to intr-mbox-0 additionally tells the
7039 * NIC to stop sending us irqs, engaging "in-intr-handler"
7040 * event coalescing.
7041 *
7042 * Flush the mailbox to de-assert the IRQ immediately to prevent
7043 * spurious interrupts. The flush impacts performance but
7044 * excessive spurious interrupts can be worse in some cases.
7045 */
7046 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7047
7048 /*
7049 * In a shared interrupt configuration, sometimes other devices'
7050 * interrupts will scream. We record the current status tag here
7051 * so that the above check can report that the screaming interrupts
7052 * are unhandled. Eventually they will be silenced.
7053 */
7054 tnapi->last_irq_tag = sblk->status_tag;
7055
7056 if (tg3_irq_sync(tp))
7057 goto out;
7058
7059 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7060
7061 napi_schedule(&tnapi->napi);
7062
7063 out:
7064 return IRQ_RETVAL(handled);
7065 }
7066
7067 /* ISR for interrupt test */
7068 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7069 {
7070 struct tg3_napi *tnapi = dev_id;
7071 struct tg3 *tp = tnapi->tp;
7072 struct tg3_hw_status *sblk = tnapi->hw_status;
7073
7074 if ((sblk->status & SD_STATUS_UPDATED) ||
7075 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7076 tg3_disable_ints(tp);
7077 return IRQ_RETVAL(1);
7078 }
7079 return IRQ_RETVAL(0);
7080 }
7081
7082 #ifdef CONFIG_NET_POLL_CONTROLLER
7083 static void tg3_poll_controller(struct net_device *dev)
7084 {
7085 int i;
7086 struct tg3 *tp = netdev_priv(dev);
7087
7088 if (tg3_irq_sync(tp))
7089 return;
7090
7091 for (i = 0; i < tp->irq_cnt; i++)
7092 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7093 }
7094 #endif
7095
7096 static void tg3_tx_timeout(struct net_device *dev)
7097 {
7098 struct tg3 *tp = netdev_priv(dev);
7099
7100 if (netif_msg_tx_err(tp)) {
7101 netdev_err(dev, "transmit timed out, resetting\n");
7102 tg3_dump_state(tp);
7103 }
7104
7105 tg3_reset_task_schedule(tp);
7106 }
7107
7108 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
7109 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7110 {
7111 u32 base = (u32) mapping & 0xffffffff;
7112
7113 return (base > 0xffffdcc0) && (base + len + 8 < base);
7114 }
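
/* An example of the test above: mapping = 0xffffff00 with len = 0x200
 * gives base + len + 8 = 0x108 after 32-bit truncation, which is less
 * than base, so the buffer straddles a 4GB boundary and must be worked
 * around. The extra 8 bytes appear to be a safety margin for the
 * chip's descriptor fetch.
 */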
7115
7116 /* Test for DMA addresses > 40-bit */
7117 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7118 int len)
7119 {
7120 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7121 if (tg3_flag(tp, 40BIT_DMA_BUG))
7122 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7123 return 0;
7124 #else
7125 return 0;
7126 #endif
7127 }
7128
7129 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7130 dma_addr_t mapping, u32 len, u32 flags,
7131 u32 mss, u32 vlan)
7132 {
7133 txbd->addr_hi = ((u64) mapping >> 32);
7134 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7135 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7136 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7137 }
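
/* Descriptor packing sketch: the 64-bit DMA address is split across
 * addr_hi/addr_lo, len_flags carries the length in its upper 16 bits
 * (TXD_LEN_SHIFT) with the TXD_FLAG_* bits below it, and vlan_tag
 * packs the MSS and VLAN tag into a single 32-bit word via their
 * respective shifts.
 */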
7138
7139 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7140 dma_addr_t map, u32 len, u32 flags,
7141 u32 mss, u32 vlan)
7142 {
7143 struct tg3 *tp = tnapi->tp;
7144 bool hwbug = false;
7145
7146 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7147 hwbug = true;
7148
7149 if (tg3_4g_overflow_test(map, len))
7150 hwbug = true;
7151
7152 if (tg3_40bit_overflow_test(tp, map, len))
7153 hwbug = true;
7154
7155 if (tp->dma_limit) {
7156 u32 prvidx = *entry;
7157 u32 tmp_flag = flags & ~TXD_FLAG_END;
7158 while (len > tp->dma_limit && *budget) {
7159 u32 frag_len = tp->dma_limit;
7160 len -= tp->dma_limit;
7161
7162 /* Avoid the 8-byte DMA problem */
7163 if (len <= 8) {
7164 len += tp->dma_limit / 2;
7165 frag_len = tp->dma_limit / 2;
7166 }
7167
7168 tnapi->tx_buffers[*entry].fragmented = true;
7169
7170 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7171 frag_len, tmp_flag, mss, vlan);
7172 *budget -= 1;
7173 prvidx = *entry;
7174 *entry = NEXT_TX(*entry);
7175
7176 map += frag_len;
7177 }
7178
7179 if (len) {
7180 if (*budget) {
7181 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7182 len, flags, mss, vlan);
7183 *budget -= 1;
7184 *entry = NEXT_TX(*entry);
7185 } else {
7186 hwbug = true;
7187 tnapi->tx_buffers[prvidx].fragmented = false;
7188 }
7189 }
7190 } else {
7191 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7192 len, flags, mss, vlan);
7193 *entry = NEXT_TX(*entry);
7194 }
7195
7196 return hwbug;
7197 }
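
/* A split example for the dma_limit path above (illustrative): with
 * dma_limit = 4096 and len = 4100, a full-sized first BD would leave a
 * 4-byte residue and trip the 8-byte DMA bug, so the loop emits a
 * 2048-byte BD instead and carries 2052 bytes into the final BD. Every
 * intermediate BD is marked fragmented so tg3_tx() and
 * tg3_tx_skb_unmap() can skip those entries when unmapping.
 */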
7198
7199 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7200 {
7201 int i;
7202 struct sk_buff *skb;
7203 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7204
7205 skb = txb->skb;
7206 txb->skb = NULL;
7207
7208 pci_unmap_single(tnapi->tp->pdev,
7209 dma_unmap_addr(txb, mapping),
7210 skb_headlen(skb),
7211 PCI_DMA_TODEVICE);
7212
7213 while (txb->fragmented) {
7214 txb->fragmented = false;
7215 entry = NEXT_TX(entry);
7216 txb = &tnapi->tx_buffers[entry];
7217 }
7218
7219 for (i = 0; i <= last; i++) {
7220 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7221
7222 entry = NEXT_TX(entry);
7223 txb = &tnapi->tx_buffers[entry];
7224
7225 pci_unmap_page(tnapi->tp->pdev,
7226 dma_unmap_addr(txb, mapping),
7227 skb_frag_size(frag), PCI_DMA_TODEVICE);
7228
7229 while (txb->fragmented) {
7230 txb->fragmented = false;
7231 entry = NEXT_TX(entry);
7232 txb = &tnapi->tx_buffers[entry];
7233 }
7234 }
7235 }
7236
7237 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7238 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7239 struct sk_buff **pskb,
7240 u32 *entry, u32 *budget,
7241 u32 base_flags, u32 mss, u32 vlan)
7242 {
7243 struct tg3 *tp = tnapi->tp;
7244 struct sk_buff *new_skb, *skb = *pskb;
7245 dma_addr_t new_addr = 0;
7246 int ret = 0;
7247
7248 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7249 new_skb = skb_copy(skb, GFP_ATOMIC);
7250 else {
7251 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7252
7253 new_skb = skb_copy_expand(skb,
7254 skb_headroom(skb) + more_headroom,
7255 skb_tailroom(skb), GFP_ATOMIC);
7256 }
7257
7258 if (!new_skb) {
7259 ret = -1;
7260 } else {
7261 /* New SKB is guaranteed to be linear. */
7262 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7263 PCI_DMA_TODEVICE);
7264 /* Make sure the mapping succeeded */
7265 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7266 dev_kfree_skb(new_skb);
7267 ret = -1;
7268 } else {
7269 u32 save_entry = *entry;
7270
7271 base_flags |= TXD_FLAG_END;
7272
7273 tnapi->tx_buffers[*entry].skb = new_skb;
7274 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7275 mapping, new_addr);
7276
7277 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7278 new_skb->len, base_flags,
7279 mss, vlan)) {
7280 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7281 dev_kfree_skb(new_skb);
7282 ret = -1;
7283 }
7284 }
7285 }
7286
7287 dev_kfree_skb(skb);
7288 *pskb = new_skb;
7289 return ret;
7290 }
7291
7292 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7293
7294 /* Use GSO to work around a rare TSO bug that may be triggered when the
7295 * TSO header is greater than 80 bytes.
7296 */
7297 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7298 {
7299 struct sk_buff *segs, *nskb;
7300 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7301
7302 /* Estimate the number of fragments in the worst case */
7303 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7304 netif_stop_queue(tp->dev);
7305
7306 /* netif_tx_stop_queue() must be done before checking
7307 * tx index in tg3_tx_avail() below, because in
7308 * tg3_tx(), we update tx index before checking for
7309 * netif_tx_queue_stopped().
7310 */
7311 smp_mb();
7312 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7313 return NETDEV_TX_BUSY;
7314
7315 netif_wake_queue(tp->dev);
7316 }
7317
7318 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7319 if (IS_ERR(segs))
7320 goto tg3_tso_bug_end;
7321
7322 do {
7323 nskb = segs;
7324 segs = segs->next;
7325 nskb->next = NULL;
7326 tg3_start_xmit(nskb, tp->dev);
7327 } while (segs);
7328
7329 tg3_tso_bug_end:
7330 dev_kfree_skb(skb);
7331
7332 return NETDEV_TX_OK;
7333 }
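
/* A back-of-the-envelope example for the estimate above: a 64KB TSO
 * skb with gso_segs = 45 estimates a worst case of 45 * 3 = 135
 * descriptors; if the ring has no more than that free, the queue is
 * stopped and NETDEV_TX_BUSY is returned so the stack retries later.
 */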
7334
7335 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7336 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7337 */
7338 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7339 {
7340 struct tg3 *tp = netdev_priv(dev);
7341 u32 len, entry, base_flags, mss, vlan = 0;
7342 u32 budget;
7343 int i = -1, would_hit_hwbug;
7344 dma_addr_t mapping;
7345 struct tg3_napi *tnapi;
7346 struct netdev_queue *txq;
7347 unsigned int last;
7348
7349 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7350 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7351 if (tg3_flag(tp, ENABLE_TSS))
7352 tnapi++;
7353
7354 budget = tg3_tx_avail(tnapi);
7355
7356 /* We are running in BH disabled context with netif_tx_lock
7357 * and TX reclaim runs via tp->napi.poll inside of a software
7358 * interrupt. Furthermore, IRQ processing runs lockless so we have
7359 * no IRQ context deadlocks to worry about either. Rejoice!
7360 */
7361 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7362 if (!netif_tx_queue_stopped(txq)) {
7363 netif_tx_stop_queue(txq);
7364
7365 /* This is a hard error, log it. */
7366 netdev_err(dev,
7367 "BUG! Tx Ring full when queue awake!\n");
7368 }
7369 return NETDEV_TX_BUSY;
7370 }
7371
7372 entry = tnapi->tx_prod;
7373 base_flags = 0;
7374 if (skb->ip_summed == CHECKSUM_PARTIAL)
7375 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7376
7377 mss = skb_shinfo(skb)->gso_size;
7378 if (mss) {
7379 struct iphdr *iph;
7380 u32 tcp_opt_len, hdr_len;
7381
7382 if (skb_header_cloned(skb) &&
7383 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7384 goto drop;
7385
7386 iph = ip_hdr(skb);
7387 tcp_opt_len = tcp_optlen(skb);
7388
7389 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7390
7391 if (!skb_is_gso_v6(skb)) {
7392 iph->check = 0;
7393 iph->tot_len = htons(mss + hdr_len);
7394 }
7395
7396 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7397 tg3_flag(tp, TSO_BUG))
7398 return tg3_tso_bug(tp, skb);
7399
7400 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7401 TXD_FLAG_CPU_POST_DMA);
7402
7403 if (tg3_flag(tp, HW_TSO_1) ||
7404 tg3_flag(tp, HW_TSO_2) ||
7405 tg3_flag(tp, HW_TSO_3)) {
7406 tcp_hdr(skb)->check = 0;
7407 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7408 } else
7409 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7410 iph->daddr, 0,
7411 IPPROTO_TCP,
7412 0);
7413
7414 if (tg3_flag(tp, HW_TSO_3)) {
7415 mss |= (hdr_len & 0xc) << 12;
7416 if (hdr_len & 0x10)
7417 base_flags |= 0x00000010;
7418 base_flags |= (hdr_len & 0x3e0) << 5;
7419 } else if (tg3_flag(tp, HW_TSO_2))
7420 mss |= hdr_len << 9;
7421 else if (tg3_flag(tp, HW_TSO_1) ||
7422 tg3_asic_rev(tp) == ASIC_REV_5705) {
7423 if (tcp_opt_len || iph->ihl > 5) {
7424 int tsflags;
7425
7426 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7427 mss |= (tsflags << 11);
7428 }
7429 } else {
7430 if (tcp_opt_len || iph->ihl > 5) {
7431 int tsflags;
7432
7433 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7434 base_flags |= tsflags << 12;
7435 }
7436 }
7437 }
7438
7439 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7440 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7441 base_flags |= TXD_FLAG_JMB_PKT;
7442
7443 if (vlan_tx_tag_present(skb)) {
7444 base_flags |= TXD_FLAG_VLAN;
7445 vlan = vlan_tx_tag_get(skb);
7446 }
7447
7448 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7449 tg3_flag(tp, TX_TSTAMP_EN)) {
7450 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7451 base_flags |= TXD_FLAG_HWTSTAMP;
7452 }
7453
7454 len = skb_headlen(skb);
7455
7456 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7457 if (pci_dma_mapping_error(tp->pdev, mapping))
7458 goto drop;
7459
7460
7461 tnapi->tx_buffers[entry].skb = skb;
7462 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7463
7464 would_hit_hwbug = 0;
7465
7466 if (tg3_flag(tp, 5701_DMA_BUG))
7467 would_hit_hwbug = 1;
7468
7469 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7470 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7471 mss, vlan)) {
7472 would_hit_hwbug = 1;
7473 } else if (skb_shinfo(skb)->nr_frags > 0) {
7474 u32 tmp_mss = mss;
7475
7476 if (!tg3_flag(tp, HW_TSO_1) &&
7477 !tg3_flag(tp, HW_TSO_2) &&
7478 !tg3_flag(tp, HW_TSO_3))
7479 tmp_mss = 0;
7480
7481 /* Now loop through additional data
7482 * fragments, and queue them.
7483 */
7484 last = skb_shinfo(skb)->nr_frags - 1;
7485 for (i = 0; i <= last; i++) {
7486 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7487
7488 len = skb_frag_size(frag);
7489 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7490 len, DMA_TO_DEVICE);
7491
7492 tnapi->tx_buffers[entry].skb = NULL;
7493 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7494 mapping);
7495 if (dma_mapping_error(&tp->pdev->dev, mapping))
7496 goto dma_error;
7497
7498 if (!budget ||
7499 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7500 len, base_flags |
7501 ((i == last) ? TXD_FLAG_END : 0),
7502 tmp_mss, vlan)) {
7503 would_hit_hwbug = 1;
7504 break;
7505 }
7506 }
7507 }
7508
7509 if (would_hit_hwbug) {
7510 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7511
7512 /* If the workaround fails due to memory/mapping
7513 * failure, silently drop this packet.
7514 */
7515 entry = tnapi->tx_prod;
7516 budget = tg3_tx_avail(tnapi);
7517 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7518 base_flags, mss, vlan))
7519 goto drop_nofree;
7520 }
7521
7522 skb_tx_timestamp(skb);
7523 netdev_tx_sent_queue(txq, skb->len);
7524
7525 /* Sync BD data before updating mailbox */
7526 wmb();
7527
7528 /* Packets are ready, update Tx producer idx local and on card. */
7529 tw32_tx_mbox(tnapi->prodmbox, entry);
7530
7531 tnapi->tx_prod = entry;
7532 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7533 netif_tx_stop_queue(txq);
7534
7535 /* netif_tx_stop_queue() must be done before checking
7536 * tx index in tg3_tx_avail() below, because in
7537 * tg3_tx(), we update tx index before checking for
7538 * netif_tx_queue_stopped().
7539 */
7540 smp_mb();
7541 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7542 netif_tx_wake_queue(txq);
7543 }
7544
7545 mmiowb();
7546 return NETDEV_TX_OK;
7547
7548 dma_error:
7549 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7550 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7551 drop:
7552 dev_kfree_skb(skb);
7553 drop_nofree:
7554 tp->tx_dropped++;
7555 return NETDEV_TX_OK;
7556 }
7557
7558 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7559 {
7560 if (enable) {
7561 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7562 MAC_MODE_PORT_MODE_MASK);
7563
7564 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7565
7566 if (!tg3_flag(tp, 5705_PLUS))
7567 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7568
7569 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7570 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7571 else
7572 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7573 } else {
7574 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7575
7576 if (tg3_flag(tp, 5705_PLUS) ||
7577 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7578 tg3_asic_rev(tp) == ASIC_REV_5700)
7579 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7580 }
7581
7582 tw32(MAC_MODE, tp->mac_mode);
7583 udelay(40);
7584 }
7585
7586 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7587 {
7588 u32 val, bmcr, mac_mode, ptest = 0;
7589
7590 tg3_phy_toggle_apd(tp, false);
7591 tg3_phy_toggle_automdix(tp, 0);
7592
7593 if (extlpbk && tg3_phy_set_extloopbk(tp))
7594 return -EIO;
7595
7596 bmcr = BMCR_FULLDPLX;
7597 switch (speed) {
7598 case SPEED_10:
7599 break;
7600 case SPEED_100:
7601 bmcr |= BMCR_SPEED100;
7602 break;
7603 case SPEED_1000:
7604 default:
7605 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7606 speed = SPEED_100;
7607 bmcr |= BMCR_SPEED100;
7608 } else {
7609 speed = SPEED_1000;
7610 bmcr |= BMCR_SPEED1000;
7611 }
7612 }
7613
7614 if (extlpbk) {
7615 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7616 tg3_readphy(tp, MII_CTRL1000, &val);
7617 val |= CTL1000_AS_MASTER |
7618 CTL1000_ENABLE_MASTER;
7619 tg3_writephy(tp, MII_CTRL1000, val);
7620 } else {
7621 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7622 MII_TG3_FET_PTEST_TRIM_2;
7623 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7624 }
7625 } else
7626 bmcr |= BMCR_LOOPBACK;
7627
7628 tg3_writephy(tp, MII_BMCR, bmcr);
7629
7630 /* The write needs to be flushed for the FETs */
7631 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7632 tg3_readphy(tp, MII_BMCR, &bmcr);
7633
7634 udelay(40);
7635
7636 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7637 tg3_asic_rev(tp) == ASIC_REV_5785) {
7638 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7639 MII_TG3_FET_PTEST_FRC_TX_LINK |
7640 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7641
7642 /* The write needs to be flushed for the AC131 */
7643 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7644 }
7645
7646 /* Reset to prevent intermittently losing the first rx packet */
7647 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7648 tg3_flag(tp, 5780_CLASS)) {
7649 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7650 udelay(10);
7651 tw32_f(MAC_RX_MODE, tp->rx_mode);
7652 }
7653
7654 mac_mode = tp->mac_mode &
7655 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7656 if (speed == SPEED_1000)
7657 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7658 else
7659 mac_mode |= MAC_MODE_PORT_MODE_MII;
7660
7661 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7662 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7663
7664 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7665 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7666 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7667 mac_mode |= MAC_MODE_LINK_POLARITY;
7668
7669 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7670 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7671 }
7672
7673 tw32(MAC_MODE, mac_mode);
7674 udelay(40);
7675
7676 return 0;
7677 }
7678
7679 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7680 {
7681 struct tg3 *tp = netdev_priv(dev);
7682
7683 if (features & NETIF_F_LOOPBACK) {
7684 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7685 return;
7686
7687 spin_lock_bh(&tp->lock);
7688 tg3_mac_loopback(tp, true);
7689 netif_carrier_on(tp->dev);
7690 spin_unlock_bh(&tp->lock);
7691 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7692 } else {
7693 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7694 return;
7695
7696 spin_lock_bh(&tp->lock);
7697 tg3_mac_loopback(tp, false);
7698 /* Force link status check */
7699 tg3_setup_phy(tp, 1);
7700 spin_unlock_bh(&tp->lock);
7701 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7702 }
7703 }
7704
7705 static netdev_features_t tg3_fix_features(struct net_device *dev,
7706 netdev_features_t features)
7707 {
7708 struct tg3 *tp = netdev_priv(dev);
7709
7710 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7711 features &= ~NETIF_F_ALL_TSO;
7712
7713 return features;
7714 }
7715
7716 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7717 {
7718 netdev_features_t changed = dev->features ^ features;
7719
7720 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7721 tg3_set_loopback(dev, features);
7722
7723 return 0;
7724 }
7725
7726 static void tg3_rx_prodring_free(struct tg3 *tp,
7727 struct tg3_rx_prodring_set *tpr)
7728 {
7729 int i;
7730
7731 if (tpr != &tp->napi[0].prodring) {
7732 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7733 i = (i + 1) & tp->rx_std_ring_mask)
7734 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7735 tp->rx_pkt_map_sz);
7736
7737 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7738 for (i = tpr->rx_jmb_cons_idx;
7739 i != tpr->rx_jmb_prod_idx;
7740 i = (i + 1) & tp->rx_jmb_ring_mask) {
7741 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7742 TG3_RX_JMB_MAP_SZ);
7743 }
7744 }
7745
7746 return;
7747 }
7748
7749 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7750 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7751 tp->rx_pkt_map_sz);
7752
7753 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7754 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7755 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7756 TG3_RX_JMB_MAP_SZ);
7757 }
7758 }
7759
7760 /* Initialize rx rings for packet processing.
7761 *
7762 * The chip has been shut down and the driver detached from
7763 * the networking stack, so no interrupts or new tx packets will
7764 * end up in the driver. tp->{tx,}lock are held and thus
7765 * we may not sleep.
7766 */
7767 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7768 struct tg3_rx_prodring_set *tpr)
7769 {
7770 u32 i, rx_pkt_dma_sz;
7771
7772 tpr->rx_std_cons_idx = 0;
7773 tpr->rx_std_prod_idx = 0;
7774 tpr->rx_jmb_cons_idx = 0;
7775 tpr->rx_jmb_prod_idx = 0;
7776
7777 if (tpr != &tp->napi[0].prodring) {
7778 memset(&tpr->rx_std_buffers[0], 0,
7779 TG3_RX_STD_BUFF_RING_SIZE(tp));
7780 if (tpr->rx_jmb_buffers)
7781 memset(&tpr->rx_jmb_buffers[0], 0,
7782 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7783 goto done;
7784 }
7785
7786 /* Zero out all descriptors. */
7787 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7788
7789 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7790 if (tg3_flag(tp, 5780_CLASS) &&
7791 tp->dev->mtu > ETH_DATA_LEN)
7792 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7793 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7794
7795 /* Initialize invariants of the rings, we only set this
7796 * stuff once. This works because the card does not
7797 * write into the rx buffer posting rings.
7798 */
7799 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7800 struct tg3_rx_buffer_desc *rxd;
7801
7802 rxd = &tpr->rx_std[i];
7803 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7804 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7805 rxd->opaque = (RXD_OPAQUE_RING_STD |
7806 (i << RXD_OPAQUE_INDEX_SHIFT));
7807 }
7808
7809 /* Now allocate fresh SKBs for each rx ring. */
7810 for (i = 0; i < tp->rx_pending; i++) {
7811 unsigned int frag_size;
7812
7813 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7814 &frag_size) < 0) {
7815 netdev_warn(tp->dev,
7816 "Using a smaller RX standard ring. Only "
7817 "%d out of %d buffers were allocated "
7818 "successfully\n", i, tp->rx_pending);
7819 if (i == 0)
7820 goto initfail;
7821 tp->rx_pending = i;
7822 break;
7823 }
7824 }
7825
7826 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7827 goto done;
7828
7829 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7830
7831 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7832 goto done;
7833
7834 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7835 struct tg3_rx_buffer_desc *rxd;
7836
7837 rxd = &tpr->rx_jmb[i].std;
7838 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7839 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7840 RXD_FLAG_JUMBO;
7841 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7842 (i << RXD_OPAQUE_INDEX_SHIFT));
7843 }
7844
7845 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7846 unsigned int frag_size;
7847
7848 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7849 &frag_size) < 0) {
7850 netdev_warn(tp->dev,
7851 "Using a smaller RX jumbo ring. Only %d "
7852 "out of %d buffers were allocated "
7853 "successfully\n", i, tp->rx_jumbo_pending);
7854 if (i == 0)
7855 goto initfail;
7856 tp->rx_jumbo_pending = i;
7857 break;
7858 }
7859 }
7860
7861 done:
7862 return 0;
7863
7864 initfail:
7865 tg3_rx_prodring_free(tp, tpr);
7866 return -ENOMEM;
7867 }
7868
7869 static void tg3_rx_prodring_fini(struct tg3 *tp,
7870 struct tg3_rx_prodring_set *tpr)
7871 {
7872 kfree(tpr->rx_std_buffers);
7873 tpr->rx_std_buffers = NULL;
7874 kfree(tpr->rx_jmb_buffers);
7875 tpr->rx_jmb_buffers = NULL;
7876 if (tpr->rx_std) {
7877 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7878 tpr->rx_std, tpr->rx_std_mapping);
7879 tpr->rx_std = NULL;
7880 }
7881 if (tpr->rx_jmb) {
7882 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7883 tpr->rx_jmb, tpr->rx_jmb_mapping);
7884 tpr->rx_jmb = NULL;
7885 }
7886 }
7887
7888 static int tg3_rx_prodring_init(struct tg3 *tp,
7889 struct tg3_rx_prodring_set *tpr)
7890 {
7891 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7892 GFP_KERNEL);
7893 if (!tpr->rx_std_buffers)
7894 return -ENOMEM;
7895
7896 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7897 TG3_RX_STD_RING_BYTES(tp),
7898 &tpr->rx_std_mapping,
7899 GFP_KERNEL);
7900 if (!tpr->rx_std)
7901 goto err_out;
7902
7903 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7904 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7905 GFP_KERNEL);
7906 if (!tpr->rx_jmb_buffers)
7907 goto err_out;
7908
7909 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7910 TG3_RX_JMB_RING_BYTES(tp),
7911 &tpr->rx_jmb_mapping,
7912 GFP_KERNEL);
7913 if (!tpr->rx_jmb)
7914 goto err_out;
7915 }
7916
7917 return 0;
7918
7919 err_out:
7920 tg3_rx_prodring_fini(tp, tpr);
7921 return -ENOMEM;
7922 }
7923
7924 /* Free up pending packets in all rx/tx rings.
7925 *
7926 * The chip has been shut down and the driver detached from
7927 * the networking layer, so no interrupts or new tx packets will
7928 * end up in the driver. tp->{tx,}lock is not held, we are not in
7929 * interrupt context, and thus we may sleep.
7930 */
7931 static void tg3_free_rings(struct tg3 *tp)
7932 {
7933 int i, j;
7934
7935 for (j = 0; j < tp->irq_cnt; j++) {
7936 struct tg3_napi *tnapi = &tp->napi[j];
7937
7938 tg3_rx_prodring_free(tp, &tnapi->prodring);
7939
7940 if (!tnapi->tx_buffers)
7941 continue;
7942
7943 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7944 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7945
7946 if (!skb)
7947 continue;
7948
7949 tg3_tx_skb_unmap(tnapi, i,
7950 skb_shinfo(skb)->nr_frags - 1);
7951
7952 dev_kfree_skb_any(skb);
7953 }
7954 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7955 }
7956 }
7957
7958 /* Initialize tx/rx rings for packet processing.
7959 *
7960 * The chip has been shut down and the driver detached from
7961 * the networking layer, so no interrupts or new tx packets will
7962 * end up in the driver. tp->{tx,}lock are held and thus
7963 * we may not sleep.
7964 */
7965 static int tg3_init_rings(struct tg3 *tp)
7966 {
7967 int i;
7968
7969 /* Free up all the SKBs. */
7970 tg3_free_rings(tp);
7971
7972 for (i = 0; i < tp->irq_cnt; i++) {
7973 struct tg3_napi *tnapi = &tp->napi[i];
7974
7975 tnapi->last_tag = 0;
7976 tnapi->last_irq_tag = 0;
7977 tnapi->hw_status->status = 0;
7978 tnapi->hw_status->status_tag = 0;
7979 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7980
7981 tnapi->tx_prod = 0;
7982 tnapi->tx_cons = 0;
7983 if (tnapi->tx_ring)
7984 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7985
7986 tnapi->rx_rcb_ptr = 0;
7987 if (tnapi->rx_rcb)
7988 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7989
7990 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7991 tg3_free_rings(tp);
7992 return -ENOMEM;
7993 }
7994 }
7995
7996 return 0;
7997 }
7998
7999 static void tg3_mem_tx_release(struct tg3 *tp)
8000 {
8001 int i;
8002
8003 for (i = 0; i < tp->irq_max; i++) {
8004 struct tg3_napi *tnapi = &tp->napi[i];
8005
8006 if (tnapi->tx_ring) {
8007 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8008 tnapi->tx_ring, tnapi->tx_desc_mapping);
8009 tnapi->tx_ring = NULL;
8010 }
8011
8012 kfree(tnapi->tx_buffers);
8013 tnapi->tx_buffers = NULL;
8014 }
8015 }
8016
8017 static int tg3_mem_tx_acquire(struct tg3 *tp)
8018 {
8019 int i;
8020 struct tg3_napi *tnapi = &tp->napi[0];
8021
8022 /* If multivector TSS is enabled, vector 0 does not handle
8023 * tx interrupts. Don't allocate any resources for it.
8024 */
8025 if (tg3_flag(tp, ENABLE_TSS))
8026 tnapi++;
8027
8028 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8029 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8030 sizeof(struct tg3_tx_ring_info), GFP_KERNEL);
8031 if (!tnapi->tx_buffers)
8032 goto err_out;
8033
8034 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8035 TG3_TX_RING_BYTES,
8036 &tnapi->tx_desc_mapping,
8037 GFP_KERNEL);
8038 if (!tnapi->tx_ring)
8039 goto err_out;
8040 }
8041
8042 return 0;
8043
8044 err_out:
8045 tg3_mem_tx_release(tp);
8046 return -ENOMEM;
8047 }
8048
8049 static void tg3_mem_rx_release(struct tg3 *tp)
8050 {
8051 int i;
8052
8053 for (i = 0; i < tp->irq_max; i++) {
8054 struct tg3_napi *tnapi = &tp->napi[i];
8055
8056 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8057
8058 if (!tnapi->rx_rcb)
8059 continue;
8060
8061 dma_free_coherent(&tp->pdev->dev,
8062 TG3_RX_RCB_RING_BYTES(tp),
8063 tnapi->rx_rcb,
8064 tnapi->rx_rcb_mapping);
8065 tnapi->rx_rcb = NULL;
8066 }
8067 }
8068
8069 static int tg3_mem_rx_acquire(struct tg3 *tp)
8070 {
8071 unsigned int i, limit;
8072
8073 limit = tp->rxq_cnt;
8074
8075 /* If RSS is enabled, we need a (dummy) producer ring
8076 * set on vector zero. This is the true hw prodring.
8077 */
8078 if (tg3_flag(tp, ENABLE_RSS))
8079 limit++;
8080
8081 for (i = 0; i < limit; i++) {
8082 struct tg3_napi *tnapi = &tp->napi[i];
8083
8084 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8085 goto err_out;
8086
8087 /* If multivector RSS is enabled, vector 0
8088 * does not handle rx or tx interrupts.
8089 * Don't allocate any resources for it.
8090 */
8091 if (!i && tg3_flag(tp, ENABLE_RSS))
8092 continue;
8093
8094 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8095 TG3_RX_RCB_RING_BYTES(tp),
8096 &tnapi->rx_rcb_mapping,
8097 GFP_KERNEL);
8098 if (!tnapi->rx_rcb)
8099 goto err_out;
8100
8101 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8102 }
8103
8104 return 0;
8105
8106 err_out:
8107 tg3_mem_rx_release(tp);
8108 return -ENOMEM;
8109 }
8110
8111 /*
8112 * Must not be invoked with interrupt sources disabled and
8113 * the hardware shut down.
8114 */
8115 static void tg3_free_consistent(struct tg3 *tp)
8116 {
8117 int i;
8118
8119 for (i = 0; i < tp->irq_cnt; i++) {
8120 struct tg3_napi *tnapi = &tp->napi[i];
8121
8122 if (tnapi->hw_status) {
8123 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8124 tnapi->hw_status,
8125 tnapi->status_mapping);
8126 tnapi->hw_status = NULL;
8127 }
8128 }
8129
8130 tg3_mem_rx_release(tp);
8131 tg3_mem_tx_release(tp);
8132
8133 if (tp->hw_stats) {
8134 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8135 tp->hw_stats, tp->stats_mapping);
8136 tp->hw_stats = NULL;
8137 }
8138 }
8139
8140 /*
8141 * Must not be invoked with interrupt sources disabled and
8142 * the hardware shut down. Can sleep.
8143 */
8144 static int tg3_alloc_consistent(struct tg3 *tp)
8145 {
8146 int i;
8147
8148 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8149 sizeof(struct tg3_hw_stats),
8150 &tp->stats_mapping,
8151 GFP_KERNEL);
8152 if (!tp->hw_stats)
8153 goto err_out;
8154
8155 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8156
8157 for (i = 0; i < tp->irq_cnt; i++) {
8158 struct tg3_napi *tnapi = &tp->napi[i];
8159 struct tg3_hw_status *sblk;
8160
8161 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8162 TG3_HW_STATUS_SIZE,
8163 &tnapi->status_mapping,
8164 GFP_KERNEL);
8165 if (!tnapi->hw_status)
8166 goto err_out;
8167
8168 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8169 sblk = tnapi->hw_status;
8170
8171 if (tg3_flag(tp, ENABLE_RSS)) {
8172 u16 *prodptr = NULL;
8173
8174 /*
8175 * When RSS is enabled, the status block format changes
8176 * slightly. The "rx_jumbo_consumer", "reserved",
8177 * and "rx_mini_consumer" members get mapped to the
8178 * other three rx return ring producer indexes.
8179 */
8180 switch (i) {
8181 case 1:
8182 prodptr = &sblk->idx[0].rx_producer;
8183 break;
8184 case 2:
8185 prodptr = &sblk->rx_jumbo_consumer;
8186 break;
8187 case 3:
8188 prodptr = &sblk->reserved;
8189 break;
8190 case 4:
8191 prodptr = &sblk->rx_mini_consumer;
8192 break;
8193 }
8194 tnapi->rx_rcb_prod_idx = prodptr;
8195 } else {
8196 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8197 }
8198 }
8199
8200 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8201 goto err_out;
8202
8203 return 0;
8204
8205 err_out:
8206 tg3_free_consistent(tp);
8207 return -ENOMEM;
8208 }
8209
8210 #define MAX_WAIT_CNT 1000
8211
8212 /* To stop a block, clear the enable bit and poll until it
8213 * clears. tp->lock is held.
8214 */
8215 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8216 {
8217 unsigned int i;
8218 u32 val;
8219
8220 if (tg3_flag(tp, 5705_PLUS)) {
8221 switch (ofs) {
8222 case RCVLSC_MODE:
8223 case DMAC_MODE:
8224 case MBFREE_MODE:
8225 case BUFMGR_MODE:
8226 case MEMARB_MODE:
8227 /* We can't enable/disable these bits on the
8228 * 5705/5750, so just report success.
8229 */
8230 return 0;
8231
8232 default:
8233 break;
8234 }
8235 }
8236
8237 val = tr32(ofs);
8238 val &= ~enable_bit;
8239 tw32_f(ofs, val);
8240
8241 for (i = 0; i < MAX_WAIT_CNT; i++) {
8242 udelay(100);
8243 val = tr32(ofs);
8244 if ((val & enable_bit) == 0)
8245 break;
8246 }
8247
8248 if (i == MAX_WAIT_CNT && !silent) {
8249 dev_err(&tp->pdev->dev,
8250 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8251 ofs, enable_bit);
8252 return -ENODEV;
8253 }
8254
8255 return 0;
8256 }
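/* Illustrative sketch (not part of the driver): the clear-and-poll idiom
 * used by tg3_stop_block() above.  With MAX_WAIT_CNT == 1000 and 100 us
 * per iteration, the total wait budget is roughly 100 ms before the
 * block is declared stuck.  The helper below is a hypothetical
 * generalization, assuming the same tr32()/tw32_f() accessors.
 */
#if 0
static int poll_enable_bit_clear(struct tg3 *tp, unsigned long ofs, u32 bit)
{
	unsigned int i;

	tw32_f(ofs, tr32(ofs) & ~bit);		/* request the block to stop */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(ofs) & bit))
			return 0;		/* block has stopped */
	}
	return -ENODEV;				/* enable bit never cleared */
}
#endif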
8257
8258 /* tp->lock is held. */
8259 static int tg3_abort_hw(struct tg3 *tp, int silent)
8260 {
8261 int i, err;
8262
8263 tg3_disable_ints(tp);
8264
8265 tp->rx_mode &= ~RX_MODE_ENABLE;
8266 tw32_f(MAC_RX_MODE, tp->rx_mode);
8267 udelay(10);
8268
8269 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8270 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8271 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8272 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8273 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8274 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8275
8276 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8277 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8278 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8279 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8280 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8281 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8282 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8283
8284 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8285 tw32_f(MAC_MODE, tp->mac_mode);
8286 udelay(40);
8287
8288 tp->tx_mode &= ~TX_MODE_ENABLE;
8289 tw32_f(MAC_TX_MODE, tp->tx_mode);
8290
8291 for (i = 0; i < MAX_WAIT_CNT; i++) {
8292 udelay(100);
8293 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8294 break;
8295 }
8296 if (i >= MAX_WAIT_CNT) {
8297 dev_err(&tp->pdev->dev,
8298 "%s timed out, TX_MODE_ENABLE will not clear "
8299 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8300 err |= -ENODEV;
8301 }
8302
8303 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8304 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8305 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8306
8307 tw32(FTQ_RESET, 0xffffffff);
8308 tw32(FTQ_RESET, 0x00000000);
8309
8310 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8311 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8312
8313 for (i = 0; i < tp->irq_cnt; i++) {
8314 struct tg3_napi *tnapi = &tp->napi[i];
8315 if (tnapi->hw_status)
8316 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8317 }
8318
8319 return err;
8320 }
8321
8322 /* Save PCI command register before chip reset */
8323 static void tg3_save_pci_state(struct tg3 *tp)
8324 {
8325 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8326 }
8327
8328 /* Restore PCI state after chip reset */
8329 static void tg3_restore_pci_state(struct tg3 *tp)
8330 {
8331 u32 val;
8332
8333 /* Re-enable indirect register accesses. */
8334 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8335 tp->misc_host_ctrl);
8336
8337 /* Set MAX PCI retry to zero. */
8338 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8339 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8340 tg3_flag(tp, PCIX_MODE))
8341 val |= PCISTATE_RETRY_SAME_DMA;
8342 /* Allow reads and writes to the APE register and memory space. */
8343 if (tg3_flag(tp, ENABLE_APE))
8344 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8345 PCISTATE_ALLOW_APE_SHMEM_WR |
8346 PCISTATE_ALLOW_APE_PSPACE_WR;
8347 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8348
8349 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8350
8351 if (!tg3_flag(tp, PCI_EXPRESS)) {
8352 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8353 tp->pci_cacheline_sz);
8354 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8355 tp->pci_lat_timer);
8356 }
8357
8358 /* Make sure PCI-X relaxed ordering bit is clear. */
8359 if (tg3_flag(tp, PCIX_MODE)) {
8360 u16 pcix_cmd;
8361
8362 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8363 &pcix_cmd);
8364 pcix_cmd &= ~PCI_X_CMD_ERO;
8365 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8366 pcix_cmd);
8367 }
8368
8369 if (tg3_flag(tp, 5780_CLASS)) {
8370
8371 /* Chip reset on 5780 will reset MSI enable bit,
8372 * so we need to restore it.
8373 */
8374 if (tg3_flag(tp, USING_MSI)) {
8375 u16 ctrl;
8376
8377 pci_read_config_word(tp->pdev,
8378 tp->msi_cap + PCI_MSI_FLAGS,
8379 &ctrl);
8380 pci_write_config_word(tp->pdev,
8381 tp->msi_cap + PCI_MSI_FLAGS,
8382 ctrl | PCI_MSI_FLAGS_ENABLE);
8383 val = tr32(MSGINT_MODE);
8384 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8385 }
8386 }
8387 }
8388
8389 /* tp->lock is held. */
8390 static int tg3_chip_reset(struct tg3 *tp)
8391 {
8392 u32 val;
8393 void (*write_op)(struct tg3 *, u32, u32);
8394 int i, err;
8395
8396 tg3_nvram_lock(tp);
8397
8398 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8399
8400 /* No matching tg3_nvram_unlock() after this because
8401 * chip reset below will undo the nvram lock.
8402 */
8403 tp->nvram_lock_cnt = 0;
8404
8405 /* GRC_MISC_CFG core clock reset will clear the memory
8406 * enable bit in PCI register 4 and the MSI enable bit
8407 * on some chips, so we save relevant registers here.
8408 */
8409 tg3_save_pci_state(tp);
8410
8411 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8412 tg3_flag(tp, 5755_PLUS))
8413 tw32(GRC_FASTBOOT_PC, 0);
8414
8415 /*
8416 * We must avoid the readl() that normally takes place.
8417 * It locks machines, causes machine checks, and other
8418 * fun things. So, temporarily disable the 5701
8419 * hardware workaround, while we do the reset.
8420 */
8421 write_op = tp->write32;
8422 if (write_op == tg3_write_flush_reg32)
8423 tp->write32 = tg3_write32;
8424
8425 /* Prevent the irq handler from reading or writing PCI registers
8426 * during chip reset when the memory enable bit in the PCI command
8427 * register may be cleared. The chip does not generate interrupt
8428 * at this time, but the irq handler may still be called due to irq
8429 * sharing or irqpoll.
8430 */
8431 tg3_flag_set(tp, CHIP_RESETTING);
8432 for (i = 0; i < tp->irq_cnt; i++) {
8433 struct tg3_napi *tnapi = &tp->napi[i];
8434 if (tnapi->hw_status) {
8435 tnapi->hw_status->status = 0;
8436 tnapi->hw_status->status_tag = 0;
8437 }
8438 tnapi->last_tag = 0;
8439 tnapi->last_irq_tag = 0;
8440 }
8441 smp_mb();
8442
8443 for (i = 0; i < tp->irq_cnt; i++)
8444 synchronize_irq(tp->napi[i].irq_vec);
8445
8446 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8447 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8448 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8449 }
8450
8451 /* do the reset */
8452 val = GRC_MISC_CFG_CORECLK_RESET;
8453
8454 if (tg3_flag(tp, PCI_EXPRESS)) {
8455 /* Force PCIe 1.0a mode */
8456 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8457 !tg3_flag(tp, 57765_PLUS) &&
8458 tr32(TG3_PCIE_PHY_TSTCTL) ==
8459 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8460 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8461
8462 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8463 tw32(GRC_MISC_CFG, (1 << 29));
8464 val |= (1 << 29);
8465 }
8466 }
8467
8468 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8469 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8470 tw32(GRC_VCPU_EXT_CTRL,
8471 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8472 }
8473
8474 /* Manage gphy power for all CPMU absent PCIe devices. */
8475 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8476 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8477
8478 tw32(GRC_MISC_CFG, val);
8479
8480 /* restore 5701 hardware bug workaround write method */
8481 tp->write32 = write_op;
8482
8483 /* Unfortunately, we have to delay before the PCI read back.
8484 * Some 575X chips will not even respond to a PCI cfg access
8485 * when the reset command is given to the chip.
8486 *
8487 * How do these hardware designers expect things to work
8488 * properly if the PCI write is posted for a long period
8489 * of time? It is always necessary to have some method by
8490 * which a register read back can occur to push the write
8491 * out which does the reset.
8492 *
8493 * For most tg3 variants the trick below has worked.
8494 * Ho hum...
8495 */
8496 udelay(120);
8497
8498 /* Flush PCI posted writes. The normal MMIO registers
8499 * are inaccessible at this time so this is the only
8500 * way to do this reliably (actually, this is no longer
8501 * the case, see above). I tried to use indirect
8502 * register read/write but this upset some 5701 variants.
8503 */
8504 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8505
8506 udelay(120);
8507
8508 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8509 u16 val16;
8510
8511 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8512 int j;
8513 u32 cfg_val;
8514
8515 /* Wait for link training to complete. */
8516 for (j = 0; j < 5000; j++)
8517 udelay(100);
8518
8519 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8520 pci_write_config_dword(tp->pdev, 0xc4,
8521 cfg_val | (1 << 15));
8522 }
8523
8524 /* Clear the "no snoop" and "relaxed ordering" bits. */
8525 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8526 /*
8527 * Older PCIe devices only support the 128 byte
8528 * MPS setting. Enforce the restriction.
8529 */
8530 if (!tg3_flag(tp, CPMU_PRESENT))
8531 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8532 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8533
8534 /* Clear error status */
8535 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8536 PCI_EXP_DEVSTA_CED |
8537 PCI_EXP_DEVSTA_NFED |
8538 PCI_EXP_DEVSTA_FED |
8539 PCI_EXP_DEVSTA_URD);
8540 }
8541
8542 tg3_restore_pci_state(tp);
8543
8544 tg3_flag_clear(tp, CHIP_RESETTING);
8545 tg3_flag_clear(tp, ERROR_PROCESSED);
8546
8547 val = 0;
8548 if (tg3_flag(tp, 5780_CLASS))
8549 val = tr32(MEMARB_MODE);
8550 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8551
8552 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8553 tg3_stop_fw(tp);
8554 tw32(0x5000, 0x400);
8555 }
8556
8557 if (tg3_flag(tp, IS_SSB_CORE)) {
8558 /*
8559 * BCM4785: To avoid repercussions from using a potentially
8560 * defective internal ROM, stop the Rx RISC CPU, which is not
8561 * required for normal operation.
8562 */
8563 tg3_stop_fw(tp);
8564 tg3_halt_cpu(tp, RX_CPU_BASE);
8565 }
8566
8567 tw32(GRC_MODE, tp->grc_mode);
8568
8569 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8570 val = tr32(0xc4);
8571
8572 tw32(0xc4, val | (1 << 15));
8573 }
8574
8575 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8576 tg3_asic_rev(tp) == ASIC_REV_5705) {
8577 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8578 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8579 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8580 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8581 }
8582
8583 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8584 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8585 val = tp->mac_mode;
8586 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8587 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8588 val = tp->mac_mode;
8589 } else
8590 val = 0;
8591
8592 tw32_f(MAC_MODE, val);
8593 udelay(40);
8594
8595 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8596
8597 err = tg3_poll_fw(tp);
8598 if (err)
8599 return err;
8600
8601 tg3_mdio_start(tp);
8602
8603 if (tg3_flag(tp, PCI_EXPRESS) &&
8604 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8605 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8606 !tg3_flag(tp, 57765_PLUS)) {
8607 val = tr32(0x7c00);
8608
8609 tw32(0x7c00, val | (1 << 25));
8610 }
8611
8612 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8613 val = tr32(TG3_CPMU_CLCK_ORIDE);
8614 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8615 }
8616
8617 /* Reprobe ASF enable state. */
8618 tg3_flag_clear(tp, ENABLE_ASF);
8619 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8620 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8621 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8622 u32 nic_cfg;
8623
8624 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8625 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8626 tg3_flag_set(tp, ENABLE_ASF);
8627 tp->last_event_jiffies = jiffies;
8628 if (tg3_flag(tp, 5750_PLUS))
8629 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8630 }
8631 }
8632
8633 return 0;
8634 }
8635
8636 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8637 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8638
8639 /* tp->lock is held. */
8640 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8641 {
8642 int err;
8643
8644 tg3_stop_fw(tp);
8645
8646 tg3_write_sig_pre_reset(tp, kind);
8647
8648 tg3_abort_hw(tp, silent);
8649 err = tg3_chip_reset(tp);
8650
8651 __tg3_set_mac_addr(tp, 0);
8652
8653 tg3_write_sig_legacy(tp, kind);
8654 tg3_write_sig_post_reset(tp, kind);
8655
8656 if (tp->hw_stats) {
8657 /* Save the stats across chip resets... */
8658 tg3_get_nstats(tp, &tp->net_stats_prev);
8659 tg3_get_estats(tp, &tp->estats_prev);
8660
8661 /* And make sure the next sample is new data */
8662 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8663 }
8664
8665 if (err)
8666 return err;
8667
8668 return 0;
8669 }
8670
8671 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8672 {
8673 struct tg3 *tp = netdev_priv(dev);
8674 struct sockaddr *addr = p;
8675 int err = 0, skip_mac_1 = 0;
8676
8677 if (!is_valid_ether_addr(addr->sa_data))
8678 return -EADDRNOTAVAIL;
8679
8680 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8681
8682 if (!netif_running(dev))
8683 return 0;
8684
8685 if (tg3_flag(tp, ENABLE_ASF)) {
8686 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8687
8688 addr0_high = tr32(MAC_ADDR_0_HIGH);
8689 addr0_low = tr32(MAC_ADDR_0_LOW);
8690 addr1_high = tr32(MAC_ADDR_1_HIGH);
8691 addr1_low = tr32(MAC_ADDR_1_LOW);
8692
8693 /* Skip MAC addr 1 if ASF is using it. */
8694 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8695 !(addr1_high == 0 && addr1_low == 0))
8696 skip_mac_1 = 1;
8697 }
8698 spin_lock_bh(&tp->lock);
8699 __tg3_set_mac_addr(tp, skip_mac_1);
8700 spin_unlock_bh(&tp->lock);
8701
8702 return err;
8703 }
8704
8705 /* tp->lock is held. */
8706 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8707 dma_addr_t mapping, u32 maxlen_flags,
8708 u32 nic_addr)
8709 {
8710 tg3_write_mem(tp,
8711 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8712 ((u64) mapping >> 32));
8713 tg3_write_mem(tp,
8714 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8715 ((u64) mapping & 0xffffffff));
8716 tg3_write_mem(tp,
8717 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8718 maxlen_flags);
8719
8720 if (!tg3_flag(tp, 5705_PLUS))
8721 tg3_write_mem(tp,
8722 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8723 nic_addr);
8724 }
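/* Worked example (illustrative): tg3_set_bdinfo() above splits a 64-bit
 * DMA address into the two 32-bit halves that the NIC SRAM control block
 * expects.  For a hypothetical mapping of 0x0000000123456789ULL:
 *
 *	TG3_64BIT_REG_HIGH word: (u64) mapping >> 32        = 0x00000001
 *	TG3_64BIT_REG_LOW word:  (u64) mapping & 0xffffffff = 0x23456789
 */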
8725
8726
8727 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8728 {
8729 int i = 0;
8730
8731 if (!tg3_flag(tp, ENABLE_TSS)) {
8732 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8733 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8734 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8735 } else {
8736 tw32(HOSTCC_TXCOL_TICKS, 0);
8737 tw32(HOSTCC_TXMAX_FRAMES, 0);
8738 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8739
8740 for (; i < tp->txq_cnt; i++) {
8741 u32 reg;
8742
8743 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8744 tw32(reg, ec->tx_coalesce_usecs);
8745 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8746 tw32(reg, ec->tx_max_coalesced_frames);
8747 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8748 tw32(reg, ec->tx_max_coalesced_frames_irq);
8749 }
8750 }
8751
8752 for (; i < tp->irq_max - 1; i++) {
8753 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8754 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8755 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8756 }
8757 }
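/* Illustrative sketch (not part of the driver): the per-vector host
 * coalescing registers sit at a fixed 0x18-byte stride starting at the
 * *_VEC1 addresses, which is what the loops above rely on.  A
 * hypothetical helper for the tx tick register of queue i:
 */
#if 0
static u32 txcoal_ticks_reg(int i)
{
	/* i == 0 addresses vector 1, i == 1 vector 2, and so on */
	return HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
}
#endif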
8758
8759 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8760 {
8761 int i = 0;
8762 u32 limit = tp->rxq_cnt;
8763
8764 if (!tg3_flag(tp, ENABLE_RSS)) {
8765 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8766 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8767 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8768 limit--;
8769 } else {
8770 tw32(HOSTCC_RXCOL_TICKS, 0);
8771 tw32(HOSTCC_RXMAX_FRAMES, 0);
8772 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8773 }
8774
8775 for (; i < limit; i++) {
8776 u32 reg;
8777
8778 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8779 tw32(reg, ec->rx_coalesce_usecs);
8780 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8781 tw32(reg, ec->rx_max_coalesced_frames);
8782 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8783 tw32(reg, ec->rx_max_coalesced_frames_irq);
8784 }
8785
8786 for (; i < tp->irq_max - 1; i++) {
8787 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8788 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8789 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8790 }
8791 }
8792
8793 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8794 {
8795 tg3_coal_tx_init(tp, ec);
8796 tg3_coal_rx_init(tp, ec);
8797
8798 if (!tg3_flag(tp, 5705_PLUS)) {
8799 u32 val = ec->stats_block_coalesce_usecs;
8800
8801 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8802 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8803
8804 if (!tp->link_up)
8805 val = 0;
8806
8807 tw32(HOSTCC_STAT_COAL_TICKS, val);
8808 }
8809 }
8810
8811 /* tp->lock is held. */
8812 static void tg3_rings_reset(struct tg3 *tp)
8813 {
8814 int i;
8815 u32 stblk, txrcb, rxrcb, limit;
8816 struct tg3_napi *tnapi = &tp->napi[0];
8817
8818 /* Disable all transmit rings but the first. */
8819 if (!tg3_flag(tp, 5705_PLUS))
8820 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8821 else if (tg3_flag(tp, 5717_PLUS))
8822 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8823 else if (tg3_flag(tp, 57765_CLASS) ||
8824 tg3_asic_rev(tp) == ASIC_REV_5762)
8825 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8826 else
8827 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8828
8829 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8830 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8831 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8832 BDINFO_FLAGS_DISABLED);
8833
8834
8835 /* Disable all receive return rings but the first. */
8836 if (tg3_flag(tp, 5717_PLUS))
8837 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8838 else if (!tg3_flag(tp, 5705_PLUS))
8839 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8840 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8841 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8842 tg3_flag(tp, 57765_CLASS))
8843 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8844 else
8845 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8846
8847 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8848 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8849 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8850 BDINFO_FLAGS_DISABLED);
8851
8852 /* Disable interrupts */
8853 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8854 tp->napi[0].chk_msi_cnt = 0;
8855 tp->napi[0].last_rx_cons = 0;
8856 tp->napi[0].last_tx_cons = 0;
8857
8858 /* Zero mailbox registers. */
8859 if (tg3_flag(tp, SUPPORT_MSIX)) {
8860 for (i = 1; i < tp->irq_max; i++) {
8861 tp->napi[i].tx_prod = 0;
8862 tp->napi[i].tx_cons = 0;
8863 if (tg3_flag(tp, ENABLE_TSS))
8864 tw32_mailbox(tp->napi[i].prodmbox, 0);
8865 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8866 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8867 tp->napi[i].chk_msi_cnt = 0;
8868 tp->napi[i].last_rx_cons = 0;
8869 tp->napi[i].last_tx_cons = 0;
8870 }
8871 if (!tg3_flag(tp, ENABLE_TSS))
8872 tw32_mailbox(tp->napi[0].prodmbox, 0);
8873 } else {
8874 tp->napi[0].tx_prod = 0;
8875 tp->napi[0].tx_cons = 0;
8876 tw32_mailbox(tp->napi[0].prodmbox, 0);
8877 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8878 }
8879
8880 /* Make sure the NIC-based send BD rings are disabled. */
8881 if (!tg3_flag(tp, 5705_PLUS)) {
8882 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8883 for (i = 0; i < 16; i++)
8884 tw32_tx_mbox(mbox + i * 8, 0);
8885 }
8886
8887 txrcb = NIC_SRAM_SEND_RCB;
8888 rxrcb = NIC_SRAM_RCV_RET_RCB;
8889
8890 /* Clear status block in ram. */
8891 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8892
8893 /* Set status block DMA address */
8894 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8895 ((u64) tnapi->status_mapping >> 32));
8896 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8897 ((u64) tnapi->status_mapping & 0xffffffff));
8898
8899 if (tnapi->tx_ring) {
8900 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8901 (TG3_TX_RING_SIZE <<
8902 BDINFO_FLAGS_MAXLEN_SHIFT),
8903 NIC_SRAM_TX_BUFFER_DESC);
8904 txrcb += TG3_BDINFO_SIZE;
8905 }
8906
8907 if (tnapi->rx_rcb) {
8908 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8909 (tp->rx_ret_ring_mask + 1) <<
8910 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8911 rxrcb += TG3_BDINFO_SIZE;
8912 }
8913
8914 stblk = HOSTCC_STATBLCK_RING1;
8915
8916 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8917 u64 mapping = (u64)tnapi->status_mapping;
8918 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8919 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8920
8921 /* Clear status block in ram. */
8922 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8923
8924 if (tnapi->tx_ring) {
8925 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8926 (TG3_TX_RING_SIZE <<
8927 BDINFO_FLAGS_MAXLEN_SHIFT),
8928 NIC_SRAM_TX_BUFFER_DESC);
8929 txrcb += TG3_BDINFO_SIZE;
8930 }
8931
8932 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8933 ((tp->rx_ret_ring_mask + 1) <<
8934 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8935
8936 stblk += 8;
8937 rxrcb += TG3_BDINFO_SIZE;
8938 }
8939 }
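/* Worked example (illustrative): the per-vector status block address
 * registers begin at HOSTCC_STATBLCK_RING1 and occupy 8 bytes each (one
 * 64-bit high/low pair), which is why the loop above advances stblk by 8
 * per vector.  For a hypothetical vector n >= 1:
 *
 *	stblk(n) = HOSTCC_STATBLCK_RING1 + (n - 1) * 8
 */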
8940
8941 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8942 {
8943 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8944
8945 if (!tg3_flag(tp, 5750_PLUS) ||
8946 tg3_flag(tp, 5780_CLASS) ||
8947 tg3_asic_rev(tp) == ASIC_REV_5750 ||
8948 tg3_asic_rev(tp) == ASIC_REV_5752 ||
8949 tg3_flag(tp, 57765_PLUS))
8950 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8951 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8952 tg3_asic_rev(tp) == ASIC_REV_5787)
8953 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8954 else
8955 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8956
8957 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8958 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8959
8960 val = min(nic_rep_thresh, host_rep_thresh);
8961 tw32(RCVBDI_STD_THRESH, val);
8962
8963 if (tg3_flag(tp, 57765_PLUS))
8964 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8965
8966 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8967 return;
8968
8969 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8970
8971 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8972
8973 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8974 tw32(RCVBDI_JUMBO_THRESH, val);
8975
8976 if (tg3_flag(tp, 57765_PLUS))
8977 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8978 }
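/* Worked example (illustrative): the standard ring replenish threshold
 * computed above, assuming hypothetical values bdcache_maxcnt = 8,
 * rx_std_max_post = 32 and rx_pending = 200:
 *
 *	nic_rep_thresh    = min(8 / 2, 32)  = 4
 *	host_rep_thresh   = max(200 / 8, 1) = 25
 *	RCVBDI_STD_THRESH = min(4, 25)      = 4
 */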
8979
8980 static inline u32 calc_crc(unsigned char *buf, int len)
8981 {
8982 u32 reg;
8983 u32 tmp;
8984 int j, k;
8985
8986 reg = 0xffffffff;
8987
8988 for (j = 0; j < len; j++) {
8989 reg ^= buf[j];
8990
8991 for (k = 0; k < 8; k++) {
8992 tmp = reg & 0x01;
8993
8994 reg >>= 1;
8995
8996 if (tmp)
8997 reg ^= 0xedb88320;
8998 }
8999 }
9000
9001 return ~reg;
9002 }
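/* Illustrative sketch (not part of the driver): calc_crc() above is a
 * bit-serial CRC-32 using the reflected ethernet polynomial 0xedb88320.
 * __tg3_set_rx_mode() below folds the CRC into a 128-bit multicast hash
 * filter position; the hypothetical helper here just factors out that
 * reduction.
 */
#if 0
static void mc_hash_pos(const u8 *addr, u32 *regidx, u32 *bitpos)
{
	u32 bit = ~calc_crc((unsigned char *)addr, ETH_ALEN) & 0x7f;

	*regidx = (bit & 0x60) >> 5;	/* which of MAC_HASH_REG_0..3 */
	*bitpos = bit & 0x1f;		/* which bit within that register */
}
#endif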
9003
9004 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9005 {
9006 /* accept or reject all multicast frames */
9007 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9008 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9009 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9010 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9011 }
9012
9013 static void __tg3_set_rx_mode(struct net_device *dev)
9014 {
9015 struct tg3 *tp = netdev_priv(dev);
9016 u32 rx_mode;
9017
9018 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9019 RX_MODE_KEEP_VLAN_TAG);
9020
9021 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9022 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9023 * flag clear.
9024 */
9025 if (!tg3_flag(tp, ENABLE_ASF))
9026 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9027 #endif
9028
9029 if (dev->flags & IFF_PROMISC) {
9030 /* Promiscuous mode. */
9031 rx_mode |= RX_MODE_PROMISC;
9032 } else if (dev->flags & IFF_ALLMULTI) {
9033 /* Accept all multicast. */
9034 tg3_set_multi(tp, 1);
9035 } else if (netdev_mc_empty(dev)) {
9036 /* Reject all multicast. */
9037 tg3_set_multi(tp, 0);
9038 } else {
9039 /* Accept one or more multicast(s). */
9040 struct netdev_hw_addr *ha;
9041 u32 mc_filter[4] = { 0, };
9042 u32 regidx;
9043 u32 bit;
9044 u32 crc;
9045
9046 netdev_for_each_mc_addr(ha, dev) {
9047 crc = calc_crc(ha->addr, ETH_ALEN);
9048 bit = ~crc & 0x7f;
9049 regidx = (bit & 0x60) >> 5;
9050 bit &= 0x1f;
9051 mc_filter[regidx] |= (1 << bit);
9052 }
9053
9054 tw32(MAC_HASH_REG_0, mc_filter[0]);
9055 tw32(MAC_HASH_REG_1, mc_filter[1]);
9056 tw32(MAC_HASH_REG_2, mc_filter[2]);
9057 tw32(MAC_HASH_REG_3, mc_filter[3]);
9058 }
9059
9060 if (rx_mode != tp->rx_mode) {
9061 tp->rx_mode = rx_mode;
9062 tw32_f(MAC_RX_MODE, rx_mode);
9063 udelay(10);
9064 }
9065 }
9066
9067 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9068 {
9069 int i;
9070
9071 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9072 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9073 }
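/* Worked example (illustrative): ethtool_rxfh_indir_default(i, qcnt)
 * spreads table slots round-robin across the rx queues, i.e. slot i
 * maps to queue i % qcnt.  With a hypothetical qcnt of 4, the 128-entry
 * table reads 0, 1, 2, 3, 0, 1, 2, 3, ...
 */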
9074
9075 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9076 {
9077 int i;
9078
9079 if (!tg3_flag(tp, SUPPORT_MSIX))
9080 return;
9081
9082 if (tp->rxq_cnt == 1) {
9083 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9084 return;
9085 }
9086
9087 /* Validate table against current IRQ count */
9088 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9089 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9090 break;
9091 }
9092
9093 if (i != TG3_RSS_INDIR_TBL_SIZE)
9094 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9095 }
9096
9097 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9098 {
9099 int i = 0;
9100 u32 reg = MAC_RSS_INDIR_TBL_0;
9101
9102 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9103 u32 val = tp->rss_ind_tbl[i];
9104 i++;
9105 for (; i % 8; i++) {
9106 val <<= 4;
9107 val |= tp->rss_ind_tbl[i];
9108 }
9109 tw32(reg, val);
9110 reg += 4;
9111 }
9112 }
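/* Illustrative sketch (not part of the driver): the loop above packs
 * eight 4-bit indirection entries into each 32-bit MAC_RSS_INDIR_TBL
 * register, first entry in the most significant nibble.  A hypothetical
 * unrolled equivalent for a single register:
 */
#if 0
static u32 pack_indir_reg(const u8 *tbl)	/* tbl points at 8 entries */
{
	u32 val = 0;
	int k;

	for (k = 0; k < 8; k++)
		val = (val << 4) | (tbl[k] & 0xf);

	return val;
}
#endif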
9113
9114 /* tp->lock is held. */
9115 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9116 {
9117 u32 val, rdmac_mode;
9118 int i, err, limit;
9119 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9120
9121 tg3_disable_ints(tp);
9122
9123 tg3_stop_fw(tp);
9124
9125 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9126
9127 if (tg3_flag(tp, INIT_COMPLETE))
9128 tg3_abort_hw(tp, 1);
9129
9130 /* Enable MAC control of LPI */
9131 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9132 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9133 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9134 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9135 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9136
9137 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9138
9139 tw32_f(TG3_CPMU_EEE_CTRL,
9140 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9141
9142 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9143 TG3_CPMU_EEEMD_LPI_IN_TX |
9144 TG3_CPMU_EEEMD_LPI_IN_RX |
9145 TG3_CPMU_EEEMD_EEE_ENABLE;
9146
9147 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9148 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9149
9150 if (tg3_flag(tp, ENABLE_APE))
9151 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9152
9153 tw32_f(TG3_CPMU_EEE_MODE, val);
9154
9155 tw32_f(TG3_CPMU_EEE_DBTMR1,
9156 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9157 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9158
9159 tw32_f(TG3_CPMU_EEE_DBTMR2,
9160 TG3_CPMU_DBTMR2_APE_TX_2047US |
9161 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9162 }
9163
9164 if (reset_phy)
9165 tg3_phy_reset(tp);
9166
9167 err = tg3_chip_reset(tp);
9168 if (err)
9169 return err;
9170
9171 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9172
9173 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9174 val = tr32(TG3_CPMU_CTRL);
9175 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9176 tw32(TG3_CPMU_CTRL, val);
9177
9178 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9179 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9180 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9181 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9182
9183 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9184 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9185 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9186 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9187
9188 val = tr32(TG3_CPMU_HST_ACC);
9189 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9190 val |= CPMU_HST_ACC_MACCLK_6_25;
9191 tw32(TG3_CPMU_HST_ACC, val);
9192 }
9193
9194 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9195 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9196 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9197 PCIE_PWR_MGMT_L1_THRESH_4MS;
9198 tw32(PCIE_PWR_MGMT_THRESH, val);
9199
9200 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9201 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9202
9203 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9204
9205 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9206 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9207 }
9208
9209 if (tg3_flag(tp, L1PLLPD_EN)) {
9210 u32 grc_mode = tr32(GRC_MODE);
9211
9212 /* Access the lower 1K of PL PCIE block registers. */
9213 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9214 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9215
9216 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9217 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9218 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9219
9220 tw32(GRC_MODE, grc_mode);
9221 }
9222
9223 if (tg3_flag(tp, 57765_CLASS)) {
9224 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9225 u32 grc_mode = tr32(GRC_MODE);
9226
9227 /* Access the lower 1K of PL PCIE block registers. */
9228 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9229 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9230
9231 val = tr32(TG3_PCIE_TLDLPL_PORT +
9232 TG3_PCIE_PL_LO_PHYCTL5);
9233 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9234 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9235
9236 tw32(GRC_MODE, grc_mode);
9237 }
9238
9239 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9240 u32 grc_mode;
9241
9242 /* Fix transmit hangs */
9243 val = tr32(TG3_CPMU_PADRNG_CTL);
9244 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9245 tw32(TG3_CPMU_PADRNG_CTL, val);
9246
9247 grc_mode = tr32(GRC_MODE);
9248
9249 /* Access the lower 1K of DL PCIE block registers. */
9250 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9251 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9252
9253 val = tr32(TG3_PCIE_TLDLPL_PORT +
9254 TG3_PCIE_DL_LO_FTSMAX);
9255 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9256 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9257 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9258
9259 tw32(GRC_MODE, grc_mode);
9260 }
9261
9262 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9263 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9264 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9265 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9266 }
9267
9268 /* This works around an issue with Athlon chipsets on
9269 * B3 tigon3 silicon. This bit has no effect on any
9270 * other revision. But do not set this on PCI Express
9271 * chips and don't even touch the clocks if the CPMU is present.
9272 */
9273 if (!tg3_flag(tp, CPMU_PRESENT)) {
9274 if (!tg3_flag(tp, PCI_EXPRESS))
9275 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9276 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9277 }
9278
9279 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9280 tg3_flag(tp, PCIX_MODE)) {
9281 val = tr32(TG3PCI_PCISTATE);
9282 val |= PCISTATE_RETRY_SAME_DMA;
9283 tw32(TG3PCI_PCISTATE, val);
9284 }
9285
9286 if (tg3_flag(tp, ENABLE_APE)) {
9287 /* Allow reads and writes to the
9288 * APE register and memory space.
9289 */
9290 val = tr32(TG3PCI_PCISTATE);
9291 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9292 PCISTATE_ALLOW_APE_SHMEM_WR |
9293 PCISTATE_ALLOW_APE_PSPACE_WR;
9294 tw32(TG3PCI_PCISTATE, val);
9295 }
9296
9297 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9298 /* Enable some hw fixes. */
9299 val = tr32(TG3PCI_MSI_DATA);
9300 val |= (1 << 26) | (1 << 28) | (1 << 29);
9301 tw32(TG3PCI_MSI_DATA, val);
9302 }
9303
9304 /* Descriptor ring init may make accesses to the
9305 * NIC SRAM area to set up the TX descriptors, so we
9306 * can only do this after the hardware has been
9307 * successfully reset.
9308 */
9309 err = tg3_init_rings(tp);
9310 if (err)
9311 return err;
9312
9313 if (tg3_flag(tp, 57765_PLUS)) {
9314 val = tr32(TG3PCI_DMA_RW_CTRL) &
9315 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9316 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9317 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9318 if (!tg3_flag(tp, 57765_CLASS) &&
9319 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9320 tg3_asic_rev(tp) != ASIC_REV_5762)
9321 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9322 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9323 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9324 tg3_asic_rev(tp) != ASIC_REV_5761) {
9325 /* This value is determined during the probe time DMA
9326 * engine test, tg3_test_dma.
9327 */
9328 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9329 }
9330
9331 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9332 GRC_MODE_4X_NIC_SEND_RINGS |
9333 GRC_MODE_NO_TX_PHDR_CSUM |
9334 GRC_MODE_NO_RX_PHDR_CSUM);
9335 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9336
9337 /* Pseudo-header checksum is done by hardware logic and not
9338 * the offload processors, so make the chip do the pseudo-
9339 * header checksums on receive. For transmit it is more
9340 * convenient to do the pseudo-header checksum in software
9341 * as Linux does that on transmit for us in all cases.
9342 */
9343 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9344
9345 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9346 if (tp->rxptpctl)
9347 tw32(TG3_RX_PTP_CTL,
9348 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9349
9350 if (tg3_flag(tp, PTP_CAPABLE))
9351 val |= GRC_MODE_TIME_SYNC_ENABLE;
9352
9353 tw32(GRC_MODE, tp->grc_mode | val);
9354
9355 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9356 val = tr32(GRC_MISC_CFG);
9357 val &= ~0xff;
9358 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9359 tw32(GRC_MISC_CFG, val);
9360
9361 /* Initialize MBUF/DESC pool. */
9362 if (tg3_flag(tp, 5750_PLUS)) {
9363 /* Do nothing. */
9364 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9365 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9366 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9367 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9368 else
9369 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9370 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9371 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9372 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9373 int fw_len;
9374
9375 fw_len = tp->fw_len;
9376 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9377 tw32(BUFMGR_MB_POOL_ADDR,
9378 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9379 tw32(BUFMGR_MB_POOL_SIZE,
9380 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9381 }
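	/* Worked example (illustrative): the fw_len rounding above is the
	 * usual align-up-to-128 idiom, (len + 127) & ~127.  A hypothetical
	 * firmware length of 0x1234 bytes rounds up to 0x1280.
	 */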
9382
9383 if (tp->dev->mtu <= ETH_DATA_LEN) {
9384 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9385 tp->bufmgr_config.mbuf_read_dma_low_water);
9386 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9387 tp->bufmgr_config.mbuf_mac_rx_low_water);
9388 tw32(BUFMGR_MB_HIGH_WATER,
9389 tp->bufmgr_config.mbuf_high_water);
9390 } else {
9391 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9392 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9393 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9394 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9395 tw32(BUFMGR_MB_HIGH_WATER,
9396 tp->bufmgr_config.mbuf_high_water_jumbo);
9397 }
9398 tw32(BUFMGR_DMA_LOW_WATER,
9399 tp->bufmgr_config.dma_low_water);
9400 tw32(BUFMGR_DMA_HIGH_WATER,
9401 tp->bufmgr_config.dma_high_water);
9402
9403 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9404 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9405 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9406 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9407 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9408 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9409 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9410 tw32(BUFMGR_MODE, val);
9411 for (i = 0; i < 2000; i++) {
9412 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9413 break;
9414 udelay(10);
9415 }
9416 if (i >= 2000) {
9417 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9418 return -ENODEV;
9419 }
9420
9421 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9422 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9423
9424 tg3_setup_rxbd_thresholds(tp);
9425
9426 /* Initialize TG3_BDINFO's at:
9427 * RCVDBDI_STD_BD: standard eth size rx ring
9428 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9429 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9430 *
9431 * like so:
9432 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9433 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9434 * ring attribute flags
9435 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9436 *
9437 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9438 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9439 *
9440 * The size of each ring is fixed in the firmware, but the location is
9441 * configurable.
9442 */
9443 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9444 ((u64) tpr->rx_std_mapping >> 32));
9445 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9446 ((u64) tpr->rx_std_mapping & 0xffffffff));
9447 if (!tg3_flag(tp, 5717_PLUS))
9448 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9449 NIC_SRAM_RX_BUFFER_DESC);
9450
9451 /* Disable the mini ring */
9452 if (!tg3_flag(tp, 5705_PLUS))
9453 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9454 BDINFO_FLAGS_DISABLED);
9455
9456 /* Program the jumbo buffer descriptor ring control
9457 * blocks on those devices that have them.
9458 */
9459 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9460 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9461
9462 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9463 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9464 ((u64) tpr->rx_jmb_mapping >> 32));
9465 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9466 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9467 val = TG3_RX_JMB_RING_SIZE(tp) <<
9468 BDINFO_FLAGS_MAXLEN_SHIFT;
9469 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9470 val | BDINFO_FLAGS_USE_EXT_RECV);
9471 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9472 tg3_flag(tp, 57765_CLASS) ||
9473 tg3_asic_rev(tp) == ASIC_REV_5762)
9474 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9475 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9476 } else {
9477 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9478 BDINFO_FLAGS_DISABLED);
9479 }
9480
9481 if (tg3_flag(tp, 57765_PLUS)) {
9482 val = TG3_RX_STD_RING_SIZE(tp);
9483 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9484 val |= (TG3_RX_STD_DMA_SZ << 2);
9485 } else
9486 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9487 } else
9488 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9489
9490 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9491
9492 tpr->rx_std_prod_idx = tp->rx_pending;
9493 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9494
9495 tpr->rx_jmb_prod_idx =
9496 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9497 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9498
9499 tg3_rings_reset(tp);
9500
9501 /* Initialize MAC address and backoff seed. */
9502 __tg3_set_mac_addr(tp, 0);
9503
9504 /* MTU + ethernet header + FCS + optional VLAN tag */
9505 tw32(MAC_RX_MTU_SIZE,
9506 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
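	/* Worked example (illustrative): with the default 1500-byte MTU the
	 * write above programs 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
	 * + 4 (VLAN_HLEN) = 1522 bytes.
	 */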
9507
9508 /* The slot time is changed by tg3_setup_phy if we
9509 * run at gigabit with half duplex.
9510 */
9511 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9512 (6 << TX_LENGTHS_IPG_SHIFT) |
9513 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9514
9515 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9516 tg3_asic_rev(tp) == ASIC_REV_5762)
9517 val |= tr32(MAC_TX_LENGTHS) &
9518 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9519 TX_LENGTHS_CNT_DWN_VAL_MSK);
9520
9521 tw32(MAC_TX_LENGTHS, val);
9522
9523 /* Receive rules. */
9524 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9525 tw32(RCVLPC_CONFIG, 0x0181);
9526
9527 /* Calculate the RDMAC_MODE setting early; we need it to determine
9528 * the RCVLPC_STATE_ENABLE mask.
9529 */
9530 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9531 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9532 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9533 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9534 RDMAC_MODE_LNGREAD_ENAB);
9535
9536 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9537 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9538
9539 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9540 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9541 tg3_asic_rev(tp) == ASIC_REV_57780)
9542 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9543 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9544 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9545
9546 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9547 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9548 if (tg3_flag(tp, TSO_CAPABLE) &&
9549 tg3_asic_rev(tp) == ASIC_REV_5705) {
9550 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9551 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9552 !tg3_flag(tp, IS_5788)) {
9553 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9554 }
9555 }
9556
9557 if (tg3_flag(tp, PCI_EXPRESS))
9558 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9559
9560 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9561 tp->dma_limit = 0;
9562 if (tp->dev->mtu <= ETH_DATA_LEN) {
9563 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9564 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9565 }
9566 }
9567
9568 if (tg3_flag(tp, HW_TSO_1) ||
9569 tg3_flag(tp, HW_TSO_2) ||
9570 tg3_flag(tp, HW_TSO_3))
9571 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9572
9573 if (tg3_flag(tp, 57765_PLUS) ||
9574 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9575 tg3_asic_rev(tp) == ASIC_REV_57780)
9576 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9577
9578 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9579 tg3_asic_rev(tp) == ASIC_REV_5762)
9580 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9581
9582 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9583 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9584 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9585 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9586 tg3_flag(tp, 57765_PLUS)) {
9587 u32 tgtreg;
9588
9589 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9590 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9591 else
9592 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9593
9594 val = tr32(tgtreg);
9595 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9596 tg3_asic_rev(tp) == ASIC_REV_5762) {
9597 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9598 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9599 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9600 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9601 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9602 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9603 }
9604 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9605 }
9606
9607 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9608 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9609 tg3_asic_rev(tp) == ASIC_REV_5762) {
9610 u32 tgtreg;
9611
9612 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9613 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9614 else
9615 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9616
9617 val = tr32(tgtreg);
9618 tw32(tgtreg, val |
9619 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9620 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9621 }
9622
9623 /* Receive/send statistics. */
9624 if (tg3_flag(tp, 5750_PLUS)) {
9625 val = tr32(RCVLPC_STATS_ENABLE);
9626 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9627 tw32(RCVLPC_STATS_ENABLE, val);
9628 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9629 tg3_flag(tp, TSO_CAPABLE)) {
9630 val = tr32(RCVLPC_STATS_ENABLE);
9631 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9632 tw32(RCVLPC_STATS_ENABLE, val);
9633 } else {
9634 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9635 }
9636 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9637 tw32(SNDDATAI_STATSENAB, 0xffffff);
9638 tw32(SNDDATAI_STATSCTRL,
9639 (SNDDATAI_SCTRL_ENABLE |
9640 SNDDATAI_SCTRL_FASTUPD));
9641
9642 /* Set up the host coalescing engine. */
9643 tw32(HOSTCC_MODE, 0);
9644 for (i = 0; i < 2000; i++) {
9645 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9646 break;
9647 udelay(10);
9648 }
9649
9650 __tg3_set_coalesce(tp, &tp->coal);
9651
9652 if (!tg3_flag(tp, 5705_PLUS)) {
9653 /* Status/statistics block address. See tg3_timer,
9654 * the tg3_periodic_fetch_stats call there, and
9655 * tg3_get_stats to see how this works for 5705/5750 chips.
9656 */
9657 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9658 ((u64) tp->stats_mapping >> 32));
9659 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9660 ((u64) tp->stats_mapping & 0xffffffff));
9661 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9662
9663 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9664
9665 /* Clear statistics and status block memory areas */
9666 for (i = NIC_SRAM_STATS_BLK;
9667 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9668 i += sizeof(u32)) {
9669 tg3_write_mem(tp, i, 0);
9670 udelay(40);
9671 }
9672 }
9673
9674 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9675
9676 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9677 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9678 if (!tg3_flag(tp, 5705_PLUS))
9679 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9680
9681 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9682 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9683 /* reset to prevent losing 1st rx packet intermittently */
9684 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9685 udelay(10);
9686 }
9687
9688 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9689 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9690 MAC_MODE_FHDE_ENABLE;
9691 if (tg3_flag(tp, ENABLE_APE))
9692 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9693 if (!tg3_flag(tp, 5705_PLUS) &&
9694 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9695 tg3_asic_rev(tp) != ASIC_REV_5700)
9696 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9697 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9698 udelay(40);
9699
9700 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9701 * If TG3_FLAG_IS_NIC is zero, we should read the
9702 * register to preserve the GPIO settings for LOMs. The GPIOs,
9703 * whether used as inputs or outputs, are set by boot code after
9704 * reset.
9705 */
9706 if (!tg3_flag(tp, IS_NIC)) {
9707 u32 gpio_mask;
9708
9709 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9710 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9711 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9712
9713 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9714 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9715 GRC_LCLCTRL_GPIO_OUTPUT3;
9716
9717 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9718 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9719
9720 tp->grc_local_ctrl &= ~gpio_mask;
9721 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9722
9723 /* GPIO1 must be driven high for eeprom write protect */
9724 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9725 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9726 GRC_LCLCTRL_GPIO_OUTPUT1);
9727 }
9728 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9729 udelay(100);
9730
9731 if (tg3_flag(tp, USING_MSIX)) {
9732 val = tr32(MSGINT_MODE);
9733 val |= MSGINT_MODE_ENABLE;
9734 if (tp->irq_cnt > 1)
9735 val |= MSGINT_MODE_MULTIVEC_EN;
9736 if (!tg3_flag(tp, 1SHOT_MSI))
9737 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9738 tw32(MSGINT_MODE, val);
9739 }
9740
9741 if (!tg3_flag(tp, 5705_PLUS)) {
9742 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9743 udelay(40);
9744 }
9745
9746 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9747 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9748 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9749 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9750 WDMAC_MODE_LNGREAD_ENAB);
9751
9752 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9753 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9754 if (tg3_flag(tp, TSO_CAPABLE) &&
9755 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9756 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9757 /* nothing */
9758 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9759 !tg3_flag(tp, IS_5788)) {
9760 val |= WDMAC_MODE_RX_ACCEL;
9761 }
9762 }
9763
9764 /* Enable host coalescing bug fix */
9765 if (tg3_flag(tp, 5755_PLUS))
9766 val |= WDMAC_MODE_STATUS_TAG_FIX;
9767
9768 if (tg3_asic_rev(tp) == ASIC_REV_5785)
9769 val |= WDMAC_MODE_BURST_ALL_DATA;
9770
9771 tw32_f(WDMAC_MODE, val);
9772 udelay(40);
9773
9774 if (tg3_flag(tp, PCIX_MODE)) {
9775 u16 pcix_cmd;
9776
9777 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9778 &pcix_cmd);
9779 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9780 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9781 pcix_cmd |= PCI_X_CMD_READ_2K;
9782 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9783 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9784 pcix_cmd |= PCI_X_CMD_READ_2K;
9785 }
9786 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9787 pcix_cmd);
9788 }
9789
9790 tw32_f(RDMAC_MODE, rdmac_mode);
9791 udelay(40);
9792
9793 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9794 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9795 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9796 break;
9797 }
9798 if (i < TG3_NUM_RDMA_CHANNELS) {
9799 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9800 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9801 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9802 tg3_flag_set(tp, 5719_RDMA_BUG);
9803 }
9804 }
9805
9806 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9807 if (!tg3_flag(tp, 5705_PLUS))
9808 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9809
9810 if (tg3_asic_rev(tp) == ASIC_REV_5761)
9811 tw32(SNDDATAC_MODE,
9812 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9813 else
9814 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9815
9816 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9817 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9818 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9819 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9820 val |= RCVDBDI_MODE_LRG_RING_SZ;
9821 tw32(RCVDBDI_MODE, val);
9822 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9823 if (tg3_flag(tp, HW_TSO_1) ||
9824 tg3_flag(tp, HW_TSO_2) ||
9825 tg3_flag(tp, HW_TSO_3))
9826 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9827 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9828 if (tg3_flag(tp, ENABLE_TSS))
9829 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9830 tw32(SNDBDI_MODE, val);
9831 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9832
9833 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9834 err = tg3_load_5701_a0_firmware_fix(tp);
9835 if (err)
9836 return err;
9837 }
9838
9839 if (tg3_flag(tp, TSO_CAPABLE)) {
9840 err = tg3_load_tso_firmware(tp);
9841 if (err)
9842 return err;
9843 }
9844
9845 tp->tx_mode = TX_MODE_ENABLE;
9846
9847 if (tg3_flag(tp, 5755_PLUS) ||
9848 tg3_asic_rev(tp) == ASIC_REV_5906)
9849 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9850
9851 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9852 tg3_asic_rev(tp) == ASIC_REV_5762) {
9853 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9854 tp->tx_mode &= ~val;
9855 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9856 }
9857
9858 tw32_f(MAC_TX_MODE, tp->tx_mode);
9859 udelay(100);
9860
9861 if (tg3_flag(tp, ENABLE_RSS)) {
9862 tg3_rss_write_indir_tbl(tp);
9863
9864 /* Setup the "secret" hash key. */
9865 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9866 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9867 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9868 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9869 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9870 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9871 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9872 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9873 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9874 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9875 }
9876
9877 tp->rx_mode = RX_MODE_ENABLE;
9878 if (tg3_flag(tp, 5755_PLUS))
9879 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9880
9881 if (tg3_flag(tp, ENABLE_RSS))
9882 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9883 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9884 RX_MODE_RSS_IPV6_HASH_EN |
9885 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9886 RX_MODE_RSS_IPV4_HASH_EN |
9887 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9888
9889 tw32_f(MAC_RX_MODE, tp->rx_mode);
9890 udelay(10);
9891
9892 tw32(MAC_LED_CTRL, tp->led_ctrl);
9893
9894 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9895 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9896 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9897 udelay(10);
9898 }
9899 tw32_f(MAC_RX_MODE, tp->rx_mode);
9900 udelay(10);
9901
9902 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9903 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9904 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9905 /* Set drive transmission level to 1.2V */
9906 /* only if the signal pre-emphasis bit is not set */
9907 val = tr32(MAC_SERDES_CFG);
9908 val &= 0xfffff000;
9909 val |= 0x880;
9910 tw32(MAC_SERDES_CFG, val);
9911 }
9912 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
9913 tw32(MAC_SERDES_CFG, 0x616000);
9914 }
9915
9916 /* Prevent chip from dropping frames when flow control
9917 * is enabled.
9918 */
9919 if (tg3_flag(tp, 57765_CLASS))
9920 val = 1;
9921 else
9922 val = 2;
9923 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9924
9925 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
9926 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9927 /* Use hardware link auto-negotiation */
9928 tg3_flag_set(tp, HW_AUTONEG);
9929 }
9930
9931 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9932 tg3_asic_rev(tp) == ASIC_REV_5714) {
9933 u32 tmp;
9934
9935 tmp = tr32(SERDES_RX_CTRL);
9936 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9937 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9938 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9939 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9940 }
9941
9942 if (!tg3_flag(tp, USE_PHYLIB)) {
9943 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9944 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9945
9946 err = tg3_setup_phy(tp, 0);
9947 if (err)
9948 return err;
9949
9950 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9951 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9952 u32 tmp;
9953
9954 /* Clear CRC stats. */
9955 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9956 tg3_writephy(tp, MII_TG3_TEST1,
9957 tmp | MII_TG3_TEST1_CRC_EN);
9958 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9959 }
9960 }
9961 }
9962
9963 __tg3_set_rx_mode(tp->dev);
9964
9965 /* Initialize receive rules. */
9966 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9967 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9968 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9969 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9970
9971 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9972 limit = 8;
9973 else
9974 limit = 16;
9975 if (tg3_flag(tp, ENABLE_ASF))
9976 limit -= 4;
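/* Disable the unused receive rules by cascading down from the
 * highest rule to the configured limit; each case intentionally
 * falls through to the one below it.
 */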
9977 switch (limit) {
9978 case 16:
9979 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9980 case 15:
9981 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9982 case 14:
9983 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9984 case 13:
9985 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9986 case 12:
9987 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9988 case 11:
9989 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9990 case 10:
9991 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9992 case 9:
9993 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9994 case 8:
9995 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9996 case 7:
9997 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9998 case 6:
9999 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10000 case 5:
10001 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10002 case 4:
10003 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10004 case 3:
10005 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10006 case 2:
10007 case 1:
10008
10009 default:
10010 break;
10011 }
10012
10013 if (tg3_flag(tp, ENABLE_APE))
10014 /* Write our heartbeat update interval to APE. */
10015 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10016 APE_HOST_HEARTBEAT_INT_DISABLE);
10017
10018 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10019
10020 return 0;
10021 }
10022
10023 /* Called at device open time to get the chip ready for
10024 * packet processing. Invoked with tp->lock held.
10025 */
10026 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
10027 {
10028 tg3_switch_clocks(tp);
10029
10030 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10031
10032 return tg3_reset_hw(tp, reset_phy);
10033 }
10034
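/* Read each OCIR record out of the APE scratchpad, blanking any
 * record that lacks the signature or is not marked active.
 */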
10035 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10036 {
10037 int i;
10038
10039 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10040 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10041
10042 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10043 off += len;
10044
10045 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10046 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10047 memset(ocir, 0, TG3_OCIR_LEN);
10048 }
10049 }
10050
10051 /* sysfs attributes for hwmon */
10052 static ssize_t tg3_show_temp(struct device *dev,
10053 struct device_attribute *devattr, char *buf)
10054 {
10055 struct pci_dev *pdev = to_pci_dev(dev);
10056 struct net_device *netdev = pci_get_drvdata(pdev);
10057 struct tg3 *tp = netdev_priv(netdev);
10058 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10059 u32 temperature;
10060
10061 spin_lock_bh(&tp->lock);
10062 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10063 sizeof(temperature));
10064 spin_unlock_bh(&tp->lock);
10065 return sprintf(buf, "%u\n", temperature);
10066 }
10067
10068
10069 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10070 TG3_TEMP_SENSOR_OFFSET);
10071 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10072 TG3_TEMP_CAUTION_OFFSET);
10073 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10074 TG3_TEMP_MAX_OFFSET);
10075
10076 static struct attribute *tg3_attributes[] = {
10077 &sensor_dev_attr_temp1_input.dev_attr.attr,
10078 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10079 &sensor_dev_attr_temp1_max.dev_attr.attr,
10080 NULL
10081 };
10082
10083 static const struct attribute_group tg3_group = {
10084 .attrs = tg3_attributes,
10085 };
10086
10087 static void tg3_hwmon_close(struct tg3 *tp)
10088 {
10089 if (tp->hwmon_dev) {
10090 hwmon_device_unregister(tp->hwmon_dev);
10091 tp->hwmon_dev = NULL;
10092 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10093 }
10094 }
10095
10096 static void tg3_hwmon_open(struct tg3 *tp)
10097 {
10098 int i, err;
10099 u32 size = 0;
10100 struct pci_dev *pdev = tp->pdev;
10101 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10102
10103 tg3_sd_scan_scratchpad(tp, ocirs);
10104
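/* Sum the header and data lengths of all active sensor records;
 * if none are present there is nothing to expose through hwmon.
 */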
10105 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10106 if (!ocirs[i].src_data_length)
10107 continue;
10108
10109 size += ocirs[i].src_hdr_length;
10110 size += ocirs[i].src_data_length;
10111 }
10112
10113 if (!size)
10114 return;
10115
10116 /* Register hwmon sysfs hooks */
10117 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10118 if (err) {
10119 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10120 return;
10121 }
10122
10123 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10124 if (IS_ERR(tp->hwmon_dev)) {
10125 tp->hwmon_dev = NULL;
10126 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10127 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10128 }
10129 }
10130
10131
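/* Accumulate a 32-bit hardware counter into a 64-bit software counter,
 * bumping the high word whenever the low word wraps.
 */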
10132 #define TG3_STAT_ADD32(PSTAT, REG) \
10133 do { u32 __val = tr32(REG); \
10134 (PSTAT)->low += __val; \
10135 if ((PSTAT)->low < __val) \
10136 (PSTAT)->high += 1; \
10137 } while (0)
10138
10139 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10140 {
10141 struct tg3_hw_stats *sp = tp->hw_stats;
10142
10143 if (!tp->link_up)
10144 return;
10145
10146 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10147 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10148 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10149 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10150 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10151 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10152 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10153 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10154 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10155 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10156 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10157 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10158 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
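/* Once more packets have been sent than there are RDMA channels,
 * the 5719 TX length workaround is no longer needed; undo it.
 */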
10159 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10160 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10161 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10162 u32 val;
10163
10164 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10165 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10166 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10167 tg3_flag_clear(tp, 5719_RDMA_BUG);
10168 }
10169
10170 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10171 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10172 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10173 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10174 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10175 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10176 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10177 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10178 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10179 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10180 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10181 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10182 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10183 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10184
10185 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10186 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10187 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10188 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10189 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10190 } else {
10191 u32 val = tr32(HOSTCC_FLOW_ATTN);
10192 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10193 if (val) {
10194 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10195 sp->rx_discards.low += val;
10196 if (sp->rx_discards.low < val)
10197 sp->rx_discards.high += 1;
10198 }
10199 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10200 }
10201 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10202 }
10203
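/* Work around occasional dropped MSIs: if a vector still has work
 * pending but its consumer indices have not moved since the last
 * check, invoke the MSI handler by hand.
 */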
10204 static void tg3_chk_missed_msi(struct tg3 *tp)
10205 {
10206 u32 i;
10207
10208 for (i = 0; i < tp->irq_cnt; i++) {
10209 struct tg3_napi *tnapi = &tp->napi[i];
10210
10211 if (tg3_has_work(tnapi)) {
10212 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10213 tnapi->last_tx_cons == tnapi->tx_cons) {
10214 if (tnapi->chk_msi_cnt < 1) {
10215 tnapi->chk_msi_cnt++;
10216 return;
10217 }
10218 tg3_msi(0, tnapi);
10219 }
10220 }
10221 tnapi->chk_msi_cnt = 0;
10222 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10223 tnapi->last_tx_cons = tnapi->tx_cons;
10224 }
10225 }
10226
10227 static void tg3_timer(unsigned long __opaque)
10228 {
10229 struct tg3 *tp = (struct tg3 *) __opaque;
10230
10231 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10232 goto restart_timer;
10233
10234 spin_lock(&tp->lock);
10235
10236 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10237 tg3_flag(tp, 57765_CLASS))
10238 tg3_chk_missed_msi(tp);
10239
10240 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10241 /* BCM4785: Flush posted writes from GbE to host memory. */
10242 tr32(HOSTCC_MODE);
10243 }
10244
10245 if (!tg3_flag(tp, TAGGED_STATUS)) {
10246 /* All of this garbage is because, when using non-tagged
10247 * IRQ status, the mailbox/status_block protocol the chip
10248 * uses to communicate with the CPU is race prone.
10249 */
10250 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10251 tw32(GRC_LOCAL_CTRL,
10252 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10253 } else {
10254 tw32(HOSTCC_MODE, tp->coalesce_mode |
10255 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10256 }
10257
10258 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10259 spin_unlock(&tp->lock);
10260 tg3_reset_task_schedule(tp);
10261 goto restart_timer;
10262 }
10263 }
10264
10265 /* This part only runs once per second. */
10266 if (!--tp->timer_counter) {
10267 if (tg3_flag(tp, 5705_PLUS))
10268 tg3_periodic_fetch_stats(tp);
10269
10270 if (tp->setlpicnt && !--tp->setlpicnt)
10271 tg3_phy_eee_enable(tp);
10272
10273 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10274 u32 mac_stat;
10275 int phy_event;
10276
10277 mac_stat = tr32(MAC_STATUS);
10278
10279 phy_event = 0;
10280 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10281 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10282 phy_event = 1;
10283 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10284 phy_event = 1;
10285
10286 if (phy_event)
10287 tg3_setup_phy(tp, 0);
10288 } else if (tg3_flag(tp, POLL_SERDES)) {
10289 u32 mac_stat = tr32(MAC_STATUS);
10290 int need_setup = 0;
10291
10292 if (tp->link_up &&
10293 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10294 need_setup = 1;
10295 }
10296 if (!tp->link_up &&
10297 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10298 MAC_STATUS_SIGNAL_DET))) {
10299 need_setup = 1;
10300 }
10301 if (need_setup) {
10302 if (!tp->serdes_counter) {
10303 tw32_f(MAC_MODE,
10304 (tp->mac_mode &
10305 ~MAC_MODE_PORT_MODE_MASK));
10306 udelay(40);
10307 tw32_f(MAC_MODE, tp->mac_mode);
10308 udelay(40);
10309 }
10310 tg3_setup_phy(tp, 0);
10311 }
10312 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10313 tg3_flag(tp, 5780_CLASS)) {
10314 tg3_serdes_parallel_detect(tp);
10315 }
10316
10317 tp->timer_counter = tp->timer_multiplier;
10318 }
10319
10320 /* Heartbeat is only sent once every 2 seconds.
10321 *
10322 * The heartbeat is to tell the ASF firmware that the host
10323 * driver is still alive. In the event that the OS crashes,
10324 * ASF needs to reset the hardware to free up the FIFO space
10325 * that may be filled with rx packets destined for the host.
10326 * If the FIFO is full, ASF will no longer function properly.
10327 *
10328 * Unintended resets have been reported on real time kernels
10329 * where the timer doesn't run on time. Netpoll will have the
10330 * same problem.
10331 *
10332 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10333 * to check the ring condition when the heartbeat is expiring
10334 * before doing the reset. This will prevent most unintended
10335 * resets.
10336 */
10337 if (!--tp->asf_counter) {
10338 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10339 tg3_wait_for_event_ack(tp);
10340
10341 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10342 FWCMD_NICDRV_ALIVE3);
10343 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10344 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10345 TG3_FW_UPDATE_TIMEOUT_SEC);
10346
10347 tg3_generate_fw_event(tp);
10348 }
10349 tp->asf_counter = tp->asf_multiplier;
10350 }
10351
10352 spin_unlock(&tp->lock);
10353
10354 restart_timer:
10355 tp->timer.expires = jiffies + tp->timer_offset;
10356 add_timer(&tp->timer);
10357 }
10358
10359 static void tg3_timer_init(struct tg3 *tp)
10360 {
10361 if (tg3_flag(tp, TAGGED_STATUS) &&
10362 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10363 !tg3_flag(tp, 57765_CLASS))
10364 tp->timer_offset = HZ;
10365 else
10366 tp->timer_offset = HZ / 10;
10367
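/* The timer fires every timer_offset jiffies; the multipliers convert
 * that rate into the once-per-second stats interval and the ASF
 * heartbeat interval.
 */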
10368 BUG_ON(tp->timer_offset > HZ);
10369
10370 tp->timer_multiplier = (HZ / tp->timer_offset);
10371 tp->asf_multiplier = (HZ / tp->timer_offset) *
10372 TG3_FW_UPDATE_FREQ_SEC;
10373
10374 init_timer(&tp->timer);
10375 tp->timer.data = (unsigned long) tp;
10376 tp->timer.function = tg3_timer;
10377 }
10378
10379 static void tg3_timer_start(struct tg3 *tp)
10380 {
10381 tp->asf_counter = tp->asf_multiplier;
10382 tp->timer_counter = tp->timer_multiplier;
10383
10384 tp->timer.expires = jiffies + tp->timer_offset;
10385 add_timer(&tp->timer);
10386 }
10387
10388 static void tg3_timer_stop(struct tg3 *tp)
10389 {
10390 del_timer_sync(&tp->timer);
10391 }
10392
10393 /* Restart hardware after configuration changes, self-test, etc.
10394 * Invoked with tp->lock held.
10395 */
10396 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10397 __releases(tp->lock)
10398 __acquires(tp->lock)
10399 {
10400 int err;
10401
10402 err = tg3_init_hw(tp, reset_phy);
10403 if (err) {
10404 netdev_err(tp->dev,
10405 "Failed to re-initialize device, aborting\n");
10406 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10407 tg3_full_unlock(tp);
10408 tg3_timer_stop(tp);
10409 tp->irq_sync = 0;
10410 tg3_napi_enable(tp);
10411 dev_close(tp->dev);
10412 tg3_full_lock(tp, 0);
10413 }
10414 return err;
10415 }
10416
10417 static void tg3_reset_task(struct work_struct *work)
10418 {
10419 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10420 int err;
10421
10422 tg3_full_lock(tp, 0);
10423
10424 if (!netif_running(tp->dev)) {
10425 tg3_flag_clear(tp, RESET_TASK_PENDING);
10426 tg3_full_unlock(tp);
10427 return;
10428 }
10429
10430 tg3_full_unlock(tp);
10431
10432 tg3_phy_stop(tp);
10433
10434 tg3_netif_stop(tp);
10435
10436 tg3_full_lock(tp, 1);
10437
10438 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10439 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10440 tp->write32_rx_mbox = tg3_write_flush_reg32;
10441 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10442 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10443 }
10444
10445 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10446 err = tg3_init_hw(tp, 1);
10447 if (err)
10448 goto out;
10449
10450 tg3_netif_start(tp);
10451
10452 out:
10453 tg3_full_unlock(tp);
10454
10455 if (!err)
10456 tg3_phy_start(tp);
10457
10458 tg3_flag_clear(tp, RESET_TASK_PENDING);
10459 }
10460
10461 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10462 {
10463 irq_handler_t fn;
10464 unsigned long flags;
10465 char *name;
10466 struct tg3_napi *tnapi = &tp->napi[irq_num];
10467
10468 if (tp->irq_cnt == 1)
10469 name = tp->dev->name;
10470 else {
10471 name = &tnapi->irq_lbl[0];
10472 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10473 name[IFNAMSIZ-1] = 0;
10474 }
10475
10476 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10477 fn = tg3_msi;
10478 if (tg3_flag(tp, 1SHOT_MSI))
10479 fn = tg3_msi_1shot;
10480 flags = 0;
10481 } else {
10482 fn = tg3_interrupt;
10483 if (tg3_flag(tp, TAGGED_STATUS))
10484 fn = tg3_interrupt_tagged;
10485 flags = IRQF_SHARED;
10486 }
10487
10488 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10489 }
10490
10491 static int tg3_test_interrupt(struct tg3 *tp)
10492 {
10493 struct tg3_napi *tnapi = &tp->napi[0];
10494 struct net_device *dev = tp->dev;
10495 int err, i, intr_ok = 0;
10496 u32 val;
10497
10498 if (!netif_running(dev))
10499 return -ENODEV;
10500
10501 tg3_disable_ints(tp);
10502
10503 free_irq(tnapi->irq_vec, tnapi);
10504
10505 /*
10506 * Turn off MSI one shot mode. Otherwise this test has no
10507 * observable way to know whether the interrupt was delivered.
10508 */
10509 if (tg3_flag(tp, 57765_PLUS)) {
10510 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10511 tw32(MSGINT_MODE, val);
10512 }
10513
10514 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10515 IRQF_SHARED, dev->name, tnapi);
10516 if (err)
10517 return err;
10518
10519 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10520 tg3_enable_ints(tp);
10521
10522 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10523 tnapi->coal_now);
10524
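/* Poll for up to ~50ms for evidence that the test interrupt fired:
 * either the interrupt mailbox is non-zero or the ISR has masked
 * PCI interrupts.
 */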
10525 for (i = 0; i < 5; i++) {
10526 u32 int_mbox, misc_host_ctrl;
10527
10528 int_mbox = tr32_mailbox(tnapi->int_mbox);
10529 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10530
10531 if ((int_mbox != 0) ||
10532 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10533 intr_ok = 1;
10534 break;
10535 }
10536
10537 if (tg3_flag(tp, 57765_PLUS) &&
10538 tnapi->hw_status->status_tag != tnapi->last_tag)
10539 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10540
10541 msleep(10);
10542 }
10543
10544 tg3_disable_ints(tp);
10545
10546 free_irq(tnapi->irq_vec, tnapi);
10547
10548 err = tg3_request_irq(tp, 0);
10549
10550 if (err)
10551 return err;
10552
10553 if (intr_ok) {
10554 /* Reenable MSI one shot mode. */
10555 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10556 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10557 tw32(MSGINT_MODE, val);
10558 }
10559 return 0;
10560 }
10561
10562 return -EIO;
10563 }
10564
10565 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
10566 * is successfully restored.
10567 */
10568 static int tg3_test_msi(struct tg3 *tp)
10569 {
10570 int err;
10571 u16 pci_cmd;
10572
10573 if (!tg3_flag(tp, USING_MSI))
10574 return 0;
10575
10576 /* Turn off SERR reporting in case MSI terminates with Master
10577 * Abort.
10578 */
10579 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10580 pci_write_config_word(tp->pdev, PCI_COMMAND,
10581 pci_cmd & ~PCI_COMMAND_SERR);
10582
10583 err = tg3_test_interrupt(tp);
10584
10585 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10586
10587 if (!err)
10588 return 0;
10589
10590 /* other failures */
10591 if (err != -EIO)
10592 return err;
10593
10594 /* MSI test failed, go back to INTx mode */
10595 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10596 "to INTx mode. Please report this failure to the PCI "
10597 "maintainer and include system chipset information\n");
10598
10599 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10600
10601 pci_disable_msi(tp->pdev);
10602
10603 tg3_flag_clear(tp, USING_MSI);
10604 tp->napi[0].irq_vec = tp->pdev->irq;
10605
10606 err = tg3_request_irq(tp, 0);
10607 if (err)
10608 return err;
10609
10610 /* Need to reset the chip because the MSI cycle may have terminated
10611 * with Master Abort.
10612 */
10613 tg3_full_lock(tp, 1);
10614
10615 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10616 err = tg3_init_hw(tp, 1);
10617
10618 tg3_full_unlock(tp);
10619
10620 if (err)
10621 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10622
10623 return err;
10624 }
10625
10626 static int tg3_request_firmware(struct tg3 *tp)
10627 {
10628 const struct tg3_firmware_hdr *fw_hdr;
10629
10630 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10631 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10632 tp->fw_needed);
10633 return -ENOENT;
10634 }
10635
10636 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10637
10638 /* Firmware blob starts with version numbers, followed by
10639 * start address and _full_ length including BSS sections
10640 * (which must be longer than the actual data, of course).
10641 */
10642
10643 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
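/* The advertised length includes BSS, so it must be at least as
 * large as the data payload that follows the header.
 */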
10644 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10645 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10646 tp->fw_len, tp->fw_needed);
10647 release_firmware(tp->fw);
10648 tp->fw = NULL;
10649 return -EINVAL;
10650 }
10651
10652 /* We no longer need firmware; we have it. */
10653 tp->fw_needed = NULL;
10654 return 0;
10655 }
10656
10657 static u32 tg3_irq_count(struct tg3 *tp)
10658 {
10659 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10660
10661 if (irq_cnt > 1) {
10662 /* We want as many rx rings enabled as there are cpus.
10663 * In multiqueue MSI-X mode, the first MSI-X vector
10664 * only deals with link interrupts, etc, so we add
10665 * one to the number of vectors we are requesting.
10666 */
10667 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10668 }
10669
10670 return irq_cnt;
10671 }
10672
10673 static bool tg3_enable_msix(struct tg3 *tp)
10674 {
10675 int i, rc;
10676 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10677
10678 tp->txq_cnt = tp->txq_req;
10679 tp->rxq_cnt = tp->rxq_req;
10680 if (!tp->rxq_cnt)
10681 tp->rxq_cnt = netif_get_num_default_rss_queues();
10682 if (tp->rxq_cnt > tp->rxq_max)
10683 tp->rxq_cnt = tp->rxq_max;
10684
10685 /* Disable multiple TX rings by default. Simple round-robin hardware
10686 * scheduling of the TX rings can cause starvation of rings with
10687 * small packets when other rings have TSO or jumbo packets.
10688 */
10689 if (!tp->txq_req)
10690 tp->txq_cnt = 1;
10691
10692 tp->irq_cnt = tg3_irq_count(tp);
10693
10694 for (i = 0; i < tp->irq_max; i++) {
10695 msix_ent[i].entry = i;
10696 msix_ent[i].vector = 0;
10697 }
10698
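/* pci_enable_msix() returns 0 on success, a negative errno on failure,
 * or the number of vectors actually available; in the last case,
 * retry with the smaller count.
 */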
10699 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10700 if (rc < 0) {
10701 return false;
10702 } else if (rc != 0) {
10703 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10704 return false;
10705 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10706 tp->irq_cnt, rc);
10707 tp->irq_cnt = rc;
10708 tp->rxq_cnt = max(rc - 1, 1);
10709 if (tp->txq_cnt)
10710 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10711 }
10712
10713 for (i = 0; i < tp->irq_max; i++)
10714 tp->napi[i].irq_vec = msix_ent[i].vector;
10715
10716 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10717 pci_disable_msix(tp->pdev);
10718 return false;
10719 }
10720
10721 if (tp->irq_cnt == 1)
10722 return true;
10723
10724 tg3_flag_set(tp, ENABLE_RSS);
10725
10726 if (tp->txq_cnt > 1)
10727 tg3_flag_set(tp, ENABLE_TSS);
10728
10729 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10730
10731 return true;
10732 }
10733
10734 static void tg3_ints_init(struct tg3 *tp)
10735 {
10736 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10737 !tg3_flag(tp, TAGGED_STATUS)) {
10738 /* All MSI supporting chips should support tagged
10739 * status. Warn and fall back to legacy interrupts if not.
10740 */
10741 netdev_warn(tp->dev,
10742 "MSI without TAGGED_STATUS? Not using MSI\n");
10743 goto defcfg;
10744 }
10745
10746 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10747 tg3_flag_set(tp, USING_MSIX);
10748 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10749 tg3_flag_set(tp, USING_MSI);
10750
10751 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10752 u32 msi_mode = tr32(MSGINT_MODE);
10753 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10754 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10755 if (!tg3_flag(tp, 1SHOT_MSI))
10756 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10757 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10758 }
10759 defcfg:
10760 if (!tg3_flag(tp, USING_MSIX)) {
10761 tp->irq_cnt = 1;
10762 tp->napi[0].irq_vec = tp->pdev->irq;
10763 }
10764
10765 if (tp->irq_cnt == 1) {
10766 tp->txq_cnt = 1;
10767 tp->rxq_cnt = 1;
10768 netif_set_real_num_tx_queues(tp->dev, 1);
10769 netif_set_real_num_rx_queues(tp->dev, 1);
10770 }
10771 }
10772
10773 static void tg3_ints_fini(struct tg3 *tp)
10774 {
10775 if (tg3_flag(tp, USING_MSIX))
10776 pci_disable_msix(tp->pdev);
10777 else if (tg3_flag(tp, USING_MSI))
10778 pci_disable_msi(tp->pdev);
10779 tg3_flag_clear(tp, USING_MSI);
10780 tg3_flag_clear(tp, USING_MSIX);
10781 tg3_flag_clear(tp, ENABLE_RSS);
10782 tg3_flag_clear(tp, ENABLE_TSS);
10783 }
10784
10785 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10786 bool init)
10787 {
10788 struct net_device *dev = tp->dev;
10789 int i, err;
10790
10791 /*
10792 * Setup interrupts first so we know how
10793 * many NAPI resources to allocate
10794 */
10795 tg3_ints_init(tp);
10796
10797 tg3_rss_check_indir_tbl(tp);
10798
10799 /* The placement of this call is tied
10800 * to the setup and use of Host TX descriptors.
10801 */
10802 err = tg3_alloc_consistent(tp);
10803 if (err)
10804 goto err_out1;
10805
10806 tg3_napi_init(tp);
10807
10808 tg3_napi_enable(tp);
10809
10810 for (i = 0; i < tp->irq_cnt; i++) {
10811 struct tg3_napi *tnapi = &tp->napi[i];
10812 err = tg3_request_irq(tp, i);
10813 if (err) {
10814 for (i--; i >= 0; i--) {
10815 tnapi = &tp->napi[i];
10816 free_irq(tnapi->irq_vec, tnapi);
10817 }
10818 goto err_out2;
10819 }
10820 }
10821
10822 tg3_full_lock(tp, 0);
10823
10824 err = tg3_init_hw(tp, reset_phy);
10825 if (err) {
10826 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10827 tg3_free_rings(tp);
10828 }
10829
10830 tg3_full_unlock(tp);
10831
10832 if (err)
10833 goto err_out3;
10834
10835 if (test_irq && tg3_flag(tp, USING_MSI)) {
10836 err = tg3_test_msi(tp);
10837
10838 if (err) {
10839 tg3_full_lock(tp, 0);
10840 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10841 tg3_free_rings(tp);
10842 tg3_full_unlock(tp);
10843
10844 goto err_out2;
10845 }
10846
10847 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10848 u32 val = tr32(PCIE_TRANSACTION_CFG);
10849
10850 tw32(PCIE_TRANSACTION_CFG,
10851 val | PCIE_TRANS_CFG_1SHOT_MSI);
10852 }
10853 }
10854
10855 tg3_phy_start(tp);
10856
10857 tg3_hwmon_open(tp);
10858
10859 tg3_full_lock(tp, 0);
10860
10861 tg3_timer_start(tp);
10862 tg3_flag_set(tp, INIT_COMPLETE);
10863 tg3_enable_ints(tp);
10864
10865 if (init)
10866 tg3_ptp_init(tp);
10867 else
10868 tg3_ptp_resume(tp);
10869
10870
10871 tg3_full_unlock(tp);
10872
10873 netif_tx_start_all_queues(dev);
10874
10875 /*
10876 * Reset the loopback feature if it was turned on while the device was
10877 * down, to make sure it is installed properly now.
10878 */
10879 if (dev->features & NETIF_F_LOOPBACK)
10880 tg3_set_loopback(dev, dev->features);
10881
10882 return 0;
10883
10884 err_out3:
10885 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10886 struct tg3_napi *tnapi = &tp->napi[i];
10887 free_irq(tnapi->irq_vec, tnapi);
10888 }
10889
10890 err_out2:
10891 tg3_napi_disable(tp);
10892 tg3_napi_fini(tp);
10893 tg3_free_consistent(tp);
10894
10895 err_out1:
10896 tg3_ints_fini(tp);
10897
10898 return err;
10899 }
10900
10901 static void tg3_stop(struct tg3 *tp)
10902 {
10903 int i;
10904
10905 tg3_reset_task_cancel(tp);
10906 tg3_netif_stop(tp);
10907
10908 tg3_timer_stop(tp);
10909
10910 tg3_hwmon_close(tp);
10911
10912 tg3_phy_stop(tp);
10913
10914 tg3_full_lock(tp, 1);
10915
10916 tg3_disable_ints(tp);
10917
10918 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10919 tg3_free_rings(tp);
10920 tg3_flag_clear(tp, INIT_COMPLETE);
10921
10922 tg3_full_unlock(tp);
10923
10924 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10925 struct tg3_napi *tnapi = &tp->napi[i];
10926 free_irq(tnapi->irq_vec, tnapi);
10927 }
10928
10929 tg3_ints_fini(tp);
10930
10931 tg3_napi_fini(tp);
10932
10933 tg3_free_consistent(tp);
10934 }
10935
10936 static int tg3_open(struct net_device *dev)
10937 {
10938 struct tg3 *tp = netdev_priv(dev);
10939 int err;
10940
10941 if (tp->fw_needed) {
10942 err = tg3_request_firmware(tp);
10943 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10944 if (err)
10945 return err;
10946 } else if (err) {
10947 netdev_warn(tp->dev, "TSO capability disabled\n");
10948 tg3_flag_clear(tp, TSO_CAPABLE);
10949 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10950 netdev_notice(tp->dev, "TSO capability restored\n");
10951 tg3_flag_set(tp, TSO_CAPABLE);
10952 }
10953 }
10954
10955 tg3_carrier_off(tp);
10956
10957 err = tg3_power_up(tp);
10958 if (err)
10959 return err;
10960
10961 tg3_full_lock(tp, 0);
10962
10963 tg3_disable_ints(tp);
10964 tg3_flag_clear(tp, INIT_COMPLETE);
10965
10966 tg3_full_unlock(tp);
10967
10968 err = tg3_start(tp, true, true, true);
10969 if (err) {
10970 tg3_frob_aux_power(tp, false);
10971 pci_set_power_state(tp->pdev, PCI_D3hot);
10972 }
10973
10974 if (tg3_flag(tp, PTP_CAPABLE)) {
10975 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10976 &tp->pdev->dev);
10977 if (IS_ERR(tp->ptp_clock))
10978 tp->ptp_clock = NULL;
10979 }
10980
10981 return err;
10982 }
10983
10984 static int tg3_close(struct net_device *dev)
10985 {
10986 struct tg3 *tp = netdev_priv(dev);
10987
10988 tg3_ptp_fini(tp);
10989
10990 tg3_stop(tp);
10991
10992 /* Clear stats across close / open calls */
10993 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10994 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10995
10996 tg3_power_down(tp);
10997
10998 tg3_carrier_off(tp);
10999
11000 return 0;
11001 }
11002
11003 static inline u64 get_stat64(tg3_stat64_t *val)
11004 {
11005 return ((u64)val->high << 32) | ((u64)val->low);
11006 }
11007
11008 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11009 {
11010 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11011
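/* 5700/5701 copper devices keep the CRC error count in the PHY;
 * latch it via MII_TG3_TEST1 and accumulate it in software.
 * Everything else reports it in the MAC statistics block.
 */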
11012 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11013 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11014 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11015 u32 val;
11016
11017 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11018 tg3_writephy(tp, MII_TG3_TEST1,
11019 val | MII_TG3_TEST1_CRC_EN);
11020 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11021 } else
11022 val = 0;
11023
11024 tp->phy_crc_errors += val;
11025
11026 return tp->phy_crc_errors;
11027 }
11028
11029 return get_stat64(&hw_stats->rx_fcs_errors);
11030 }
11031
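/* Accumulate a 64-bit hardware statistic on top of the snapshot saved
 * across the last close/open cycle.
 */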
11032 #define ESTAT_ADD(member) \
11033 estats->member = old_estats->member + \
11034 get_stat64(&hw_stats->member)
11035
11036 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11037 {
11038 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11039 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11040
11041 ESTAT_ADD(rx_octets);
11042 ESTAT_ADD(rx_fragments);
11043 ESTAT_ADD(rx_ucast_packets);
11044 ESTAT_ADD(rx_mcast_packets);
11045 ESTAT_ADD(rx_bcast_packets);
11046 ESTAT_ADD(rx_fcs_errors);
11047 ESTAT_ADD(rx_align_errors);
11048 ESTAT_ADD(rx_xon_pause_rcvd);
11049 ESTAT_ADD(rx_xoff_pause_rcvd);
11050 ESTAT_ADD(rx_mac_ctrl_rcvd);
11051 ESTAT_ADD(rx_xoff_entered);
11052 ESTAT_ADD(rx_frame_too_long_errors);
11053 ESTAT_ADD(rx_jabbers);
11054 ESTAT_ADD(rx_undersize_packets);
11055 ESTAT_ADD(rx_in_length_errors);
11056 ESTAT_ADD(rx_out_length_errors);
11057 ESTAT_ADD(rx_64_or_less_octet_packets);
11058 ESTAT_ADD(rx_65_to_127_octet_packets);
11059 ESTAT_ADD(rx_128_to_255_octet_packets);
11060 ESTAT_ADD(rx_256_to_511_octet_packets);
11061 ESTAT_ADD(rx_512_to_1023_octet_packets);
11062 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11063 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11064 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11065 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11066 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11067
11068 ESTAT_ADD(tx_octets);
11069 ESTAT_ADD(tx_collisions);
11070 ESTAT_ADD(tx_xon_sent);
11071 ESTAT_ADD(tx_xoff_sent);
11072 ESTAT_ADD(tx_flow_control);
11073 ESTAT_ADD(tx_mac_errors);
11074 ESTAT_ADD(tx_single_collisions);
11075 ESTAT_ADD(tx_mult_collisions);
11076 ESTAT_ADD(tx_deferred);
11077 ESTAT_ADD(tx_excessive_collisions);
11078 ESTAT_ADD(tx_late_collisions);
11079 ESTAT_ADD(tx_collide_2times);
11080 ESTAT_ADD(tx_collide_3times);
11081 ESTAT_ADD(tx_collide_4times);
11082 ESTAT_ADD(tx_collide_5times);
11083 ESTAT_ADD(tx_collide_6times);
11084 ESTAT_ADD(tx_collide_7times);
11085 ESTAT_ADD(tx_collide_8times);
11086 ESTAT_ADD(tx_collide_9times);
11087 ESTAT_ADD(tx_collide_10times);
11088 ESTAT_ADD(tx_collide_11times);
11089 ESTAT_ADD(tx_collide_12times);
11090 ESTAT_ADD(tx_collide_13times);
11091 ESTAT_ADD(tx_collide_14times);
11092 ESTAT_ADD(tx_collide_15times);
11093 ESTAT_ADD(tx_ucast_packets);
11094 ESTAT_ADD(tx_mcast_packets);
11095 ESTAT_ADD(tx_bcast_packets);
11096 ESTAT_ADD(tx_carrier_sense_errors);
11097 ESTAT_ADD(tx_discards);
11098 ESTAT_ADD(tx_errors);
11099
11100 ESTAT_ADD(dma_writeq_full);
11101 ESTAT_ADD(dma_write_prioq_full);
11102 ESTAT_ADD(rxbds_empty);
11103 ESTAT_ADD(rx_discards);
11104 ESTAT_ADD(rx_errors);
11105 ESTAT_ADD(rx_threshold_hit);
11106
11107 ESTAT_ADD(dma_readq_full);
11108 ESTAT_ADD(dma_read_prioq_full);
11109 ESTAT_ADD(tx_comp_queue_full);
11110
11111 ESTAT_ADD(ring_set_send_prod_index);
11112 ESTAT_ADD(ring_status_update);
11113 ESTAT_ADD(nic_irqs);
11114 ESTAT_ADD(nic_avoided_irqs);
11115 ESTAT_ADD(nic_tx_threshold_hit);
11116
11117 ESTAT_ADD(mbuf_lwm_thresh_hit);
11118 }
11119
11120 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11121 {
11122 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11123 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11124
11125 stats->rx_packets = old_stats->rx_packets +
11126 get_stat64(&hw_stats->rx_ucast_packets) +
11127 get_stat64(&hw_stats->rx_mcast_packets) +
11128 get_stat64(&hw_stats->rx_bcast_packets);
11129
11130 stats->tx_packets = old_stats->tx_packets +
11131 get_stat64(&hw_stats->tx_ucast_packets) +
11132 get_stat64(&hw_stats->tx_mcast_packets) +
11133 get_stat64(&hw_stats->tx_bcast_packets);
11134
11135 stats->rx_bytes = old_stats->rx_bytes +
11136 get_stat64(&hw_stats->rx_octets);
11137 stats->tx_bytes = old_stats->tx_bytes +
11138 get_stat64(&hw_stats->tx_octets);
11139
11140 stats->rx_errors = old_stats->rx_errors +
11141 get_stat64(&hw_stats->rx_errors);
11142 stats->tx_errors = old_stats->tx_errors +
11143 get_stat64(&hw_stats->tx_errors) +
11144 get_stat64(&hw_stats->tx_mac_errors) +
11145 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11146 get_stat64(&hw_stats->tx_discards);
11147
11148 stats->multicast = old_stats->multicast +
11149 get_stat64(&hw_stats->rx_mcast_packets);
11150 stats->collisions = old_stats->collisions +
11151 get_stat64(&hw_stats->tx_collisions);
11152
11153 stats->rx_length_errors = old_stats->rx_length_errors +
11154 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11155 get_stat64(&hw_stats->rx_undersize_packets);
11156
11157 stats->rx_over_errors = old_stats->rx_over_errors +
11158 get_stat64(&hw_stats->rxbds_empty);
11159 stats->rx_frame_errors = old_stats->rx_frame_errors +
11160 get_stat64(&hw_stats->rx_align_errors);
11161 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11162 get_stat64(&hw_stats->tx_discards);
11163 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11164 get_stat64(&hw_stats->tx_carrier_sense_errors);
11165
11166 stats->rx_crc_errors = old_stats->rx_crc_errors +
11167 tg3_calc_crc_errors(tp);
11168
11169 stats->rx_missed_errors = old_stats->rx_missed_errors +
11170 get_stat64(&hw_stats->rx_discards);
11171
11172 stats->rx_dropped = tp->rx_dropped;
11173 stats->tx_dropped = tp->tx_dropped;
11174 }
11175
11176 static int tg3_get_regs_len(struct net_device *dev)
11177 {
11178 return TG3_REG_BLK_SIZE;
11179 }
11180
11181 static void tg3_get_regs(struct net_device *dev,
11182 struct ethtool_regs *regs, void *_p)
11183 {
11184 struct tg3 *tp = netdev_priv(dev);
11185
11186 regs->version = 0;
11187
11188 memset(_p, 0, TG3_REG_BLK_SIZE);
11189
11190 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11191 return;
11192
11193 tg3_full_lock(tp, 0);
11194
11195 tg3_dump_legacy_regs(tp, (u32 *)_p);
11196
11197 tg3_full_unlock(tp);
11198 }
11199
11200 static int tg3_get_eeprom_len(struct net_device *dev)
11201 {
11202 struct tg3 *tp = netdev_priv(dev);
11203
11204 return tp->nvram_size;
11205 }
11206
11207 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11208 {
11209 struct tg3 *tp = netdev_priv(dev);
11210 int ret;
11211 u8 *pd;
11212 u32 i, offset, len, b_offset, b_count;
11213 __be32 val;
11214
11215 if (tg3_flag(tp, NO_NVRAM))
11216 return -EINVAL;
11217
11218 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11219 return -EAGAIN;
11220
11221 offset = eeprom->offset;
11222 len = eeprom->len;
11223 eeprom->len = 0;
11224
11225 eeprom->magic = TG3_EEPROM_MAGIC;
11226
11227 if (offset & 3) {
11228 /* adjustments to start on required 4 byte boundary */
11229 b_offset = offset & 3;
11230 b_count = 4 - b_offset;
11231 if (b_count > len) {
11232 /* i.e. offset=1 len=2 */
11233 b_count = len;
11234 }
11235 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11236 if (ret)
11237 return ret;
11238 memcpy(data, ((char *)&val) + b_offset, b_count);
11239 len -= b_count;
11240 offset += b_count;
11241 eeprom->len += b_count;
11242 }
11243
11244 /* read bytes up to the last 4 byte boundary */
11245 pd = &data[eeprom->len];
11246 for (i = 0; i < (len - (len & 3)); i += 4) {
11247 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11248 if (ret) {
11249 eeprom->len += i;
11250 return ret;
11251 }
11252 memcpy(pd + i, &val, 4);
11253 }
11254 eeprom->len += i;
11255
11256 if (len & 3) {
11257 /* read last bytes not ending on 4 byte boundary */
11258 pd = &data[eeprom->len];
11259 b_count = len & 3;
11260 b_offset = offset + len - b_count;
11261 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11262 if (ret)
11263 return ret;
11264 memcpy(pd, &val, b_count);
11265 eeprom->len += b_count;
11266 }
11267 return 0;
11268 }
11269
11270 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11271 {
11272 struct tg3 *tp = netdev_priv(dev);
11273 int ret;
11274 u32 offset, len, b_offset, odd_len;
11275 u8 *buf;
11276 __be32 start, end;
11277
11278 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11279 return -EAGAIN;
11280
11281 if (tg3_flag(tp, NO_NVRAM) ||
11282 eeprom->magic != TG3_EEPROM_MAGIC)
11283 return -EINVAL;
11284
11285 offset = eeprom->offset;
11286 len = eeprom->len;
11287
11288 if ((b_offset = (offset & 3))) {
11289 /* adjustments to start on required 4 byte boundary */
11290 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11291 if (ret)
11292 return ret;
11293 len += b_offset;
11294 offset &= ~3;
11295 if (len < 4)
11296 len = 4;
11297 }
11298
11299 odd_len = 0;
11300 if (len & 3) {
11301 /* adjustments to end on required 4 byte boundary */
11302 odd_len = 1;
11303 len = (len + 3) & ~3;
11304 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11305 if (ret)
11306 return ret;
11307 }
11308
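/* If either end of the write is unaligned, stage it in a scratch
 * buffer that merges the preserved head/tail words with the
 * caller's data.
 */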
11309 buf = data;
11310 if (b_offset || odd_len) {
11311 buf = kmalloc(len, GFP_KERNEL);
11312 if (!buf)
11313 return -ENOMEM;
11314 if (b_offset)
11315 memcpy(buf, &start, 4);
11316 if (odd_len)
11317 memcpy(buf+len-4, &end, 4);
11318 memcpy(buf + b_offset, data, eeprom->len);
11319 }
11320
11321 ret = tg3_nvram_write_block(tp, offset, len, buf);
11322
11323 if (buf != data)
11324 kfree(buf);
11325
11326 return ret;
11327 }
11328
11329 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11330 {
11331 struct tg3 *tp = netdev_priv(dev);
11332
11333 if (tg3_flag(tp, USE_PHYLIB)) {
11334 struct phy_device *phydev;
11335 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11336 return -EAGAIN;
11337 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11338 return phy_ethtool_gset(phydev, cmd);
11339 }
11340
11341 cmd->supported = (SUPPORTED_Autoneg);
11342
11343 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11344 cmd->supported |= (SUPPORTED_1000baseT_Half |
11345 SUPPORTED_1000baseT_Full);
11346
11347 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11348 cmd->supported |= (SUPPORTED_100baseT_Half |
11349 SUPPORTED_100baseT_Full |
11350 SUPPORTED_10baseT_Half |
11351 SUPPORTED_10baseT_Full |
11352 SUPPORTED_TP);
11353 cmd->port = PORT_TP;
11354 } else {
11355 cmd->supported |= SUPPORTED_FIBRE;
11356 cmd->port = PORT_FIBRE;
11357 }
11358
11359 cmd->advertising = tp->link_config.advertising;
11360 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11361 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11362 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11363 cmd->advertising |= ADVERTISED_Pause;
11364 } else {
11365 cmd->advertising |= ADVERTISED_Pause |
11366 ADVERTISED_Asym_Pause;
11367 }
11368 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11369 cmd->advertising |= ADVERTISED_Asym_Pause;
11370 }
11371 }
11372 if (netif_running(dev) && tp->link_up) {
11373 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11374 cmd->duplex = tp->link_config.active_duplex;
11375 cmd->lp_advertising = tp->link_config.rmt_adv;
11376 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11377 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11378 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11379 else
11380 cmd->eth_tp_mdix = ETH_TP_MDI;
11381 }
11382 } else {
11383 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11384 cmd->duplex = DUPLEX_UNKNOWN;
11385 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11386 }
11387 cmd->phy_address = tp->phy_addr;
11388 cmd->transceiver = XCVR_INTERNAL;
11389 cmd->autoneg = tp->link_config.autoneg;
11390 cmd->maxtxpkt = 0;
11391 cmd->maxrxpkt = 0;
11392 return 0;
11393 }
11394
11395 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11396 {
11397 struct tg3 *tp = netdev_priv(dev);
11398 u32 speed = ethtool_cmd_speed(cmd);
11399
11400 if (tg3_flag(tp, USE_PHYLIB)) {
11401 struct phy_device *phydev;
11402 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11403 return -EAGAIN;
11404 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11405 return phy_ethtool_sset(phydev, cmd);
11406 }
11407
11408 if (cmd->autoneg != AUTONEG_ENABLE &&
11409 cmd->autoneg != AUTONEG_DISABLE)
11410 return -EINVAL;
11411
11412 if (cmd->autoneg == AUTONEG_DISABLE &&
11413 cmd->duplex != DUPLEX_FULL &&
11414 cmd->duplex != DUPLEX_HALF)
11415 return -EINVAL;
11416
11417 if (cmd->autoneg == AUTONEG_ENABLE) {
11418 u32 mask = ADVERTISED_Autoneg |
11419 ADVERTISED_Pause |
11420 ADVERTISED_Asym_Pause;
11421
11422 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11423 mask |= ADVERTISED_1000baseT_Half |
11424 ADVERTISED_1000baseT_Full;
11425
11426 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11427 mask |= ADVERTISED_100baseT_Half |
11428 ADVERTISED_100baseT_Full |
11429 ADVERTISED_10baseT_Half |
11430 ADVERTISED_10baseT_Full |
11431 ADVERTISED_TP;
11432 else
11433 mask |= ADVERTISED_FIBRE;
11434
11435 if (cmd->advertising & ~mask)
11436 return -EINVAL;
11437
11438 mask &= (ADVERTISED_1000baseT_Half |
11439 ADVERTISED_1000baseT_Full |
11440 ADVERTISED_100baseT_Half |
11441 ADVERTISED_100baseT_Full |
11442 ADVERTISED_10baseT_Half |
11443 ADVERTISED_10baseT_Full);
11444
11445 cmd->advertising &= mask;
11446 } else {
11447 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11448 if (speed != SPEED_1000)
11449 return -EINVAL;
11450
11451 if (cmd->duplex != DUPLEX_FULL)
11452 return -EINVAL;
11453 } else {
11454 if (speed != SPEED_100 &&
11455 speed != SPEED_10)
11456 return -EINVAL;
11457 }
11458 }
11459
11460 tg3_full_lock(tp, 0);
11461
11462 tp->link_config.autoneg = cmd->autoneg;
11463 if (cmd->autoneg == AUTONEG_ENABLE) {
11464 tp->link_config.advertising = (cmd->advertising |
11465 ADVERTISED_Autoneg);
11466 tp->link_config.speed = SPEED_UNKNOWN;
11467 tp->link_config.duplex = DUPLEX_UNKNOWN;
11468 } else {
11469 tp->link_config.advertising = 0;
11470 tp->link_config.speed = speed;
11471 tp->link_config.duplex = cmd->duplex;
11472 }
11473
11474 if (netif_running(dev))
11475 tg3_setup_phy(tp, 1);
11476
11477 tg3_full_unlock(tp);
11478
11479 return 0;
11480 }
11481
11482 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11483 {
11484 struct tg3 *tp = netdev_priv(dev);
11485
11486 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11487 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11488 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11489 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11490 }
11491
11492 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11493 {
11494 struct tg3 *tp = netdev_priv(dev);
11495
11496 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11497 wol->supported = WAKE_MAGIC;
11498 else
11499 wol->supported = 0;
11500 wol->wolopts = 0;
11501 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11502 wol->wolopts = WAKE_MAGIC;
11503 memset(&wol->sopass, 0, sizeof(wol->sopass));
11504 }
11505
11506 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11507 {
11508 struct tg3 *tp = netdev_priv(dev);
11509 struct device *dp = &tp->pdev->dev;
11510
11511 if (wol->wolopts & ~WAKE_MAGIC)
11512 return -EINVAL;
11513 if ((wol->wolopts & WAKE_MAGIC) &&
11514 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11515 return -EINVAL;
11516
11517 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11518
11519 spin_lock_bh(&tp->lock);
11520 if (device_may_wakeup(dp))
11521 tg3_flag_set(tp, WOL_ENABLE);
11522 else
11523 tg3_flag_clear(tp, WOL_ENABLE);
11524 spin_unlock_bh(&tp->lock);
11525
11526 return 0;
11527 }
11528
11529 static u32 tg3_get_msglevel(struct net_device *dev)
11530 {
11531 struct tg3 *tp = netdev_priv(dev);
11532 return tp->msg_enable;
11533 }
11534
11535 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11536 {
11537 struct tg3 *tp = netdev_priv(dev);
11538 tp->msg_enable = value;
11539 }
11540
11541 static int tg3_nway_reset(struct net_device *dev)
11542 {
11543 struct tg3 *tp = netdev_priv(dev);
11544 int r;
11545
11546 if (!netif_running(dev))
11547 return -EAGAIN;
11548
11549 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11550 return -EINVAL;
11551
11552 if (tg3_flag(tp, USE_PHYLIB)) {
11553 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11554 return -EAGAIN;
11555 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11556 } else {
11557 u32 bmcr;
11558
11559 spin_lock_bh(&tp->lock);
11560 r = -EINVAL;
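/* Read BMCR twice; the first result is thrown away and only the
 * second, checked read is used.
 */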
11561 tg3_readphy(tp, MII_BMCR, &bmcr);
11562 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11563 ((bmcr & BMCR_ANENABLE) ||
11564 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11565 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11566 BMCR_ANENABLE);
11567 r = 0;
11568 }
11569 spin_unlock_bh(&tp->lock);
11570 }
11571
11572 return r;
11573 }
11574
11575 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11576 {
11577 struct tg3 *tp = netdev_priv(dev);
11578
11579 ering->rx_max_pending = tp->rx_std_ring_mask;
11580 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11581 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11582 else
11583 ering->rx_jumbo_max_pending = 0;
11584
11585 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11586
11587 ering->rx_pending = tp->rx_pending;
11588 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11589 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11590 else
11591 ering->rx_jumbo_pending = 0;
11592
11593 ering->tx_pending = tp->napi[0].tx_pending;
11594 }
11595
11596 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11597 {
11598 struct tg3 *tp = netdev_priv(dev);
11599 int i, irq_sync = 0, err = 0;
11600
11601 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11602 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11603 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11604 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11605 (tg3_flag(tp, TSO_BUG) &&
11606 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11607 return -EINVAL;
11608
11609 if (netif_running(dev)) {
11610 tg3_phy_stop(tp);
11611 tg3_netif_stop(tp);
11612 irq_sync = 1;
11613 }
11614
11615 tg3_full_lock(tp, irq_sync);
11616
11617 tp->rx_pending = ering->rx_pending;
11618
11619 if (tg3_flag(tp, MAX_RXPEND_64) &&
11620 tp->rx_pending > 63)
11621 tp->rx_pending = 63;
11622 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11623
11624 for (i = 0; i < tp->irq_max; i++)
11625 tp->napi[i].tx_pending = ering->tx_pending;
11626
11627 if (netif_running(dev)) {
11628 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11629 err = tg3_restart_hw(tp, 1);
11630 if (!err)
11631 tg3_netif_start(tp);
11632 }
11633
11634 tg3_full_unlock(tp);
11635
11636 if (irq_sync && !err)
11637 tg3_phy_start(tp);
11638
11639 return err;
11640 }
11641
11642 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11643 {
11644 struct tg3 *tp = netdev_priv(dev);
11645
11646 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11647
11648 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11649 epause->rx_pause = 1;
11650 else
11651 epause->rx_pause = 0;
11652
11653 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11654 epause->tx_pause = 1;
11655 else
11656 epause->tx_pause = 0;
11657 }
11658
11659 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11660 {
11661 struct tg3 *tp = netdev_priv(dev);
11662 int err = 0;
11663
11664 if (tg3_flag(tp, USE_PHYLIB)) {
11665 u32 newadv;
11666 struct phy_device *phydev;
11667
11668 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11669
11670 if (!(phydev->supported & SUPPORTED_Pause) ||
11671 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11672 (epause->rx_pause != epause->tx_pause)))
11673 return -EINVAL;
11674
11675 tp->link_config.flowctrl = 0;
11676 if (epause->rx_pause) {
11677 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11678
11679 if (epause->tx_pause) {
11680 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11681 newadv = ADVERTISED_Pause;
11682 } else
11683 newadv = ADVERTISED_Pause |
11684 ADVERTISED_Asym_Pause;
11685 } else if (epause->tx_pause) {
11686 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11687 newadv = ADVERTISED_Asym_Pause;
11688 } else
11689 newadv = 0;
11690
11691 if (epause->autoneg)
11692 tg3_flag_set(tp, PAUSE_AUTONEG);
11693 else
11694 tg3_flag_clear(tp, PAUSE_AUTONEG);
11695
11696 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11697 u32 oldadv = phydev->advertising &
11698 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11699 if (oldadv != newadv) {
11700 phydev->advertising &=
11701 ~(ADVERTISED_Pause |
11702 ADVERTISED_Asym_Pause);
11703 phydev->advertising |= newadv;
11704 if (phydev->autoneg) {
11705 /*
11706 * Always renegotiate the link to
11707 * inform our link partner of our
11708 * flow control settings, even if the
11709 * flow control is forced. Let
11710 * tg3_adjust_link() do the final
11711 * flow control setup.
11712 */
11713 return phy_start_aneg(phydev);
11714 }
11715 }
11716
11717 if (!epause->autoneg)
11718 tg3_setup_flow_control(tp, 0, 0);
11719 } else {
11720 tp->link_config.advertising &=
11721 ~(ADVERTISED_Pause |
11722 ADVERTISED_Asym_Pause);
11723 tp->link_config.advertising |= newadv;
11724 }
11725 } else {
11726 int irq_sync = 0;
11727
11728 if (netif_running(dev)) {
11729 tg3_netif_stop(tp);
11730 irq_sync = 1;
11731 }
11732
11733 tg3_full_lock(tp, irq_sync);
11734
11735 if (epause->autoneg)
11736 tg3_flag_set(tp, PAUSE_AUTONEG);
11737 else
11738 tg3_flag_clear(tp, PAUSE_AUTONEG);
11739 if (epause->rx_pause)
11740 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11741 else
11742 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11743 if (epause->tx_pause)
11744 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11745 else
11746 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11747
11748 if (netif_running(dev)) {
11749 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11750 err = tg3_restart_hw(tp, 1);
11751 if (!err)
11752 tg3_netif_start(tp);
11753 }
11754
11755 tg3_full_unlock(tp);
11756 }
11757
11758 return err;
11759 }
11760
11761 static int tg3_get_sset_count(struct net_device *dev, int sset)
11762 {
11763 switch (sset) {
11764 case ETH_SS_TEST:
11765 return TG3_NUM_TEST;
11766 case ETH_SS_STATS:
11767 return TG3_NUM_STATS;
11768 default:
11769 return -EOPNOTSUPP;
11770 }
11771 }
11772
11773 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11774 u32 *rules __always_unused)
11775 {
11776 struct tg3 *tp = netdev_priv(dev);
11777
11778 if (!tg3_flag(tp, SUPPORT_MSIX))
11779 return -EOPNOTSUPP;
11780
11781 switch (info->cmd) {
11782 case ETHTOOL_GRXRINGS:
11783 if (netif_running(tp->dev))
11784 info->data = tp->rxq_cnt;
11785 else {
11786 info->data = num_online_cpus();
11787 if (info->data > TG3_RSS_MAX_NUM_QS)
11788 info->data = TG3_RSS_MAX_NUM_QS;
11789 }
11790
11791 /* The first interrupt vector only
11792 * handles link interrupts.
11793 */
11794 info->data -= 1;
11795 return 0;
11796
11797 default:
11798 return -EOPNOTSUPP;
11799 }
11800 }
11801
11802 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11803 {
11804 u32 size = 0;
11805 struct tg3 *tp = netdev_priv(dev);
11806
11807 if (tg3_flag(tp, SUPPORT_MSIX))
11808 size = TG3_RSS_INDIR_TBL_SIZE;
11809
11810 return size;
11811 }
11812
11813 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11814 {
11815 struct tg3 *tp = netdev_priv(dev);
11816 int i;
11817
11818 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11819 indir[i] = tp->rss_ind_tbl[i];
11820
11821 return 0;
11822 }
11823
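/* The RSS indirection table maps the low-order bits of the flow hash
 * to an rx return ring.  Writing it is a two-step operation: cache the
 * new table in tp->rss_ind_tbl, then, if RSS is active, push it to the
 * hardware via tg3_rss_write_indir_tbl().
 */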
11824 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11825 {
11826 struct tg3 *tp = netdev_priv(dev);
11827 size_t i;
11828
11829 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11830 tp->rss_ind_tbl[i] = indir[i];
11831
11832 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11833 return 0;
11834
11835 /* It is legal to write the indirection
11836 * table while the device is running.
11837 */
11838 tg3_full_lock(tp, 0);
11839 tg3_rss_write_indir_tbl(tp);
11840 tg3_full_unlock(tp);
11841
11842 return 0;
11843 }
11844
11845 static void tg3_get_channels(struct net_device *dev,
11846 struct ethtool_channels *channel)
11847 {
11848 struct tg3 *tp = netdev_priv(dev);
11849 u32 deflt_qs = netif_get_num_default_rss_queues();
11850
11851 channel->max_rx = tp->rxq_max;
11852 channel->max_tx = tp->txq_max;
11853
11854 if (netif_running(dev)) {
11855 channel->rx_count = tp->rxq_cnt;
11856 channel->tx_count = tp->txq_cnt;
11857 } else {
11858 if (tp->rxq_req)
11859 channel->rx_count = tp->rxq_req;
11860 else
11861 channel->rx_count = min(deflt_qs, tp->rxq_max);
11862
11863 if (tp->txq_req)
11864 channel->tx_count = tp->txq_req;
11865 else
11866 channel->tx_count = min(deflt_qs, tp->txq_max);
11867 }
11868 }
11869
11870 static int tg3_set_channels(struct net_device *dev,
11871 struct ethtool_channels *channel)
11872 {
11873 struct tg3 *tp = netdev_priv(dev);
11874
11875 if (!tg3_flag(tp, SUPPORT_MSIX))
11876 return -EOPNOTSUPP;
11877
11878 if (channel->rx_count > tp->rxq_max ||
11879 channel->tx_count > tp->txq_max)
11880 return -EINVAL;
11881
11882 tp->rxq_req = channel->rx_count;
11883 tp->txq_req = channel->tx_count;
11884
11885 if (!netif_running(dev))
11886 return 0;
11887
11888 tg3_stop(tp);
11889
11890 tg3_carrier_off(tp);
11891
11892 tg3_start(tp, true, false, false);
11893
11894 return 0;
11895 }
11896
11897 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11898 {
11899 switch (stringset) {
11900 case ETH_SS_STATS:
11901 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11902 break;
11903 case ETH_SS_TEST:
11904 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11905 break;
11906 default:
11907 WARN_ON(1); /* unknown stringset; we need a WARN() with a message here */
Note: original line reads `WARN_ON(1); /* we need a WARN() */`; clarified below.
11908 break;
11909 }
11910 }
11911
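/* Implements ethtool's port-identify operation (ethtool -p).
 * Returning 1 from ETHTOOL_ID_ACTIVE asks the ethtool core to drive
 * one on/off blink cycle per second; ETHTOOL_ID_ON/OFF override the
 * LED state and ETHTOOL_ID_INACTIVE restores the saved tp->led_ctrl.
 */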
11912 static int tg3_set_phys_id(struct net_device *dev,
11913 enum ethtool_phys_id_state state)
11914 {
11915 struct tg3 *tp = netdev_priv(dev);
11916
11917 if (!netif_running(tp->dev))
11918 return -EAGAIN;
11919
11920 switch (state) {
11921 case ETHTOOL_ID_ACTIVE:
11922 return 1; /* cycle on/off once per second */
11923
11924 case ETHTOOL_ID_ON:
11925 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11926 LED_CTRL_1000MBPS_ON |
11927 LED_CTRL_100MBPS_ON |
11928 LED_CTRL_10MBPS_ON |
11929 LED_CTRL_TRAFFIC_OVERRIDE |
11930 LED_CTRL_TRAFFIC_BLINK |
11931 LED_CTRL_TRAFFIC_LED);
11932 break;
11933
11934 case ETHTOOL_ID_OFF:
11935 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11936 LED_CTRL_TRAFFIC_OVERRIDE);
11937 break;
11938
11939 case ETHTOOL_ID_INACTIVE:
11940 tw32(MAC_LED_CTRL, tp->led_ctrl);
11941 break;
11942 }
11943
11944 return 0;
11945 }
11946
11947 static void tg3_get_ethtool_stats(struct net_device *dev,
11948 struct ethtool_stats *estats, u64 *tmp_stats)
11949 {
11950 struct tg3 *tp = netdev_priv(dev);
11951
11952 if (tp->hw_stats)
11953 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11954 else
11955 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11956 }
11957
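/* Locate and read the VPD block.  Images with the standard TG3 magic
 * are scanned for an extended-VPD directory entry, falling back to the
 * fixed TG3_NVM_VPD_OFF region; any other image is read through the
 * PCI VPD capability (pci_read_vpd()) instead.
 */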
11958 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11959 {
11960 int i;
11961 __be32 *buf;
11962 u32 offset = 0, len = 0;
11963 u32 magic, val;
11964
11965 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11966 return NULL;
11967
11968 if (magic == TG3_EEPROM_MAGIC) {
11969 for (offset = TG3_NVM_DIR_START;
11970 offset < TG3_NVM_DIR_END;
11971 offset += TG3_NVM_DIRENT_SIZE) {
11972 if (tg3_nvram_read(tp, offset, &val))
11973 return NULL;
11974
11975 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11976 TG3_NVM_DIRTYPE_EXTVPD)
11977 break;
11978 }
11979
11980 if (offset != TG3_NVM_DIR_END) {
11981 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11982 if (tg3_nvram_read(tp, offset + 4, &offset))
11983 return NULL;
11984
11985 offset = tg3_nvram_logical_addr(tp, offset);
11986 }
11987 }
11988
11989 if (!offset || !len) {
11990 offset = TG3_NVM_VPD_OFF;
11991 len = TG3_NVM_VPD_LEN;
11992 }
11993
11994 buf = kmalloc(len, GFP_KERNEL);
11995 if (buf == NULL)
11996 return NULL;
11997
11998 if (magic == TG3_EEPROM_MAGIC) {
11999 for (i = 0; i < len; i += 4) {
12000 /* The data is in little-endian format in NVRAM.
12001 * Use the big-endian read routines to preserve
12002 * the byte order as it exists in NVRAM.
12003 */
12004 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12005 goto error;
12006 }
12007 } else {
12008 u8 *ptr;
12009 ssize_t cnt;
12010 unsigned int pos = 0;
12011
12012 ptr = (u8 *)&buf[0];
12013 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12014 cnt = pci_read_vpd(tp->pdev, pos,
12015 len - pos, ptr);
12016 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12017 cnt = 0;
12018 else if (cnt < 0)
12019 goto error;
12020 }
12021 if (pos != len)
12022 goto error;
12023 }
12024
12025 *vpdlen = len;
12026
12027 return buf;
12028
12029 error:
12030 kfree(buf);
12031 return NULL;
12032 }
12033
12034 #define NVRAM_TEST_SIZE 0x100
12035 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12036 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12037 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12038 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12039 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12040 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12041 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12042 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12043
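/* NVRAM self-test: read the magic word, derive the expected image size
 * from the format/revision, read the image in, and then verify
 * whichever integrity scheme that format uses (byte checksum, parity
 * bits, or CRC plus the VPD checksum keyword).
 */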
12044 static int tg3_test_nvram(struct tg3 *tp)
12045 {
12046 u32 csum, magic, len;
12047 __be32 *buf;
12048 int i, j, k, err = 0, size;
12049
12050 if (tg3_flag(tp, NO_NVRAM))
12051 return 0;
12052
12053 if (tg3_nvram_read(tp, 0, &magic) != 0)
12054 return -EIO;
12055
12056 if (magic == TG3_EEPROM_MAGIC)
12057 size = NVRAM_TEST_SIZE;
12058 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12059 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12060 TG3_EEPROM_SB_FORMAT_1) {
12061 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12062 case TG3_EEPROM_SB_REVISION_0:
12063 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12064 break;
12065 case TG3_EEPROM_SB_REVISION_2:
12066 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12067 break;
12068 case TG3_EEPROM_SB_REVISION_3:
12069 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12070 break;
12071 case TG3_EEPROM_SB_REVISION_4:
12072 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12073 break;
12074 case TG3_EEPROM_SB_REVISION_5:
12075 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12076 break;
12077 case TG3_EEPROM_SB_REVISION_6:
12078 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12079 break;
12080 default:
12081 return -EIO;
12082 }
12083 } else
12084 return 0;
12085 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12086 size = NVRAM_SELFBOOT_HW_SIZE;
12087 else
12088 return -EIO;
12089
12090 buf = kmalloc(size, GFP_KERNEL);
12091 if (buf == NULL)
12092 return -ENOMEM;
12093
12094 err = -EIO;
12095 for (i = 0, j = 0; i < size; i += 4, j++) {
12096 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12097 if (err)
12098 break;
12099 }
12100 if (i < size)
12101 goto out;
12102
12103 /* Selfboot format */
12104 magic = be32_to_cpu(buf[0]);
12105 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12106 TG3_EEPROM_MAGIC_FW) {
12107 u8 *buf8 = (u8 *) buf, csum8 = 0;
12108
12109 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12110 TG3_EEPROM_SB_REVISION_2) {
12111 /* For rev 2, the csum doesn't include the MBA (Multi-Boot Agent) word. */
12112 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12113 csum8 += buf8[i];
12114 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12115 csum8 += buf8[i];
12116 } else {
12117 for (i = 0; i < size; i++)
12118 csum8 += buf8[i];
12119 }
12120
12121 if (csum8 == 0) {
12122 err = 0;
12123 goto out;
12124 }
12125
12126 err = -EIO;
12127 goto out;
12128 }
12129
12130 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12131 TG3_EEPROM_MAGIC_HW) {
12132 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12133 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12134 u8 *buf8 = (u8 *) buf;
12135
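/* Layout, as recovered from the loop below: in the HW selfboot
 * format, the parity bits for the 0x1c data bytes are packed into
 * the bytes at offsets 0, 8, 16 and 17 (7 + 7 + 6 + 8 bits).  Each
 * data byte together with its parity bit must have odd overall
 * parity.
 */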
12136 /* Separate the parity bits and the data bytes. */
12137 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12138 if ((i == 0) || (i == 8)) {
12139 int l;
12140 u8 msk;
12141
12142 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12143 parity[k++] = buf8[i] & msk;
12144 i++;
12145 } else if (i == 16) {
12146 int l;
12147 u8 msk;
12148
12149 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12150 parity[k++] = buf8[i] & msk;
12151 i++;
12152
12153 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12154 parity[k++] = buf8[i] & msk;
12155 i++;
12156 }
12157 data[j++] = buf8[i];
12158 }
12159
12160 err = -EIO;
12161 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12162 u8 hw8 = hweight8(data[i]);
12163
12164 if ((hw8 & 0x1) && parity[i])
12165 goto out;
12166 else if (!(hw8 & 0x1) && !parity[i])
12167 goto out;
12168 }
12169 err = 0;
12170 goto out;
12171 }
12172
12173 err = -EIO;
12174
12175 /* Bootstrap checksum at offset 0x10 */
12176 csum = calc_crc((unsigned char *) buf, 0x10);
12177 if (csum != le32_to_cpu(buf[0x10/4]))
12178 goto out;
12179
12180 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12181 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12182 if (csum != le32_to_cpu(buf[0xfc/4]))
12183 goto out;
12184
12185 kfree(buf);
12186
12187 buf = tg3_vpd_readblock(tp, &len);
12188 if (!buf)
12189 return -ENOMEM;
12190
12191 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12192 if (i > 0) {
12193 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12194 if (j < 0)
12195 goto out;
12196
12197 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12198 goto out;
12199
12200 i += PCI_VPD_LRDT_TAG_SIZE;
12201 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12202 PCI_VPD_RO_KEYWORD_CHKSUM);
12203 if (j > 0) {
12204 u8 csum8 = 0;
12205
12206 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12207
12208 for (i = 0; i <= j; i++)
12209 csum8 += ((u8 *)buf)[i];
12210
12211 if (csum8)
12212 goto out;
12213 }
12214 }
12215
12216 err = 0;
12217
12218 out:
12219 kfree(buf);
12220 return err;
12221 }
12222
12223 #define TG3_SERDES_TIMEOUT_SEC 2
12224 #define TG3_COPPER_TIMEOUT_SEC 6
12225
12226 static int tg3_test_link(struct tg3 *tp)
12227 {
12228 int i, max;
12229
12230 if (!netif_running(tp->dev))
12231 return -ENODEV;
12232
12233 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12234 max = TG3_SERDES_TIMEOUT_SEC;
12235 else
12236 max = TG3_COPPER_TIMEOUT_SEC;
12237
12238 for (i = 0; i < max; i++) {
12239 if (tp->link_up)
12240 return 0;
12241
12242 if (msleep_interruptible(1000))
12243 break;
12244 }
12245
12246 return -EIO;
12247 }
12248
12249 /* Only test the commonly used registers */
12250 static int tg3_test_registers(struct tg3 *tp)
12251 {
12252 int i, is_5705, is_5750;
12253 u32 offset, read_mask, write_mask, val, save_val, read_val;
12254 static struct {
12255 u16 offset;
12256 u16 flags;
12257 #define TG3_FL_5705 0x1
12258 #define TG3_FL_NOT_5705 0x2
12259 #define TG3_FL_NOT_5788 0x4
12260 #define TG3_FL_NOT_5750 0x8
12261 u32 read_mask;
12262 u32 write_mask;
12263 } reg_tbl[] = {
12264 /* MAC Control Registers */
12265 { MAC_MODE, TG3_FL_NOT_5705,
12266 0x00000000, 0x00ef6f8c },
12267 { MAC_MODE, TG3_FL_5705,
12268 0x00000000, 0x01ef6b8c },
12269 { MAC_STATUS, TG3_FL_NOT_5705,
12270 0x03800107, 0x00000000 },
12271 { MAC_STATUS, TG3_FL_5705,
12272 0x03800100, 0x00000000 },
12273 { MAC_ADDR_0_HIGH, 0x0000,
12274 0x00000000, 0x0000ffff },
12275 { MAC_ADDR_0_LOW, 0x0000,
12276 0x00000000, 0xffffffff },
12277 { MAC_RX_MTU_SIZE, 0x0000,
12278 0x00000000, 0x0000ffff },
12279 { MAC_TX_MODE, 0x0000,
12280 0x00000000, 0x00000070 },
12281 { MAC_TX_LENGTHS, 0x0000,
12282 0x00000000, 0x00003fff },
12283 { MAC_RX_MODE, TG3_FL_NOT_5705,
12284 0x00000000, 0x000007fc },
12285 { MAC_RX_MODE, TG3_FL_5705,
12286 0x00000000, 0x000007dc },
12287 { MAC_HASH_REG_0, 0x0000,
12288 0x00000000, 0xffffffff },
12289 { MAC_HASH_REG_1, 0x0000,
12290 0x00000000, 0xffffffff },
12291 { MAC_HASH_REG_2, 0x0000,
12292 0x00000000, 0xffffffff },
12293 { MAC_HASH_REG_3, 0x0000,
12294 0x00000000, 0xffffffff },
12295
12296 /* Receive Data and Receive BD Initiator Control Registers. */
12297 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12298 0x00000000, 0xffffffff },
12299 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12300 0x00000000, 0xffffffff },
12301 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12302 0x00000000, 0x00000003 },
12303 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12304 0x00000000, 0xffffffff },
12305 { RCVDBDI_STD_BD+0, 0x0000,
12306 0x00000000, 0xffffffff },
12307 { RCVDBDI_STD_BD+4, 0x0000,
12308 0x00000000, 0xffffffff },
12309 { RCVDBDI_STD_BD+8, 0x0000,
12310 0x00000000, 0xffff0002 },
12311 { RCVDBDI_STD_BD+0xc, 0x0000,
12312 0x00000000, 0xffffffff },
12313
12314 /* Receive BD Initiator Control Registers. */
12315 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12316 0x00000000, 0xffffffff },
12317 { RCVBDI_STD_THRESH, TG3_FL_5705,
12318 0x00000000, 0x000003ff },
12319 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12320 0x00000000, 0xffffffff },
12321
12322 /* Host Coalescing Control Registers. */
12323 { HOSTCC_MODE, TG3_FL_NOT_5705,
12324 0x00000000, 0x00000004 },
12325 { HOSTCC_MODE, TG3_FL_5705,
12326 0x00000000, 0x000000f6 },
12327 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12328 0x00000000, 0xffffffff },
12329 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12330 0x00000000, 0x000003ff },
12331 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12332 0x00000000, 0xffffffff },
12333 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12334 0x00000000, 0x000003ff },
12335 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12336 0x00000000, 0xffffffff },
12337 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12338 0x00000000, 0x000000ff },
12339 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12340 0x00000000, 0xffffffff },
12341 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12342 0x00000000, 0x000000ff },
12343 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12344 0x00000000, 0xffffffff },
12345 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12346 0x00000000, 0xffffffff },
12347 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12348 0x00000000, 0xffffffff },
12349 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12350 0x00000000, 0x000000ff },
12351 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12352 0x00000000, 0xffffffff },
12353 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12354 0x00000000, 0x000000ff },
12355 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12356 0x00000000, 0xffffffff },
12357 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12358 0x00000000, 0xffffffff },
12359 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12360 0x00000000, 0xffffffff },
12361 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12362 0x00000000, 0xffffffff },
12363 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12364 0x00000000, 0xffffffff },
12365 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12366 0xffffffff, 0x00000000 },
12367 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12368 0xffffffff, 0x00000000 },
12369
12370 /* Buffer Manager Control Registers. */
12371 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12372 0x00000000, 0x007fff80 },
12373 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12374 0x00000000, 0x007fffff },
12375 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12376 0x00000000, 0x0000003f },
12377 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12378 0x00000000, 0x000001ff },
12379 { BUFMGR_MB_HIGH_WATER, 0x0000,
12380 0x00000000, 0x000001ff },
12381 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12382 0xffffffff, 0x00000000 },
12383 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12384 0xffffffff, 0x00000000 },
12385
12386 /* Mailbox Registers */
12387 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12388 0x00000000, 0x000001ff },
12389 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12390 0x00000000, 0x000001ff },
12391 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12392 0x00000000, 0x000007ff },
12393 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12394 0x00000000, 0x000001ff },
12395
12396 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12397 };
12398
12399 is_5705 = is_5750 = 0;
12400 if (tg3_flag(tp, 5705_PLUS)) {
12401 is_5705 = 1;
12402 if (tg3_flag(tp, 5750_PLUS))
12403 is_5750 = 1;
12404 }
12405
12406 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12407 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12408 continue;
12409
12410 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12411 continue;
12412
12413 if (tg3_flag(tp, IS_5788) &&
12414 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12415 continue;
12416
12417 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12418 continue;
12419
12420 offset = (u32) reg_tbl[i].offset;
12421 read_mask = reg_tbl[i].read_mask;
12422 write_mask = reg_tbl[i].write_mask;
12423
12424 /* Save the original register content */
12425 save_val = tr32(offset);
12426
12427 /* Determine the read-only value. */
12428 read_val = save_val & read_mask;
12429
12430 /* Write zero to the register, then make sure the read-only bits
12431 * are not changed and the read/write bits are all zeros.
12432 */
12433 tw32(offset, 0);
12434
12435 val = tr32(offset);
12436
12437 /* Test the read-only and read/write bits. */
12438 if (((val & read_mask) != read_val) || (val & write_mask))
12439 goto out;
12440
12441 /* Write ones to all the bits defined by RdMask and WrMask, then
12442 * make sure the read-only bits are not changed and the
12443 * read/write bits are all ones.
12444 */
12445 tw32(offset, read_mask | write_mask);
12446
12447 val = tr32(offset);
12448
12449 /* Test the read-only bits. */
12450 if ((val & read_mask) != read_val)
12451 goto out;
12452
12453 /* Test the read/write bits. */
12454 if ((val & write_mask) != write_mask)
12455 goto out;
12456
12457 tw32(offset, save_val);
12458 }
12459
12460 return 0;
12461
12462 out:
12463 if (netif_msg_hw(tp))
12464 netdev_err(tp->dev,
12465 "Register test failed at offset %x\n", offset);
12466 tw32(offset, save_val);
12467 return -EIO;
12468 }
12469
12470 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12471 {
12472 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12473 int i;
12474 u32 j;
12475
12476 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12477 for (j = 0; j < len; j += 4) {
12478 u32 val;
12479
12480 tg3_write_mem(tp, offset + j, test_pattern[i]);
12481 tg3_read_mem(tp, offset + j, &val);
12482 if (val != test_pattern[i])
12483 return -EIO;
12484 }
12485 }
12486 return 0;
12487 }
12488
12489 static int tg3_test_memory(struct tg3 *tp)
12490 {
12491 static struct mem_entry {
12492 u32 offset;
12493 u32 len;
12494 } mem_tbl_570x[] = {
12495 { 0x00000000, 0x00b50},
12496 { 0x00002000, 0x1c000},
12497 { 0xffffffff, 0x00000}
12498 }, mem_tbl_5705[] = {
12499 { 0x00000100, 0x0000c},
12500 { 0x00000200, 0x00008},
12501 { 0x00004000, 0x00800},
12502 { 0x00006000, 0x01000},
12503 { 0x00008000, 0x02000},
12504 { 0x00010000, 0x0e000},
12505 { 0xffffffff, 0x00000}
12506 }, mem_tbl_5755[] = {
12507 { 0x00000200, 0x00008},
12508 { 0x00004000, 0x00800},
12509 { 0x00006000, 0x00800},
12510 { 0x00008000, 0x02000},
12511 { 0x00010000, 0x0c000},
12512 { 0xffffffff, 0x00000}
12513 }, mem_tbl_5906[] = {
12514 { 0x00000200, 0x00008},
12515 { 0x00004000, 0x00400},
12516 { 0x00006000, 0x00400},
12517 { 0x00008000, 0x01000},
12518 { 0x00010000, 0x01000},
12519 { 0xffffffff, 0x00000}
12520 }, mem_tbl_5717[] = {
12521 { 0x00000200, 0x00008},
12522 { 0x00010000, 0x0a000},
12523 { 0x00020000, 0x13c00},
12524 { 0xffffffff, 0x00000}
12525 }, mem_tbl_57765[] = {
12526 { 0x00000200, 0x00008},
12527 { 0x00004000, 0x00800},
12528 { 0x00006000, 0x09800},
12529 { 0x00010000, 0x0a000},
12530 { 0xffffffff, 0x00000}
12531 };
12532 struct mem_entry *mem_tbl;
12533 int err = 0;
12534 int i;
12535
12536 if (tg3_flag(tp, 5717_PLUS))
12537 mem_tbl = mem_tbl_5717;
12538 else if (tg3_flag(tp, 57765_CLASS) ||
12539 tg3_asic_rev(tp) == ASIC_REV_5762)
12540 mem_tbl = mem_tbl_57765;
12541 else if (tg3_flag(tp, 5755_PLUS))
12542 mem_tbl = mem_tbl_5755;
12543 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12544 mem_tbl = mem_tbl_5906;
12545 else if (tg3_flag(tp, 5705_PLUS))
12546 mem_tbl = mem_tbl_5705;
12547 else
12548 mem_tbl = mem_tbl_570x;
12549
12550 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12551 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12552 if (err)
12553 break;
12554 }
12555
12556 return err;
12557 }
12558
12559 #define TG3_TSO_MSS 500
12560
12561 #define TG3_TSO_IP_HDR_LEN 20
12562 #define TG3_TSO_TCP_HDR_LEN 20
12563 #define TG3_TSO_TCP_OPT_LEN 12
12564
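/* Canned packet header for the TSO loopback test: EtherType 0x0800,
 * then a 20-byte IPv4 header (DF set, TTL 64, proto TCP,
 * 10.0.0.1 -> 10.0.0.2) and a 32-byte TCP header whose last 12 bytes
 * are a NOP/NOP/timestamp option block.  The IP total length is
 * patched in at run time, and the TCP checksum is either zeroed for
 * hardware TSO engines or offloaded via TXD_FLAG_TCPUDP_CSUM.
 */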
12565 static const u8 tg3_tso_header[] = {
12566 0x08, 0x00,
12567 0x45, 0x00, 0x00, 0x00,
12568 0x00, 0x00, 0x40, 0x00,
12569 0x40, 0x06, 0x00, 0x00,
12570 0x0a, 0x00, 0x00, 0x01,
12571 0x0a, 0x00, 0x00, 0x02,
12572 0x0d, 0x00, 0xe0, 0x00,
12573 0x00, 0x00, 0x01, 0x00,
12574 0x00, 0x00, 0x02, 0x00,
12575 0x80, 0x10, 0x10, 0x00,
12576 0x14, 0x09, 0x00, 0x00,
12577 0x01, 0x01, 0x08, 0x0a,
12578 0x11, 0x11, 0x11, 0x11,
12579 0x11, 0x11, 0x11, 0x11,
12580 };
12581
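/* Run one loopback iteration: build a test frame (optionally a TSO
 * super-frame using tg3_tso_header), post it to the tx ring, force a
 * coalescing event, poll up to ~350 usec for the tx consumer and rx
 * producer indices to advance, then verify the received payload byte
 * pattern against what was sent.
 */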
12582 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12583 {
12584 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12585 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12586 u32 budget;
12587 struct sk_buff *skb;
12588 u8 *tx_data, *rx_data;
12589 dma_addr_t map;
12590 int num_pkts, tx_len, rx_len, i, err;
12591 struct tg3_rx_buffer_desc *desc;
12592 struct tg3_napi *tnapi, *rnapi;
12593 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12594
12595 tnapi = &tp->napi[0];
12596 rnapi = &tp->napi[0];
12597 if (tp->irq_cnt > 1) {
12598 if (tg3_flag(tp, ENABLE_RSS))
12599 rnapi = &tp->napi[1];
12600 if (tg3_flag(tp, ENABLE_TSS))
12601 tnapi = &tp->napi[1];
12602 }
12603 coal_now = tnapi->coal_now | rnapi->coal_now;
12604
12605 err = -EIO;
12606
12607 tx_len = pktsz;
12608 skb = netdev_alloc_skb(tp->dev, tx_len);
12609 if (!skb)
12610 return -ENOMEM;
12611
12612 tx_data = skb_put(skb, tx_len);
12613 memcpy(tx_data, tp->dev->dev_addr, 6);
12614 memset(tx_data + 6, 0x0, 8);
12615
12616 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12617
12618 if (tso_loopback) {
12619 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12620
12621 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12622 TG3_TSO_TCP_OPT_LEN;
12623
12624 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12625 sizeof(tg3_tso_header));
12626 mss = TG3_TSO_MSS;
12627
12628 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12629 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12630
12631 /* Set the total length field in the IP header */
12632 iph->tot_len = htons((u16)(mss + hdr_len));
12633
12634 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12635 TXD_FLAG_CPU_POST_DMA);
12636
12637 if (tg3_flag(tp, HW_TSO_1) ||
12638 tg3_flag(tp, HW_TSO_2) ||
12639 tg3_flag(tp, HW_TSO_3)) {
12640 struct tcphdr *th;
12641 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12642 th = (struct tcphdr *)&tx_data[val];
12643 th->check = 0;
12644 } else
12645 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12646
12647 if (tg3_flag(tp, HW_TSO_3)) {
12648 mss |= (hdr_len & 0xc) << 12;
12649 if (hdr_len & 0x10)
12650 base_flags |= 0x00000010;
12651 base_flags |= (hdr_len & 0x3e0) << 5;
12652 } else if (tg3_flag(tp, HW_TSO_2))
12653 mss |= hdr_len << 9;
12654 else if (tg3_flag(tp, HW_TSO_1) ||
12655 tg3_asic_rev(tp) == ASIC_REV_5705) {
12656 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12657 } else {
12658 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12659 }
12660
12661 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12662 } else {
12663 num_pkts = 1;
12664 data_off = ETH_HLEN;
12665
12666 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12667 tx_len > VLAN_ETH_FRAME_LEN)
12668 base_flags |= TXD_FLAG_JMB_PKT;
12669 }
12670
12671 for (i = data_off; i < tx_len; i++)
12672 tx_data[i] = (u8) (i & 0xff);
12673
12674 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12675 if (pci_dma_mapping_error(tp->pdev, map)) {
12676 dev_kfree_skb(skb);
12677 return -EIO;
12678 }
12679
12680 val = tnapi->tx_prod;
12681 tnapi->tx_buffers[val].skb = skb;
12682 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12683
12684 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12685 rnapi->coal_now);
12686
12687 udelay(10);
12688
12689 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12690
12691 budget = tg3_tx_avail(tnapi);
12692 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12693 base_flags | TXD_FLAG_END, mss, 0)) {
12694 tnapi->tx_buffers[val].skb = NULL;
12695 dev_kfree_skb(skb);
12696 return -EIO;
12697 }
12698
12699 tnapi->tx_prod++;
12700
12701 /* Sync BD data before updating mailbox */
12702 wmb();
12703
12704 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12705 tr32_mailbox(tnapi->prodmbox);
12706
12707 udelay(10);
12708
12709 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12710 for (i = 0; i < 35; i++) {
12711 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12712 coal_now);
12713
12714 udelay(10);
12715
12716 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12717 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12718 if ((tx_idx == tnapi->tx_prod) &&
12719 (rx_idx == (rx_start_idx + num_pkts)))
12720 break;
12721 }
12722
12723 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12724 dev_kfree_skb(skb);
12725
12726 if (tx_idx != tnapi->tx_prod)
12727 goto out;
12728
12729 if (rx_idx != rx_start_idx + num_pkts)
12730 goto out;
12731
12732 val = data_off;
12733 while (rx_idx != rx_start_idx) {
12734 desc = &rnapi->rx_rcb[rx_start_idx++];
12735 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12736 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12737
12738 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12739 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12740 goto out;
12741
12742 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12743 - ETH_FCS_LEN;
12744
12745 if (!tso_loopback) {
12746 if (rx_len != tx_len)
12747 goto out;
12748
12749 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12750 if (opaque_key != RXD_OPAQUE_RING_STD)
12751 goto out;
12752 } else {
12753 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12754 goto out;
12755 }
12756 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12757 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12758 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12759 goto out;
12760 }
12761
12762 if (opaque_key == RXD_OPAQUE_RING_STD) {
12763 rx_data = tpr->rx_std_buffers[desc_idx].data;
12764 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12765 mapping);
12766 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12767 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12768 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12769 mapping);
12770 } else
12771 goto out;
12772
12773 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12774 PCI_DMA_FROMDEVICE);
12775
12776 rx_data += TG3_RX_OFFSET(tp);
12777 for (i = data_off; i < rx_len; i++, val++) {
12778 if (*(rx_data + i) != (u8) (val & 0xff))
12779 goto out;
12780 }
12781 }
12782
12783 err = 0;
12784
12785 /* tg3_free_rings will unmap and free the rx_data */
12786 out:
12787 return err;
12788 }
12789
12790 #define TG3_STD_LOOPBACK_FAILED 1
12791 #define TG3_JMB_LOOPBACK_FAILED 2
12792 #define TG3_TSO_LOOPBACK_FAILED 4
12793 #define TG3_LOOPBACK_FAILED \
12794 (TG3_STD_LOOPBACK_FAILED | \
12795 TG3_JMB_LOOPBACK_FAILED | \
12796 TG3_TSO_LOOPBACK_FAILED)
12797
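/* Each data[] slot (MAC, PHY, external PHY loopback) accumulates the
 * TG3_*_LOOPBACK_FAILED bits above, so a single u64 records which of
 * the standard, jumbo and TSO variants failed for that mode.
 */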
12798 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12799 {
12800 int err = -EIO;
12801 u32 eee_cap;
12802 u32 jmb_pkt_sz = 9000;
12803
12804 if (tp->dma_limit)
12805 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12806
12807 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12808 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12809
12810 if (!netif_running(tp->dev)) {
12811 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12812 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12813 if (do_extlpbk)
12814 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12815 goto done;
12816 }
12817
12818 err = tg3_reset_hw(tp, 1);
12819 if (err) {
12820 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12821 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12822 if (do_extlpbk)
12823 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12824 goto done;
12825 }
12826
12827 if (tg3_flag(tp, ENABLE_RSS)) {
12828 int i;
12829
12830 /* Reroute all rx packets to the 1st queue */
12831 for (i = MAC_RSS_INDIR_TBL_0;
12832 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12833 tw32(i, 0x0);
12834 }
12835
12836 /* HW erratum - MAC loopback fails in some cases on the 5780.
12837 * Normal traffic and PHY loopback are not affected by this
12838 * erratum. Also, the MAC loopback test is deprecated for
12839 * all newer ASIC revisions.
12840 */
12841 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12842 !tg3_flag(tp, CPMU_PRESENT)) {
12843 tg3_mac_loopback(tp, true);
12844
12845 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12846 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12847
12848 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12849 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12850 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12851
12852 tg3_mac_loopback(tp, false);
12853 }
12854
12855 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12856 !tg3_flag(tp, USE_PHYLIB)) {
12857 int i;
12858
12859 tg3_phy_lpbk_set(tp, 0, false);
12860
12861 /* Wait for link */
12862 for (i = 0; i < 100; i++) {
12863 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12864 break;
12865 mdelay(1);
12866 }
12867
12868 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12869 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12870 if (tg3_flag(tp, TSO_CAPABLE) &&
12871 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12872 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12873 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12874 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12875 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12876
12877 if (do_extlpbk) {
12878 tg3_phy_lpbk_set(tp, 0, true);
12879
12880 /* All link indications report up, but the hardware
12881 * isn't really ready for about 20 msec. Double it
12882 * to be sure.
12883 */
12884 mdelay(40);
12885
12886 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12887 data[TG3_EXT_LOOPB_TEST] |=
12888 TG3_STD_LOOPBACK_FAILED;
12889 if (tg3_flag(tp, TSO_CAPABLE) &&
12890 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12891 data[TG3_EXT_LOOPB_TEST] |=
12892 TG3_TSO_LOOPBACK_FAILED;
12893 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12894 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12895 data[TG3_EXT_LOOPB_TEST] |=
12896 TG3_JMB_LOOPBACK_FAILED;
12897 }
12898
12899 /* Re-enable gphy autopowerdown. */
12900 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12901 tg3_phy_toggle_apd(tp, true);
12902 }
12903
12904 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12905 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12906
12907 done:
12908 tp->phy_flags |= eee_cap;
12909
12910 return err;
12911 }
12912
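/* ETHTOOL_TEST entry point (ethtool -t).  The NVRAM and link checks
 * always run online; the register, memory, loopback and interrupt
 * tests only run when ETH_TEST_FL_OFFLINE is set, with the device
 * halted around them and restarted afterwards.
 */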
12913 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12914 u64 *data)
12915 {
12916 struct tg3 *tp = netdev_priv(dev);
12917 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12918
12919 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12920 tg3_power_up(tp)) {
12921 etest->flags |= ETH_TEST_FL_FAILED;
12922 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12923 return;
12924 }
12925
12926 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12927
12928 if (tg3_test_nvram(tp) != 0) {
12929 etest->flags |= ETH_TEST_FL_FAILED;
12930 data[TG3_NVRAM_TEST] = 1;
12931 }
12932 if (!doextlpbk && tg3_test_link(tp)) {
12933 etest->flags |= ETH_TEST_FL_FAILED;
12934 data[TG3_LINK_TEST] = 1;
12935 }
12936 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12937 int err, err2 = 0, irq_sync = 0;
12938
12939 if (netif_running(dev)) {
12940 tg3_phy_stop(tp);
12941 tg3_netif_stop(tp);
12942 irq_sync = 1;
12943 }
12944
12945 tg3_full_lock(tp, irq_sync);
12946 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12947 err = tg3_nvram_lock(tp);
12948 tg3_halt_cpu(tp, RX_CPU_BASE);
12949 if (!tg3_flag(tp, 5705_PLUS))
12950 tg3_halt_cpu(tp, TX_CPU_BASE);
12951 if (!err)
12952 tg3_nvram_unlock(tp);
12953
12954 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12955 tg3_phy_reset(tp);
12956
12957 if (tg3_test_registers(tp) != 0) {
12958 etest->flags |= ETH_TEST_FL_FAILED;
12959 data[TG3_REGISTER_TEST] = 1;
12960 }
12961
12962 if (tg3_test_memory(tp) != 0) {
12963 etest->flags |= ETH_TEST_FL_FAILED;
12964 data[TG3_MEMORY_TEST] = 1;
12965 }
12966
12967 if (doextlpbk)
12968 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12969
12970 if (tg3_test_loopback(tp, data, doextlpbk))
12971 etest->flags |= ETH_TEST_FL_FAILED;
12972
12973 tg3_full_unlock(tp);
12974
12975 if (tg3_test_interrupt(tp) != 0) {
12976 etest->flags |= ETH_TEST_FL_FAILED;
12977 data[TG3_INTERRUPT_TEST] = 1;
12978 }
12979
12980 tg3_full_lock(tp, 0);
12981
12982 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12983 if (netif_running(dev)) {
12984 tg3_flag_set(tp, INIT_COMPLETE);
12985 err2 = tg3_restart_hw(tp, 1);
12986 if (!err2)
12987 tg3_netif_start(tp);
12988 }
12989
12990 tg3_full_unlock(tp);
12991
12992 if (irq_sync && !err2)
12993 tg3_phy_start(tp);
12994 }
12995 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12996 tg3_power_down(tp);
12997
12998 }
12999
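/* SIOCSHWTSTAMP handler: validate the hwtstamp_config copied from
 * user space, toggle tx timestamping via the TX_TSTAMP_EN flag,
 * translate the rx filter into TG3_RX_PTP_CTL_* bits, and echo the
 * accepted config back to the caller.
 */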
13000 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13001 struct ifreq *ifr, int cmd)
13002 {
13003 struct tg3 *tp = netdev_priv(dev);
13004 struct hwtstamp_config stmpconf;
13005
13006 if (!tg3_flag(tp, PTP_CAPABLE))
13007 return -EINVAL;
13008
13009 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13010 return -EFAULT;
13011
13012 if (stmpconf.flags)
13013 return -EINVAL;
13014
13015 switch (stmpconf.tx_type) {
13016 case HWTSTAMP_TX_ON:
13017 tg3_flag_set(tp, TX_TSTAMP_EN);
13018 break;
13019 case HWTSTAMP_TX_OFF:
13020 tg3_flag_clear(tp, TX_TSTAMP_EN);
13021 break;
13022 default:
13023 return -ERANGE;
13024 }
13025
13026 switch (stmpconf.rx_filter) {
13027 case HWTSTAMP_FILTER_NONE:
13028 tp->rxptpctl = 0;
13029 break;
13030 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13031 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13032 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13033 break;
13034 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13035 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13036 TG3_RX_PTP_CTL_SYNC_EVNT;
13037 break;
13038 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13039 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13040 TG3_RX_PTP_CTL_DELAY_REQ;
13041 break;
13042 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13043 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13044 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13045 break;
13046 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13047 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13048 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13049 break;
13050 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13051 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13052 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13053 break;
13054 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13055 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13056 TG3_RX_PTP_CTL_SYNC_EVNT;
13057 break;
13058 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13059 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13060 TG3_RX_PTP_CTL_SYNC_EVNT;
13061 break;
13062 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13063 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13064 TG3_RX_PTP_CTL_SYNC_EVNT;
13065 break;
13066 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13067 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13068 TG3_RX_PTP_CTL_DELAY_REQ;
13069 break;
13070 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13071 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13072 TG3_RX_PTP_CTL_DELAY_REQ;
13073 break;
13074 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13075 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13076 TG3_RX_PTP_CTL_DELAY_REQ;
13077 break;
13078 default:
13079 return -ERANGE;
13080 }
13081
13082 if (netif_running(dev) && tp->rxptpctl)
13083 tw32(TG3_RX_PTP_CTL,
13084 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13085
13086 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13087 -EFAULT : 0;
13088 }
13089
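/* MII/PTP ioctl dispatcher.  With phylib the request is forwarded to
 * phy_mii_ioctl(); otherwise SIOCGMIIREG/SIOCSMIIREG go through
 * __tg3_readphy()/__tg3_writephy() under tp->lock, and SIOCSHWTSTAMP
 * is routed to tg3_hwtstamp_ioctl() above.
 */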
13090 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13091 {
13092 struct mii_ioctl_data *data = if_mii(ifr);
13093 struct tg3 *tp = netdev_priv(dev);
13094 int err;
13095
13096 if (tg3_flag(tp, USE_PHYLIB)) {
13097 struct phy_device *phydev;
13098 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13099 return -EAGAIN;
13100 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13101 return phy_mii_ioctl(phydev, ifr, cmd);
13102 }
13103
13104 switch (cmd) {
13105 case SIOCGMIIPHY:
13106 data->phy_id = tp->phy_addr;
13107
13108 /* fallthru */
13109 case SIOCGMIIREG: {
13110 u32 mii_regval;
13111
13112 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13113 break; /* We have no PHY */
13114
13115 if (!netif_running(dev))
13116 return -EAGAIN;
13117
13118 spin_lock_bh(&tp->lock);
13119 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13120 data->reg_num & 0x1f, &mii_regval);
13121 spin_unlock_bh(&tp->lock);
13122
13123 data->val_out = mii_regval;
13124
13125 return err;
13126 }
13127
13128 case SIOCSMIIREG:
13129 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13130 break; /* We have no PHY */
13131
13132 if (!netif_running(dev))
13133 return -EAGAIN;
13134
13135 spin_lock_bh(&tp->lock);
13136 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13137 data->reg_num & 0x1f, data->val_in);
13138 spin_unlock_bh(&tp->lock);
13139
13140 return err;
13141
13142 case SIOCSHWTSTAMP:
13143 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13144
13145 default:
13146 /* do nothing */
13147 break;
13148 }
13149 return -EOPNOTSUPP;
13150 }
13151
13152 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13153 {
13154 struct tg3 *tp = netdev_priv(dev);
13155
13156 memcpy(ec, &tp->coal, sizeof(*ec));
13157 return 0;
13158 }
13159
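/* Backs ethtool -C/--coalesce.  The bounds depend on the chip: the
 * irq-tick and stats-block parameters are only configurable on
 * pre-5705 devices, so their limits stay zero on 5705_PLUS parts and
 * nonzero requests for them are rejected with -EINVAL there.
 */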
13160 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13161 {
13162 struct tg3 *tp = netdev_priv(dev);
13163 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13164 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13165
13166 if (!tg3_flag(tp, 5705_PLUS)) {
13167 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13168 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13169 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13170 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13171 }
13172
13173 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13174 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13175 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13176 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13177 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13178 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13179 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13180 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13181 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13182 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13183 return -EINVAL;
13184
13185 /* No rx interrupts will be generated if both are zero */
13186 if ((ec->rx_coalesce_usecs == 0) &&
13187 (ec->rx_max_coalesced_frames == 0))
13188 return -EINVAL;
13189
13190 /* No tx interrupts will be generated if both are zero */
13191 if ((ec->tx_coalesce_usecs == 0) &&
13192 (ec->tx_max_coalesced_frames == 0))
13193 return -EINVAL;
13194
13195 /* Only copy relevant parameters, ignore all others. */
13196 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13197 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13198 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13199 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13200 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13201 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13202 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13203 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13204 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13205
13206 if (netif_running(dev)) {
13207 tg3_full_lock(tp, 0);
13208 __tg3_set_coalesce(tp, &tp->coal);
13209 tg3_full_unlock(tp);
13210 }
13211 return 0;
13212 }
13213
13214 static const struct ethtool_ops tg3_ethtool_ops = {
13215 .get_settings = tg3_get_settings,
13216 .set_settings = tg3_set_settings,
13217 .get_drvinfo = tg3_get_drvinfo,
13218 .get_regs_len = tg3_get_regs_len,
13219 .get_regs = tg3_get_regs,
13220 .get_wol = tg3_get_wol,
13221 .set_wol = tg3_set_wol,
13222 .get_msglevel = tg3_get_msglevel,
13223 .set_msglevel = tg3_set_msglevel,
13224 .nway_reset = tg3_nway_reset,
13225 .get_link = ethtool_op_get_link,
13226 .get_eeprom_len = tg3_get_eeprom_len,
13227 .get_eeprom = tg3_get_eeprom,
13228 .set_eeprom = tg3_set_eeprom,
13229 .get_ringparam = tg3_get_ringparam,
13230 .set_ringparam = tg3_set_ringparam,
13231 .get_pauseparam = tg3_get_pauseparam,
13232 .set_pauseparam = tg3_set_pauseparam,
13233 .self_test = tg3_self_test,
13234 .get_strings = tg3_get_strings,
13235 .set_phys_id = tg3_set_phys_id,
13236 .get_ethtool_stats = tg3_get_ethtool_stats,
13237 .get_coalesce = tg3_get_coalesce,
13238 .set_coalesce = tg3_set_coalesce,
13239 .get_sset_count = tg3_get_sset_count,
13240 .get_rxnfc = tg3_get_rxnfc,
13241 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13242 .get_rxfh_indir = tg3_get_rxfh_indir,
13243 .set_rxfh_indir = tg3_set_rxfh_indir,
13244 .get_channels = tg3_get_channels,
13245 .set_channels = tg3_set_channels,
13246 .get_ts_info = tg3_get_ts_info,
13247 };
13248
13249 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13250 struct rtnl_link_stats64 *stats)
13251 {
13252 struct tg3 *tp = netdev_priv(dev);
13253
13254 spin_lock_bh(&tp->lock);
13255 if (!tp->hw_stats) {
13256 spin_unlock_bh(&tp->lock);
13257 return &tp->net_stats_prev;
13258 }
13259
13260 tg3_get_nstats(tp, stats);
13261 spin_unlock_bh(&tp->lock);
13262
13263 return stats;
13264 }
13265
13266 static void tg3_set_rx_mode(struct net_device *dev)
13267 {
13268 struct tg3 *tp = netdev_priv(dev);
13269
13270 if (!netif_running(dev))
13271 return;
13272
13273 tg3_full_lock(tp, 0);
13274 __tg3_set_rx_mode(dev);
13275 tg3_full_unlock(tp);
13276 }
13277
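/* For MTUs above ETH_DATA_LEN the jumbo rx ring is enabled, except on
 * 5780-class chips, where TSO capability is dropped instead; the
 * driver treats jumbo frames and TSO as mutually exclusive on that
 * family, recomputing the feature set via netdev_update_features().
 */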
13278 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13279 int new_mtu)
13280 {
13281 dev->mtu = new_mtu;
13282
13283 if (new_mtu > ETH_DATA_LEN) {
13284 if (tg3_flag(tp, 5780_CLASS)) {
13285 netdev_update_features(dev);
13286 tg3_flag_clear(tp, TSO_CAPABLE);
13287 } else {
13288 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13289 }
13290 } else {
13291 if (tg3_flag(tp, 5780_CLASS)) {
13292 tg3_flag_set(tp, TSO_CAPABLE);
13293 netdev_update_features(dev);
13294 }
13295 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13296 }
13297 }
13298
13299 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13300 {
13301 struct tg3 *tp = netdev_priv(dev);
13302 int err, reset_phy = 0;
13303
13304 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13305 return -EINVAL;
13306
13307 if (!netif_running(dev)) {
13308 /* We'll just catch it later when the
13309 * device is brought up.
13310 */
13311 tg3_set_mtu(dev, tp, new_mtu);
13312 return 0;
13313 }
13314
13315 tg3_phy_stop(tp);
13316
13317 tg3_netif_stop(tp);
13318
13319 tg3_full_lock(tp, 1);
13320
13321 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13322
13323 tg3_set_mtu(dev, tp, new_mtu);
13324
13325 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13326 * breaks all requests to 256 bytes.
13327 */
13328 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13329 reset_phy = 1;
13330
13331 err = tg3_restart_hw(tp, reset_phy);
13332
13333 if (!err)
13334 tg3_netif_start(tp);
13335
13336 tg3_full_unlock(tp);
13337
13338 if (!err)
13339 tg3_phy_start(tp);
13340
13341 return err;
13342 }
13343
13344 static const struct net_device_ops tg3_netdev_ops = {
13345 .ndo_open = tg3_open,
13346 .ndo_stop = tg3_close,
13347 .ndo_start_xmit = tg3_start_xmit,
13348 .ndo_get_stats64 = tg3_get_stats64,
13349 .ndo_validate_addr = eth_validate_addr,
13350 .ndo_set_rx_mode = tg3_set_rx_mode,
13351 .ndo_set_mac_address = tg3_set_mac_addr,
13352 .ndo_do_ioctl = tg3_ioctl,
13353 .ndo_tx_timeout = tg3_tx_timeout,
13354 .ndo_change_mtu = tg3_change_mtu,
13355 .ndo_fix_features = tg3_fix_features,
13356 .ndo_set_features = tg3_set_features,
13357 #ifdef CONFIG_NET_POLL_CONTROLLER
13358 .ndo_poll_controller = tg3_poll_controller,
13359 #endif
13360 };
13361
13362 static void tg3_get_eeprom_size(struct tg3 *tp)
13363 {
13364 u32 cursize, val, magic;
13365
13366 tp->nvram_size = EEPROM_CHIP_SIZE;
13367
13368 if (tg3_nvram_read(tp, 0, &magic) != 0)
13369 return;
13370
13371 if ((magic != TG3_EEPROM_MAGIC) &&
13372 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13373 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13374 return;
13375
13376 /*
13377 * Size the chip by reading offsets at increasing powers of two.
13378 * When we encounter our validation signature, we know the addressing
13379 * has wrapped around, and thus have our chip size.
13380 */
13381 cursize = 0x10;
13382
13383 while (cursize < tp->nvram_size) {
13384 if (tg3_nvram_read(tp, cursize, &val) != 0)
13385 return;
13386
13387 if (val == magic)
13388 break;
13389
13390 cursize <<= 1;
13391 }
13392
13393 tp->nvram_size = cursize;
13394 }
13395
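/* NVRAM size detection: images without the standard magic are sized
 * by the wrap-around probe in tg3_get_eeprom_size(); standard images
 * keep a size-in-KB field in the 16-bit word at offset 0xf2, with
 * zero meaning the default 512 KB.
 */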
13396 static void tg3_get_nvram_size(struct tg3 *tp)
13397 {
13398 u32 val;
13399
13400 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13401 return;
13402
13403 /* Selfboot format */
13404 if (val != TG3_EEPROM_MAGIC) {
13405 tg3_get_eeprom_size(tp);
13406 return;
13407 }
13408
13409 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13410 if (val != 0) {
13411 /* This is confusing. We want to operate on the
13412 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13413 * call will read from NVRAM and byteswap the data
13414 * according to the byteswapping settings for all
13415 * other register accesses. This ensures the data we
13416 * want will always reside in the lower 16-bits.
13417 * However, the data in NVRAM is in LE format, which
13418 * means the data from the NVRAM read will always be
13419 * opposite the endianness of the CPU. The 16-bit
13420 * byteswap then brings the data to CPU endianness.
13421 */
13422 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13423 return;
13424 }
13425 }
13426 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13427 }
13428
13429 static void tg3_get_nvram_info(struct tg3 *tp)
13430 {
13431 u32 nvcfg1;
13432
13433 nvcfg1 = tr32(NVRAM_CFG1);
13434 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13435 tg3_flag_set(tp, FLASH);
13436 } else {
13437 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13438 tw32(NVRAM_CFG1, nvcfg1);
13439 }
13440
13441 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13442 tg3_flag(tp, 5780_CLASS)) {
13443 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13444 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13445 tp->nvram_jedecnum = JEDEC_ATMEL;
13446 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13447 tg3_flag_set(tp, NVRAM_BUFFERED);
13448 break;
13449 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13450 tp->nvram_jedecnum = JEDEC_ATMEL;
13451 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13452 break;
13453 case FLASH_VENDOR_ATMEL_EEPROM:
13454 tp->nvram_jedecnum = JEDEC_ATMEL;
13455 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13456 tg3_flag_set(tp, NVRAM_BUFFERED);
13457 break;
13458 case FLASH_VENDOR_ST:
13459 tp->nvram_jedecnum = JEDEC_ST;
13460 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13461 tg3_flag_set(tp, NVRAM_BUFFERED);
13462 break;
13463 case FLASH_VENDOR_SAIFUN:
13464 tp->nvram_jedecnum = JEDEC_SAIFUN;
13465 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13466 break;
13467 case FLASH_VENDOR_SST_SMALL:
13468 case FLASH_VENDOR_SST_LARGE:
13469 tp->nvram_jedecnum = JEDEC_SST;
13470 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13471 break;
13472 }
13473 } else {
13474 tp->nvram_jedecnum = JEDEC_ATMEL;
13475 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13476 tg3_flag_set(tp, NVRAM_BUFFERED);
13477 }
13478 }
13479
13480 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13481 {
13482 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13483 case FLASH_5752PAGE_SIZE_256:
13484 tp->nvram_pagesize = 256;
13485 break;
13486 case FLASH_5752PAGE_SIZE_512:
13487 tp->nvram_pagesize = 512;
13488 break;
13489 case FLASH_5752PAGE_SIZE_1K:
13490 tp->nvram_pagesize = 1024;
13491 break;
13492 case FLASH_5752PAGE_SIZE_2K:
13493 tp->nvram_pagesize = 2048;
13494 break;
13495 case FLASH_5752PAGE_SIZE_4K:
13496 tp->nvram_pagesize = 4096;
13497 break;
13498 case FLASH_5752PAGE_SIZE_264:
13499 tp->nvram_pagesize = 264;
13500 break;
13501 case FLASH_5752PAGE_SIZE_528:
13502 tp->nvram_pagesize = 528;
13503 break;
13504 }
13505 }
13506
13507 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13508 {
13509 u32 nvcfg1;
13510
13511 nvcfg1 = tr32(NVRAM_CFG1);
13512
13513 /* NVRAM protection for TPM */
13514 if (nvcfg1 & (1 << 27))
13515 tg3_flag_set(tp, PROTECTED_NVRAM);
13516
13517 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13518 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13519 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13520 tp->nvram_jedecnum = JEDEC_ATMEL;
13521 tg3_flag_set(tp, NVRAM_BUFFERED);
13522 break;
13523 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13524 tp->nvram_jedecnum = JEDEC_ATMEL;
13525 tg3_flag_set(tp, NVRAM_BUFFERED);
13526 tg3_flag_set(tp, FLASH);
13527 break;
13528 case FLASH_5752VENDOR_ST_M45PE10:
13529 case FLASH_5752VENDOR_ST_M45PE20:
13530 case FLASH_5752VENDOR_ST_M45PE40:
13531 tp->nvram_jedecnum = JEDEC_ST;
13532 tg3_flag_set(tp, NVRAM_BUFFERED);
13533 tg3_flag_set(tp, FLASH);
13534 break;
13535 }
13536
13537 if (tg3_flag(tp, FLASH)) {
13538 tg3_nvram_get_pagesize(tp, nvcfg1);
13539 } else {
13540 /* For EEPROMs, set the page size to the maximum EEPROM size */
13541 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13542
13543 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13544 tw32(NVRAM_CFG1, nvcfg1);
13545 }
13546 }
13547
13548 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13549 {
13550 u32 nvcfg1, protect = 0;
13551
13552 nvcfg1 = tr32(NVRAM_CFG1);
13553
13554 /* NVRAM protection for TPM */
13555 if (nvcfg1 & (1 << 27)) {
13556 tg3_flag_set(tp, PROTECTED_NVRAM);
13557 protect = 1;
13558 }
13559
13560 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13561 switch (nvcfg1) {
13562 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13563 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13564 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13565 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13566 tp->nvram_jedecnum = JEDEC_ATMEL;
13567 tg3_flag_set(tp, NVRAM_BUFFERED);
13568 tg3_flag_set(tp, FLASH);
13569 tp->nvram_pagesize = 264;
13570 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13571 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13572 tp->nvram_size = (protect ? 0x3e200 :
13573 TG3_NVRAM_SIZE_512KB);
13574 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13575 tp->nvram_size = (protect ? 0x1f200 :
13576 TG3_NVRAM_SIZE_256KB);
13577 else
13578 tp->nvram_size = (protect ? 0x1f200 :
13579 TG3_NVRAM_SIZE_128KB);
13580 break;
13581 case FLASH_5752VENDOR_ST_M45PE10:
13582 case FLASH_5752VENDOR_ST_M45PE20:
13583 case FLASH_5752VENDOR_ST_M45PE40:
13584 tp->nvram_jedecnum = JEDEC_ST;
13585 tg3_flag_set(tp, NVRAM_BUFFERED);
13586 tg3_flag_set(tp, FLASH);
13587 tp->nvram_pagesize = 256;
13588 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13589 tp->nvram_size = (protect ?
13590 TG3_NVRAM_SIZE_64KB :
13591 TG3_NVRAM_SIZE_128KB);
13592 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13593 tp->nvram_size = (protect ?
13594 TG3_NVRAM_SIZE_64KB :
13595 TG3_NVRAM_SIZE_256KB);
13596 else
13597 tp->nvram_size = (protect ?
13598 TG3_NVRAM_SIZE_128KB :
13599 TG3_NVRAM_SIZE_512KB);
13600 break;
13601 }
13602 }
13603
13604 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13605 {
13606 u32 nvcfg1;
13607
13608 nvcfg1 = tr32(NVRAM_CFG1);
13609
13610 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13611 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13612 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13613 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13614 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13615 tp->nvram_jedecnum = JEDEC_ATMEL;
13616 tg3_flag_set(tp, NVRAM_BUFFERED);
13617 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13618
13619 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13620 tw32(NVRAM_CFG1, nvcfg1);
13621 break;
13622 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13623 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13624 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13625 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13626 tp->nvram_jedecnum = JEDEC_ATMEL;
13627 tg3_flag_set(tp, NVRAM_BUFFERED);
13628 tg3_flag_set(tp, FLASH);
13629 tp->nvram_pagesize = 264;
13630 break;
13631 case FLASH_5752VENDOR_ST_M45PE10:
13632 case FLASH_5752VENDOR_ST_M45PE20:
13633 case FLASH_5752VENDOR_ST_M45PE40:
13634 tp->nvram_jedecnum = JEDEC_ST;
13635 tg3_flag_set(tp, NVRAM_BUFFERED);
13636 tg3_flag_set(tp, FLASH);
13637 tp->nvram_pagesize = 256;
13638 break;
13639 }
13640 }
13641
13642 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13643 {
13644 u32 nvcfg1, protect = 0;
13645
13646 nvcfg1 = tr32(NVRAM_CFG1);
13647
13648 /* NVRAM protection for TPM */
13649 if (nvcfg1 & (1 << 27)) {
13650 tg3_flag_set(tp, PROTECTED_NVRAM);
13651 protect = 1;
13652 }
13653
13654 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13655 switch (nvcfg1) {
13656 case FLASH_5761VENDOR_ATMEL_ADB021D:
13657 case FLASH_5761VENDOR_ATMEL_ADB041D:
13658 case FLASH_5761VENDOR_ATMEL_ADB081D:
13659 case FLASH_5761VENDOR_ATMEL_ADB161D:
13660 case FLASH_5761VENDOR_ATMEL_MDB021D:
13661 case FLASH_5761VENDOR_ATMEL_MDB041D:
13662 case FLASH_5761VENDOR_ATMEL_MDB081D:
13663 case FLASH_5761VENDOR_ATMEL_MDB161D:
13664 tp->nvram_jedecnum = JEDEC_ATMEL;
13665 tg3_flag_set(tp, NVRAM_BUFFERED);
13666 tg3_flag_set(tp, FLASH);
13667 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13668 tp->nvram_pagesize = 256;
13669 break;
13670 case FLASH_5761VENDOR_ST_A_M45PE20:
13671 case FLASH_5761VENDOR_ST_A_M45PE40:
13672 case FLASH_5761VENDOR_ST_A_M45PE80:
13673 case FLASH_5761VENDOR_ST_A_M45PE16:
13674 case FLASH_5761VENDOR_ST_M_M45PE20:
13675 case FLASH_5761VENDOR_ST_M_M45PE40:
13676 case FLASH_5761VENDOR_ST_M_M45PE80:
13677 case FLASH_5761VENDOR_ST_M_M45PE16:
13678 tp->nvram_jedecnum = JEDEC_ST;
13679 tg3_flag_set(tp, NVRAM_BUFFERED);
13680 tg3_flag_set(tp, FLASH);
13681 tp->nvram_pagesize = 256;
13682 break;
13683 }
13684
13685 if (protect) {
13686 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13687 } else {
13688 switch (nvcfg1) {
13689 case FLASH_5761VENDOR_ATMEL_ADB161D:
13690 case FLASH_5761VENDOR_ATMEL_MDB161D:
13691 case FLASH_5761VENDOR_ST_A_M45PE16:
13692 case FLASH_5761VENDOR_ST_M_M45PE16:
13693 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13694 break;
13695 case FLASH_5761VENDOR_ATMEL_ADB081D:
13696 case FLASH_5761VENDOR_ATMEL_MDB081D:
13697 case FLASH_5761VENDOR_ST_A_M45PE80:
13698 case FLASH_5761VENDOR_ST_M_M45PE80:
13699 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13700 break;
13701 case FLASH_5761VENDOR_ATMEL_ADB041D:
13702 case FLASH_5761VENDOR_ATMEL_MDB041D:
13703 case FLASH_5761VENDOR_ST_A_M45PE40:
13704 case FLASH_5761VENDOR_ST_M_M45PE40:
13705 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13706 break;
13707 case FLASH_5761VENDOR_ATMEL_ADB021D:
13708 case FLASH_5761VENDOR_ATMEL_MDB021D:
13709 case FLASH_5761VENDOR_ST_A_M45PE20:
13710 case FLASH_5761VENDOR_ST_M_M45PE20:
13711 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13712 break;
13713 }
13714 }
13715 }
13716
13717 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13718 {
13719 tp->nvram_jedecnum = JEDEC_ATMEL;
13720 tg3_flag_set(tp, NVRAM_BUFFERED);
13721 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13722 }
13723
13724 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13725 {
13726 u32 nvcfg1;
13727
13728 nvcfg1 = tr32(NVRAM_CFG1);
13729
13730 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13731 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13732 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13733 tp->nvram_jedecnum = JEDEC_ATMEL;
13734 tg3_flag_set(tp, NVRAM_BUFFERED);
13735 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13736
13737 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13738 tw32(NVRAM_CFG1, nvcfg1);
13739 return;
13740 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13741 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13742 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13743 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13744 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13745 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13746 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13747 tp->nvram_jedecnum = JEDEC_ATMEL;
13748 tg3_flag_set(tp, NVRAM_BUFFERED);
13749 tg3_flag_set(tp, FLASH);
13750
13751 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13752 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13753 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13754 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13755 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13756 break;
13757 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13758 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13759 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13760 break;
13761 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13762 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13763 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13764 break;
13765 }
13766 break;
13767 case FLASH_5752VENDOR_ST_M45PE10:
13768 case FLASH_5752VENDOR_ST_M45PE20:
13769 case FLASH_5752VENDOR_ST_M45PE40:
13770 tp->nvram_jedecnum = JEDEC_ST;
13771 tg3_flag_set(tp, NVRAM_BUFFERED);
13772 tg3_flag_set(tp, FLASH);
13773
13774 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13775 case FLASH_5752VENDOR_ST_M45PE10:
13776 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13777 break;
13778 case FLASH_5752VENDOR_ST_M45PE20:
13779 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13780 break;
13781 case FLASH_5752VENDOR_ST_M45PE40:
13782 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13783 break;
13784 }
13785 break;
13786 default:
13787 tg3_flag_set(tp, NO_NVRAM);
13788 return;
13789 }
13790
13791 tg3_nvram_get_pagesize(tp, nvcfg1);
13792 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13793 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13794 }
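/* For context: the 264- and 528-byte page sizes tested above are the
 * native, non-power-of-two page sizes of Atmel AT45DB DataFlash parts,
 * which require an extra page-address translation step. Any other page
 * size indicates a conventionally addressed part, so NO_NVRAM_ADDR_TRANS
 * makes the driver treat NVRAM offsets as plain linear addresses.
 */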
13795
13796
13797 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13798 {
13799 u32 nvcfg1;
13800
13801 nvcfg1 = tr32(NVRAM_CFG1);
13802
13803 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13804 case FLASH_5717VENDOR_ATMEL_EEPROM:
13805 case FLASH_5717VENDOR_MICRO_EEPROM:
13806 tp->nvram_jedecnum = JEDEC_ATMEL;
13807 tg3_flag_set(tp, NVRAM_BUFFERED);
13808 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13809
13810 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13811 tw32(NVRAM_CFG1, nvcfg1);
13812 return;
13813 case FLASH_5717VENDOR_ATMEL_MDB011D:
13814 case FLASH_5717VENDOR_ATMEL_ADB011B:
13815 case FLASH_5717VENDOR_ATMEL_ADB011D:
13816 case FLASH_5717VENDOR_ATMEL_MDB021D:
13817 case FLASH_5717VENDOR_ATMEL_ADB021B:
13818 case FLASH_5717VENDOR_ATMEL_ADB021D:
13819 case FLASH_5717VENDOR_ATMEL_45USPT:
13820 tp->nvram_jedecnum = JEDEC_ATMEL;
13821 tg3_flag_set(tp, NVRAM_BUFFERED);
13822 tg3_flag_set(tp, FLASH);
13823
13824 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13825 case FLASH_5717VENDOR_ATMEL_MDB021D:
13826 /* Detect size with tg3_nvram_get_size() */
13827 break;
13828 case FLASH_5717VENDOR_ATMEL_ADB021B:
13829 case FLASH_5717VENDOR_ATMEL_ADB021D:
13830 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13831 break;
13832 default:
13833 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13834 break;
13835 }
13836 break;
13837 case FLASH_5717VENDOR_ST_M_M25PE10:
13838 case FLASH_5717VENDOR_ST_A_M25PE10:
13839 case FLASH_5717VENDOR_ST_M_M45PE10:
13840 case FLASH_5717VENDOR_ST_A_M45PE10:
13841 case FLASH_5717VENDOR_ST_M_M25PE20:
13842 case FLASH_5717VENDOR_ST_A_M25PE20:
13843 case FLASH_5717VENDOR_ST_M_M45PE20:
13844 case FLASH_5717VENDOR_ST_A_M45PE20:
13845 case FLASH_5717VENDOR_ST_25USPT:
13846 case FLASH_5717VENDOR_ST_45USPT:
13847 tp->nvram_jedecnum = JEDEC_ST;
13848 tg3_flag_set(tp, NVRAM_BUFFERED);
13849 tg3_flag_set(tp, FLASH);
13850
13851 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13852 case FLASH_5717VENDOR_ST_M_M25PE20:
13853 case FLASH_5717VENDOR_ST_M_M45PE20:
13854 /* Detect size with tg3_nvram_get_size() */
13855 break;
13856 case FLASH_5717VENDOR_ST_A_M25PE20:
13857 case FLASH_5717VENDOR_ST_A_M45PE20:
13858 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13859 break;
13860 default:
13861 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13862 break;
13863 }
13864 break;
13865 default:
13866 tg3_flag_set(tp, NO_NVRAM);
13867 return;
13868 }
13869
13870 tg3_nvram_get_pagesize(tp, nvcfg1);
13871 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13872 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13873 }
13874
13875 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13876 {
13877 u32 nvcfg1, nvmpinstrp;
13878
13879 nvcfg1 = tr32(NVRAM_CFG1);
13880 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13881
13882 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13883 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13884 tg3_flag_set(tp, NO_NVRAM);
13885 return;
13886 }
13887
13888 switch (nvmpinstrp) {
13889 case FLASH_5762_EEPROM_HD:
13890 nvmpinstrp = FLASH_5720_EEPROM_HD;
13891 break;
13892 case FLASH_5762_EEPROM_LD:
13893 nvmpinstrp = FLASH_5720_EEPROM_LD;
13894 break;
13895 }
13896 }
13897
13898 switch (nvmpinstrp) {
13899 case FLASH_5720_EEPROM_HD:
13900 case FLASH_5720_EEPROM_LD:
13901 tp->nvram_jedecnum = JEDEC_ATMEL;
13902 tg3_flag_set(tp, NVRAM_BUFFERED);
13903
13904 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13905 tw32(NVRAM_CFG1, nvcfg1);
13906 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13907 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13908 else
13909 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13910 return;
13911 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13912 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13913 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13914 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13915 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13916 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13917 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13918 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13919 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13920 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13921 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13922 case FLASH_5720VENDOR_ATMEL_45USPT:
13923 tp->nvram_jedecnum = JEDEC_ATMEL;
13924 tg3_flag_set(tp, NVRAM_BUFFERED);
13925 tg3_flag_set(tp, FLASH);
13926
13927 switch (nvmpinstrp) {
13928 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13929 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13930 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13931 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13932 break;
13933 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13934 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13935 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13936 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13937 break;
13938 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13939 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13940 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13941 break;
13942 default:
13943 if (tg3_asic_rev(tp) != ASIC_REV_5762)
13944 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13945 break;
13946 }
13947 break;
13948 case FLASH_5720VENDOR_M_ST_M25PE10:
13949 case FLASH_5720VENDOR_M_ST_M45PE10:
13950 case FLASH_5720VENDOR_A_ST_M25PE10:
13951 case FLASH_5720VENDOR_A_ST_M45PE10:
13952 case FLASH_5720VENDOR_M_ST_M25PE20:
13953 case FLASH_5720VENDOR_M_ST_M45PE20:
13954 case FLASH_5720VENDOR_A_ST_M25PE20:
13955 case FLASH_5720VENDOR_A_ST_M45PE20:
13956 case FLASH_5720VENDOR_M_ST_M25PE40:
13957 case FLASH_5720VENDOR_M_ST_M45PE40:
13958 case FLASH_5720VENDOR_A_ST_M25PE40:
13959 case FLASH_5720VENDOR_A_ST_M45PE40:
13960 case FLASH_5720VENDOR_M_ST_M25PE80:
13961 case FLASH_5720VENDOR_M_ST_M45PE80:
13962 case FLASH_5720VENDOR_A_ST_M25PE80:
13963 case FLASH_5720VENDOR_A_ST_M45PE80:
13964 case FLASH_5720VENDOR_ST_25USPT:
13965 case FLASH_5720VENDOR_ST_45USPT:
13966 tp->nvram_jedecnum = JEDEC_ST;
13967 tg3_flag_set(tp, NVRAM_BUFFERED);
13968 tg3_flag_set(tp, FLASH);
13969
13970 switch (nvmpinstrp) {
13971 case FLASH_5720VENDOR_M_ST_M25PE20:
13972 case FLASH_5720VENDOR_M_ST_M45PE20:
13973 case FLASH_5720VENDOR_A_ST_M25PE20:
13974 case FLASH_5720VENDOR_A_ST_M45PE20:
13975 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13976 break;
13977 case FLASH_5720VENDOR_M_ST_M25PE40:
13978 case FLASH_5720VENDOR_M_ST_M45PE40:
13979 case FLASH_5720VENDOR_A_ST_M25PE40:
13980 case FLASH_5720VENDOR_A_ST_M45PE40:
13981 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13982 break;
13983 case FLASH_5720VENDOR_M_ST_M25PE80:
13984 case FLASH_5720VENDOR_M_ST_M45PE80:
13985 case FLASH_5720VENDOR_A_ST_M25PE80:
13986 case FLASH_5720VENDOR_A_ST_M45PE80:
13987 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13988 break;
13989 default:
13990 if (tg3_asic_rev(tp) != ASIC_REV_5762)
13991 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13992 break;
13993 }
13994 break;
13995 default:
13996 tg3_flag_set(tp, NO_NVRAM);
13997 return;
13998 }
13999
14000 tg3_nvram_get_pagesize(tp, nvcfg1);
14001 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14002 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14003
14004 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14005 u32 val;
14006
14007 if (tg3_nvram_read(tp, 0, &val))
14008 return;
14009
14010 if (val != TG3_EEPROM_MAGIC &&
14011 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14012 tg3_flag_set(tp, NO_NVRAM);
14013 }
14014 }
14015
14016 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14017 static void tg3_nvram_init(struct tg3 *tp)
14018 {
14019 if (tg3_flag(tp, IS_SSB_CORE)) {
14020 		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14021 tg3_flag_clear(tp, NVRAM);
14022 tg3_flag_clear(tp, NVRAM_BUFFERED);
14023 tg3_flag_set(tp, NO_NVRAM);
14024 return;
14025 }
14026
14027 tw32_f(GRC_EEPROM_ADDR,
14028 (EEPROM_ADDR_FSM_RESET |
14029 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14030 EEPROM_ADDR_CLKPERD_SHIFT)));
14031
14032 msleep(1);
14033
14034 /* Enable seeprom accesses. */
14035 tw32_f(GRC_LOCAL_CTRL,
14036 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14037 udelay(100);
14038
14039 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14040 tg3_asic_rev(tp) != ASIC_REV_5701) {
14041 tg3_flag_set(tp, NVRAM);
14042
14043 if (tg3_nvram_lock(tp)) {
14044 netdev_warn(tp->dev,
14045 "Cannot get nvram lock, %s failed\n",
14046 __func__);
14047 return;
14048 }
14049 tg3_enable_nvram_access(tp);
14050
14051 tp->nvram_size = 0;
14052
14053 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14054 tg3_get_5752_nvram_info(tp);
14055 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14056 tg3_get_5755_nvram_info(tp);
14057 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14058 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14059 tg3_asic_rev(tp) == ASIC_REV_5785)
14060 tg3_get_5787_nvram_info(tp);
14061 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14062 tg3_get_5761_nvram_info(tp);
14063 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14064 tg3_get_5906_nvram_info(tp);
14065 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14066 tg3_flag(tp, 57765_CLASS))
14067 tg3_get_57780_nvram_info(tp);
14068 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14069 tg3_asic_rev(tp) == ASIC_REV_5719)
14070 tg3_get_5717_nvram_info(tp);
14071 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14072 tg3_asic_rev(tp) == ASIC_REV_5762)
14073 tg3_get_5720_nvram_info(tp);
14074 else
14075 tg3_get_nvram_info(tp);
14076
14077 if (tp->nvram_size == 0)
14078 tg3_get_nvram_size(tp);
14079
14080 tg3_disable_nvram_access(tp);
14081 tg3_nvram_unlock(tp);
14082
14083 } else {
14084 tg3_flag_clear(tp, NVRAM);
14085 tg3_flag_clear(tp, NVRAM_BUFFERED);
14086
14087 tg3_get_eeprom_size(tp);
14088 }
14089 }
14090
14091 struct subsys_tbl_ent {
14092 u16 subsys_vendor, subsys_devid;
14093 u32 phy_id;
14094 };
14095
14096 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14097 /* Broadcom boards. */
14098 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14099 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14100 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14101 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14102 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14103 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14104 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14105 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14106 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14107 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14108 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14109 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14110 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14111 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14112 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14113 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14114 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14115 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14116 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14117 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14118 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14119 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14120
14121 /* 3com boards. */
14122 { TG3PCI_SUBVENDOR_ID_3COM,
14123 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14124 { TG3PCI_SUBVENDOR_ID_3COM,
14125 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14126 { TG3PCI_SUBVENDOR_ID_3COM,
14127 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14128 { TG3PCI_SUBVENDOR_ID_3COM,
14129 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14130 { TG3PCI_SUBVENDOR_ID_3COM,
14131 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14132
14133 /* DELL boards. */
14134 { TG3PCI_SUBVENDOR_ID_DELL,
14135 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14136 { TG3PCI_SUBVENDOR_ID_DELL,
14137 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14138 { TG3PCI_SUBVENDOR_ID_DELL,
14139 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14140 { TG3PCI_SUBVENDOR_ID_DELL,
14141 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14142
14143 /* Compaq boards. */
14144 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14145 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14146 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14147 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14148 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14149 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14150 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14151 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14152 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14153 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14154
14155 /* IBM boards. */
14156 { TG3PCI_SUBVENDOR_ID_IBM,
14157 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14158 };
14159
14160 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14161 {
14162 int i;
14163
14164 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14165 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14166 tp->pdev->subsystem_vendor) &&
14167 (subsys_id_to_phy_id[i].subsys_devid ==
14168 tp->pdev->subsystem_device))
14169 return &subsys_id_to_phy_id[i];
14170 }
14171 return NULL;
14172 }
14173
14174 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14175 {
14176 u32 val;
14177
14178 tp->phy_id = TG3_PHY_ID_INVALID;
14179 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14180
14181 	/* Assume an onboard device that is WOL-capable by default. */
14182 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14183 tg3_flag_set(tp, WOL_CAP);
14184
14185 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14186 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14187 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14188 tg3_flag_set(tp, IS_NIC);
14189 }
14190 val = tr32(VCPU_CFGSHDW);
14191 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14192 tg3_flag_set(tp, ASPM_WORKAROUND);
14193 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14194 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14195 tg3_flag_set(tp, WOL_ENABLE);
14196 device_set_wakeup_enable(&tp->pdev->dev, true);
14197 }
14198 goto done;
14199 }
14200
14201 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14202 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14203 u32 nic_cfg, led_cfg;
14204 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14205 int eeprom_phy_serdes = 0;
14206
14207 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14208 tp->nic_sram_data_cfg = nic_cfg;
14209
14210 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14211 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14212 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14213 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14214 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14215 (ver > 0) && (ver < 0x100))
14216 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14217
14218 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14219 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14220
14221 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14222 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14223 eeprom_phy_serdes = 1;
14224
14225 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14226 if (nic_phy_id != 0) {
14227 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14228 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14229
14230 eeprom_phy_id = (id1 >> 16) << 10;
14231 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14232 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14233 } else
14234 eeprom_phy_id = 0;
14235
14236 tp->phy_id = eeprom_phy_id;
14237 if (eeprom_phy_serdes) {
14238 if (!tg3_flag(tp, 5705_PLUS))
14239 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14240 else
14241 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14242 }
14243
14244 if (tg3_flag(tp, 5750_PLUS))
14245 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14246 SHASTA_EXT_LED_MODE_MASK);
14247 else
14248 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14249
14250 switch (led_cfg) {
14251 default:
14252 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14253 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14254 break;
14255
14256 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14257 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14258 break;
14259
14260 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14261 tp->led_ctrl = LED_CTRL_MODE_MAC;
14262
14263 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14264 * read on some older 5700/5701 bootcode.
14265 */
14266 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14267 tg3_asic_rev(tp) == ASIC_REV_5701)
14268 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14269
14270 break;
14271
14272 case SHASTA_EXT_LED_SHARED:
14273 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14274 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14275 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14276 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14277 LED_CTRL_MODE_PHY_2);
14278 break;
14279
14280 case SHASTA_EXT_LED_MAC:
14281 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14282 break;
14283
14284 case SHASTA_EXT_LED_COMBO:
14285 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14286 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14287 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14288 LED_CTRL_MODE_PHY_2);
14289 break;
14290
14291 }
14292
14293 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14294 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14295 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14296 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14297
14298 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14299 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14300
14301 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14302 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14303 if ((tp->pdev->subsystem_vendor ==
14304 PCI_VENDOR_ID_ARIMA) &&
14305 (tp->pdev->subsystem_device == 0x205a ||
14306 tp->pdev->subsystem_device == 0x2063))
14307 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14308 } else {
14309 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14310 tg3_flag_set(tp, IS_NIC);
14311 }
14312
14313 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14314 tg3_flag_set(tp, ENABLE_ASF);
14315 if (tg3_flag(tp, 5750_PLUS))
14316 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14317 }
14318
14319 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14320 tg3_flag(tp, 5750_PLUS))
14321 tg3_flag_set(tp, ENABLE_APE);
14322
14323 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14324 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14325 tg3_flag_clear(tp, WOL_CAP);
14326
14327 if (tg3_flag(tp, WOL_CAP) &&
14328 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14329 tg3_flag_set(tp, WOL_ENABLE);
14330 device_set_wakeup_enable(&tp->pdev->dev, true);
14331 }
14332
14333 if (cfg2 & (1 << 17))
14334 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14335
14336 		/* serdes signal pre-emphasis in register 0x590 is set
14337 		 * by the bootcode if bit 18 is set */
14338 if (cfg2 & (1 << 18))
14339 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14340
14341 if ((tg3_flag(tp, 57765_PLUS) ||
14342 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14343 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14344 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14345 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14346
14347 if (tg3_flag(tp, PCI_EXPRESS) &&
14348 tg3_asic_rev(tp) != ASIC_REV_5785 &&
14349 !tg3_flag(tp, 57765_PLUS)) {
14350 u32 cfg3;
14351
14352 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14353 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14354 tg3_flag_set(tp, ASPM_WORKAROUND);
14355 }
14356
14357 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14358 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14359 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14360 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14361 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14362 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14363 }
14364 done:
14365 if (tg3_flag(tp, WOL_CAP))
14366 device_set_wakeup_enable(&tp->pdev->dev,
14367 tg3_flag(tp, WOL_ENABLE));
14368 else
14369 device_set_wakeup_capable(&tp->pdev->dev, false);
14370 }
14371
14372 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14373 {
14374 int i, err;
14375 u32 val2, off = offset * 8;
14376
14377 err = tg3_nvram_lock(tp);
14378 if (err)
14379 return err;
14380
14381 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14382 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14383 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14384 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14385 udelay(10);
14386
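	/* Poll for completion: up to 100 iterations of 10 us each, about
	 * 1 ms total, matching the wait in tg3_issue_otp_command() below.
	 */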
14387 for (i = 0; i < 100; i++) {
14388 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14389 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14390 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14391 break;
14392 }
14393 udelay(10);
14394 }
14395
14396 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14397
14398 tg3_nvram_unlock(tp);
14399 if (val2 & APE_OTP_STATUS_CMD_DONE)
14400 return 0;
14401
14402 return -EBUSY;
14403 }
14404
14405 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14406 {
14407 int i;
14408 u32 val;
14409
14410 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14411 tw32(OTP_CTRL, cmd);
14412
14413 /* Wait for up to 1 ms for command to execute. */
14414 for (i = 0; i < 100; i++) {
14415 val = tr32(OTP_STATUS);
14416 if (val & OTP_STATUS_CMD_DONE)
14417 break;
14418 udelay(10);
14419 }
14420
14421 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14422 }
14423
14424 /* Read the gphy configuration from the OTP region of the chip. The gphy
14425 * configuration is a 32-bit value that straddles the alignment boundary.
14426 * We do two 32-bit reads and then shift and merge the results.
14427 */
14428 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14429 {
14430 u32 bhalf_otp, thalf_otp;
14431
14432 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14433
14434 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14435 return 0;
14436
14437 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14438
14439 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14440 return 0;
14441
14442 thalf_otp = tr32(OTP_READ_DATA);
14443
14444 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14445
14446 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14447 return 0;
14448
14449 bhalf_otp = tr32(OTP_READ_DATA);
14450
14451 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14452 }
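/* Worked example of the merge above, with illustrative values only:
 * if the read at OTP_ADDRESS_MAGIC1 returns thalf_otp = 0x11112222 and
 * the read at OTP_ADDRESS_MAGIC2 returns bhalf_otp = 0x33334444, the
 * reconstructed 32-bit gphy config is
 *
 *	((0x11112222 & 0x0000ffff) << 16) | (0x33334444 >> 16)
 *		= 0x22220000 | 0x00003333 = 0x22223333
 */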
14453
14454 static void tg3_phy_init_link_config(struct tg3 *tp)
14455 {
14456 u32 adv = ADVERTISED_Autoneg;
14457
14458 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14459 adv |= ADVERTISED_1000baseT_Half |
14460 ADVERTISED_1000baseT_Full;
14461
14462 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14463 adv |= ADVERTISED_100baseT_Half |
14464 ADVERTISED_100baseT_Full |
14465 ADVERTISED_10baseT_Half |
14466 ADVERTISED_10baseT_Full |
14467 ADVERTISED_TP;
14468 else
14469 adv |= ADVERTISED_FIBRE;
14470
14471 tp->link_config.advertising = adv;
14472 tp->link_config.speed = SPEED_UNKNOWN;
14473 tp->link_config.duplex = DUPLEX_UNKNOWN;
14474 tp->link_config.autoneg = AUTONEG_ENABLE;
14475 tp->link_config.active_speed = SPEED_UNKNOWN;
14476 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14477
14478 tp->old_link = -1;
14479 }
14480
14481 static int tg3_phy_probe(struct tg3 *tp)
14482 {
14483 u32 hw_phy_id_1, hw_phy_id_2;
14484 u32 hw_phy_id, hw_phy_id_masked;
14485 int err;
14486
14487 /* flow control autonegotiation is default behavior */
14488 tg3_flag_set(tp, PAUSE_AUTONEG);
14489 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14490
14491 if (tg3_flag(tp, ENABLE_APE)) {
14492 switch (tp->pci_fn) {
14493 case 0:
14494 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14495 break;
14496 case 1:
14497 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14498 break;
14499 case 2:
14500 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14501 break;
14502 case 3:
14503 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14504 break;
14505 }
14506 }
14507
14508 if (tg3_flag(tp, USE_PHYLIB))
14509 return tg3_phy_init(tp);
14510
14511 /* Reading the PHY ID register can conflict with ASF
14512 * firmware access to the PHY hardware.
14513 */
14514 err = 0;
14515 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14516 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14517 } else {
14518 /* Now read the physical PHY_ID from the chip and verify
14519 * that it is sane. If it doesn't look good, we fall back
14520 		 * to the hard-coded, table-based PHY_ID and, failing
14521 		 * that, to the value found in the eeprom area.
14522 */
14523 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14524 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14525
14526 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14527 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14528 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14529
14530 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14531 }
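
	/* For illustration (assuming the layout implied by the shifts
	 * above): tg3 repacks the two MII ID registers as
	 *   bits 25:10 = PHYSID1          (upper 16 OUI bits)
	 *   bits 31:26 = PHYSID2[15:10]   (remaining OUI bits)
	 *   bits  9:0  = PHYSID2[9:0]     (model and revision)
	 * so that TG3_PHY_ID_MASK can strip the revision bits when the
	 * ID is matched against the known-PHY constants below.
	 */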
14532
14533 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14534 tp->phy_id = hw_phy_id;
14535 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14536 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14537 else
14538 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14539 } else {
14540 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14541 /* Do nothing, phy ID already set up in
14542 * tg3_get_eeprom_hw_cfg().
14543 */
14544 } else {
14545 struct subsys_tbl_ent *p;
14546
14547 /* No eeprom signature? Try the hardcoded
14548 * subsys device table.
14549 */
14550 p = tg3_lookup_by_subsys(tp);
14551 if (p) {
14552 tp->phy_id = p->phy_id;
14553 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14554 				/* So far we have seen the IDs 0xbc050cd0,
14555 				 * 0xbc050f80 and 0xbc050c30 on devices
14556 				 * connected to a BCM4785, and there are
14557 				 * probably more. For now, just assume that
14558 				 * the phy is supported when it is connected
14559 				 * to an SSB core.
14560 				 */
14561 return -ENODEV;
14562 }
14563
14564 if (!tp->phy_id ||
14565 tp->phy_id == TG3_PHY_ID_BCM8002)
14566 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14567 }
14568 }
14569
14570 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14571 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14572 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14573 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14574 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14575 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14576 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14577 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14578 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14579
14580 tg3_phy_init_link_config(tp);
14581
14582 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14583 !tg3_flag(tp, ENABLE_APE) &&
14584 !tg3_flag(tp, ENABLE_ASF)) {
14585 u32 bmsr, dummy;
14586
14587 tg3_readphy(tp, MII_BMSR, &bmsr);
14588 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14589 (bmsr & BMSR_LSTATUS))
14590 goto skip_phy_reset;
14591
14592 err = tg3_phy_reset(tp);
14593 if (err)
14594 return err;
14595
14596 tg3_phy_set_wirespeed(tp);
14597
14598 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14599 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14600 tp->link_config.flowctrl);
14601
14602 tg3_writephy(tp, MII_BMCR,
14603 BMCR_ANENABLE | BMCR_ANRESTART);
14604 }
14605 }
14606
14607 skip_phy_reset:
14608 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14609 err = tg3_init_5401phy_dsp(tp);
14610 if (err)
14611 return err;
14612
14613 err = tg3_init_5401phy_dsp(tp);
14614 }
14615
14616 return err;
14617 }
14618
14619 static void tg3_read_vpd(struct tg3 *tp)
14620 {
14621 u8 *vpd_data;
14622 unsigned int block_end, rosize, len;
14623 u32 vpdlen;
14624 int j, i = 0;
14625
14626 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14627 if (!vpd_data)
14628 goto out_no_vpd;
14629
14630 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14631 if (i < 0)
14632 goto out_not_found;
14633
14634 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14635 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14636 i += PCI_VPD_LRDT_TAG_SIZE;
14637
14638 if (block_end > vpdlen)
14639 goto out_not_found;
14640
14641 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14642 PCI_VPD_RO_KEYWORD_MFR_ID);
14643 if (j > 0) {
14644 len = pci_vpd_info_field_size(&vpd_data[j]);
14645
14646 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14647 if (j + len > block_end || len != 4 ||
14648 memcmp(&vpd_data[j], "1028", 4))
14649 goto partno;
14650
14651 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14652 PCI_VPD_RO_KEYWORD_VENDOR0);
14653 if (j < 0)
14654 goto partno;
14655
14656 len = pci_vpd_info_field_size(&vpd_data[j]);
14657
14658 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14659 if (j + len > block_end)
14660 goto partno;
14661
14662 memcpy(tp->fw_ver, &vpd_data[j], len);
14663 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14664 }
14665
14666 partno:
14667 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14668 PCI_VPD_RO_KEYWORD_PARTNO);
14669 if (i < 0)
14670 goto out_not_found;
14671
14672 len = pci_vpd_info_field_size(&vpd_data[i]);
14673
14674 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14675 if (len > TG3_BPN_SIZE ||
14676 (len + i) > vpdlen)
14677 goto out_not_found;
14678
14679 memcpy(tp->board_part_number, &vpd_data[i], len);
14680
14681 out_not_found:
14682 kfree(vpd_data);
14683 if (tp->board_part_number[0])
14684 return;
14685
14686 out_no_vpd:
14687 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14688 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14689 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14690 strcpy(tp->board_part_number, "BCM5717");
14691 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14692 strcpy(tp->board_part_number, "BCM5718");
14693 else
14694 goto nomatch;
14695 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14696 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14697 strcpy(tp->board_part_number, "BCM57780");
14698 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14699 strcpy(tp->board_part_number, "BCM57760");
14700 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14701 strcpy(tp->board_part_number, "BCM57790");
14702 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14703 strcpy(tp->board_part_number, "BCM57788");
14704 else
14705 goto nomatch;
14706 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14707 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14708 strcpy(tp->board_part_number, "BCM57761");
14709 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14710 strcpy(tp->board_part_number, "BCM57765");
14711 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14712 strcpy(tp->board_part_number, "BCM57781");
14713 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14714 strcpy(tp->board_part_number, "BCM57785");
14715 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14716 strcpy(tp->board_part_number, "BCM57791");
14717 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14718 strcpy(tp->board_part_number, "BCM57795");
14719 else
14720 goto nomatch;
14721 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14722 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14723 strcpy(tp->board_part_number, "BCM57762");
14724 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14725 strcpy(tp->board_part_number, "BCM57766");
14726 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14727 strcpy(tp->board_part_number, "BCM57782");
14728 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14729 strcpy(tp->board_part_number, "BCM57786");
14730 else
14731 goto nomatch;
14732 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14733 strcpy(tp->board_part_number, "BCM95906");
14734 } else {
14735 nomatch:
14736 strcpy(tp->board_part_number, "none");
14737 }
14738 }
14739
14740 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14741 {
14742 u32 val;
14743
14744 if (tg3_nvram_read(tp, offset, &val) ||
14745 (val & 0xfc000000) != 0x0c000000 ||
14746 tg3_nvram_read(tp, offset + 4, &val) ||
14747 val != 0)
14748 return 0;
14749
14750 return 1;
14751 }
14752
14753 static void tg3_read_bc_ver(struct tg3 *tp)
14754 {
14755 u32 val, offset, start, ver_offset;
14756 int i, dst_off;
14757 bool newver = false;
14758
14759 if (tg3_nvram_read(tp, 0xc, &offset) ||
14760 tg3_nvram_read(tp, 0x4, &start))
14761 return;
14762
14763 offset = tg3_nvram_logical_addr(tp, offset);
14764
14765 if (tg3_nvram_read(tp, offset, &val))
14766 return;
14767
14768 if ((val & 0xfc000000) == 0x0c000000) {
14769 if (tg3_nvram_read(tp, offset + 4, &val))
14770 return;
14771
14772 if (val == 0)
14773 newver = true;
14774 }
14775
14776 dst_off = strlen(tp->fw_ver);
14777
14778 if (newver) {
14779 if (TG3_VER_SIZE - dst_off < 16 ||
14780 tg3_nvram_read(tp, offset + 8, &ver_offset))
14781 return;
14782
14783 offset = offset + ver_offset - start;
14784 for (i = 0; i < 16; i += 4) {
14785 __be32 v;
14786 if (tg3_nvram_read_be32(tp, offset + i, &v))
14787 return;
14788
14789 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14790 }
14791 } else {
14792 u32 major, minor;
14793
14794 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14795 return;
14796
14797 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14798 TG3_NVM_BCVER_MAJSFT;
14799 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14800 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14801 "v%d.%02d", major, minor);
14802 }
14803 }
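/* For illustration: on the legacy (non-newver) path above, major 3 and
 * minor 4 append the string "v3.04" at dst_off; the newver path instead
 * copies 16 raw bytes of version text straight out of NVRAM.
 */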
14804
14805 static void tg3_read_hwsb_ver(struct tg3 *tp)
14806 {
14807 u32 val, major, minor;
14808
14809 /* Use native endian representation */
14810 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14811 return;
14812
14813 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14814 TG3_NVM_HWSB_CFG1_MAJSFT;
14815 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14816 TG3_NVM_HWSB_CFG1_MINSFT;
14817
14818 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14819 }
14820
14821 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14822 {
14823 u32 offset, major, minor, build;
14824
14825 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14826
14827 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14828 return;
14829
14830 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14831 case TG3_EEPROM_SB_REVISION_0:
14832 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14833 break;
14834 case TG3_EEPROM_SB_REVISION_2:
14835 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14836 break;
14837 case TG3_EEPROM_SB_REVISION_3:
14838 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14839 break;
14840 case TG3_EEPROM_SB_REVISION_4:
14841 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14842 break;
14843 case TG3_EEPROM_SB_REVISION_5:
14844 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14845 break;
14846 case TG3_EEPROM_SB_REVISION_6:
14847 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14848 break;
14849 default:
14850 return;
14851 }
14852
14853 if (tg3_nvram_read(tp, offset, &val))
14854 return;
14855
14856 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14857 TG3_EEPROM_SB_EDH_BLD_SHFT;
14858 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14859 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14860 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14861
14862 if (minor > 99 || build > 26)
14863 return;
14864
14865 offset = strlen(tp->fw_ver);
14866 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14867 " v%d.%02d", major, minor);
14868
14869 if (build > 0) {
14870 offset = strlen(tp->fw_ver);
14871 if (offset < TG3_VER_SIZE - 1)
14872 tp->fw_ver[offset] = 'a' + build - 1;
14873 }
14874 }
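/* For illustration: a non-zero build of 1..26 is rendered as a trailing
 * letter 'a' + build - 1, which is why builds above 26 were rejected
 * earlier; e.g. major 1, minor 10, build 2 yields " v1.10b".
 */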
14875
14876 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14877 {
14878 u32 val, offset, start;
14879 int i, vlen;
14880
14881 for (offset = TG3_NVM_DIR_START;
14882 offset < TG3_NVM_DIR_END;
14883 offset += TG3_NVM_DIRENT_SIZE) {
14884 if (tg3_nvram_read(tp, offset, &val))
14885 return;
14886
14887 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14888 break;
14889 }
14890
14891 if (offset == TG3_NVM_DIR_END)
14892 return;
14893
14894 if (!tg3_flag(tp, 5705_PLUS))
14895 start = 0x08000000;
14896 else if (tg3_nvram_read(tp, offset - 4, &start))
14897 return;
14898
14899 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14900 !tg3_fw_img_is_valid(tp, offset) ||
14901 tg3_nvram_read(tp, offset + 8, &val))
14902 return;
14903
14904 offset += val - start;
14905
14906 vlen = strlen(tp->fw_ver);
14907
14908 tp->fw_ver[vlen++] = ',';
14909 tp->fw_ver[vlen++] = ' ';
14910
14911 for (i = 0; i < 4; i++) {
14912 __be32 v;
14913 if (tg3_nvram_read_be32(tp, offset, &v))
14914 return;
14915
14916 offset += sizeof(v);
14917
14918 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14919 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14920 break;
14921 }
14922
14923 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14924 vlen += sizeof(v);
14925 }
14926 }
14927
14928 static void tg3_probe_ncsi(struct tg3 *tp)
14929 {
14930 u32 apedata;
14931
14932 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14933 if (apedata != APE_SEG_SIG_MAGIC)
14934 return;
14935
14936 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14937 if (!(apedata & APE_FW_STATUS_READY))
14938 return;
14939
14940 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14941 tg3_flag_set(tp, APE_HAS_NCSI);
14942 }
14943
14944 static void tg3_read_dash_ver(struct tg3 *tp)
14945 {
14946 int vlen;
14947 u32 apedata;
14948 char *fwtype;
14949
14950 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14951
14952 if (tg3_flag(tp, APE_HAS_NCSI))
14953 fwtype = "NCSI";
14954 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14955 fwtype = "SMASH";
14956 else
14957 fwtype = "DASH";
14958
14959 vlen = strlen(tp->fw_ver);
14960
14961 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14962 fwtype,
14963 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14964 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14965 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14966 (apedata & APE_FW_VERSION_BLDMSK));
14967 }
14968
14969 static void tg3_read_otp_ver(struct tg3 *tp)
14970 {
14971 u32 val, val2;
14972
14973 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14974 return;
14975
14976 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14977 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14978 TG3_OTP_MAGIC0_VALID(val)) {
14979 u64 val64 = (u64) val << 32 | val2;
14980 u32 ver = 0;
14981 int i, vlen;
14982
14983 for (i = 0; i < 7; i++) {
14984 if ((val64 & 0xff) == 0)
14985 break;
14986 ver = val64 & 0xff;
14987 val64 >>= 8;
14988 }
14989 vlen = strlen(tp->fw_ver);
14990 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14991 }
14992 }
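/* Worked example for the byte scan above (illustrative value): if the
 * low bytes of val64 are ... 00 03 02 01, the loop takes ver = 0x01,
 * then 0x02, then 0x03, and stops at the zero byte, so " .03" is
 * appended to the version string.
 */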
14993
14994 static void tg3_read_fw_ver(struct tg3 *tp)
14995 {
14996 u32 val;
14997 bool vpd_vers = false;
14998
14999 if (tp->fw_ver[0] != 0)
15000 vpd_vers = true;
15001
15002 if (tg3_flag(tp, NO_NVRAM)) {
15003 strcat(tp->fw_ver, "sb");
15004 tg3_read_otp_ver(tp);
15005 return;
15006 }
15007
15008 if (tg3_nvram_read(tp, 0, &val))
15009 return;
15010
15011 if (val == TG3_EEPROM_MAGIC)
15012 tg3_read_bc_ver(tp);
15013 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15014 tg3_read_sb_ver(tp, val);
15015 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15016 tg3_read_hwsb_ver(tp);
15017
15018 if (tg3_flag(tp, ENABLE_ASF)) {
15019 if (tg3_flag(tp, ENABLE_APE)) {
15020 tg3_probe_ncsi(tp);
15021 if (!vpd_vers)
15022 tg3_read_dash_ver(tp);
15023 } else if (!vpd_vers) {
15024 tg3_read_mgmtfw_ver(tp);
15025 }
15026 }
15027
15028 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15029 }
15030
15031 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15032 {
15033 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15034 return TG3_RX_RET_MAX_SIZE_5717;
15035 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15036 return TG3_RX_RET_MAX_SIZE_5700;
15037 else
15038 return TG3_RX_RET_MAX_SIZE_5705;
15039 }
15040
15041 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15042 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15043 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15044 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15045 { },
15046 };
15047
15048 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15049 {
15050 struct pci_dev *peer;
15051 unsigned int func, devnr = tp->pdev->devfn & ~7;
15052
15053 for (func = 0; func < 8; func++) {
15054 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15055 if (peer && peer != tp->pdev)
15056 break;
15057 pci_dev_put(peer);
15058 }
15059 	/* The 5704 can be configured in single-port mode; set peer
15060 	 * to tp->pdev in that case.
15061 */
15062 if (!peer) {
15063 peer = tp->pdev;
15064 return peer;
15065 }
15066
15067 /*
15068 * We don't need to keep the refcount elevated; there's no way
15069 	 * to remove one half of this device without removing the other.
15070 */
15071 pci_dev_put(peer);
15072
15073 return peer;
15074 }
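/* For reference: PCI encodes devfn as (slot << 3) | function, so the
 * (devfn & ~7) mask above pins the slot while the loop probes all
 * eight possible functions in it, stopping at the first device that
 * is not tp->pdev itself.
 */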
15075
15076 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15077 {
15078 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15079 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15080 u32 reg;
15081
15082 /* All devices that use the alternate
15083 * ASIC REV location have a CPMU.
15084 */
15085 tg3_flag_set(tp, CPMU_PRESENT);
15086
15087 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15088 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15089 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15090 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15093 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15094 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15095 reg = TG3PCI_GEN2_PRODID_ASICREV;
15096 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15098 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15099 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15100 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15101 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15103 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15104 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15105 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15106 reg = TG3PCI_GEN15_PRODID_ASICREV;
15107 else
15108 reg = TG3PCI_PRODID_ASICREV;
15109
15110 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15111 }
15112
15113 /* Wrong chip ID in 5752 A0. This code can be removed later
15114 * as A0 is not in production.
15115 */
15116 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15117 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15118
15119 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15120 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15121
15122 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15123 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15124 tg3_asic_rev(tp) == ASIC_REV_5720)
15125 tg3_flag_set(tp, 5717_PLUS);
15126
15127 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15128 tg3_asic_rev(tp) == ASIC_REV_57766)
15129 tg3_flag_set(tp, 57765_CLASS);
15130
15131 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15132 tg3_asic_rev(tp) == ASIC_REV_5762)
15133 tg3_flag_set(tp, 57765_PLUS);
15134
15135 /* Intentionally exclude ASIC_REV_5906 */
15136 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15137 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15138 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15139 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15140 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15141 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15142 tg3_flag(tp, 57765_PLUS))
15143 tg3_flag_set(tp, 5755_PLUS);
15144
15145 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15146 tg3_asic_rev(tp) == ASIC_REV_5714)
15147 tg3_flag_set(tp, 5780_CLASS);
15148
15149 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15150 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15151 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15152 tg3_flag(tp, 5755_PLUS) ||
15153 tg3_flag(tp, 5780_CLASS))
15154 tg3_flag_set(tp, 5750_PLUS);
15155
15156 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15157 tg3_flag(tp, 5750_PLUS))
15158 tg3_flag_set(tp, 5705_PLUS);
15159 }
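/* For illustration, assuming the accessor definitions in tg3.h
 * (tg3_asic_rev() == pci_chip_rev_id >> 12 and tg3_chip_rev() ==
 * pci_chip_rev_id >> 8): a product ID register value of 0x05720000
 * decodes to ASIC rev 0x5720 (ASIC_REV_5720) and chip rev 0x57200.
 */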
15160
15161 static bool tg3_10_100_only_device(struct tg3 *tp,
15162 const struct pci_device_id *ent)
15163 {
15164 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15165
15166 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15167 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15168 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15169 return true;
15170
15171 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15172 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15173 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15174 return true;
15175 } else {
15176 return true;
15177 }
15178 }
15179
15180 return false;
15181 }
15182
15183 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15184 {
15185 u32 misc_ctrl_reg;
15186 u32 pci_state_reg, grc_misc_cfg;
15187 u32 val;
15188 u16 pci_cmd;
15189 int err;
15190
15191 /* Force memory write invalidate off. If we leave it on,
15192 * then on 5700_BX chips we have to enable a workaround.
15193 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15194 	 * to match the cacheline size. The Broadcom driver has this
15195 	 * workaround but always turns MWI off, so it never actually
15196 	 * uses it. This seems to suggest that the workaround is insufficient.
15197 */
15198 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15199 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15200 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15201
15202 /* Important! -- Make sure register accesses are byteswapped
15203 * correctly. Also, for those chips that require it, make
15204 * sure that indirect register accesses are enabled before
15205 * the first operation.
15206 */
15207 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15208 &misc_ctrl_reg);
15209 tp->misc_host_ctrl |= (misc_ctrl_reg &
15210 MISC_HOST_CTRL_CHIPREV);
15211 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15212 tp->misc_host_ctrl);
15213
15214 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15215
15216 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15217 * we need to disable memory and use config. cycles
15218 * only to access all registers. The 5702/03 chips
15219 * can mistakenly decode the special cycles from the
15220 * ICH chipsets as memory write cycles, causing corruption
15221 * of register and memory space. Only certain ICH bridges
15222 * will drive special cycles with non-zero data during the
15223 * address phase which can fall within the 5703's address
15224 * range. This is not an ICH bug as the PCI spec allows
15225 * non-zero address during special cycles. However, only
15226 * these ICH bridges are known to drive non-zero addresses
15227 * during special cycles.
15228 *
15229 * Since special cycles do not cross PCI bridges, we only
15230 * enable this workaround if the 5703 is on the secondary
15231 * bus of these ICH bridges.
15232 */
15233 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15234 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15235 static struct tg3_dev_id {
15236 u32 vendor;
15237 u32 device;
15238 u32 rev;
15239 } ich_chipsets[] = {
15240 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15241 PCI_ANY_ID },
15242 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15243 PCI_ANY_ID },
15244 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15245 0xa },
15246 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15247 PCI_ANY_ID },
15248 { },
15249 };
15250 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15251 struct pci_dev *bridge = NULL;
15252
15253 while (pci_id->vendor != 0) {
15254 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15255 bridge);
15256 if (!bridge) {
15257 pci_id++;
15258 continue;
15259 }
15260 if (pci_id->rev != PCI_ANY_ID) {
15261 if (bridge->revision > pci_id->rev)
15262 continue;
15263 }
15264 if (bridge->subordinate &&
15265 (bridge->subordinate->number ==
15266 tp->pdev->bus->number)) {
15267 tg3_flag_set(tp, ICH_WORKAROUND);
15268 pci_dev_put(bridge);
15269 break;
15270 }
15271 }
15272 }
15273
15274 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15275 static struct tg3_dev_id {
15276 u32 vendor;
15277 u32 device;
15278 } bridge_chipsets[] = {
15279 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15280 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15281 { },
15282 };
15283 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15284 struct pci_dev *bridge = NULL;
15285
15286 while (pci_id->vendor != 0) {
15287 bridge = pci_get_device(pci_id->vendor,
15288 pci_id->device,
15289 bridge);
15290 if (!bridge) {
15291 pci_id++;
15292 continue;
15293 }
15294 if (bridge->subordinate &&
15295 (bridge->subordinate->number <=
15296 tp->pdev->bus->number) &&
15297 (bridge->subordinate->busn_res.end >=
15298 tp->pdev->bus->number)) {
15299 tg3_flag_set(tp, 5701_DMA_BUG);
15300 pci_dev_put(bridge);
15301 break;
15302 }
15303 }
15304 }
15305
15306 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15307 	 * DMA addresses > 40-bit. This bridge may have additional
15308 	 * 57xx devices behind it in some 4-port NIC designs, for example.
15309 * Any tg3 device found behind the bridge will also need the 40-bit
15310 * DMA workaround.
15311 */
15312 if (tg3_flag(tp, 5780_CLASS)) {
15313 tg3_flag_set(tp, 40BIT_DMA_BUG);
15314 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15315 } else {
15316 struct pci_dev *bridge = NULL;
15317
15318 do {
15319 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15320 PCI_DEVICE_ID_SERVERWORKS_EPB,
15321 bridge);
15322 if (bridge && bridge->subordinate &&
15323 (bridge->subordinate->number <=
15324 tp->pdev->bus->number) &&
15325 (bridge->subordinate->busn_res.end >=
15326 tp->pdev->bus->number)) {
15327 tg3_flag_set(tp, 40BIT_DMA_BUG);
15328 pci_dev_put(bridge);
15329 break;
15330 }
15331 } while (bridge);
15332 }
15333
15334 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15335 tg3_asic_rev(tp) == ASIC_REV_5714)
15336 tp->pdev_peer = tg3_find_peer(tp);
15337
15338 /* Determine TSO capabilities */
15339 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15340 ; /* Do nothing. HW bug. */
15341 else if (tg3_flag(tp, 57765_PLUS))
15342 tg3_flag_set(tp, HW_TSO_3);
15343 else if (tg3_flag(tp, 5755_PLUS) ||
15344 tg3_asic_rev(tp) == ASIC_REV_5906)
15345 tg3_flag_set(tp, HW_TSO_2);
15346 else if (tg3_flag(tp, 5750_PLUS)) {
15347 tg3_flag_set(tp, HW_TSO_1);
15348 tg3_flag_set(tp, TSO_BUG);
15349 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15350 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15351 tg3_flag_clear(tp, TSO_BUG);
15352 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15353 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15354 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15355 tg3_flag_set(tp, FW_TSO);
15356 tg3_flag_set(tp, TSO_BUG);
15357 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15358 tp->fw_needed = FIRMWARE_TG3TSO5;
15359 else
15360 tp->fw_needed = FIRMWARE_TG3TSO;
15361 }
15362
15363 /* Selectively allow TSO based on operating conditions */
15364 if (tg3_flag(tp, HW_TSO_1) ||
15365 tg3_flag(tp, HW_TSO_2) ||
15366 tg3_flag(tp, HW_TSO_3) ||
15367 tg3_flag(tp, FW_TSO)) {
15368 /* For firmware TSO, assume ASF is disabled.
15369 * We'll disable TSO later if we discover ASF
15370 * is enabled in tg3_get_eeprom_hw_cfg().
15371 */
15372 tg3_flag_set(tp, TSO_CAPABLE);
15373 } else {
15374 tg3_flag_clear(tp, TSO_CAPABLE);
15375 tg3_flag_clear(tp, TSO_BUG);
15376 tp->fw_needed = NULL;
15377 }
15378
15379 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15380 tp->fw_needed = FIRMWARE_TG3;
15381
15382 tp->irq_max = 1;
15383
15384 if (tg3_flag(tp, 5750_PLUS)) {
15385 tg3_flag_set(tp, SUPPORT_MSI);
15386 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15387 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15388 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15389 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15390 tp->pdev_peer == tp->pdev))
15391 tg3_flag_clear(tp, SUPPORT_MSI);
15392
15393 if (tg3_flag(tp, 5755_PLUS) ||
15394 tg3_asic_rev(tp) == ASIC_REV_5906) {
15395 tg3_flag_set(tp, 1SHOT_MSI);
15396 }
15397
15398 if (tg3_flag(tp, 57765_PLUS)) {
15399 tg3_flag_set(tp, SUPPORT_MSIX);
15400 tp->irq_max = TG3_IRQ_MAX_VECS;
15401 }
15402 }
15403
15404 tp->txq_max = 1;
15405 tp->rxq_max = 1;
15406 if (tp->irq_max > 1) {
15407 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15408 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15409
15410 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15411 tg3_asic_rev(tp) == ASIC_REV_5720)
15412 tp->txq_max = tp->irq_max - 1;
15413 }
15414
15415 if (tg3_flag(tp, 5755_PLUS) ||
15416 tg3_asic_rev(tp) == ASIC_REV_5906)
15417 tg3_flag_set(tp, SHORT_DMA_BUG);
15418
15419 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15420 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15421
15422 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15423 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15424 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15425 tg3_asic_rev(tp) == ASIC_REV_5762)
15426 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15427
15428 if (tg3_flag(tp, 57765_PLUS) &&
15429 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15430 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15431
15432 if (!tg3_flag(tp, 5705_PLUS) ||
15433 tg3_flag(tp, 5780_CLASS) ||
15434 tg3_flag(tp, USE_JUMBO_BDFLAG))
15435 tg3_flag_set(tp, JUMBO_CAPABLE);
15436
15437 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15438 &pci_state_reg);
15439
15440 if (pci_is_pcie(tp->pdev)) {
15441 u16 lnkctl;
15442
15443 tg3_flag_set(tp, PCI_EXPRESS);
15444
15445 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15446 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15447 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15448 tg3_flag_clear(tp, HW_TSO_2);
15449 tg3_flag_clear(tp, TSO_CAPABLE);
15450 }
15451 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15452 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15453 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15454 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15455 tg3_flag_set(tp, CLKREQ_BUG);
15456 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15457 tg3_flag_set(tp, L1PLLPD_EN);
15458 }
15459 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15460 /* BCM5785 devices are effectively PCIe devices, and should
15461 * follow PCIe codepaths, but do not have a PCIe capabilities
15462 * section.
15463 */
15464 tg3_flag_set(tp, PCI_EXPRESS);
15465 } else if (!tg3_flag(tp, 5705_PLUS) ||
15466 tg3_flag(tp, 5780_CLASS)) {
15467 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15468 if (!tp->pcix_cap) {
15469 dev_err(&tp->pdev->dev,
15470 "Cannot find PCI-X capability, aborting\n");
15471 return -EIO;
15472 }
15473
15474 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15475 tg3_flag_set(tp, PCIX_MODE);
15476 }
15477
15478 /* If we have an AMD 762 or VIA K8T800 chipset, write
15479 * reordering to the mailbox registers done by the host
15480 	 * controller can cause major problems. We read back from
15481 * every mailbox register write to force the writes to be
15482 * posted to the chip in order.
15483 */
15484 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15485 !tg3_flag(tp, PCI_EXPRESS))
15486 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15487
15488 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15489 &tp->pci_cacheline_sz);
15490 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15491 &tp->pci_lat_timer);
15492 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15493 tp->pci_lat_timer < 64) {
15494 tp->pci_lat_timer = 64;
15495 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15496 tp->pci_lat_timer);
15497 }
15498
15499 /* Important! -- It is critical that the PCI-X hw workaround
15500 * situation is decided before the first MMIO register access.
15501 */
15502 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15503 /* 5700 BX chips need to have their TX producer index
15504 		 * mailboxes written twice to work around a bug.
15505 */
15506 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15507
15508 /* If we are in PCI-X mode, enable register write workaround.
15509 *
15510 * The workaround is to use indirect register accesses
15511 * for all chip writes not to mailbox registers.
15512 */
15513 if (tg3_flag(tp, PCIX_MODE)) {
15514 u32 pm_reg;
15515
15516 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15517
15518 /* The chip can have its power management PCI config
15519 * space registers clobbered due to this bug.
15520 * So explicitly force the chip into D0 here.
15521 */
15522 pci_read_config_dword(tp->pdev,
15523 tp->pm_cap + PCI_PM_CTRL,
15524 &pm_reg);
15525 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15526 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15527 pci_write_config_dword(tp->pdev,
15528 tp->pm_cap + PCI_PM_CTRL,
15529 pm_reg);
15530
15531 /* Also, force SERR#/PERR# in PCI command. */
15532 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15533 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15534 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15535 }
15536 }
15537
15538 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15539 tg3_flag_set(tp, PCI_HIGH_SPEED);
15540 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15541 tg3_flag_set(tp, PCI_32BIT);
15542
15543 /* Chip-specific fixup from Broadcom driver */
15544 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15545 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15546 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15547 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15548 }
15549
15550 /* Default fast path register access methods */
15551 tp->read32 = tg3_read32;
15552 tp->write32 = tg3_write32;
15553 tp->read32_mbox = tg3_read32;
15554 tp->write32_mbox = tg3_write32;
15555 tp->write32_tx_mbox = tg3_write32;
15556 tp->write32_rx_mbox = tg3_write32;
15557
15558 /* Various workaround register access methods */
15559 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15560 tp->write32 = tg3_write_indirect_reg32;
15561 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15562 (tg3_flag(tp, PCI_EXPRESS) &&
15563 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15564 /*
15565 * Back to back register writes can cause problems on these
15566 * chips, the workaround is to read back all reg writes
15567 * except those to mailbox regs.
15568 *
15569 * See tg3_write_indirect_reg32().
15570 */
15571 tp->write32 = tg3_write_flush_reg32;
15572 }
15573
15574 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15575 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15576 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15577 tp->write32_rx_mbox = tg3_write_flush_reg32;
15578 }
15579
15580 if (tg3_flag(tp, ICH_WORKAROUND)) {
15581 tp->read32 = tg3_read_indirect_reg32;
15582 tp->write32 = tg3_write_indirect_reg32;
15583 tp->read32_mbox = tg3_read_indirect_mbox;
15584 tp->write32_mbox = tg3_write_indirect_mbox;
15585 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15586 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15587
15588 iounmap(tp->regs);
15589 tp->regs = NULL;
15590
15591 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15592 pci_cmd &= ~PCI_COMMAND_MEMORY;
15593 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15594 }
15595 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15596 tp->read32_mbox = tg3_read32_mbox_5906;
15597 tp->write32_mbox = tg3_write32_mbox_5906;
15598 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15599 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15600 }
15601
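/* Chips that need indirect register writes, and the 5700/5701 in
 * PCI-X mode, must also perform SRAM accesses through PCI config
 * space cycles rather than the MMIO memory window.
 */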
15602 if (tp->write32 == tg3_write_indirect_reg32 ||
15603 (tg3_flag(tp, PCIX_MODE) &&
15604 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15605 tg3_asic_rev(tp) == ASIC_REV_5701)))
15606 tg3_flag_set(tp, SRAM_USE_CONFIG);
15607
15608 /* The memory arbiter has to be enabled in order for SRAM accesses
15609 * to succeed. Normally on powerup the tg3 chip firmware will make
15610 * sure it is enabled, but other entities such as system netboot
15611 * code might disable it.
15612 */
15613 val = tr32(MEMARB_MODE);
15614 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15615
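/* Work out which PCI function this port is. The devfn value is the
 * default; 5704 and 5780-class chips in PCI-X mode report it through
 * the PCI-X status register, while 5717/5719/5720 report it via the
 * CPMU status word (read from SRAM when its signature is valid,
 * otherwise from the register).
 */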
15616 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15617 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15618 tg3_flag(tp, 5780_CLASS)) {
15619 if (tg3_flag(tp, PCIX_MODE)) {
15620 pci_read_config_dword(tp->pdev,
15621 tp->pcix_cap + PCI_X_STATUS,
15622 &val);
15623 tp->pci_fn = val & 0x7;
15624 }
15625 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15626 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15627 tg3_asic_rev(tp) == ASIC_REV_5720) {
15628 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15629 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15630 val = tr32(TG3_CPMU_STATUS);
15631
15632 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15633 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15634 else
15635 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15636 TG3_CPMU_STATUS_FSHFT_5719;
15637 }
15638
15639 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15640 tp->write32_tx_mbox = tg3_write_flush_reg32;
15641 tp->write32_rx_mbox = tg3_write_flush_reg32;
15642 }
15643
15644 /* Get eeprom hw config before calling tg3_set_power_state().
15645 * In particular, the TG3_FLAG_IS_NIC flag must be
15646 * determined before calling tg3_set_power_state() so that
15647 * we know whether or not to switch out of Vaux power.
15648 * When the flag is set, it means that GPIO1 is used for eeprom
15649 * write protect and also implies that it is a LOM where GPIOs
15650 * are not used to switch power.
15651 */
15652 tg3_get_eeprom_hw_cfg(tp);
15653
15654 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15655 tg3_flag_clear(tp, TSO_CAPABLE);
15656 tg3_flag_clear(tp, TSO_BUG);
15657 tp->fw_needed = NULL;
15658 }
15659
15660 if (tg3_flag(tp, ENABLE_APE)) {
15661 /* Allow reads and writes to the
15662 * APE register and memory space.
15663 */
15664 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15665 PCISTATE_ALLOW_APE_SHMEM_WR |
15666 PCISTATE_ALLOW_APE_PSPACE_WR;
15667 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15668 pci_state_reg);
15669
15670 tg3_ape_lock_init(tp);
15671 }
15672
15673 /* Set up tp->grc_local_ctrl before calling
15674 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15675 * will bring 5700's external PHY out of reset.
15676 * It is also used as eeprom write protect on LOMs.
15677 */
15678 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15679 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15680 tg3_flag(tp, EEPROM_WRITE_PROT))
15681 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15682 GRC_LCLCTRL_GPIO_OUTPUT1);
15683 /* Unused GPIO3 must be driven as output on 5752 because there
15684 * are no pull-up resistors on unused GPIO pins.
15685 */
15686 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15687 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15688
15689 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15690 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15691 tg3_flag(tp, 57765_CLASS))
15692 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15693
15694 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15695 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15696 /* Turn off the debug UART. */
15697 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15698 if (tg3_flag(tp, IS_NIC))
15699 /* Keep VMain power. */
15700 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15701 GRC_LCLCTRL_GPIO_OUTPUT0;
15702 }
15703
15704 if (tg3_asic_rev(tp) == ASIC_REV_5762)
15705 tp->grc_local_ctrl |=
15706 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15707
15708 /* Switch out of Vaux if it is a NIC */
15709 tg3_pwrsrc_switch_to_vmain(tp);
15710
15711 /* Derive initial jumbo mode from MTU assigned in
15712 * ether_setup() via the alloc_etherdev() call
15713 */
15714 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15715 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15716
15717 /* Determine WakeOnLan speed to use. */
15718 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15719 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15720 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15721 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15722 tg3_flag_clear(tp, WOL_SPEED_100MB);
15723 } else {
15724 tg3_flag_set(tp, WOL_SPEED_100MB);
15725 }
15726
15727 if (tg3_asic_rev(tp) == ASIC_REV_5906)
15728 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15729
15730 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15731 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15732 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15733 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15734 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15735 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15736 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15737 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15738
15739 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15740 tg3_chip_rev(tp) == CHIPREV_5704_AX)
15741 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15742 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15743 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15744
15745 if (tg3_flag(tp, 5705_PLUS) &&
15746 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15747 tg3_asic_rev(tp) != ASIC_REV_5785 &&
15748 tg3_asic_rev(tp) != ASIC_REV_57780 &&
15749 !tg3_flag(tp, 57765_PLUS)) {
15750 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15751 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15752 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15753 tg3_asic_rev(tp) == ASIC_REV_5761) {
15754 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15755 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15756 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15757 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15758 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15759 } else
15760 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15761 }
15762
15763 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15764 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15765 tp->phy_otp = tg3_read_otp_phycfg(tp);
15766 if (tp->phy_otp == 0)
15767 tp->phy_otp = TG3_OTP_DEFAULT;
15768 }
15769
15770 if (tg3_flag(tp, CPMU_PRESENT))
15771 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15772 else
15773 tp->mi_mode = MAC_MI_MODE_BASE;
15774
15775 tp->coalesce_mode = 0;
15776 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15777 tg3_chip_rev(tp) != CHIPREV_5700_BX)
15778 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15779
15780 /* Set these bits to enable statistics workaround. */
15781 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15782 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15783 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15784 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15785 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15786 }
15787
15788 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15789 tg3_asic_rev(tp) == ASIC_REV_57780)
15790 tg3_flag_set(tp, USE_PHYLIB);
15791
15792 err = tg3_mdio_init(tp);
15793 if (err)
15794 return err;
15795
15796 /* Initialize data/descriptor byte/word swapping. */
15797 val = tr32(GRC_MODE);
15798 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15799 tg3_asic_rev(tp) == ASIC_REV_5762)
15800 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15801 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15802 GRC_MODE_B2HRX_ENABLE |
15803 GRC_MODE_HTX2B_ENABLE |
15804 GRC_MODE_HOST_STACKUP);
15805 else
15806 val &= GRC_MODE_HOST_STACKUP;
15807
15808 tw32(GRC_MODE, val | tp->grc_mode);
15809
15810 tg3_switch_clocks(tp);
15811
15812 /* Clear this out for sanity. */
15813 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15814
15815 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15816 &pci_state_reg);
15817 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15818 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15819 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15820 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15821 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15822 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15823 void __iomem *sram_base;
15824
15825 /* Write some dummy words into the SRAM status block
15826 * area, see if it reads back correctly. If the return
15827 * value is bad, force enable the PCIX workaround.
15828 */
15829 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15830
15831 writel(0x00000000, sram_base);
15832 writel(0x00000000, sram_base + 4);
15833 writel(0xffffffff, sram_base + 4);
15834 if (readl(sram_base) != 0x00000000)
15835 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15836 }
15837 }
15838
15839 udelay(50);
15840 tg3_nvram_init(tp);
15841
15842 grc_misc_cfg = tr32(GRC_MISC_CFG);
15843 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15844
15845 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15846 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15847 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15848 tg3_flag_set(tp, IS_5788);
15849
15850 if (!tg3_flag(tp, IS_5788) &&
15851 tg3_asic_rev(tp) != ASIC_REV_5700)
15852 tg3_flag_set(tp, TAGGED_STATUS);
15853 if (tg3_flag(tp, TAGGED_STATUS)) {
15854 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15855 HOSTCC_MODE_CLRTICK_TXBD);
15856
15857 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15858 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15859 tp->misc_host_ctrl);
15860 }
15861
15862 /* Preserve the APE MAC_MODE bits */
15863 if (tg3_flag(tp, ENABLE_APE))
15864 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15865 else
15866 tp->mac_mode = 0;
15867
15868 if (tg3_10_100_only_device(tp, ent))
15869 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15870
15871 err = tg3_phy_probe(tp);
15872 if (err) {
15873 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15874 /* ... but do not return immediately ... */
15875 tg3_mdio_fini(tp);
15876 }
15877
15878 tg3_read_vpd(tp);
15879 tg3_read_fw_ver(tp);
15880
15881 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15882 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15883 } else {
15884 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15885 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15886 else
15887 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15888 }
15889
15890 /* 5700 {AX,BX} chips have a broken status block link
15891 * change bit implementation, so we must use the
15892 * status register in those cases.
15893 */
15894 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15895 tg3_flag_set(tp, USE_LINKCHG_REG);
15896 else
15897 tg3_flag_clear(tp, USE_LINKCHG_REG);
15898
15899 /* The led_ctrl is set during tg3_phy_probe; here we might
15900 * have to force the link status polling mechanism based
15901 * upon subsystem IDs.
15902 */
15903 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15904 tg3_asic_rev(tp) == ASIC_REV_5701 &&
15905 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15906 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15907 tg3_flag_set(tp, USE_LINKCHG_REG);
15908 }
15909
15910 /* For all SERDES we poll the MAC status register. */
15911 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15912 tg3_flag_set(tp, POLL_SERDES);
15913 else
15914 tg3_flag_clear(tp, POLL_SERDES);
15915
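/* Reserve NET_IP_ALIGN bytes so the IP header lands on a 4-byte
 * boundary. The 5701 in PCI-X mode evidently cannot DMA to such
 * unaligned offsets, so drop the offset there and, on architectures
 * without efficient unaligned access, max out the copy threshold so
 * every received packet is copied into an aligned buffer.
 */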
15916 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15917 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15918 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
15919 tg3_flag(tp, PCIX_MODE)) {
15920 tp->rx_offset = NET_SKB_PAD;
15921 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15922 tp->rx_copy_thresh = ~(u16)0;
15923 #endif
15924 }
15925
15926 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15927 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15928 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15929
15930 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15931
15932 /* Increment the rx prod index on the rx std ring by at most
15933 * 8 for these chips to work around hw errata.
15934 */
15935 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15936 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15937 tg3_asic_rev(tp) == ASIC_REV_5755)
15938 tp->rx_std_max_post = 8;
15939
15940 if (tg3_flag(tp, ASPM_WORKAROUND))
15941 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15942 PCIE_PWR_MGMT_L1_THRESH_MSK;
15943
15944 return err;
15945 }
15946
15947 #ifdef CONFIG_SPARC
15948 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15949 {
15950 struct net_device *dev = tp->dev;
15951 struct pci_dev *pdev = tp->pdev;
15952 struct device_node *dp = pci_device_to_OF_node(pdev);
15953 const unsigned char *addr;
15954 int len;
15955
15956 addr = of_get_property(dp, "local-mac-address", &len);
15957 if (addr && len == ETH_ALEN) {
15958 memcpy(dev->dev_addr, addr, ETH_ALEN);
15959 return 0;
15960 }
15961 return -ENODEV;
15962 }
15963
15964 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15965 {
15966 struct net_device *dev = tp->dev;
15967
15968 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
15969 return 0;
15970 }
15971 #endif
15972
15973 static int tg3_get_device_address(struct tg3 *tp)
15974 {
15975 struct net_device *dev = tp->dev;
15976 u32 hi, lo, mac_offset;
15977 int addr_ok = 0;
15978 int err;
15979
15980 #ifdef CONFIG_SPARC
15981 if (!tg3_get_macaddr_sparc(tp))
15982 return 0;
15983 #endif
15984
15985 if (tg3_flag(tp, IS_SSB_CORE)) {
15986 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
15987 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
15988 return 0;
15989 }
15990
15991 mac_offset = 0x7c;
15992 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15993 tg3_flag(tp, 5780_CLASS)) {
15994 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15995 mac_offset = 0xcc;
15996 if (tg3_nvram_lock(tp))
15997 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15998 else
15999 tg3_nvram_unlock(tp);
16000 } else if (tg3_flag(tp, 5717_PLUS)) {
16001 if (tp->pci_fn & 1)
16002 mac_offset = 0xcc;
16003 if (tp->pci_fn > 1)
16004 mac_offset += 0x18c;
16005 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16006 mac_offset = 0x10;
16007
16008 /* First try to get it from MAC address mailbox. */
16009 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
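/* 0x484b is ASCII "HK", evidently left in the upper 16 bits by the
 * bootcode as a signature that a valid MAC address is stored here.
 */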
16010 if ((hi >> 16) == 0x484b) {
16011 dev->dev_addr[0] = (hi >> 8) & 0xff;
16012 dev->dev_addr[1] = (hi >> 0) & 0xff;
16013
16014 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16015 dev->dev_addr[2] = (lo >> 24) & 0xff;
16016 dev->dev_addr[3] = (lo >> 16) & 0xff;
16017 dev->dev_addr[4] = (lo >> 8) & 0xff;
16018 dev->dev_addr[5] = (lo >> 0) & 0xff;
16019
16020 /* Some old bootcode may report a 0 MAC address in SRAM */
16021 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16022 }
16023 if (!addr_ok) {
16024 /* Next, try NVRAM. */
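/* The address is stored as two big-endian words: the low two
 * bytes of the first word plus all four bytes of the second.
 */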
16025 if (!tg3_flag(tp, NO_NVRAM) &&
16026 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16027 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16028 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16029 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16030 }
16031 /* Finally just fetch it out of the MAC control regs. */
16032 else {
16033 hi = tr32(MAC_ADDR_0_HIGH);
16034 lo = tr32(MAC_ADDR_0_LOW);
16035
16036 dev->dev_addr[5] = lo & 0xff;
16037 dev->dev_addr[4] = (lo >> 8) & 0xff;
16038 dev->dev_addr[3] = (lo >> 16) & 0xff;
16039 dev->dev_addr[2] = (lo >> 24) & 0xff;
16040 dev->dev_addr[1] = hi & 0xff;
16041 dev->dev_addr[0] = (hi >> 8) & 0xff;
16042 }
16043 }
16044
16045 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16046 #ifdef CONFIG_SPARC
16047 if (!tg3_get_default_macaddr_sparc(tp))
16048 return 0;
16049 #endif
16050 return -EINVAL;
16051 }
16052 return 0;
16053 }
16054
16055 #define BOUNDARY_SINGLE_CACHELINE 1
16056 #define BOUNDARY_MULTI_CACHELINE 2
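/* DMA boundary goals: stop bursts at every cache line, let them
 * span several cache lines before breaking, or 0 to leave the
 * chip's defaults untouched.
 */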
16057
16058 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16059 {
16060 int cacheline_size;
16061 u8 byte;
16062 int goal;
16063
16064 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16065 if (byte == 0)
16066 cacheline_size = 1024;
16067 else
16068 cacheline_size = (int) byte * 4;
16069
16070 /* On 5703 and later chips, the boundary bits have no
16071 * effect.
16072 */
16073 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16074 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16075 !tg3_flag(tp, PCI_EXPRESS))
16076 goto out;
16077
16078 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16079 goal = BOUNDARY_MULTI_CACHELINE;
16080 #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16081 goal = BOUNDARY_SINGLE_CACHELINE;
16082 #else
16083 goal = 0;
16084 #endif
16087
16088 if (tg3_flag(tp, 57765_PLUS)) {
16089 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16090 goto out;
16091 }
16092
16093 if (!goal)
16094 goto out;
16095
16096 /* PCI controllers on most RISC systems tend to disconnect
16097 * when a device tries to burst across a cache-line boundary.
16098 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16099 *
16100 * Unfortunately, for PCI-E there are only limited
16101 * write-side controls for this, and thus for reads
16102 * we will still get the disconnects. We'll also waste
16103 * these PCI cycles for both read and write for chips
16104 * other than 5700 and 5701 which do not implement the
16105 * boundary bits.
16106 */
16107 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16108 switch (cacheline_size) {
16109 case 16:
16110 case 32:
16111 case 64:
16112 case 128:
16113 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16114 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16115 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16116 } else {
16117 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16118 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16119 }
16120 break;
16121
16122 case 256:
16123 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16124 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16125 break;
16126
16127 default:
16128 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16129 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16130 break;
16131 }
16132 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16133 switch (cacheline_size) {
16134 case 16:
16135 case 32:
16136 case 64:
16137 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16138 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16139 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16140 break;
16141 }
16142 /* fallthrough */
16143 case 128:
16144 default:
16145 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16146 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16147 break;
16148 }
16149 } else {
16150 switch (cacheline_size) {
16151 case 16:
16152 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16153 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16154 DMA_RWCTRL_WRITE_BNDRY_16);
16155 break;
16156 }
16157 /* fallthrough */
16158 case 32:
16159 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16160 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16161 DMA_RWCTRL_WRITE_BNDRY_32);
16162 break;
16163 }
16164 /* fallthrough */
16165 case 64:
16166 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16167 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16168 DMA_RWCTRL_WRITE_BNDRY_64);
16169 break;
16170 }
16171 /* fallthrough */
16172 case 128:
16173 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16174 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16175 DMA_RWCTRL_WRITE_BNDRY_128);
16176 break;
16177 }
16178 /* fallthrough */
16179 case 256:
16180 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16181 DMA_RWCTRL_WRITE_BNDRY_256);
16182 break;
16183 case 512:
16184 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16185 DMA_RWCTRL_WRITE_BNDRY_512);
16186 break;
16187 case 1024:
16188 default:
16189 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16190 DMA_RWCTRL_WRITE_BNDRY_1024);
16191 break;
16192 }
16193 }
16194
16195 out:
16196 return val;
16197 }
16198
16199 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16200 int size, int to_device)
16201 {
16202 struct tg3_internal_buffer_desc test_desc;
16203 u32 sram_dma_descs;
16204 int i, ret;
16205
16206 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16207
16208 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16209 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16210 tw32(RDMAC_STATUS, 0);
16211 tw32(WDMAC_STATUS, 0);
16212
16213 tw32(BUFMGR_MODE, 0);
16214 tw32(FTQ_RESET, 0);
16215
16216 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16217 test_desc.addr_lo = buf_dma & 0xffffffff;
16218 test_desc.nic_mbuf = 0x00002100;
16219 test_desc.len = size;
16220
16221 /*
16222 * HP ZX1 systems saw test failures on 5701 cards running at
16223 * 33MHz the *second* time the tg3 driver was loaded after an
16224 * initial scan.
16225 *
16226 * Broadcom tells me:
16227 * ...the DMA engine is connected to the GRC block and a DMA
16228 * reset may affect the GRC block in some unpredictable way...
16229 * The behavior of resets to individual blocks has not been tested.
16230 *
16231 * Broadcom noted the GRC reset will also reset all sub-components.
16232 */
16233 if (to_device) {
16234 test_desc.cqid_sqid = (13 << 8) | 2;
16235
16236 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16237 udelay(40);
16238 } else {
16239 test_desc.cqid_sqid = (16 << 8) | 7;
16240
16241 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16242 udelay(40);
16243 }
16244 test_desc.flags = 0x00000005;
16245
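/* Copy the descriptor into chip SRAM one 32-bit word at a time
 * through the PCI memory window, then start the transfer by
 * enqueueing the descriptor address on the matching FTQ.
 */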
16246 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16247 u32 val;
16248
16249 val = *(((u32 *)&test_desc) + i);
16250 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16251 sram_dma_descs + (i * sizeof(u32)));
16252 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16253 }
16254 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16255
16256 if (to_device)
16257 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16258 else
16259 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16260
16261 ret = -ENODEV;
16262 for (i = 0; i < 40; i++) {
16263 u32 val;
16264
16265 if (to_device)
16266 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16267 else
16268 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16269 if ((val & 0xffff) == sram_dma_descs) {
16270 ret = 0;
16271 break;
16272 }
16273
16274 udelay(100);
16275 }
16276
16277 return ret;
16278 }
16279
16280 #define TEST_BUFFER_SIZE 0x2000
16281
16282 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16283 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16284 { },
16285 };
16286
16287 static int tg3_test_dma(struct tg3 *tp)
16288 {
16289 dma_addr_t buf_dma;
16290 u32 *buf, saved_dma_rwctrl;
16291 int ret = 0;
16292
16293 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16294 &buf_dma, GFP_KERNEL);
16295 if (!buf) {
16296 ret = -ENOMEM;
16297 goto out_nofree;
16298 }
16299
16300 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16301 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16302
16303 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16304
16305 if (tg3_flag(tp, 57765_PLUS))
16306 goto out;
16307
16308 if (tg3_flag(tp, PCI_EXPRESS)) {
16309 /* DMA read watermark not used on PCIE */
16310 tp->dma_rwctrl |= 0x00180000;
16311 } else if (!tg3_flag(tp, PCIX_MODE)) {
16312 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16313 tg3_asic_rev(tp) == ASIC_REV_5750)
16314 tp->dma_rwctrl |= 0x003f0000;
16315 else
16316 tp->dma_rwctrl |= 0x003f000f;
16317 } else {
16318 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16319 tg3_asic_rev(tp) == ASIC_REV_5704) {
16320 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16321 u32 read_water = 0x7;
16322
16323 /* If the 5704 is behind the EPB bridge, we can
16324 * do the less restrictive ONE_DMA workaround for
16325 * better performance.
16326 */
16327 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16328 tg3_asic_rev(tp) == ASIC_REV_5704)
16329 tp->dma_rwctrl |= 0x8000;
16330 else if (ccval == 0x6 || ccval == 0x7)
16331 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16332
16333 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16334 read_water = 4;
16335 /* Set bit 23 to enable PCIX hw bug fix */
16336 tp->dma_rwctrl |=
16337 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16338 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16339 (1 << 23);
16340 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16341 /* 5780 always in PCIX mode */
16342 tp->dma_rwctrl |= 0x00144000;
16343 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16344 /* 5714 always in PCIX mode */
16345 tp->dma_rwctrl |= 0x00148000;
16346 } else {
16347 tp->dma_rwctrl |= 0x001b000f;
16348 }
16349 }
16350 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16351 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16352
16353 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16354 tg3_asic_rev(tp) == ASIC_REV_5704)
16355 tp->dma_rwctrl &= 0xfffffff0;
16356
16357 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16358 tg3_asic_rev(tp) == ASIC_REV_5701) {
16359 /* Remove this if it causes problems for some boards. */
16360 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16361
16362 /* On 5700/5701 chips, we need to set this bit.
16363 * Otherwise the chip will issue cacheline transactions
16364 * to streamable DMA memory with not all the byte
16365 * enables turned on. This is an error on several
16366 * RISC PCI controllers, in particular sparc64.
16367 *
16368 * On 5703/5704 chips, this bit has been reassigned
16369 * a different meaning. In particular, it is used
16370 * on those chips to enable a PCI-X workaround.
16371 */
16372 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16373 }
16374
16375 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16376
16377 #if 0
16378 /* Unneeded, already done by tg3_get_invariants. */
16379 tg3_switch_clocks(tp);
16380 #endif
16381
16382 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16383 tg3_asic_rev(tp) != ASIC_REV_5701)
16384 goto out;
16385
16386 /* It is best to perform the DMA test with the maximum write burst
16387 * size to expose the 5700/5701 write DMA bug.
16388 */
16389 saved_dma_rwctrl = tp->dma_rwctrl;
16390 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16391 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16392
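/* DMA a known pattern to the chip and read it back, then verify.
 * On corruption, retry once with the conservative 16-byte write
 * boundary; a second corruption is a hard failure.
 */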
16393 while (1) {
16394 u32 *p = buf, i;
16395
16396 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16397 p[i] = i;
16398
16399 /* Send the buffer to the chip. */
16400 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16401 if (ret) {
16402 dev_err(&tp->pdev->dev,
16403 "%s: Buffer write failed. err = %d\n",
16404 __func__, ret);
16405 break;
16406 }
16407
16408 #if 0
16409 /* validate data reached card RAM correctly. */
16410 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16411 u32 val;
16412 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16413 if (le32_to_cpu(val) != p[i]) {
16414 dev_err(&tp->pdev->dev,
16415 "%s: Buffer corrupted on device! "
16416 "(%d != %d)\n", __func__, val, i);
16417 /* ret = -ENODEV here? */
16418 }
16419 p[i] = 0;
16420 }
16421 #endif
16422 /* Now read it back. */
16423 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16424 if (ret) {
16425 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16426 "err = %d\n", __func__, ret);
16427 break;
16428 }
16429
16430 /* Verify it. */
16431 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16432 if (p[i] == i)
16433 continue;
16434
16435 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16436 DMA_RWCTRL_WRITE_BNDRY_16) {
16437 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16438 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16439 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16440 break;
16441 } else {
16442 dev_err(&tp->pdev->dev,
16443 "%s: Buffer corrupted on read back! "
16444 "(%d != %d)\n", __func__, p[i], i);
16445 ret = -ENODEV;
16446 goto out;
16447 }
16448 }
16449
16450 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16451 /* Success. */
16452 ret = 0;
16453 break;
16454 }
16455 }
16456 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16457 DMA_RWCTRL_WRITE_BNDRY_16) {
16458 /* DMA test passed without adjusting DMA boundary,
16459 * now look for chipsets that are known to expose the
16460 * DMA bug without failing the test.
16461 */
16462 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16463 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16464 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16465 } else {
16466 /* Safe to use the calculated DMA boundary. */
16467 tp->dma_rwctrl = saved_dma_rwctrl;
16468 }
16469
16470 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16471 }
16472
16473 out:
16474 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16475 out_nofree:
16476 return ret;
16477 }
16478
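/* Select buffer manager watermark defaults for the chip family;
 * the jumbo variants take effect when jumbo rings are enabled.
 */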
16479 static void tg3_init_bufmgr_config(struct tg3 *tp)
16480 {
16481 if (tg3_flag(tp, 57765_PLUS)) {
16482 tp->bufmgr_config.mbuf_read_dma_low_water =
16483 DEFAULT_MB_RDMA_LOW_WATER_5705;
16484 tp->bufmgr_config.mbuf_mac_rx_low_water =
16485 DEFAULT_MB_MACRX_LOW_WATER_57765;
16486 tp->bufmgr_config.mbuf_high_water =
16487 DEFAULT_MB_HIGH_WATER_57765;
16488
16489 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16490 DEFAULT_MB_RDMA_LOW_WATER_5705;
16491 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16492 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16493 tp->bufmgr_config.mbuf_high_water_jumbo =
16494 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16495 } else if (tg3_flag(tp, 5705_PLUS)) {
16496 tp->bufmgr_config.mbuf_read_dma_low_water =
16497 DEFAULT_MB_RDMA_LOW_WATER_5705;
16498 tp->bufmgr_config.mbuf_mac_rx_low_water =
16499 DEFAULT_MB_MACRX_LOW_WATER_5705;
16500 tp->bufmgr_config.mbuf_high_water =
16501 DEFAULT_MB_HIGH_WATER_5705;
16502 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16503 tp->bufmgr_config.mbuf_mac_rx_low_water =
16504 DEFAULT_MB_MACRX_LOW_WATER_5906;
16505 tp->bufmgr_config.mbuf_high_water =
16506 DEFAULT_MB_HIGH_WATER_5906;
16507 }
16508
16509 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16510 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16511 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16512 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16513 tp->bufmgr_config.mbuf_high_water_jumbo =
16514 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16515 } else {
16516 tp->bufmgr_config.mbuf_read_dma_low_water =
16517 DEFAULT_MB_RDMA_LOW_WATER;
16518 tp->bufmgr_config.mbuf_mac_rx_low_water =
16519 DEFAULT_MB_MACRX_LOW_WATER;
16520 tp->bufmgr_config.mbuf_high_water =
16521 DEFAULT_MB_HIGH_WATER;
16522
16523 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16524 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16525 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16526 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16527 tp->bufmgr_config.mbuf_high_water_jumbo =
16528 DEFAULT_MB_HIGH_WATER_JUMBO;
16529 }
16530
16531 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16532 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16533 }
16534
16535 static char *tg3_phy_string(struct tg3 *tp)
16536 {
16537 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16538 case TG3_PHY_ID_BCM5400: return "5400";
16539 case TG3_PHY_ID_BCM5401: return "5401";
16540 case TG3_PHY_ID_BCM5411: return "5411";
16541 case TG3_PHY_ID_BCM5701: return "5701";
16542 case TG3_PHY_ID_BCM5703: return "5703";
16543 case TG3_PHY_ID_BCM5704: return "5704";
16544 case TG3_PHY_ID_BCM5705: return "5705";
16545 case TG3_PHY_ID_BCM5750: return "5750";
16546 case TG3_PHY_ID_BCM5752: return "5752";
16547 case TG3_PHY_ID_BCM5714: return "5714";
16548 case TG3_PHY_ID_BCM5780: return "5780";
16549 case TG3_PHY_ID_BCM5755: return "5755";
16550 case TG3_PHY_ID_BCM5787: return "5787";
16551 case TG3_PHY_ID_BCM5784: return "5784";
16552 case TG3_PHY_ID_BCM5756: return "5722/5756";
16553 case TG3_PHY_ID_BCM5906: return "5906";
16554 case TG3_PHY_ID_BCM5761: return "5761";
16555 case TG3_PHY_ID_BCM5718C: return "5718C";
16556 case TG3_PHY_ID_BCM5718S: return "5718S";
16557 case TG3_PHY_ID_BCM57765: return "57765";
16558 case TG3_PHY_ID_BCM5719C: return "5719C";
16559 case TG3_PHY_ID_BCM5720C: return "5720C";
16560 case TG3_PHY_ID_BCM5762: return "5762C";
16561 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16562 case 0: return "serdes";
16563 default: return "unknown";
16564 }
16565 }
16566
16567 static char *tg3_bus_string(struct tg3 *tp, char *str)
16568 {
16569 if (tg3_flag(tp, PCI_EXPRESS)) {
16570 strcpy(str, "PCI Express");
16571 return str;
16572 } else if (tg3_flag(tp, PCIX_MODE)) {
16573 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16574
16575 strcpy(str, "PCIX:");
16576
16577 if ((clock_ctrl == 7) ||
16578 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16579 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16580 strcat(str, "133MHz");
16581 else if (clock_ctrl == 0)
16582 strcat(str, "33MHz");
16583 else if (clock_ctrl == 2)
16584 strcat(str, "50MHz");
16585 else if (clock_ctrl == 4)
16586 strcat(str, "66MHz");
16587 else if (clock_ctrl == 6)
16588 strcat(str, "100MHz");
16589 } else {
16590 strcpy(str, "PCI:");
16591 if (tg3_flag(tp, PCI_HIGH_SPEED))
16592 strcat(str, "66MHz");
16593 else
16594 strcat(str, "33MHz");
16595 }
16596 if (tg3_flag(tp, PCI_32BIT))
16597 strcat(str, ":32-bit");
16598 else
16599 strcat(str, ":64-bit");
16600 return str;
16601 }
16602
16603 static void tg3_init_coal(struct tg3 *tp)
16604 {
16605 struct ethtool_coalesce *ec = &tp->coal;
16606
16607 memset(ec, 0, sizeof(*ec));
16608 ec->cmd = ETHTOOL_GCOALESCE;
16609 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16610 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16611 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16612 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16613 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16614 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16615 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16616 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16617 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16618
16619 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16620 HOSTCC_MODE_CLRTICK_TXBD)) {
16621 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16622 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16623 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16624 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16625 }
16626
16627 if (tg3_flag(tp, 5705_PLUS)) {
16628 ec->rx_coalesce_usecs_irq = 0;
16629 ec->tx_coalesce_usecs_irq = 0;
16630 ec->stats_block_coalesce_usecs = 0;
16631 }
16632 }
16633
16634 static int tg3_init_one(struct pci_dev *pdev,
16635 const struct pci_device_id *ent)
16636 {
16637 struct net_device *dev;
16638 struct tg3 *tp;
16639 int i, err, pm_cap;
16640 u32 sndmbx, rcvmbx, intmbx;
16641 char str[40];
16642 u64 dma_mask, persist_dma_mask;
16643 netdev_features_t features = 0;
16644
16645 printk_once(KERN_INFO "%s\n", version);
16646
16647 err = pci_enable_device(pdev);
16648 if (err) {
16649 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16650 return err;
16651 }
16652
16653 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16654 if (err) {
16655 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16656 goto err_out_disable_pdev;
16657 }
16658
16659 pci_set_master(pdev);
16660
16661 /* Find power-management capability. */
16662 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16663 if (pm_cap == 0) {
16664 dev_err(&pdev->dev,
16665 "Cannot find Power Management capability, aborting\n");
16666 err = -EIO;
16667 goto err_out_free_res;
16668 }
16669
16670 err = pci_set_power_state(pdev, PCI_D0);
16671 if (err) {
16672 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16673 goto err_out_free_res;
16674 }
16675
16676 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16677 if (!dev) {
16678 err = -ENOMEM;
16679 goto err_out_power_down;
16680 }
16681
16682 SET_NETDEV_DEV(dev, &pdev->dev);
16683
16684 tp = netdev_priv(dev);
16685 tp->pdev = pdev;
16686 tp->dev = dev;
16687 tp->pm_cap = pm_cap;
16688 tp->rx_mode = TG3_DEF_RX_MODE;
16689 tp->tx_mode = TG3_DEF_TX_MODE;
16690 tp->irq_sync = 1;
16691
16692 if (tg3_debug > 0)
16693 tp->msg_enable = tg3_debug;
16694 else
16695 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16696
16697 if (pdev_is_ssb_gige_core(pdev)) {
16698 tg3_flag_set(tp, IS_SSB_CORE);
16699 if (ssb_gige_must_flush_posted_writes(pdev))
16700 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16701 if (ssb_gige_one_dma_at_once(pdev))
16702 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16703 if (ssb_gige_have_roboswitch(pdev))
16704 tg3_flag_set(tp, ROBOSWITCH);
16705 if (ssb_gige_is_rgmii(pdev))
16706 tg3_flag_set(tp, RGMII_MODE);
16707 }
16708
16709 /* The word/byte swap controls here control register access byte
16710 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16711 * setting below.
16712 */
16713 tp->misc_host_ctrl =
16714 MISC_HOST_CTRL_MASK_PCI_INT |
16715 MISC_HOST_CTRL_WORD_SWAP |
16716 MISC_HOST_CTRL_INDIR_ACCESS |
16717 MISC_HOST_CTRL_PCISTATE_RW;
16718
16719 /* The NONFRM (non-frame) byte/word swap controls take effect
16720 * on descriptor entries, anything which isn't packet data.
16721 *
16722 * The StrongARM chips on the board (one for tx, one for rx)
16723 * are running in big-endian mode.
16724 */
16725 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16726 GRC_MODE_WSWAP_NONFRM_DATA);
16727 #ifdef __BIG_ENDIAN
16728 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16729 #endif
16730 spin_lock_init(&tp->lock);
16731 spin_lock_init(&tp->indirect_lock);
16732 INIT_WORK(&tp->reset_task, tg3_reset_task);
16733
16734 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16735 if (!tp->regs) {
16736 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16737 err = -ENOMEM;
16738 goto err_out_free_dev;
16739 }
16740
16741 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16742 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16743 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16744 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16745 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16746 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16747 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16748 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16749 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16750 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16751 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16752 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16753 tg3_flag_set(tp, ENABLE_APE);
16754 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16755 if (!tp->aperegs) {
16756 dev_err(&pdev->dev,
16757 "Cannot map APE registers, aborting\n");
16758 err = -ENOMEM;
16759 goto err_out_iounmap;
16760 }
16761 }
16762
16763 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16764 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16765
16766 dev->ethtool_ops = &tg3_ethtool_ops;
16767 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16768 dev->netdev_ops = &tg3_netdev_ops;
16769 dev->irq = pdev->irq;
16770
16771 err = tg3_get_invariants(tp, ent);
16772 if (err) {
16773 dev_err(&pdev->dev,
16774 "Problem fetching invariants of chip, aborting\n");
16775 goto err_out_apeunmap;
16776 }
16777
16778 /* The EPB bridge inside 5714, 5715, and 5780 and any
16779 * device behind the EPB cannot support DMA addresses > 40-bit.
16780 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16781 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16782 * do DMA address check in tg3_start_xmit().
16783 */
16784 if (tg3_flag(tp, IS_5788))
16785 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16786 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16787 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16788 #ifdef CONFIG_HIGHMEM
16789 dma_mask = DMA_BIT_MASK(64);
16790 #endif
16791 } else
16792 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16793
16794 /* Configure DMA attributes. */
16795 if (dma_mask > DMA_BIT_MASK(32)) {
16796 err = pci_set_dma_mask(pdev, dma_mask);
16797 if (!err) {
16798 features |= NETIF_F_HIGHDMA;
16799 err = pci_set_consistent_dma_mask(pdev,
16800 persist_dma_mask);
16801 if (err < 0) {
16802 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16803 "DMA for consistent allocations\n");
16804 goto err_out_apeunmap;
16805 }
16806 }
16807 }
16808 if (err || dma_mask == DMA_BIT_MASK(32)) {
16809 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16810 if (err) {
16811 dev_err(&pdev->dev,
16812 "No usable DMA configuration, aborting\n");
16813 goto err_out_apeunmap;
16814 }
16815 }
16816
16817 tg3_init_bufmgr_config(tp);
16818
16819 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16820
16821 /* 5700 B0 chips do not support checksumming correctly due
16822 * to hardware bugs.
16823 */
16824 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16825 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16826
16827 if (tg3_flag(tp, 5755_PLUS))
16828 features |= NETIF_F_IPV6_CSUM;
16829 }
16830
16831 /* TSO is on by default on chips that support hardware TSO.
16832 * Firmware TSO on older chips gives lower performance, so it
16833 * is off by default, but can be enabled using ethtool.
16834 */
16835 if ((tg3_flag(tp, HW_TSO_1) ||
16836 tg3_flag(tp, HW_TSO_2) ||
16837 tg3_flag(tp, HW_TSO_3)) &&
16838 (features & NETIF_F_IP_CSUM))
16839 features |= NETIF_F_TSO;
16840 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16841 if (features & NETIF_F_IPV6_CSUM)
16842 features |= NETIF_F_TSO6;
16843 if (tg3_flag(tp, HW_TSO_3) ||
16844 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16845 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16846 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16847 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16848 tg3_asic_rev(tp) == ASIC_REV_57780)
16849 features |= NETIF_F_TSO_ECN;
16850 }
16851
16852 dev->features |= features;
16853 dev->vlan_features |= features;
16854
16855 /*
16856 * Add loopback capability only for a subset of devices that support
16857 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16858 * loopback for the remaining devices.
16859 */
16860 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16861 !tg3_flag(tp, CPMU_PRESENT))
16862 /* Add the loopback capability */
16863 features |= NETIF_F_LOOPBACK;
16864
16865 dev->hw_features |= features;
16866
16867 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16868 !tg3_flag(tp, TSO_CAPABLE) &&
16869 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16870 tg3_flag_set(tp, MAX_RXPEND_64);
16871 tp->rx_pending = 63;
16872 }
16873
16874 err = tg3_get_device_address(tp);
16875 if (err) {
16876 dev_err(&pdev->dev,
16877 "Could not obtain valid ethernet address, aborting\n");
16878 goto err_out_apeunmap;
16879 }
16880
16881 /*
16882 * Reset the chip in case a UNDI or EFI driver did not shut it
16883 * down cleanly. The DMA self test will enable WDMAC, and we'd
16884 * see (spurious) pending DMA on the PCI bus at that point.
16885 */
16886 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16887 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16888 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16889 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16890 }
16891
16892 err = tg3_test_dma(tp);
16893 if (err) {
16894 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16895 goto err_out_apeunmap;
16896 }
16897
16898 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16899 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16900 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16901 for (i = 0; i < tp->irq_max; i++) {
16902 struct tg3_napi *tnapi = &tp->napi[i];
16903
16904 tnapi->tp = tp;
16905 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16906
16907 tnapi->int_mbox = intmbx;
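/* Interrupt mailboxes 0 through 4 are spaced 8 bytes apart;
 * mailboxes for later vectors are packed 4 bytes apart.
 */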
16908 if (i <= 4)
16909 intmbx += 0x8;
16910 else
16911 intmbx += 0x4;
16912
16913 tnapi->consmbox = rcvmbx;
16914 tnapi->prodmbox = sndmbx;
16915
16916 if (i)
16917 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16918 else
16919 tnapi->coal_now = HOSTCC_MODE_NOW;
16920
16921 if (!tg3_flag(tp, SUPPORT_MSIX))
16922 break;
16923
16924 /*
16925 * If we support MSIX, we'll be using RSS. If we're using
16926 * RSS, the first vector only handles link interrupts and the
16927 * remaining vectors handle rx and tx interrupts. Reuse the
16928 * mailbox values for the next iteration. The values we setup
16929 * above are still useful for the single vectored mode.
16930 */
16931 if (!i)
16932 continue;
16933
16934 rcvmbx += 0x8;
16935
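/* Producer mailboxes pack two 32-bit slots into each 64-bit
 * register; the +0xc/-0x4 zig-zag below visits the
 * TG3_64BIT_REG_LOW half of each register first, then its
 * high half.
 */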
16936 if (sndmbx & 0x4)
16937 sndmbx -= 0x4;
16938 else
16939 sndmbx += 0xc;
16940 }
16941
16942 tg3_init_coal(tp);
16943
16944 pci_set_drvdata(pdev, dev);
16945
16946 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16947 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16948 tg3_asic_rev(tp) == ASIC_REV_5762)
16949 tg3_flag_set(tp, PTP_CAPABLE);
16950
16951 if (tg3_flag(tp, 5717_PLUS)) {
16952 /* Resume a low-power mode */
16953 tg3_frob_aux_power(tp, false);
16954 }
16955
16956 tg3_timer_init(tp);
16957
16958 tg3_carrier_off(tp);
16959
16960 err = register_netdev(dev);
16961 if (err) {
16962 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16963 goto err_out_apeunmap;
16964 }
16965
16966 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16967 tp->board_part_number,
16968 tg3_chip_rev_id(tp),
16969 tg3_bus_string(tp, str),
16970 dev->dev_addr);
16971
16972 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16973 struct phy_device *phydev;
16974 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16975 netdev_info(dev,
16976 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16977 phydev->drv->name, dev_name(&phydev->dev));
16978 } else {
16979 char *ethtype;
16980
16981 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16982 ethtype = "10/100Base-TX";
16983 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16984 ethtype = "1000Base-SX";
16985 else
16986 ethtype = "10/100/1000Base-T";
16987
16988 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16989 "(WireSpeed[%d], EEE[%d])\n",
16990 tg3_phy_string(tp), ethtype,
16991 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16992 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16993 }
16994
16995 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16996 (dev->features & NETIF_F_RXCSUM) != 0,
16997 tg3_flag(tp, USE_LINKCHG_REG) != 0,
16998 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16999 tg3_flag(tp, ENABLE_ASF) != 0,
17000 tg3_flag(tp, TSO_CAPABLE) != 0);
17001 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17002 tp->dma_rwctrl,
17003 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17004 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17005
17006 pci_save_state(pdev);
17007
17008 return 0;
17009
17010 err_out_apeunmap:
17011 if (tp->aperegs) {
17012 iounmap(tp->aperegs);
17013 tp->aperegs = NULL;
17014 }
17015
17016 err_out_iounmap:
17017 if (tp->regs) {
17018 iounmap(tp->regs);
17019 tp->regs = NULL;
17020 }
17021
17022 err_out_free_dev:
17023 free_netdev(dev);
17024
17025 err_out_power_down:
17026 pci_set_power_state(pdev, PCI_D3hot);
17027
17028 err_out_free_res:
17029 pci_release_regions(pdev);
17030
17031 err_out_disable_pdev:
17032 pci_disable_device(pdev);
17033 pci_set_drvdata(pdev, NULL);
17034 return err;
17035 }
17036
17037 static void tg3_remove_one(struct pci_dev *pdev)
17038 {
17039 struct net_device *dev = pci_get_drvdata(pdev);
17040
17041 if (dev) {
17042 struct tg3 *tp = netdev_priv(dev);
17043
17044 release_firmware(tp->fw);
17045
17046 tg3_reset_task_cancel(tp);
17047
17048 if (tg3_flag(tp, USE_PHYLIB)) {
17049 tg3_phy_fini(tp);
17050 tg3_mdio_fini(tp);
17051 }
17052
17053 unregister_netdev(dev);
17054 if (tp->aperegs) {
17055 iounmap(tp->aperegs);
17056 tp->aperegs = NULL;
17057 }
17058 if (tp->regs) {
17059 iounmap(tp->regs);
17060 tp->regs = NULL;
17061 }
17062 free_netdev(dev);
17063 pci_release_regions(pdev);
17064 pci_disable_device(pdev);
17065 pci_set_drvdata(pdev, NULL);
17066 }
17067 }
17068
17069 #ifdef CONFIG_PM_SLEEP
17070 static int tg3_suspend(struct device *device)
17071 {
17072 struct pci_dev *pdev = to_pci_dev(device);
17073 struct net_device *dev = pci_get_drvdata(pdev);
17074 struct tg3 *tp = netdev_priv(dev);
17075 int err;
17076
17077 if (!netif_running(dev))
17078 return 0;
17079
17080 tg3_reset_task_cancel(tp);
17081 tg3_phy_stop(tp);
17082 tg3_netif_stop(tp);
17083
17084 tg3_timer_stop(tp);
17085
17086 tg3_full_lock(tp, 1);
17087 tg3_disable_ints(tp);
17088 tg3_full_unlock(tp);
17089
17090 netif_device_detach(dev);
17091
17092 tg3_full_lock(tp, 0);
17093 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17094 tg3_flag_clear(tp, INIT_COMPLETE);
17095 tg3_full_unlock(tp);
17096
17097 err = tg3_power_down_prepare(tp);
17098 if (err) {
17099 int err2;
17100
17101 tg3_full_lock(tp, 0);
17102
17103 tg3_flag_set(tp, INIT_COMPLETE);
17104 err2 = tg3_restart_hw(tp, 1);
17105 if (err2)
17106 goto out;
17107
17108 tg3_timer_start(tp);
17109
17110 netif_device_attach(dev);
17111 tg3_netif_start(tp);
17112
17113 out:
17114 tg3_full_unlock(tp);
17115
17116 if (!err2)
17117 tg3_phy_start(tp);
17118 }
17119
17120 return err;
17121 }
17122
17123 static int tg3_resume(struct device *device)
17124 {
17125 struct pci_dev *pdev = to_pci_dev(device);
17126 struct net_device *dev = pci_get_drvdata(pdev);
17127 struct tg3 *tp = netdev_priv(dev);
17128 int err;
17129
17130 if (!netif_running(dev))
17131 return 0;
17132
17133 netif_device_attach(dev);
17134
17135 tg3_full_lock(tp, 0);
17136
17137 tg3_flag_set(tp, INIT_COMPLETE);
17138 err = tg3_restart_hw(tp, 1);
17139 if (err)
17140 goto out;
17141
17142 tg3_timer_start(tp);
17143
17144 tg3_netif_start(tp);
17145
17146 out:
17147 tg3_full_unlock(tp);
17148
17149 if (!err)
17150 tg3_phy_start(tp);
17151
17152 return err;
17153 }
17154
17155 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17156 #define TG3_PM_OPS (&tg3_pm_ops)
17157
17158 #else
17159
17160 #define TG3_PM_OPS NULL
17161
17162 #endif /* CONFIG_PM_SLEEP */
17163
17164 /**
17165 * tg3_io_error_detected - called when PCI error is detected
17166 * @pdev: Pointer to PCI device
17167 * @state: The current pci connection state
17168 *
17169 * This function is called after a PCI bus error affecting
17170 * this device has been detected.
17171 */
17172 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17173 pci_channel_state_t state)
17174 {
17175 struct net_device *netdev = pci_get_drvdata(pdev);
17176 struct tg3 *tp = netdev_priv(netdev);
17177 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17178
17179 netdev_info(netdev, "PCI I/O error detected\n");
17180
17181 rtnl_lock();
17182
17183 if (!netif_running(netdev))
17184 goto done;
17185
17186 tg3_phy_stop(tp);
17187
17188 tg3_netif_stop(tp);
17189
17190 tg3_timer_stop(tp);
17191
17192 /* Want to make sure that the reset task doesn't run */
17193 tg3_reset_task_cancel(tp);
17194
17195 netif_device_detach(netdev);
17196
17197 /* Clean up software state, even if MMIO is blocked */
17198 tg3_full_lock(tp, 0);
17199 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17200 tg3_full_unlock(tp);
17201
17202 done:
17203 if (state == pci_channel_io_perm_failure)
17204 err = PCI_ERS_RESULT_DISCONNECT;
17205 else
17206 pci_disable_device(pdev);
17207
17208 rtnl_unlock();
17209
17210 return err;
17211 }
17212
17213 /**
17214 * tg3_io_slot_reset - called after the pci bus has been reset.
17215 * @pdev: Pointer to PCI device
17216 *
17217 * Restart the card from scratch, as if from a cold-boot.
17218 * At this point, the card has experienced a hard reset,
17219 * followed by fixups by BIOS, and has its config space
17220 * set up identically to what it was at cold boot.
17221 */
17222 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17223 {
17224 struct net_device *netdev = pci_get_drvdata(pdev);
17225 struct tg3 *tp = netdev_priv(netdev);
17226 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17227 int err;
17228
17229 rtnl_lock();
17230
17231 if (pci_enable_device(pdev)) {
17232 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17233 goto done;
17234 }
17235
17236 pci_set_master(pdev);
17237 pci_restore_state(pdev);
17238 pci_save_state(pdev);
17239
17240 if (!netif_running(netdev)) {
17241 rc = PCI_ERS_RESULT_RECOVERED;
17242 goto done;
17243 }
17244
17245 err = tg3_power_up(tp);
17246 if (err)
17247 goto done;
17248
17249 rc = PCI_ERS_RESULT_RECOVERED;
17250
17251 done:
17252 rtnl_unlock();
17253
17254 return rc;
17255 }
17256
17257 /**
17258 * tg3_io_resume - called when traffic can start flowing again.
17259 * @pdev: Pointer to PCI device
17260 *
17261 * This callback is called when the error recovery driver tells
17262 * us that it's OK to resume normal operation.
17263 */
17264 static void tg3_io_resume(struct pci_dev *pdev)
17265 {
17266 struct net_device *netdev = pci_get_drvdata(pdev);
17267 struct tg3 *tp = netdev_priv(netdev);
17268 int err;
17269
17270 rtnl_lock();
17271
17272 if (!netif_running(netdev))
17273 goto done;
17274
17275 tg3_full_lock(tp, 0);
17276 tg3_flag_set(tp, INIT_COMPLETE);
17277 err = tg3_restart_hw(tp, 1);
17278 if (err) {
17279 tg3_full_unlock(tp);
17280 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17281 goto done;
17282 }
17283
17284 netif_device_attach(netdev);
17285
17286 tg3_timer_start(tp);
17287
17288 tg3_netif_start(tp);
17289
17290 tg3_full_unlock(tp);
17291
17292 tg3_phy_start(tp);
17293
17294 done:
17295 rtnl_unlock();
17296 }
17297
17298 static const struct pci_error_handlers tg3_err_handler = {
17299 .error_detected = tg3_io_error_detected,
17300 .slot_reset = tg3_io_slot_reset,
17301 .resume = tg3_io_resume
17302 };
17303
17304 static struct pci_driver tg3_driver = {
17305 .name = DRV_MODULE_NAME,
17306 .id_table = tg3_pci_tbl,
17307 .probe = tg3_init_one,
17308 .remove = tg3_remove_one,
17309 .err_handler = &tg3_err_handler,
17310 .driver.pm = TG3_PM_OPS,
17311 };
17312
17313 static int __init tg3_init(void)
17314 {
17315 return pci_register_driver(&tg3_driver);
17316 }
17317
17318 static void __exit tg3_cleanup(void)
17319 {
17320 pci_unregister_driver(&tg3_driver);
17321 }
17322
17323 module_init(tg3_init);
17324 module_exit(tg3_cleanup);