/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
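
/* Illustrative example (editor's note, not from the original source):
 * the token-pasting macros above turn a short flag name into the full
 * enum value, e.g.
 *
 *	tg3_flag(tp, ENABLE_APE)
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an
 * atomic test_bit() on the device's flag bitmap.
 */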

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		131
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 09, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV | \
	 NETIF_MSG_PROBE | \
	 NETIF_MSG_LINK | \
	 NETIF_MSG_TIMER | \
	 NETIF_MSG_IFDOWN | \
	 NETIF_MSG_IFUP | \
	 NETIF_MSG_RX_ERR | \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
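
/* Illustrative example (editor's note, not from the original source):
 * because TG3_TX_RING_SIZE is a power of two, the '& (foo - 1)' form
 * mentioned above wraps the index without a divide, e.g.
 * NEXT_TX(511) == (512 & 511) == 0, back to the start of the ring.
 */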

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
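
/* Illustrative example (editor's note, not from the original source):
 * with the default tx_pending of 511 (TG3_DEF_TX_RING_PENDING), the
 * queue is woken once at least 511 / 4 = 127 descriptors are free.
 */
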
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN	2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
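
/* Illustrative note (editor's note, not from the original source):
 * tw32_f() is a write followed immediately by a read of the same
 * register, which on the posted path of _tw32_flush() above forces the
 * posted PCI write to complete; tw32_wait_f() additionally waits the
 * requested number of microseconds after the write, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * writes, flushes, and then delays 40 usec.
 */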

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

/* Acquires the MEM lock and leaves it held on success so the caller can
 * post a new event; returns -EBUSY if an event stays pending for the
 * whole timeout.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

/* Returns zero once the pending event has been serviced, nonzero if the
 * timeout expired first.
 */
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
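
/* Illustrative note (editor's note, not from the original source): the
 * MI completion polls below spin up to PHY_BUSY_LOOPS iterations with
 * udelay(10) each, i.e. a worst case of roughly 5000 * 10 us = 50 ms
 * before giving up with -EBUSY.
 */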

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
1920
1921 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1922 {
1923 u8 cap = 0;
1924
1925 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1926 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1927 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1928 if (lcladv & ADVERTISE_1000XPAUSE)
1929 cap = FLOW_CTRL_RX;
1930 if (rmtadv & ADVERTISE_1000XPAUSE)
1931 cap = FLOW_CTRL_TX;
1932 }
1933
1934 return cap;
1935 }
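
/* The helpers above implement the standard 802.3 pause resolution
 * over the 1000BASE-X PAUSE/ASYM advertisement bits. Resolution as
 * coded in tg3_resolve_flowctrl_1000X():
 *
 *	local PAUSE/ASYM   remote PAUSE/ASYM   resolved flow control
 *	     1 / x              1 / x          FLOW_CTRL_TX | FLOW_CTRL_RX
 *	     1 / 1              0 / 1          FLOW_CTRL_RX
 *	     0 / 1              1 / 1          FLOW_CTRL_TX
 *	     anything else                     none
 */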
1936
1937 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1938 {
1939 u8 autoneg;
1940 u8 flowctrl = 0;
1941 u32 old_rx_mode = tp->rx_mode;
1942 u32 old_tx_mode = tp->tx_mode;
1943
1944 if (tg3_flag(tp, USE_PHYLIB))
1945 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1946 else
1947 autoneg = tp->link_config.autoneg;
1948
1949 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1950 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1951 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1952 else
1953 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1954 } else
1955 flowctrl = tp->link_config.flowctrl;
1956
1957 tp->link_config.active_flowctrl = flowctrl;
1958
1959 if (flowctrl & FLOW_CTRL_RX)
1960 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1961 else
1962 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1963
1964 if (old_rx_mode != tp->rx_mode)
1965 tw32_f(MAC_RX_MODE, tp->rx_mode);
1966
1967 if (flowctrl & FLOW_CTRL_TX)
1968 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1969 else
1970 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1971
1972 if (old_tx_mode != tp->tx_mode)
1973 tw32_f(MAC_TX_MODE, tp->tx_mode);
1974 }
1975
1976 static void tg3_adjust_link(struct net_device *dev)
1977 {
1978 u8 oldflowctrl, linkmesg = 0;
1979 u32 mac_mode, lcl_adv, rmt_adv;
1980 struct tg3 *tp = netdev_priv(dev);
1981 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1982
1983 spin_lock_bh(&tp->lock);
1984
1985 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1986 MAC_MODE_HALF_DUPLEX);
1987
1988 oldflowctrl = tp->link_config.active_flowctrl;
1989
1990 if (phydev->link) {
1991 lcl_adv = 0;
1992 rmt_adv = 0;
1993
1994 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1995 mac_mode |= MAC_MODE_PORT_MODE_MII;
1996 else if (phydev->speed == SPEED_1000 ||
1997 tg3_asic_rev(tp) != ASIC_REV_5785)
1998 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1999 else
2000 mac_mode |= MAC_MODE_PORT_MODE_MII;
2001
2002 if (phydev->duplex == DUPLEX_HALF)
2003 mac_mode |= MAC_MODE_HALF_DUPLEX;
2004 else {
2005 lcl_adv = mii_advertise_flowctrl(
2006 tp->link_config.flowctrl);
2007
2008 if (phydev->pause)
2009 rmt_adv = LPA_PAUSE_CAP;
2010 if (phydev->asym_pause)
2011 rmt_adv |= LPA_PAUSE_ASYM;
2012 }
2013
2014 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2015 } else
2016 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2017
2018 if (mac_mode != tp->mac_mode) {
2019 tp->mac_mode = mac_mode;
2020 tw32_f(MAC_MODE, tp->mac_mode);
2021 udelay(40);
2022 }
2023
2024 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2025 if (phydev->speed == SPEED_10)
2026 tw32(MAC_MI_STAT,
2027 MAC_MI_STAT_10MBPS_MODE |
2028 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2029 else
2030 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2031 }
2032
2033 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2034 tw32(MAC_TX_LENGTHS,
2035 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2036 (6 << TX_LENGTHS_IPG_SHIFT) |
2037 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2038 else
2039 tw32(MAC_TX_LENGTHS,
2040 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2041 (6 << TX_LENGTHS_IPG_SHIFT) |
2042 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2043
2044 if (phydev->link != tp->old_link ||
2045 phydev->speed != tp->link_config.active_speed ||
2046 phydev->duplex != tp->link_config.active_duplex ||
2047 oldflowctrl != tp->link_config.active_flowctrl)
2048 linkmesg = 1;
2049
2050 tp->old_link = phydev->link;
2051 tp->link_config.active_speed = phydev->speed;
2052 tp->link_config.active_duplex = phydev->duplex;
2053
2054 spin_unlock_bh(&tp->lock);
2055
2056 if (linkmesg)
2057 tg3_link_report(tp);
2058 }
2059
2060 static int tg3_phy_init(struct tg3 *tp)
2061 {
2062 struct phy_device *phydev;
2063
2064 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2065 return 0;
2066
2067 /* Bring the PHY back to a known state. */
2068 tg3_bmcr_reset(tp);
2069
2070 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2071
2072 /* Attach the MAC to the PHY. */
2073 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2074 tg3_adjust_link, phydev->interface);
2075 if (IS_ERR(phydev)) {
2076 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2077 return PTR_ERR(phydev);
2078 }
2079
2080 /* Mask with MAC supported features. */
2081 switch (phydev->interface) {
2082 case PHY_INTERFACE_MODE_GMII:
2083 case PHY_INTERFACE_MODE_RGMII:
2084 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2085 phydev->supported &= (PHY_GBIT_FEATURES |
2086 SUPPORTED_Pause |
2087 SUPPORTED_Asym_Pause);
2088 break;
2089 }
2090 /* fallthru */
2091 case PHY_INTERFACE_MODE_MII:
2092 phydev->supported &= (PHY_BASIC_FEATURES |
2093 SUPPORTED_Pause |
2094 SUPPORTED_Asym_Pause);
2095 break;
2096 default:
2097 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2098 return -EINVAL;
2099 }
2100
2101 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2102
2103 phydev->advertising = phydev->supported;
2104
2105 return 0;
2106 }
2107
2108 static void tg3_phy_start(struct tg3 *tp)
2109 {
2110 struct phy_device *phydev;
2111
2112 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2113 return;
2114
2115 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2116
2117 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2118 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2119 phydev->speed = tp->link_config.speed;
2120 phydev->duplex = tp->link_config.duplex;
2121 phydev->autoneg = tp->link_config.autoneg;
2122 phydev->advertising = tp->link_config.advertising;
2123 }
2124
2125 phy_start(phydev);
2126
2127 phy_start_aneg(phydev);
2128 }
2129
2130 static void tg3_phy_stop(struct tg3 *tp)
2131 {
2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133 return;
2134
2135 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2136 }
2137
2138 static void tg3_phy_fini(struct tg3 *tp)
2139 {
2140 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2141 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2142 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2143 }
2144 }
2145
2146 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2147 {
2148 int err;
2149 u32 val;
2150
2151 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2152 return 0;
2153
2154 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2155 /* Cannot do read-modify-write on 5401 */
2156 err = tg3_phy_auxctl_write(tp,
2157 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2158 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2159 0x4c20);
2160 goto done;
2161 }
2162
2163 err = tg3_phy_auxctl_read(tp,
2164 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2165 if (err)
2166 return err;
2167
2168 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2169 err = tg3_phy_auxctl_write(tp,
2170 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2171
2172 done:
2173 return err;
2174 }
2175
2176 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2177 {
2178 u32 phytest;
2179
2180 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2181 u32 phy;
2182
2183 tg3_writephy(tp, MII_TG3_FET_TEST,
2184 phytest | MII_TG3_FET_SHADOW_EN);
2185 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2186 if (enable)
2187 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2188 else
2189 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2190 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2191 }
2192 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2193 }
2194 }
2195
2196 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2197 {
2198 u32 reg;
2199
2200 if (!tg3_flag(tp, 5705_PLUS) ||
2201 (tg3_flag(tp, 5717_PLUS) &&
2202 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2203 return;
2204
2205 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2206 tg3_phy_fet_toggle_apd(tp, enable);
2207 return;
2208 }
2209
2210 reg = MII_TG3_MISC_SHDW_WREN |
2211 MII_TG3_MISC_SHDW_SCR5_SEL |
2212 MII_TG3_MISC_SHDW_SCR5_LPED |
2213 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2214 MII_TG3_MISC_SHDW_SCR5_SDTL |
2215 MII_TG3_MISC_SHDW_SCR5_C125OE;
2216 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2217 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2218
2219 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2220
2221
2222 reg = MII_TG3_MISC_SHDW_WREN |
2223 MII_TG3_MISC_SHDW_APD_SEL |
2224 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2225 if (enable)
2226 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2227
2228 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2229 }
2230
2231 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2232 {
2233 u32 phy;
2234
2235 if (!tg3_flag(tp, 5705_PLUS) ||
2236 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2237 return;
2238
2239 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2240 u32 ephy;
2241
2242 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2243 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2244
2245 tg3_writephy(tp, MII_TG3_FET_TEST,
2246 ephy | MII_TG3_FET_SHADOW_EN);
2247 if (!tg3_readphy(tp, reg, &phy)) {
2248 if (enable)
2249 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2250 else
2251 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2252 tg3_writephy(tp, reg, phy);
2253 }
2254 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2255 }
2256 } else {
2257 int ret;
2258
2259 ret = tg3_phy_auxctl_read(tp,
2260 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2261 if (!ret) {
2262 if (enable)
2263 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2264 else
2265 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2266 tg3_phy_auxctl_write(tp,
2267 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2268 }
2269 }
2270 }
2271
2272 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2273 {
2274 int ret;
2275 u32 val;
2276
2277 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2278 return;
2279
2280 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2281 if (!ret)
2282 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2283 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2284 }
2285
2286 static void tg3_phy_apply_otp(struct tg3 *tp)
2287 {
2288 u32 otp, phy;
2289
2290 if (!tp->phy_otp)
2291 return;
2292
2293 otp = tp->phy_otp;
2294
2295 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2296 return;
2297
2298 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2299 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2300 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2301
2302 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2303 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2304 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2305
2306 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2307 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2308 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2309
2310 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2311 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2312
2313 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2314 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2315
2316 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2317 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2318 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2319
2320 tg3_phy_toggle_auxctl_smdsp(tp, false);
2321 }
2322
2323 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2324 {
2325 u32 val;
2326
2327 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2328 return;
2329
2330 tp->setlpicnt = 0;
2331
2332 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2333 current_link_up &&
2334 tp->link_config.active_duplex == DUPLEX_FULL &&
2335 (tp->link_config.active_speed == SPEED_100 ||
2336 tp->link_config.active_speed == SPEED_1000)) {
2337 u32 eeectl;
2338
2339 if (tp->link_config.active_speed == SPEED_1000)
2340 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2341 else
2342 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2343
2344 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2345
2346 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2347 TG3_CL45_D7_EEERES_STAT, &val);
2348
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2351 tp->setlpicnt = 2;
2352 }
2353
2354 if (!tp->setlpicnt) {
2355 if (current_link_up &&
2356 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2357 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2358 tg3_phy_toggle_auxctl_smdsp(tp, false);
2359 }
2360
2361 val = tr32(TG3_CPMU_EEE_MODE);
2362 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2363 }
2364 }
2365
2366 static void tg3_phy_eee_enable(struct tg3 *tp)
2367 {
2368 u32 val;
2369
2370 if (tp->link_config.active_speed == SPEED_1000 &&
2371 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2372 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2373 tg3_flag(tp, 57765_CLASS)) &&
2374 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2375 val = MII_TG3_DSP_TAP26_ALNOKO |
2376 MII_TG3_DSP_TAP26_RMRXSTO;
2377 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2378 tg3_phy_toggle_auxctl_smdsp(tp, false);
2379 }
2380
2381 val = tr32(TG3_CPMU_EEE_MODE);
2382 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2383 }
2384
2385 static int tg3_wait_macro_done(struct tg3 *tp)
2386 {
2387 int limit = 100;
2388
2389 while (limit--) {
2390 u32 tmp32;
2391
2392 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2393 if ((tmp32 & 0x1000) == 0)
2394 break;
2395 }
2396 }
2397 if (limit < 0)
2398 return -EBUSY;
2399
2400 return 0;
2401 }
2402
2403 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2404 {
2405 static const u32 test_pat[4][6] = {
2406 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2407 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2408 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2409 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2410 };
2411 int chan;
2412
2413 for (chan = 0; chan < 4; chan++) {
2414 int i;
2415
2416 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2417 (chan * 0x2000) | 0x0200);
2418 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2419
2420 for (i = 0; i < 6; i++)
2421 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2422 test_pat[chan][i]);
2423
2424 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2425 if (tg3_wait_macro_done(tp)) {
2426 *resetp = 1;
2427 return -EBUSY;
2428 }
2429
2430 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2431 (chan * 0x2000) | 0x0200);
2432 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2433 if (tg3_wait_macro_done(tp)) {
2434 *resetp = 1;
2435 return -EBUSY;
2436 }
2437
2438 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2439 if (tg3_wait_macro_done(tp)) {
2440 *resetp = 1;
2441 return -EBUSY;
2442 }
2443
2444 for (i = 0; i < 6; i += 2) {
2445 u32 low, high;
2446
2447 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2448 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2449 tg3_wait_macro_done(tp)) {
2450 *resetp = 1;
2451 return -EBUSY;
2452 }
2453 low &= 0x7fff;
2454 high &= 0x000f;
2455 if (low != test_pat[chan][i] ||
2456 high != test_pat[chan][i+1]) {
2457 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2458 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2459 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2460
2461 return -EBUSY;
2462 }
2463 }
2464 }
2465
2466 return 0;
2467 }
2468
2469 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2470 {
2471 int chan;
2472
2473 for (chan = 0; chan < 4; chan++) {
2474 int i;
2475
2476 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477 (chan * 0x2000) | 0x0200);
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479 for (i = 0; i < 6; i++)
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2481 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482 if (tg3_wait_macro_done(tp))
2483 return -EBUSY;
2484 }
2485
2486 return 0;
2487 }
2488
2489 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2490 {
2491 u32 reg32, phy9_orig;
2492 int retries, do_phy_reset, err;
2493
2494 retries = 10;
2495 do_phy_reset = 1;
2496 do {
2497 if (do_phy_reset) {
2498 err = tg3_bmcr_reset(tp);
2499 if (err)
2500 return err;
2501 do_phy_reset = 0;
2502 }
2503
2504 /* Disable transmitter and interrupt. */
2505 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2506 continue;
2507
2508 reg32 |= 0x3000;
2509 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2510
2511 /* Set full-duplex, 1000 Mbps. */
2512 tg3_writephy(tp, MII_BMCR,
2513 BMCR_FULLDPLX | BMCR_SPEED1000);
2514
2515 /* Set to master mode. */
2516 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2517 continue;
2518
2519 tg3_writephy(tp, MII_CTRL1000,
2520 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2521
2522 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2523 if (err)
2524 return err;
2525
2526 /* Block the PHY control access. */
2527 tg3_phydsp_write(tp, 0x8005, 0x0800);
2528
2529 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2530 if (!err)
2531 break;
2532 } while (--retries);
2533
2534 err = tg3_phy_reset_chanpat(tp);
2535 if (err)
2536 return err;
2537
2538 tg3_phydsp_write(tp, 0x8005, 0x0000);
2539
2540 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2541 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2542
2543 tg3_phy_toggle_auxctl_smdsp(tp, false);
2544
2545 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2546
2547 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2548 reg32 &= ~0x3000;
2549 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2550 } else if (!err)
2551 err = -EBUSY;
2552
2553 return err;
2554 }
2555
2556 static void tg3_carrier_off(struct tg3 *tp)
2557 {
2558 netif_carrier_off(tp->dev);
2559 tp->link_up = false;
2560 }
2561
2562 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2563 {
2564 if (tg3_flag(tp, ENABLE_ASF))
2565 netdev_warn(tp->dev,
2566 "Management side-band traffic will be interrupted during phy settings change\n");
2567 }
2568
2569 /* This will unconditionally reset the tigon3 PHY. */
2572 static int tg3_phy_reset(struct tg3 *tp)
2573 {
2574 u32 val, cpmuctrl;
2575 int err;
2576
2577 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2578 val = tr32(GRC_MISC_CFG);
2579 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2580 udelay(40);
2581 }
2582 err = tg3_readphy(tp, MII_BMSR, &val);
2583 err |= tg3_readphy(tp, MII_BMSR, &val);
2584 if (err != 0)
2585 return -EBUSY;
2586
2587 if (netif_running(tp->dev) && tp->link_up) {
2588 netif_carrier_off(tp->dev);
2589 tg3_link_report(tp);
2590 }
2591
2592 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2593 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2594 tg3_asic_rev(tp) == ASIC_REV_5705) {
2595 err = tg3_phy_reset_5703_4_5(tp);
2596 if (err)
2597 return err;
2598 goto out;
2599 }
2600
2601 cpmuctrl = 0;
2602 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2603 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2604 cpmuctrl = tr32(TG3_CPMU_CTRL);
2605 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2606 tw32(TG3_CPMU_CTRL,
2607 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2608 }
2609
2610 err = tg3_bmcr_reset(tp);
2611 if (err)
2612 return err;
2613
2614 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2615 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2616 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2617
2618 tw32(TG3_CPMU_CTRL, cpmuctrl);
2619 }
2620
2621 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2622 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2623 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2624 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2625 CPMU_LSPD_1000MB_MACCLK_12_5) {
2626 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2627 udelay(40);
2628 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2629 }
2630 }
2631
2632 if (tg3_flag(tp, 5717_PLUS) &&
2633 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2634 return 0;
2635
2636 tg3_phy_apply_otp(tp);
2637
2638 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2639 tg3_phy_toggle_apd(tp, true);
2640 else
2641 tg3_phy_toggle_apd(tp, false);
2642
2643 out:
2644 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2645 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2646 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2647 tg3_phydsp_write(tp, 0x000a, 0x0323);
2648 tg3_phy_toggle_auxctl_smdsp(tp, false);
2649 }
2650
2651 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2652 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2653 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2654 }
2655
2656 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2657 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2658 tg3_phydsp_write(tp, 0x000a, 0x310b);
2659 tg3_phydsp_write(tp, 0x201f, 0x9506);
2660 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2661 tg3_phy_toggle_auxctl_smdsp(tp, false);
2662 }
2663 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2664 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2665 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2666 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2668 tg3_writephy(tp, MII_TG3_TEST1,
2669 MII_TG3_TEST1_TRIM_EN | 0x4);
2670 } else
2671 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2672
2673 tg3_phy_toggle_auxctl_smdsp(tp, false);
2674 }
2675 }
2676
2677 /* Set the extended packet length bit (bit 14) on all chips
2678 * that support jumbo frames. */
2679 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2680 /* Cannot do read-modify-write on 5401 */
2681 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2682 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2683 /* Set bit 14 with read-modify-write to preserve other bits */
2684 err = tg3_phy_auxctl_read(tp,
2685 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2686 if (!err)
2687 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2688 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2689 }
2690
2691 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2692 * jumbo frame transmission.
2693 */
2694 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2695 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2696 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2697 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2698 }
2699
2700 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2701 /* adjust output voltage */
2702 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2703 }
2704
2705 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2706 tg3_phydsp_write(tp, 0xffb, 0x4000);
2707
2708 tg3_phy_toggle_automdix(tp, true);
2709 tg3_phy_set_wirespeed(tp);
2710 return 0;
2711 }
2712
2713 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2714 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2715 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2716 TG3_GPIO_MSG_NEED_VAUX)
2717 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2718 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2719 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2720 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2721 (TG3_GPIO_MSG_DRVR_PRES << 12))
2722
2723 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2724 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2725 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2726 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2727 (TG3_GPIO_MSG_NEED_VAUX << 12))
2728
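/* Layout sketch for the masks above: each PCI function owns one
 * 4-bit nibble of the GPIO message word, starting at
 * TG3_APE_GPIO_MSG_SHIFT. For example, function 2 reporting "driver
 * present" sets TG3_GPIO_MSG_DRVR_PRES << (TG3_APE_GPIO_MSG_SHIFT + 8),
 * which is what tg3_set_function_status() below computes via
 * shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn.
 */
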
2729 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2730 {
2731 u32 status, shift;
2732
2733 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2734 tg3_asic_rev(tp) == ASIC_REV_5719)
2735 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2736 else
2737 status = tr32(TG3_CPMU_DRV_STATUS);
2738
2739 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2740 status &= ~(TG3_GPIO_MSG_MASK << shift);
2741 status |= (newstat << shift);
2742
2743 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2744 tg3_asic_rev(tp) == ASIC_REV_5719)
2745 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2746 else
2747 tw32(TG3_CPMU_DRV_STATUS, status);
2748
2749 return status >> TG3_APE_GPIO_MSG_SHIFT;
2750 }
2751
2752 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2753 {
2754 if (!tg3_flag(tp, IS_NIC))
2755 return 0;
2756
2757 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2758 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2759 tg3_asic_rev(tp) == ASIC_REV_5720) {
2760 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2761 return -EIO;
2762
2763 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2764
2765 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2767
2768 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2769 } else {
2770 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2771 TG3_GRC_LCLCTL_PWRSW_DELAY);
2772 }
2773
2774 return 0;
2775 }
2776
2777 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2778 {
2779 u32 grc_local_ctrl;
2780
2781 if (!tg3_flag(tp, IS_NIC) ||
2782 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2783 tg3_asic_rev(tp) == ASIC_REV_5701)
2784 return;
2785
2786 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2787
2788 tw32_wait_f(GRC_LOCAL_CTRL,
2789 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2790 TG3_GRC_LCLCTL_PWRSW_DELAY);
2791
2792 tw32_wait_f(GRC_LOCAL_CTRL,
2793 grc_local_ctrl,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2795
2796 tw32_wait_f(GRC_LOCAL_CTRL,
2797 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2799 }
2800
2801 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2802 {
2803 if (!tg3_flag(tp, IS_NIC))
2804 return;
2805
2806 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5701) {
2808 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2809 (GRC_LCLCTRL_GPIO_OE0 |
2810 GRC_LCLCTRL_GPIO_OE1 |
2811 GRC_LCLCTRL_GPIO_OE2 |
2812 GRC_LCLCTRL_GPIO_OUTPUT0 |
2813 GRC_LCLCTRL_GPIO_OUTPUT1),
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2816 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2817 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2818 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT0 |
2822 GRC_LCLCTRL_GPIO_OUTPUT1 |
2823 tp->grc_local_ctrl;
2824 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2825 TG3_GRC_LCLCTL_PWRSW_DELAY);
2826
2827 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2828 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830
2831 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2832 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 } else {
2835 u32 no_gpio2;
2836 u32 grc_local_ctrl = 0;
2837
2838 /* Workaround to prevent overdrawing Amps. */
2839 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2840 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2841 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2842 grc_local_ctrl,
2843 TG3_GRC_LCLCTL_PWRSW_DELAY);
2844 }
2845
2846 /* On 5753 and variants, GPIO2 cannot be used. */
2847 no_gpio2 = tp->nic_sram_data_cfg &
2848 NIC_SRAM_DATA_CFG_NO_GPIO2;
2849
2850 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2851 GRC_LCLCTRL_GPIO_OE1 |
2852 GRC_LCLCTRL_GPIO_OE2 |
2853 GRC_LCLCTRL_GPIO_OUTPUT1 |
2854 GRC_LCLCTRL_GPIO_OUTPUT2;
2855 if (no_gpio2) {
2856 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2857 GRC_LCLCTRL_GPIO_OUTPUT2);
2858 }
2859 tw32_wait_f(GRC_LOCAL_CTRL,
2860 tp->grc_local_ctrl | grc_local_ctrl,
2861 TG3_GRC_LCLCTL_PWRSW_DELAY);
2862
2863 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2864
2865 tw32_wait_f(GRC_LOCAL_CTRL,
2866 tp->grc_local_ctrl | grc_local_ctrl,
2867 TG3_GRC_LCLCTL_PWRSW_DELAY);
2868
2869 if (!no_gpio2) {
2870 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2871 tw32_wait_f(GRC_LOCAL_CTRL,
2872 tp->grc_local_ctrl | grc_local_ctrl,
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2874 }
2875 }
2876 }
2877
2878 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2879 {
2880 u32 msg = 0;
2881
2882 /* Serialize power state transitions */
2883 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2884 return;
2885
2886 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2887 msg = TG3_GPIO_MSG_NEED_VAUX;
2888
2889 msg = tg3_set_function_status(tp, msg);
2890
2891 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2892 goto done;
2893
2894 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2895 tg3_pwrsrc_switch_to_vaux(tp);
2896 else
2897 tg3_pwrsrc_die_with_vmain(tp);
2898
2899 done:
2900 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2901 }
2902
2903 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2904 {
2905 bool need_vaux = false;
2906
2907 /* The GPIOs do something completely different on 57765. */
2908 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2909 return;
2910
2911 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2912 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2913 tg3_asic_rev(tp) == ASIC_REV_5720) {
2914 tg3_frob_aux_power_5717(tp, include_wol ?
2915 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2916 return;
2917 }
2918
2919 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2920 struct net_device *dev_peer;
2921
2922 dev_peer = pci_get_drvdata(tp->pdev_peer);
2923
2924 /* remove_one() may have been run on the peer. */
2925 if (dev_peer) {
2926 struct tg3 *tp_peer = netdev_priv(dev_peer);
2927
2928 if (tg3_flag(tp_peer, INIT_COMPLETE))
2929 return;
2930
2931 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2932 tg3_flag(tp_peer, ENABLE_ASF))
2933 need_vaux = true;
2934 }
2935 }
2936
2937 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2938 tg3_flag(tp, ENABLE_ASF))
2939 need_vaux = true;
2940
2941 if (need_vaux)
2942 tg3_pwrsrc_switch_to_vaux(tp);
2943 else
2944 tg3_pwrsrc_die_with_vmain(tp);
2945 }
2946
2947 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2948 {
2949 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2950 return 1;
2951 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2952 if (speed != SPEED_10)
2953 return 1;
2954 } else if (speed == SPEED_10)
2955 return 1;
2956
2957 return 0;
2958 }
2959
2960 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2961 {
2962 u32 val;
2963
2964 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
2965 return;
2966
2967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2968 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2969 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2970 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2971
2972 sg_dig_ctrl |=
2973 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2974 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2975 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2976 }
2977 return;
2978 }
2979
2980 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2981 tg3_bmcr_reset(tp);
2982 val = tr32(GRC_MISC_CFG);
2983 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2984 udelay(40);
2985 return;
2986 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2987 u32 phytest;
2988 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2989 u32 phy;
2990
2991 tg3_writephy(tp, MII_ADVERTISE, 0);
2992 tg3_writephy(tp, MII_BMCR,
2993 BMCR_ANENABLE | BMCR_ANRESTART);
2994
2995 tg3_writephy(tp, MII_TG3_FET_TEST,
2996 phytest | MII_TG3_FET_SHADOW_EN);
2997 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2998 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2999 tg3_writephy(tp,
3000 MII_TG3_FET_SHDW_AUXMODE4,
3001 phy);
3002 }
3003 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3004 }
3005 return;
3006 } else if (do_low_power) {
3007 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3008 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3009
3010 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3011 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3012 MII_TG3_AUXCTL_PCTL_VREG_11V;
3013 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3014 }
3015
3016 /* The PHY should not be powered down on some chips because
3017 * of bugs.
3018 */
3019 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3020 tg3_asic_rev(tp) == ASIC_REV_5704 ||
3021 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
3022 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
3023 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
3024 !tp->pci_fn))
3025 return;
3026
3027 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3028 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3029 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3030 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3031 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3032 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3033 }
3034
3035 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3036 }
3037
3038 /* tp->lock is held. */
3039 static int tg3_nvram_lock(struct tg3 *tp)
3040 {
3041 if (tg3_flag(tp, NVRAM)) {
3042 int i;
3043
3044 if (tp->nvram_lock_cnt == 0) {
3045 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3046 for (i = 0; i < 8000; i++) {
3047 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3048 break;
3049 udelay(20);
3050 }
3051 if (i == 8000) {
3052 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3053 return -ENODEV;
3054 }
3055 }
3056 tp->nvram_lock_cnt++;
3057 }
3058 return 0;
3059 }
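
/* The arbitration loop above polls up to 8000 times with a 20 usec
 * delay, so a caller waits roughly 160 ms for SWARB_GNT1 before
 * tg3_nvram_lock() gives up with -ENODEV.
 */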
3060
3061 /* tp->lock is held. */
3062 static void tg3_nvram_unlock(struct tg3 *tp)
3063 {
3064 if (tg3_flag(tp, NVRAM)) {
3065 if (tp->nvram_lock_cnt > 0)
3066 tp->nvram_lock_cnt--;
3067 if (tp->nvram_lock_cnt == 0)
3068 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3069 }
3070 }
3071
3072 /* tp->lock is held. */
3073 static void tg3_enable_nvram_access(struct tg3 *tp)
3074 {
3075 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3076 u32 nvaccess = tr32(NVRAM_ACCESS);
3077
3078 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3079 }
3080 }
3081
3082 /* tp->lock is held. */
3083 static void tg3_disable_nvram_access(struct tg3 *tp)
3084 {
3085 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3086 u32 nvaccess = tr32(NVRAM_ACCESS);
3087
3088 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3089 }
3090 }
3091
3092 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3093 u32 offset, u32 *val)
3094 {
3095 u32 tmp;
3096 int i;
3097
3098 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3099 return -EINVAL;
3100
3101 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3102 EEPROM_ADDR_DEVID_MASK |
3103 EEPROM_ADDR_READ);
3104 tw32(GRC_EEPROM_ADDR,
3105 tmp |
3106 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3107 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3108 EEPROM_ADDR_ADDR_MASK) |
3109 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3110
3111 for (i = 0; i < 1000; i++) {
3112 tmp = tr32(GRC_EEPROM_ADDR);
3113
3114 if (tmp & EEPROM_ADDR_COMPLETE)
3115 break;
3116 msleep(1);
3117 }
3118 if (!(tmp & EEPROM_ADDR_COMPLETE))
3119 return -EBUSY;
3120
3121 tmp = tr32(GRC_EEPROM_DATA);
3122
3123 /*
3124 * The data will always be opposite the native endian
3125 * format. Perform a blind byteswap to compensate.
3126 */
3127 *val = swab32(tmp);
3128
3129 return 0;
3130 }
3131
3132 #define NVRAM_CMD_TIMEOUT 10000
3133
3134 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3135 {
3136 int i;
3137
3138 tw32(NVRAM_CMD, nvram_cmd);
3139 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3140 udelay(10);
3141 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3142 udelay(10);
3143 break;
3144 }
3145 }
3146
3147 if (i == NVRAM_CMD_TIMEOUT)
3148 return -EBUSY;
3149
3150 return 0;
3151 }
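
/* With NVRAM_CMD_TIMEOUT at 10000 and a 10 usec poll interval, a
 * command gets roughly 100 ms to assert NVRAM_CMD_DONE before
 * tg3_nvram_exec_cmd() returns -EBUSY.
 */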
3152
3153 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3154 {
3155 if (tg3_flag(tp, NVRAM) &&
3156 tg3_flag(tp, NVRAM_BUFFERED) &&
3157 tg3_flag(tp, FLASH) &&
3158 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3159 (tp->nvram_jedecnum == JEDEC_ATMEL))
3160
3161 addr = ((addr / tp->nvram_pagesize) <<
3162 ATMEL_AT45DB0X1B_PAGE_POS) +
3163 (addr % tp->nvram_pagesize);
3164
3165 return addr;
3166 }
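
/* Worked example for the Atmel translation above, assuming the
 * AT45DB0x1B's 264-byte page and ATMEL_AT45DB0X1B_PAGE_POS of 9
 * (the page index sits above a 9-bit in-page offset): linear address
 * 1000 is page 3, offset 208, so the physical address becomes
 * (3 << 9) + 208 = 1744.
 */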
3167
3168 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3169 {
3170 if (tg3_flag(tp, NVRAM) &&
3171 tg3_flag(tp, NVRAM_BUFFERED) &&
3172 tg3_flag(tp, FLASH) &&
3173 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3174 (tp->nvram_jedecnum == JEDEC_ATMEL))
3175
3176 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3177 tp->nvram_pagesize) +
3178 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3179
3180 return addr;
3181 }
3182
3183 /* NOTE: Data read in from NVRAM is byteswapped according to
3184 * the byteswapping settings for all other register accesses.
3185 * tg3 devices are BE devices, so on a BE machine, the data
3186 * returned will be exactly as it is seen in NVRAM. On a LE
3187 * machine, the 32-bit value will be byteswapped.
3188 */
3189 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3190 {
3191 int ret;
3192
3193 if (!tg3_flag(tp, NVRAM))
3194 return tg3_nvram_read_using_eeprom(tp, offset, val);
3195
3196 offset = tg3_nvram_phys_addr(tp, offset);
3197
3198 if (offset > NVRAM_ADDR_MSK)
3199 return -EINVAL;
3200
3201 ret = tg3_nvram_lock(tp);
3202 if (ret)
3203 return ret;
3204
3205 tg3_enable_nvram_access(tp);
3206
3207 tw32(NVRAM_ADDR, offset);
3208 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3209 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3210
3211 if (ret == 0)
3212 *val = tr32(NVRAM_RDDATA);
3213
3214 tg3_disable_nvram_access(tp);
3215
3216 tg3_nvram_unlock(tp);
3217
3218 return ret;
3219 }
3220
3221 /* Ensures NVRAM data is in bytestream format. */
3222 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3223 {
3224 u32 v;
3225 int res = tg3_nvram_read(tp, offset, &v);
3226 if (!res)
3227 *val = cpu_to_be32(v);
3228 return res;
3229 }
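
/* Byte-order sketch: if a word in NVRAM holds the bytes 12 34 56 78,
 * tg3_nvram_read() returns the value 0x12345678 on either host, but
 * that value's in-memory byte layout differs with endianness.
 * tg3_nvram_read_be32() applies cpu_to_be32() so the bytes land in
 * memory as 12 34 56 78 on both BE and LE hosts, i.e. a true
 * bytestream.
 */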
3230
3231 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3232 u32 offset, u32 len, u8 *buf)
3233 {
3234 int i, j, rc = 0;
3235 u32 val;
3236
3237 for (i = 0; i < len; i += 4) {
3238 u32 addr;
3239 __be32 data;
3240
3241 addr = offset + i;
3242
3243 memcpy(&data, buf + i, 4);
3244
3245 /*
3246 * The SEEPROM interface expects the data to always be opposite
3247 * the native endian format. We accomplish this by reversing
3248 * all the operations that would have been performed on the
3249 * data from a call to tg3_nvram_read_be32().
3250 */
3251 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3252
3253 val = tr32(GRC_EEPROM_ADDR);
3254 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3255
3256 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3257 EEPROM_ADDR_READ);
3258 tw32(GRC_EEPROM_ADDR, val |
3259 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3260 (addr & EEPROM_ADDR_ADDR_MASK) |
3261 EEPROM_ADDR_START |
3262 EEPROM_ADDR_WRITE);
3263
3264 for (j = 0; j < 1000; j++) {
3265 val = tr32(GRC_EEPROM_ADDR);
3266
3267 if (val & EEPROM_ADDR_COMPLETE)
3268 break;
3269 msleep(1);
3270 }
3271 if (!(val & EEPROM_ADDR_COMPLETE)) {
3272 rc = -EBUSY;
3273 break;
3274 }
3275 }
3276
3277 return rc;
3278 }
3279
3280 /* offset and length are dword aligned */
3281 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3282 u8 *buf)
3283 {
3284 int ret = 0;
3285 u32 pagesize = tp->nvram_pagesize;
3286 u32 pagemask = pagesize - 1;
3287 u32 nvram_cmd;
3288 u8 *tmp;
3289
3290 tmp = kmalloc(pagesize, GFP_KERNEL);
3291 if (tmp == NULL)
3292 return -ENOMEM;
3293
3294 while (len) {
3295 int j;
3296 u32 phy_addr, page_off, size;
3297
3298 phy_addr = offset & ~pagemask;
3299
3300 for (j = 0; j < pagesize; j += 4) {
3301 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3302 (__be32 *) (tmp + j));
3303 if (ret)
3304 break;
3305 }
3306 if (ret)
3307 break;
3308
3309 page_off = offset & pagemask;
3310 size = pagesize;
3311 if (len < size)
3312 size = len;
3313
3314 len -= size;
3315
3316 memcpy(tmp + page_off, buf, size);
3317
3318 offset = offset + (pagesize - page_off);
3319
3320 tg3_enable_nvram_access(tp);
3321
3322 /*
3323 * Before we can erase the flash page, we need
3324 * to issue a special "write enable" command.
3325 */
3326 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3327
3328 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3329 break;
3330
3331 /* Erase the target page */
3332 tw32(NVRAM_ADDR, phy_addr);
3333
3334 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3335 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3336
3337 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3338 break;
3339
3340 /* Issue another write enable to start the write. */
3341 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3342
3343 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3344 break;
3345
3346 for (j = 0; j < pagesize; j += 4) {
3347 __be32 data;
3348
3349 data = *((__be32 *) (tmp + j));
3350
3351 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3352
3353 tw32(NVRAM_ADDR, phy_addr + j);
3354
3355 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3356 NVRAM_CMD_WR;
3357
3358 if (j == 0)
3359 nvram_cmd |= NVRAM_CMD_FIRST;
3360 else if (j == (pagesize - 4))
3361 nvram_cmd |= NVRAM_CMD_LAST;
3362
3363 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3364 if (ret)
3365 break;
3366 }
3367 if (ret)
3368 break;
3369 }
3370
3371 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3372 tg3_nvram_exec_cmd(tp, nvram_cmd);
3373
3374 kfree(tmp);
3375
3376 return ret;
3377 }
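
/* Summary of the unbuffered write sequence above, per flash page:
 * read the full page into a bounce buffer, merge in the caller's
 * data, issue a write-enable, erase the page, issue another
 * write-enable, then program the page back one dword at a time with
 * NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST on the last,
 * finishing with a write-disable once all pages are written.
 */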
3378
3379 /* offset and length are dword aligned */
3380 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3381 u8 *buf)
3382 {
3383 int i, ret = 0;
3384
3385 for (i = 0; i < len; i += 4, offset += 4) {
3386 u32 page_off, phy_addr, nvram_cmd;
3387 __be32 data;
3388
3389 memcpy(&data, buf + i, 4);
3390 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3391
3392 page_off = offset % tp->nvram_pagesize;
3393
3394 phy_addr = tg3_nvram_phys_addr(tp, offset);
3395
3396 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3397
3398 if (page_off == 0 || i == 0)
3399 nvram_cmd |= NVRAM_CMD_FIRST;
3400 if (page_off == (tp->nvram_pagesize - 4))
3401 nvram_cmd |= NVRAM_CMD_LAST;
3402
3403 if (i == (len - 4))
3404 nvram_cmd |= NVRAM_CMD_LAST;
3405
3406 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3407 !tg3_flag(tp, FLASH) ||
3408 !tg3_flag(tp, 57765_PLUS))
3409 tw32(NVRAM_ADDR, phy_addr);
3410
3411 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3412 !tg3_flag(tp, 5755_PLUS) &&
3413 (tp->nvram_jedecnum == JEDEC_ST) &&
3414 (nvram_cmd & NVRAM_CMD_FIRST)) {
3415 u32 cmd;
3416
3417 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418 ret = tg3_nvram_exec_cmd(tp, cmd);
3419 if (ret)
3420 break;
3421 }
3422 if (!tg3_flag(tp, FLASH)) {
3423 /* We always do complete word writes to eeprom. */
3424 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3425 }
3426
3427 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3428 if (ret)
3429 break;
3430 }
3431 return ret;
3432 }
3433
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3436 {
3437 int ret;
3438
3439 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3440 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3441 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3442 udelay(40);
3443 }
3444
3445 if (!tg3_flag(tp, NVRAM)) {
3446 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3447 } else {
3448 u32 grc_mode;
3449
3450 ret = tg3_nvram_lock(tp);
3451 if (ret)
3452 return ret;
3453
3454 tg3_enable_nvram_access(tp);
3455 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3456 tw32(NVRAM_WRITE1, 0x406);
3457
3458 grc_mode = tr32(GRC_MODE);
3459 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3460
3461 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3462 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3463 buf);
3464 } else {
3465 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3466 buf);
3467 }
3468
3469 grc_mode = tr32(GRC_MODE);
3470 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3471
3472 tg3_disable_nvram_access(tp);
3473 tg3_nvram_unlock(tp);
3474 }
3475
3476 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3477 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3478 udelay(40);
3479 }
3480
3481 return ret;
3482 }
3483
3484 #define RX_CPU_SCRATCH_BASE 0x30000
3485 #define RX_CPU_SCRATCH_SIZE 0x04000
3486 #define TX_CPU_SCRATCH_BASE 0x34000
3487 #define TX_CPU_SCRATCH_SIZE 0x04000
3488
3489 /* tp->lock is held. */
3490 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3491 {
3492 int i;
3493 const int iters = 10000;
3494
3495 for (i = 0; i < iters; i++) {
3496 tw32(cpu_base + CPU_STATE, 0xffffffff);
3497 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3498 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3499 break;
3500 }
3501
3502 return (i == iters) ? -EBUSY : 0;
3503 }
3504
3505 /* tp->lock is held. */
3506 static int tg3_rxcpu_pause(struct tg3 *tp)
3507 {
3508 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3509
3510 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3511 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3512 udelay(10);
3513
3514 return rc;
3515 }
3516
3517 /* tp->lock is held. */
3518 static int tg3_txcpu_pause(struct tg3 *tp)
3519 {
3520 return tg3_pause_cpu(tp, TX_CPU_BASE);
3521 }
3522
3523 /* tp->lock is held. */
3524 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3525 {
3526 tw32(cpu_base + CPU_STATE, 0xffffffff);
3527 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3528 }
3529
3530 /* tp->lock is held. */
3531 static void tg3_rxcpu_resume(struct tg3 *tp)
3532 {
3533 tg3_resume_cpu(tp, RX_CPU_BASE);
3534 }
3535
3536 /* tp->lock is held. */
3537 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3538 {
3539 int rc;
3540
3541 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3542
3543 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3544 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3545
3546 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3547 return 0;
3548 }
3549 if (cpu_base == RX_CPU_BASE) {
3550 rc = tg3_rxcpu_pause(tp);
3551 } else {
3552 /*
3553 * There is only an Rx CPU for the 5750 derivative in the
3554 * BCM4785.
3555 */
3556 if (tg3_flag(tp, IS_SSB_CORE))
3557 return 0;
3558
3559 rc = tg3_txcpu_pause(tp);
3560 }
3561
3562 if (rc) {
3563 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3564 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3565 return -ENODEV;
3566 }
3567
3568 /* Clear firmware's nvram arbitration. */
3569 if (tg3_flag(tp, NVRAM))
3570 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3571 return 0;
3572 }
3573
3574 static int tg3_fw_data_len(struct tg3 *tp,
3575 const struct tg3_firmware_hdr *fw_hdr)
3576 {
3577 int fw_len;
3578
3579 /* Non-fragmented firmware has one firmware header followed by a
3580 * contiguous chunk of data to be written. The length field in that
3581 * header is not the length of the data to be written but the
3582 * complete length of the bss. The data length is determined from
3583 * tp->fw->size minus the headers.
3584 *
3585 * Fragmented firmware has a main header followed by multiple
3586 * fragments. Each fragment is identical to non-fragmented firmware,
3587 * with a firmware header followed by a contiguous chunk of data. In
3588 * the main header, the length field is unused and set to 0xffffffff.
3589 * In each fragment header the length is the entire size of that
3590 * fragment, i.e. fragment data plus header length. The data length
3591 * is therefore the header's length field minus TG3_FW_HDR_LEN.
3592 */
3593 if (tp->fw_len == 0xffffffff)
3594 fw_len = be32_to_cpu(fw_hdr->len);
3595 else
3596 fw_len = tp->fw->size;
3597
3598 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3599 }
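
/* Worked example, assuming the 12-byte tg3_firmware_hdr (version,
 * base_addr and len fields): a fragment whose header len field reads
 * 0x10c (268) carries (268 - 12) / 4 = 64 data words after its
 * header.
 */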
3600
3601 /* tp->lock is held. */
3602 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3603 u32 cpu_scratch_base, int cpu_scratch_size,
3604 const struct tg3_firmware_hdr *fw_hdr)
3605 {
3606 int err, i;
3607 void (*write_op)(struct tg3 *, u32, u32);
3608 int total_len = tp->fw->size;
3609
3610 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3611 netdev_err(tp->dev,
3612 "%s: Trying to load TX cpu firmware which is 5705\n",
3613 __func__);
3614 return -EINVAL;
3615 }
3616
3617 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3618 write_op = tg3_write_mem;
3619 else
3620 write_op = tg3_write_indirect_reg32;
3621
3622 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3623 /* It is possible that bootcode is still loading at this point.
3624 * Get the nvram lock first before halting the cpu.
3625 */
3626 int lock_err = tg3_nvram_lock(tp);
3627 err = tg3_halt_cpu(tp, cpu_base);
3628 if (!lock_err)
3629 tg3_nvram_unlock(tp);
3630 if (err)
3631 goto out;
3632
3633 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3634 write_op(tp, cpu_scratch_base + i, 0);
3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 tw32(cpu_base + CPU_MODE,
3637 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3638 } else {
3639 /* Subtract additional main header for fragmented firmware and
3640 * advance to the first fragment
3641 */
3642 total_len -= TG3_FW_HDR_LEN;
3643 fw_hdr++;
3644 }
3645
3646 do {
3647 u32 *fw_data = (u32 *)(fw_hdr + 1);
3648 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3649 write_op(tp, cpu_scratch_base +
3650 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3651 (i * sizeof(u32)),
3652 be32_to_cpu(fw_data[i]));
3653
3654 total_len -= be32_to_cpu(fw_hdr->len);
3655
3656 /* Advance to next fragment */
3657 fw_hdr = (struct tg3_firmware_hdr *)
3658 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3659 } while (total_len > 0);
3660
3661 err = 0;
3662
3663 out:
3664 return err;
3665 }
3666
3667 /* tp->lock is held. */
3668 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3669 {
3670 int i;
3671 const int iters = 5;
3672
3673 tw32(cpu_base + CPU_STATE, 0xffffffff);
3674 tw32_f(cpu_base + CPU_PC, pc);
3675
3676 for (i = 0; i < iters; i++) {
3677 if (tr32(cpu_base + CPU_PC) == pc)
3678 break;
3679 tw32(cpu_base + CPU_STATE, 0xffffffff);
3680 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3681 tw32_f(cpu_base + CPU_PC, pc);
3682 udelay(1000);
3683 }
3684
3685 return (i == iters) ? -EBUSY : 0;
3686 }
3687
3688 /* tp->lock is held. */
3689 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3690 {
3691 const struct tg3_firmware_hdr *fw_hdr;
3692 int err;
3693
3694 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3695
3696 /* The firmware blob starts with version numbers, followed by the
3697 * start address and length. The length field holds the complete
3698 * length, i.e. end_address_of_bss - start_address_of_text. The
3699 * remainder is the blob to be loaded contiguously from the start
3700 * address. */
3701
3702 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3703 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3704 fw_hdr);
3705 if (err)
3706 return err;
3707
3708 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3709 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3710 fw_hdr);
3711 if (err)
3712 return err;
3713
3714 /* Now start up only the RX cpu. */
3715 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3716 be32_to_cpu(fw_hdr->base_addr));
3717 if (err) {
3718 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3719 "should be %08x\n", __func__,
3720 tr32(RX_CPU_BASE + CPU_PC),
3721 be32_to_cpu(fw_hdr->base_addr));
3722 return -ENODEV;
3723 }
3724
3725 tg3_rxcpu_resume(tp);
3726
3727 return 0;
3728 }
3729
3730 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3731 {
3732 const int iters = 1000;
3733 int i;
3734 u32 val;
3735
3736 /* Wait for the boot code to complete initialization and enter the
3737 * service loop. It is then safe to download service patches.
3738 */
3739 for (i = 0; i < iters; i++) {
3740 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3741 break;
3742
3743 udelay(10);
3744 }
3745
3746 if (i == iters) {
3747 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3748 return -EBUSY;
3749 }
3750
3751 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3752 if (val & 0xff) {
3753 netdev_warn(tp->dev,
3754 "Other patches exist. Not downloading EEE patch\n");
3755 return -EEXIST;
3756 }
3757
3758 return 0;
3759 }
3760
3761 /* tp->lock is held. */
3762 static void tg3_load_57766_firmware(struct tg3 *tp)
3763 {
3764 struct tg3_firmware_hdr *fw_hdr;
3765
3766 if (!tg3_flag(tp, NO_NVRAM))
3767 return;
3768
3769 if (tg3_validate_rxcpu_state(tp))
3770 return;
3771
3772 if (!tp->fw)
3773 return;
3774
3775 /* This firmware blob has a different format from older firmware
3776 * releases, as described below. The main difference is that the
3777 * data is fragmented and written to non-contiguous locations.
3778 *
3779 * The blob begins with a firmware header identical to other
3780 * firmware, consisting of version, base addr and length. The length
3781 * here is unused and set to 0xffffffff.
3782 *
3783 * This is followed by a series of firmware fragments, each
3784 * individually identical to older firmware, i.e. a firmware header
3785 * followed by the data for that fragment. The version field of the
3786 * individual fragment headers is unused.
3787 */
3788
3789 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3790 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3791 return;
3792
3793 if (tg3_rxcpu_pause(tp))
3794 return;
3795
3796 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3797 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3798
3799 tg3_rxcpu_resume(tp);
3800 }
3801
3802 /* tp->lock is held. */
3803 static int tg3_load_tso_firmware(struct tg3 *tp)
3804 {
3805 const struct tg3_firmware_hdr *fw_hdr;
3806 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3807 int err;
3808
3809 if (!tg3_flag(tp, FW_TSO))
3810 return 0;
3811
3812 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3813
3814 /* The firmware blob starts with version numbers, followed by the
3815 * start address and length. The length field holds the complete
3816 * length, i.e. end_address_of_bss - start_address_of_text. The
3817 * remainder is the blob to be loaded contiguously from the start
3818 * address. */
3819
3820 cpu_scratch_size = tp->fw_len;
3821
3822 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3823 cpu_base = RX_CPU_BASE;
3824 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3825 } else {
3826 cpu_base = TX_CPU_BASE;
3827 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3828 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3829 }
3830
3831 err = tg3_load_firmware_cpu(tp, cpu_base,
3832 cpu_scratch_base, cpu_scratch_size,
3833 fw_hdr);
3834 if (err)
3835 return err;
3836
3837 /* Now start up the cpu. */
3838 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3839 be32_to_cpu(fw_hdr->base_addr));
3840 if (err) {
3841 netdev_err(tp->dev,
3842 "%s fails to set CPU PC, is %08x should be %08x\n",
3843 __func__, tr32(cpu_base + CPU_PC),
3844 be32_to_cpu(fw_hdr->base_addr));
3845 return -ENODEV;
3846 }
3847
3848 tg3_resume_cpu(tp, cpu_base);
3849 return 0;
3850 }
3851
3852
3853 /* tp->lock is held. */
3854 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3855 {
3856 u32 addr_high, addr_low;
3857 int i;
3858
3859 addr_high = ((tp->dev->dev_addr[0] << 8) |
3860 tp->dev->dev_addr[1]);
3861 addr_low = ((tp->dev->dev_addr[2] << 24) |
3862 (tp->dev->dev_addr[3] << 16) |
3863 (tp->dev->dev_addr[4] << 8) |
3864 (tp->dev->dev_addr[5] << 0));
3865 for (i = 0; i < 4; i++) {
3866 if (i == 1 && skip_mac_1)
3867 continue;
3868 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3869 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3870 }
3871
3872 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3873 tg3_asic_rev(tp) == ASIC_REV_5704) {
3874 for (i = 0; i < 12; i++) {
3875 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3876 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3877 }
3878 }
3879
3880 addr_high = (tp->dev->dev_addr[0] +
3881 tp->dev->dev_addr[1] +
3882 tp->dev->dev_addr[2] +
3883 tp->dev->dev_addr[3] +
3884 tp->dev->dev_addr[4] +
3885 tp->dev->dev_addr[5]) &
3886 TX_BACKOFF_SEED_MASK;
3887 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3888 }
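
/* Packing sketch for the writes above, using the arbitrary example
 * address 00:10:18:aa:bb:cc: addr_high holds 0x00000010 (bytes 0-1)
 * and addr_low holds 0x18aabbcc (bytes 2-5), mirrored into all four
 * MAC_ADDR slots (and the twelve extended slots on 5703/5704).
 */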
3889
3890 static void tg3_enable_register_access(struct tg3 *tp)
3891 {
3892 /*
3893 * Make sure register accesses (indirect or otherwise) will function
3894 * correctly.
3895 */
3896 pci_write_config_dword(tp->pdev,
3897 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3898 }
3899
3900 static int tg3_power_up(struct tg3 *tp)
3901 {
3902 int err;
3903
3904 tg3_enable_register_access(tp);
3905
3906 err = pci_set_power_state(tp->pdev, PCI_D0);
3907 if (!err) {
3908 /* Switch out of Vaux if it is a NIC */
3909 tg3_pwrsrc_switch_to_vmain(tp);
3910 } else {
3911 netdev_err(tp->dev, "Transition to D0 failed\n");
3912 }
3913
3914 return err;
3915 }
3916
3917 static int tg3_setup_phy(struct tg3 *, bool);
3918
3919 static int tg3_power_down_prepare(struct tg3 *tp)
3920 {
3921 u32 misc_host_ctrl;
3922 bool device_should_wake, do_low_power;
3923
3924 tg3_enable_register_access(tp);
3925
3926 /* Restore the CLKREQ setting. */
3927 if (tg3_flag(tp, CLKREQ_BUG))
3928 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3929 PCI_EXP_LNKCTL_CLKREQ_EN);
3930
3931 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3932 tw32(TG3PCI_MISC_HOST_CTRL,
3933 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3934
3935 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3936 tg3_flag(tp, WOL_ENABLE);
3937
3938 if (tg3_flag(tp, USE_PHYLIB)) {
3939 do_low_power = false;
3940 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3941 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3942 struct phy_device *phydev;
3943 u32 phyid, advertising;
3944
3945 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3946
3947 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3948
3949 tp->link_config.speed = phydev->speed;
3950 tp->link_config.duplex = phydev->duplex;
3951 tp->link_config.autoneg = phydev->autoneg;
3952 tp->link_config.advertising = phydev->advertising;
3953
3954 advertising = ADVERTISED_TP |
3955 ADVERTISED_Pause |
3956 ADVERTISED_Autoneg |
3957 ADVERTISED_10baseT_Half;
3958
3959 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3960 if (tg3_flag(tp, WOL_SPEED_100MB))
3961 advertising |=
3962 ADVERTISED_100baseT_Half |
3963 ADVERTISED_100baseT_Full |
3964 ADVERTISED_10baseT_Full;
3965 else
3966 advertising |= ADVERTISED_10baseT_Full;
3967 }
3968
3969 phydev->advertising = advertising;
3970
3971 phy_start_aneg(phydev);
3972
3973 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3974 if (phyid != PHY_ID_BCMAC131) {
3975 phyid &= PHY_BCM_OUI_MASK;
3976 if (phyid == PHY_BCM_OUI_1 ||
3977 phyid == PHY_BCM_OUI_2 ||
3978 phyid == PHY_BCM_OUI_3)
3979 do_low_power = true;
3980 }
3981 }
3982 } else {
3983 do_low_power = true;
3984
3985 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3986 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3987
3988 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3989 tg3_setup_phy(tp, false);
3990 }
3991
3992 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3993 u32 val;
3994
3995 val = tr32(GRC_VCPU_EXT_CTRL);
3996 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3997 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3998 int i;
3999 u32 val;
4000
4001 for (i = 0; i < 200; i++) {
4002 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4003 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4004 break;
4005 msleep(1);
4006 }
4007 }
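/* The mailbox write below appears to be a handshake with the boot/ASF
 * firmware: WOL_SIGNATURE plus the driver-state bits in NIC SRAM
 * advertise that the driver is shutting down with magic-packet wakeup
 * armed.
 */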
4008 if (tg3_flag(tp, WOL_CAP))
4009 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4010 WOL_DRV_STATE_SHUTDOWN |
4011 WOL_DRV_WOL |
4012 WOL_SET_MAGIC_PKT);
4013
4014 if (device_should_wake) {
4015 u32 mac_mode;
4016
4017 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4018 if (do_low_power &&
4019 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4020 tg3_phy_auxctl_write(tp,
4021 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4022 MII_TG3_AUXCTL_PCTL_WOL_EN |
4023 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4024 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4025 udelay(40);
4026 }
4027
4028 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4029 mac_mode = MAC_MODE_PORT_MODE_GMII;
4030 else if (tp->phy_flags &
4031 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4032 if (tp->link_config.active_speed == SPEED_1000)
4033 mac_mode = MAC_MODE_PORT_MODE_GMII;
4034 else
4035 mac_mode = MAC_MODE_PORT_MODE_MII;
4036 } else
4037 mac_mode = MAC_MODE_PORT_MODE_MII;
4038
4039 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4040 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4041 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4042 SPEED_100 : SPEED_10;
4043 if (tg3_5700_link_polarity(tp, speed))
4044 mac_mode |= MAC_MODE_LINK_POLARITY;
4045 else
4046 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4047 }
4048 } else {
4049 mac_mode = MAC_MODE_PORT_MODE_TBI;
4050 }
4051
4052 if (!tg3_flag(tp, 5750_PLUS))
4053 tw32(MAC_LED_CTRL, tp->led_ctrl);
4054
4055 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4056 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4057 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4058 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4059
4060 if (tg3_flag(tp, ENABLE_APE))
4061 mac_mode |= MAC_MODE_APE_TX_EN |
4062 MAC_MODE_APE_RX_EN |
4063 MAC_MODE_TDE_ENABLE;
4064
4065 tw32_f(MAC_MODE, mac_mode);
4066 udelay(100);
4067
4068 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4069 udelay(10);
4070 }
4071
4072 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4073 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4074 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4075 u32 base_val;
4076
4077 base_val = tp->pci_clock_ctrl;
4078 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4079 CLOCK_CTRL_TXCLK_DISABLE);
4080
4081 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4082 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4083 } else if (tg3_flag(tp, 5780_CLASS) ||
4084 tg3_flag(tp, CPMU_PRESENT) ||
4085 tg3_asic_rev(tp) == ASIC_REV_5906) {
4086 /* do nothing */
4087 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4088 u32 newbits1, newbits2;
4089
4090 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4091 tg3_asic_rev(tp) == ASIC_REV_5701) {
4092 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4093 CLOCK_CTRL_TXCLK_DISABLE |
4094 CLOCK_CTRL_ALTCLK);
4095 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4096 } else if (tg3_flag(tp, 5705_PLUS)) {
4097 newbits1 = CLOCK_CTRL_625_CORE;
4098 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4099 } else {
4100 newbits1 = CLOCK_CTRL_ALTCLK;
4101 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4102 }
4103
4104 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4105 40);
4106
4107 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4108 40);
4109
4110 if (!tg3_flag(tp, 5705_PLUS)) {
4111 u32 newbits3;
4112
4113 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4114 tg3_asic_rev(tp) == ASIC_REV_5701) {
4115 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4116 CLOCK_CTRL_TXCLK_DISABLE |
4117 CLOCK_CTRL_44MHZ_CORE);
4118 } else {
4119 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4120 }
4121
4122 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4123 tp->pci_clock_ctrl | newbits3, 40);
4124 }
4125 }
4126
4127 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4128 tg3_power_down_phy(tp, do_low_power);
4129
4130 tg3_frob_aux_power(tp, true);
4131
4132 /* Workaround for unstable PLL clock */
4133 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4134 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4135 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4136 u32 val = tr32(0x7d00);
4137
4138 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4139 tw32(0x7d00, val);
4140 if (!tg3_flag(tp, ENABLE_ASF)) {
4141 int err;
4142
4143 err = tg3_nvram_lock(tp);
4144 tg3_halt_cpu(tp, RX_CPU_BASE);
4145 if (!err)
4146 tg3_nvram_unlock(tp);
4147 }
4148 }
4149
4150 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4151
4152 return 0;
4153 }
4154
4155 static void tg3_power_down(struct tg3 *tp)
4156 {
4157 tg3_power_down_prepare(tp);
4158
4159 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4160 pci_set_power_state(tp->pdev, PCI_D3hot);
4161 }
4162
4163 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4164 {
4165 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4166 case MII_TG3_AUX_STAT_10HALF:
4167 *speed = SPEED_10;
4168 *duplex = DUPLEX_HALF;
4169 break;
4170
4171 case MII_TG3_AUX_STAT_10FULL:
4172 *speed = SPEED_10;
4173 *duplex = DUPLEX_FULL;
4174 break;
4175
4176 case MII_TG3_AUX_STAT_100HALF:
4177 *speed = SPEED_100;
4178 *duplex = DUPLEX_HALF;
4179 break;
4180
4181 case MII_TG3_AUX_STAT_100FULL:
4182 *speed = SPEED_100;
4183 *duplex = DUPLEX_FULL;
4184 break;
4185
4186 case MII_TG3_AUX_STAT_1000HALF:
4187 *speed = SPEED_1000;
4188 *duplex = DUPLEX_HALF;
4189 break;
4190
4191 case MII_TG3_AUX_STAT_1000FULL:
4192 *speed = SPEED_1000;
4193 *duplex = DUPLEX_FULL;
4194 break;
4195
4196 default:
4197 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4198 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4199 SPEED_10;
4200 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4201 DUPLEX_HALF;
4202 break;
4203 }
4204 *speed = SPEED_UNKNOWN;
4205 *duplex = DUPLEX_UNKNOWN;
4206 break;
4207 }
4208 }
4209
4210 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4211 {
4212 int err = 0;
4213 u32 val, new_adv;
4214
4215 new_adv = ADVERTISE_CSMA;
4216 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4217 new_adv |= mii_advertise_flowctrl(flowctrl);
4218
4219 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4220 if (err)
4221 goto done;
4222
4223 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4224 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4225
4226 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4227 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4228 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4229
4230 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4231 if (err)
4232 goto done;
4233 }
4234
4235 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4236 goto done;
4237
4238 tw32(TG3_CPMU_EEE_MODE,
4239 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4240
4241 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4242 if (!err) {
4243 u32 err2;
4244
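/* Build the EEE advertisement word; MDIO_AN_EEE_ADV is the Clause 45
 * EEE advertisement register (7.60 per 802.3az), written here through
 * the PHY's clause 45 access helper.
 */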
4245 val = 0;
4246 /* Advertise 100BASE-TX EEE ability */
4247 if (advertise & ADVERTISED_100baseT_Full)
4248 val |= MDIO_AN_EEE_ADV_100TX;
4249 /* Advertise 1000BASE-T EEE ability */
4250 if (advertise & ADVERTISED_1000baseT_Full)
4251 val |= MDIO_AN_EEE_ADV_1000T;
4252 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4253 if (err)
4254 val = 0;
4255
4256 switch (tg3_asic_rev(tp)) {
4257 case ASIC_REV_5717:
4258 case ASIC_REV_57765:
4259 case ASIC_REV_57766:
4260 case ASIC_REV_5719:
4261 /* If we advertised any EEE abilities above... */
4262 if (val)
4263 val = MII_TG3_DSP_TAP26_ALNOKO |
4264 MII_TG3_DSP_TAP26_RMRXSTO |
4265 MII_TG3_DSP_TAP26_OPCSINPT;
4266 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4267 /* Fall through */
4268 case ASIC_REV_5720:
4269 case ASIC_REV_5762:
4270 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4271 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4272 MII_TG3_DSP_CH34TP2_HIBW01);
4273 }
4274
4275 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4276 if (!err)
4277 err = err2;
4278 }
4279
4280 done:
4281 return err;
4282 }
4283
4284 static void tg3_phy_copper_begin(struct tg3 *tp)
4285 {
4286 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4287 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4288 u32 adv, fc;
4289
4290 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4291 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4292 adv = ADVERTISED_10baseT_Half |
4293 ADVERTISED_10baseT_Full;
4294 if (tg3_flag(tp, WOL_SPEED_100MB))
4295 adv |= ADVERTISED_100baseT_Half |
4296 ADVERTISED_100baseT_Full;
4297 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4298 adv |= ADVERTISED_1000baseT_Half |
4299 ADVERTISED_1000baseT_Full;
4300
4301 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4302 } else {
4303 adv = tp->link_config.advertising;
4304 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4305 adv &= ~(ADVERTISED_1000baseT_Half |
4306 ADVERTISED_1000baseT_Full);
4307
4308 fc = tp->link_config.flowctrl;
4309 }
4310
4311 tg3_phy_autoneg_cfg(tp, adv, fc);
4312
4313 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4314 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4315 /* Normally during power down we want to autonegotiate
4316 * the lowest possible speed for WOL. However, to avoid
4317 * link flap, we leave it untouched.
4318 */
4319 return;
4320 }
4321
4322 tg3_writephy(tp, MII_BMCR,
4323 BMCR_ANENABLE | BMCR_ANRESTART);
4324 } else {
4325 int i;
4326 u32 bmcr, orig_bmcr;
4327
4328 tp->link_config.active_speed = tp->link_config.speed;
4329 tp->link_config.active_duplex = tp->link_config.duplex;
4330
4331 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4332 /* With autoneg disabled, 5715 only links up when the
4333 * advertisement register has the configured speed
4334 * enabled.
4335 */
4336 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4337 }
4338
4339 bmcr = 0;
4340 switch (tp->link_config.speed) {
4341 default:
4342 case SPEED_10:
4343 break;
4344
4345 case SPEED_100:
4346 bmcr |= BMCR_SPEED100;
4347 break;
4348
4349 case SPEED_1000:
4350 bmcr |= BMCR_SPEED1000;
4351 break;
4352 }
4353
4354 if (tp->link_config.duplex == DUPLEX_FULL)
4355 bmcr |= BMCR_FULLDPLX;
4356
4357 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4358 (bmcr != orig_bmcr)) {
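/* Sketch of the intent here: briefly force internal loopback so the
 * link drops, wait (up to ~15 ms, 1500 polls of 10 us) for BMSR to
 * report link down, and only then apply the new forced-speed BMCR
 * value, so the PHY is not reconfigured while the old link is up.
 */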
4359 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4360 for (i = 0; i < 1500; i++) {
4361 u32 tmp;
4362
4363 udelay(10);
4364 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4365 tg3_readphy(tp, MII_BMSR, &tmp))
4366 continue;
4367 if (!(tmp & BMSR_LSTATUS)) {
4368 udelay(40);
4369 break;
4370 }
4371 }
4372 tg3_writephy(tp, MII_BMCR, bmcr);
4373 udelay(40);
4374 }
4375 }
4376 }
4377
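/* Descriptive note: this helper reverse-engineers tp->link_config from
 * whatever the PHY registers currently contain (e.g. state left behind
 * by boot firmware), rather than programming the PHY itself.
 */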
4378 static int tg3_phy_pull_config(struct tg3 *tp)
4379 {
4380 int err;
4381 u32 val;
4382
4383 err = tg3_readphy(tp, MII_BMCR, &val);
4384 if (err)
4385 goto done;
4386
4387 if (!(val & BMCR_ANENABLE)) {
4388 tp->link_config.autoneg = AUTONEG_DISABLE;
4389 tp->link_config.advertising = 0;
4390 tg3_flag_clear(tp, PAUSE_AUTONEG);
4391
4392 err = -EIO;
4393
4394 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4395 case 0:
4396 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4397 goto done;
4398
4399 tp->link_config.speed = SPEED_10;
4400 break;
4401 case BMCR_SPEED100:
4402 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4403 goto done;
4404
4405 tp->link_config.speed = SPEED_100;
4406 break;
4407 case BMCR_SPEED1000:
4408 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4409 tp->link_config.speed = SPEED_1000;
4410 break;
4411 }
4412 /* Fall through */
4413 default:
4414 goto done;
4415 }
4416
4417 if (val & BMCR_FULLDPLX)
4418 tp->link_config.duplex = DUPLEX_FULL;
4419 else
4420 tp->link_config.duplex = DUPLEX_HALF;
4421
4422 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4423
4424 err = 0;
4425 goto done;
4426 }
4427
4428 tp->link_config.autoneg = AUTONEG_ENABLE;
4429 tp->link_config.advertising = ADVERTISED_Autoneg;
4430 tg3_flag_set(tp, PAUSE_AUTONEG);
4431
4432 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4433 u32 adv;
4434
4435 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4436 if (err)
4437 goto done;
4438
4439 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4440 tp->link_config.advertising |= adv | ADVERTISED_TP;
4441
4442 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4443 } else {
4444 tp->link_config.advertising |= ADVERTISED_FIBRE;
4445 }
4446
4447 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4448 u32 adv;
4449
4450 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4451 err = tg3_readphy(tp, MII_CTRL1000, &val);
4452 if (err)
4453 goto done;
4454
4455 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4456 } else {
4457 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4458 if (err)
4459 goto done;
4460
4461 adv = tg3_decode_flowctrl_1000X(val);
4462 tp->link_config.flowctrl = adv;
4463
4464 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4465 adv = mii_adv_to_ethtool_adv_x(val);
4466 }
4467
4468 tp->link_config.advertising |= adv;
4469 }
4470
4471 done:
4472 return err;
4473 }
4474
4475 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4476 {
4477 int err;
4478
4479 /* Turn off tap power management. */
4480 /* Set Extended packet length bit */
4481 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4482
4483 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4484 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4485 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4486 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4487 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4488
4489 udelay(40);
4490
4491 return err;
4492 }
4493
4494 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4495 {
4496 u32 val;
4497 u32 tgtadv = 0;
4498 u32 advertising = tp->link_config.advertising;
4499
4500 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4501 return true;
4502
4503 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4504 return false;
4505
4506 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4507 
4509 if (advertising & ADVERTISED_100baseT_Full)
4510 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4511 if (advertising & ADVERTISED_1000baseT_Full)
4512 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4513
4514 if (val != tgtadv)
4515 return false;
4516
4517 return true;
4518 }
4519
4520 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4521 {
4522 u32 advmsk, tgtadv, advertising;
4523
4524 advertising = tp->link_config.advertising;
4525 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4526
4527 advmsk = ADVERTISE_ALL;
4528 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4529 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4530 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4531 }
4532
4533 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4534 return false;
4535
4536 if ((*lcladv & advmsk) != tgtadv)
4537 return false;
4538
4539 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4540 u32 tg3_ctrl;
4541
4542 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4543
4544 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4545 return false;
4546
4547 if (tgtadv &&
4548 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4549 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4550 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4551 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4552 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4553 } else {
4554 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4555 }
4556
4557 if (tg3_ctrl != tgtadv)
4558 return false;
4559 }
4560
4561 return true;
4562 }
4563
4564 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4565 {
4566 u32 lpeth = 0;
4567
4568 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4569 u32 val;
4570
4571 if (tg3_readphy(tp, MII_STAT1000, &val))
4572 return false;
4573
4574 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4575 }
4576
4577 if (tg3_readphy(tp, MII_LPA, rmtadv))
4578 return false;
4579
4580 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4581 tp->link_config.rmt_adv = lpeth;
4582
4583 return true;
4584 }
4585
4586 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4587 {
4588 if (curr_link_up != tp->link_up) {
4589 if (curr_link_up) {
4590 netif_carrier_on(tp->dev);
4591 } else {
4592 netif_carrier_off(tp->dev);
4593 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4594 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4595 }
4596
4597 tg3_link_report(tp);
4598 return true;
4599 }
4600
4601 return false;
4602 }
4603
4604 static void tg3_clear_mac_status(struct tg3 *tp)
4605 {
4606 tw32(MAC_EVENT, 0);
4607
4608 tw32_f(MAC_STATUS,
4609 MAC_STATUS_SYNC_CHANGED |
4610 MAC_STATUS_CFG_CHANGED |
4611 MAC_STATUS_MI_COMPLETION |
4612 MAC_STATUS_LNKSTATE_CHANGED);
4613 udelay(40);
4614 }
4615
4616 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4617 {
4618 bool current_link_up;
4619 u32 bmsr, val;
4620 u32 lcl_adv, rmt_adv;
4621 u16 current_speed;
4622 u8 current_duplex;
4623 int i, err;
4624
4625 tg3_clear_mac_status(tp);
4626
4627 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4628 tw32_f(MAC_MI_MODE,
4629 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4630 udelay(80);
4631 }
4632
4633 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4634
4635 /* Some third-party PHYs need to be reset on link going
4636 * down.
4637 */
4638 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4639 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4640 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4641 tp->link_up) {
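/* BMSR latches a link failure until read (IEEE 802.3 latched-low
 * behavior), so the register is read twice here: the second read
 * reflects the current link state.
 */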
4642 tg3_readphy(tp, MII_BMSR, &bmsr);
4643 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4644 !(bmsr & BMSR_LSTATUS))
4645 force_reset = true;
4646 }
4647 if (force_reset)
4648 tg3_phy_reset(tp);
4649
4650 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4651 tg3_readphy(tp, MII_BMSR, &bmsr);
4652 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4653 !tg3_flag(tp, INIT_COMPLETE))
4654 bmsr = 0;
4655
4656 if (!(bmsr & BMSR_LSTATUS)) {
4657 err = tg3_init_5401phy_dsp(tp);
4658 if (err)
4659 return err;
4660
4661 tg3_readphy(tp, MII_BMSR, &bmsr);
4662 for (i = 0; i < 1000; i++) {
4663 udelay(10);
4664 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4665 (bmsr & BMSR_LSTATUS)) {
4666 udelay(40);
4667 break;
4668 }
4669 }
4670
4671 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4672 TG3_PHY_REV_BCM5401_B0 &&
4673 !(bmsr & BMSR_LSTATUS) &&
4674 tp->link_config.active_speed == SPEED_1000) {
4675 err = tg3_phy_reset(tp);
4676 if (!err)
4677 err = tg3_init_5401phy_dsp(tp);
4678 if (err)
4679 return err;
4680 }
4681 }
4682 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4683 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4684 /* 5701 {A0,B0} CRC bug workaround */
4685 tg3_writephy(tp, 0x15, 0x0a75);
4686 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4687 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4688 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4689 }
4690
4691 /* Clear pending interrupts... */
4692 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4693 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4694
4695 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4696 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4697 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4698 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4699
4700 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4701 tg3_asic_rev(tp) == ASIC_REV_5701) {
4702 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4703 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4704 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4705 else
4706 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4707 }
4708
4709 current_link_up = false;
4710 current_speed = SPEED_UNKNOWN;
4711 current_duplex = DUPLEX_UNKNOWN;
4712 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4713 tp->link_config.rmt_adv = 0;
4714
4715 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4716 err = tg3_phy_auxctl_read(tp,
4717 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4718 &val);
4719 if (!err && !(val & (1 << 10))) {
4720 tg3_phy_auxctl_write(tp,
4721 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4722 val | (1 << 10));
4723 goto relink;
4724 }
4725 }
4726
4727 bmsr = 0;
4728 for (i = 0; i < 100; i++) {
4729 tg3_readphy(tp, MII_BMSR, &bmsr);
4730 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4731 (bmsr & BMSR_LSTATUS))
4732 break;
4733 udelay(40);
4734 }
4735
4736 if (bmsr & BMSR_LSTATUS) {
4737 u32 aux_stat, bmcr;
4738
4739 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4740 for (i = 0; i < 2000; i++) {
4741 udelay(10);
4742 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4743 aux_stat)
4744 break;
4745 }
4746
4747 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4748 &current_speed,
4749 &current_duplex);
4750
4751 bmcr = 0;
4752 for (i = 0; i < 200; i++) {
4753 tg3_readphy(tp, MII_BMCR, &bmcr);
4754 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4755 continue;
4756 if (bmcr && bmcr != 0x7fff)
4757 break;
4758 udelay(10);
4759 }
4760
4761 lcl_adv = 0;
4762 rmt_adv = 0;
4763
4764 tp->link_config.active_speed = current_speed;
4765 tp->link_config.active_duplex = current_duplex;
4766
4767 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4768 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4769
4770 if ((bmcr & BMCR_ANENABLE) &&
4771 eee_config_ok &&
4772 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4773 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4774 current_link_up = true;
4775
4776 /* Changes to EEE settings take effect only after a PHY
4777 * reset. If we have skipped a reset due to Link Flap
4778 * Avoidance being enabled, do it now.
4779 */
4780 if (!eee_config_ok &&
4781 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4782 !force_reset)
4783 tg3_phy_reset(tp);
4784 } else {
4785 if (!(bmcr & BMCR_ANENABLE) &&
4786 tp->link_config.speed == current_speed &&
4787 tp->link_config.duplex == current_duplex) {
4788 current_link_up = true;
4789 }
4790 }
4791
4792 if (current_link_up &&
4793 tp->link_config.active_duplex == DUPLEX_FULL) {
4794 u32 reg, bit;
4795
4796 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4797 reg = MII_TG3_FET_GEN_STAT;
4798 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4799 } else {
4800 reg = MII_TG3_EXT_STAT;
4801 bit = MII_TG3_EXT_STAT_MDIX;
4802 }
4803
4804 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4805 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4806
4807 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4808 }
4809 }
4810
4811 relink:
4812 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4813 tg3_phy_copper_begin(tp);
4814
4815 if (tg3_flag(tp, ROBOSWITCH)) {
4816 current_link_up = true;
4817 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4818 current_speed = SPEED_1000;
4819 current_duplex = DUPLEX_FULL;
4820 tp->link_config.active_speed = current_speed;
4821 tp->link_config.active_duplex = current_duplex;
4822 }
4823
4824 tg3_readphy(tp, MII_BMSR, &bmsr);
4825 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4826 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4827 current_link_up = true;
4828 }
4829
4830 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4831 if (current_link_up) {
4832 if (tp->link_config.active_speed == SPEED_100 ||
4833 tp->link_config.active_speed == SPEED_10)
4834 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4835 else
4836 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4837 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4838 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4839 else
4840 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4841
4842 /* In order for the 5750 core in the BCM4785 chip to work properly
4843 * in RGMII mode, the LED Control Register must be set up.
4844 */
4845 if (tg3_flag(tp, RGMII_MODE)) {
4846 u32 led_ctrl = tr32(MAC_LED_CTRL);
4847 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4848
4849 if (tp->link_config.active_speed == SPEED_10)
4850 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4851 else if (tp->link_config.active_speed == SPEED_100)
4852 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4853 LED_CTRL_100MBPS_ON);
4854 else if (tp->link_config.active_speed == SPEED_1000)
4855 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4856 LED_CTRL_1000MBPS_ON);
4857
4858 tw32(MAC_LED_CTRL, led_ctrl);
4859 udelay(40);
4860 }
4861
4862 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4863 if (tp->link_config.active_duplex == DUPLEX_HALF)
4864 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4865
4866 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4867 if (current_link_up &&
4868 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4869 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4870 else
4871 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4872 }
4873
4874 /* ??? Without this setting Netgear GA302T PHY does not
4875 * ??? send/receive packets...
4876 */
4877 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4878 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4879 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4880 tw32_f(MAC_MI_MODE, tp->mi_mode);
4881 udelay(80);
4882 }
4883
4884 tw32_f(MAC_MODE, tp->mac_mode);
4885 udelay(40);
4886
4887 tg3_phy_eee_adjust(tp, current_link_up);
4888
4889 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4890 /* Polled via timer. */
4891 tw32_f(MAC_EVENT, 0);
4892 } else {
4893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4894 }
4895 udelay(40);
4896
4897 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4898 current_link_up &&
4899 tp->link_config.active_speed == SPEED_1000 &&
4900 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4901 udelay(120);
4902 tw32_f(MAC_STATUS,
4903 (MAC_STATUS_SYNC_CHANGED |
4904 MAC_STATUS_CFG_CHANGED));
4905 udelay(40);
4906 tg3_write_mem(tp,
4907 NIC_SRAM_FIRMWARE_MBOX,
4908 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4909 }
4910
4911 /* Prevent send BD corruption. */
4912 if (tg3_flag(tp, CLKREQ_BUG)) {
4913 if (tp->link_config.active_speed == SPEED_100 ||
4914 tp->link_config.active_speed == SPEED_10)
4915 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4916 PCI_EXP_LNKCTL_CLKREQ_EN);
4917 else
4918 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4919 PCI_EXP_LNKCTL_CLKREQ_EN);
4920 }
4921
4922 tg3_test_and_report_link_chg(tp, current_link_up);
4923
4924 return 0;
4925 }
4926
4927 struct tg3_fiber_aneginfo {
4928 int state;
4929 #define ANEG_STATE_UNKNOWN 0
4930 #define ANEG_STATE_AN_ENABLE 1
4931 #define ANEG_STATE_RESTART_INIT 2
4932 #define ANEG_STATE_RESTART 3
4933 #define ANEG_STATE_DISABLE_LINK_OK 4
4934 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4935 #define ANEG_STATE_ABILITY_DETECT 6
4936 #define ANEG_STATE_ACK_DETECT_INIT 7
4937 #define ANEG_STATE_ACK_DETECT 8
4938 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4939 #define ANEG_STATE_COMPLETE_ACK 10
4940 #define ANEG_STATE_IDLE_DETECT_INIT 11
4941 #define ANEG_STATE_IDLE_DETECT 12
4942 #define ANEG_STATE_LINK_OK 13
4943 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4944 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4945
4946 u32 flags;
4947 #define MR_AN_ENABLE 0x00000001
4948 #define MR_RESTART_AN 0x00000002
4949 #define MR_AN_COMPLETE 0x00000004
4950 #define MR_PAGE_RX 0x00000008
4951 #define MR_NP_LOADED 0x00000010
4952 #define MR_TOGGLE_TX 0x00000020
4953 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4954 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4955 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4956 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4957 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4958 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4959 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4960 #define MR_TOGGLE_RX 0x00002000
4961 #define MR_NP_RX 0x00004000
4962
4963 #define MR_LINK_OK 0x80000000
4964
4965 unsigned long link_time, cur_time;
4966
4967 u32 ability_match_cfg;
4968 int ability_match_count;
4969
4970 char ability_match, idle_match, ack_match;
4971
4972 u32 txconfig, rxconfig;
4973 #define ANEG_CFG_NP 0x00000080
4974 #define ANEG_CFG_ACK 0x00000040
4975 #define ANEG_CFG_RF2 0x00000020
4976 #define ANEG_CFG_RF1 0x00000010
4977 #define ANEG_CFG_PS2 0x00000001
4978 #define ANEG_CFG_PS1 0x00008000
4979 #define ANEG_CFG_HD 0x00004000
4980 #define ANEG_CFG_FD 0x00002000
4981 #define ANEG_CFG_INVAL 0x00001f06
4982
4983 };
4984 #define ANEG_OK 0
4985 #define ANEG_DONE 1
4986 #define ANEG_TIMER_ENAB 2
4987 #define ANEG_FAILED -1
4988
4989 #define ANEG_STATE_SETTLE_TIME 10000
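/* Note on units: cur_time advances once per state-machine invocation,
 * and fiber_autoneg() below invokes the machine roughly every 1 us, so
 * a settle time of 10000 corresponds to roughly 10 ms (udelay() is a
 * lower bound, so the real interval can be somewhat longer).
 */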
4990
4991 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4992 struct tg3_fiber_aneginfo *ap)
4993 {
4994 u16 flowctrl;
4995 unsigned long delta;
4996 u32 rx_cfg_reg;
4997 int ret;
4998
4999 if (ap->state == ANEG_STATE_UNKNOWN) {
5000 ap->rxconfig = 0;
5001 ap->link_time = 0;
5002 ap->cur_time = 0;
5003 ap->ability_match_cfg = 0;
5004 ap->ability_match_count = 0;
5005 ap->ability_match = 0;
5006 ap->idle_match = 0;
5007 ap->ack_match = 0;
5008 }
5009 ap->cur_time++;
5010
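/* An "ability match" is declared only after the same non-zero config
 * word has been seen on consecutive polls (ability_match_count > 1),
 * which debounces the received autoneg code words.
 */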
5011 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5012 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5013
5014 if (rx_cfg_reg != ap->ability_match_cfg) {
5015 ap->ability_match_cfg = rx_cfg_reg;
5016 ap->ability_match = 0;
5017 ap->ability_match_count = 0;
5018 } else {
5019 if (++ap->ability_match_count > 1) {
5020 ap->ability_match = 1;
5021 ap->ability_match_cfg = rx_cfg_reg;
5022 }
5023 }
5024 if (rx_cfg_reg & ANEG_CFG_ACK)
5025 ap->ack_match = 1;
5026 else
5027 ap->ack_match = 0;
5028
5029 ap->idle_match = 0;
5030 } else {
5031 ap->idle_match = 1;
5032 ap->ability_match_cfg = 0;
5033 ap->ability_match_count = 0;
5034 ap->ability_match = 0;
5035 ap->ack_match = 0;
5036
5037 rx_cfg_reg = 0;
5038 }
5039
5040 ap->rxconfig = rx_cfg_reg;
5041 ret = ANEG_OK;
5042
5043 switch (ap->state) {
5044 case ANEG_STATE_UNKNOWN:
5045 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5046 ap->state = ANEG_STATE_AN_ENABLE;
5047
5048 /* fallthru */
5049 case ANEG_STATE_AN_ENABLE:
5050 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5051 if (ap->flags & MR_AN_ENABLE) {
5052 ap->link_time = 0;
5053 ap->cur_time = 0;
5054 ap->ability_match_cfg = 0;
5055 ap->ability_match_count = 0;
5056 ap->ability_match = 0;
5057 ap->idle_match = 0;
5058 ap->ack_match = 0;
5059
5060 ap->state = ANEG_STATE_RESTART_INIT;
5061 } else {
5062 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5063 }
5064 break;
5065
5066 case ANEG_STATE_RESTART_INIT:
5067 ap->link_time = ap->cur_time;
5068 ap->flags &= ~(MR_NP_LOADED);
5069 ap->txconfig = 0;
5070 tw32(MAC_TX_AUTO_NEG, 0);
5071 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5072 tw32_f(MAC_MODE, tp->mac_mode);
5073 udelay(40);
5074
5075 ret = ANEG_TIMER_ENAB;
5076 ap->state = ANEG_STATE_RESTART;
5077
5078 /* fallthru */
5079 case ANEG_STATE_RESTART:
5080 delta = ap->cur_time - ap->link_time;
5081 if (delta > ANEG_STATE_SETTLE_TIME)
5082 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5083 else
5084 ret = ANEG_TIMER_ENAB;
5085 break;
5086
5087 case ANEG_STATE_DISABLE_LINK_OK:
5088 ret = ANEG_DONE;
5089 break;
5090
5091 case ANEG_STATE_ABILITY_DETECT_INIT:
5092 ap->flags &= ~(MR_TOGGLE_TX);
5093 ap->txconfig = ANEG_CFG_FD;
5094 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5095 if (flowctrl & ADVERTISE_1000XPAUSE)
5096 ap->txconfig |= ANEG_CFG_PS1;
5097 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5098 ap->txconfig |= ANEG_CFG_PS2;
5099 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5100 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5101 tw32_f(MAC_MODE, tp->mac_mode);
5102 udelay(40);
5103
5104 ap->state = ANEG_STATE_ABILITY_DETECT;
5105 break;
5106
5107 case ANEG_STATE_ABILITY_DETECT:
5108 if (ap->ability_match != 0 && ap->rxconfig != 0)
5109 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5110 break;
5111
5112 case ANEG_STATE_ACK_DETECT_INIT:
5113 ap->txconfig |= ANEG_CFG_ACK;
5114 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5115 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5116 tw32_f(MAC_MODE, tp->mac_mode);
5117 udelay(40);
5118
5119 ap->state = ANEG_STATE_ACK_DETECT;
5120
5121 /* fallthru */
5122 case ANEG_STATE_ACK_DETECT:
5123 if (ap->ack_match != 0) {
5124 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5125 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5126 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5127 } else {
5128 ap->state = ANEG_STATE_AN_ENABLE;
5129 }
5130 } else if (ap->ability_match != 0 &&
5131 ap->rxconfig == 0) {
5132 ap->state = ANEG_STATE_AN_ENABLE;
5133 }
5134 break;
5135
5136 case ANEG_STATE_COMPLETE_ACK_INIT:
5137 if (ap->rxconfig & ANEG_CFG_INVAL) {
5138 ret = ANEG_FAILED;
5139 break;
5140 }
5141 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5142 MR_LP_ADV_HALF_DUPLEX |
5143 MR_LP_ADV_SYM_PAUSE |
5144 MR_LP_ADV_ASYM_PAUSE |
5145 MR_LP_ADV_REMOTE_FAULT1 |
5146 MR_LP_ADV_REMOTE_FAULT2 |
5147 MR_LP_ADV_NEXT_PAGE |
5148 MR_TOGGLE_RX |
5149 MR_NP_RX);
5150 if (ap->rxconfig & ANEG_CFG_FD)
5151 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5152 if (ap->rxconfig & ANEG_CFG_HD)
5153 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5154 if (ap->rxconfig & ANEG_CFG_PS1)
5155 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5156 if (ap->rxconfig & ANEG_CFG_PS2)
5157 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5158 if (ap->rxconfig & ANEG_CFG_RF1)
5159 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5160 if (ap->rxconfig & ANEG_CFG_RF2)
5161 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5162 if (ap->rxconfig & ANEG_CFG_NP)
5163 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5164
5165 ap->link_time = ap->cur_time;
5166
5167 ap->flags ^= (MR_TOGGLE_TX);
5168 if (ap->rxconfig & 0x0008)
5169 ap->flags |= MR_TOGGLE_RX;
5170 if (ap->rxconfig & ANEG_CFG_NP)
5171 ap->flags |= MR_NP_RX;
5172 ap->flags |= MR_PAGE_RX;
5173
5174 ap->state = ANEG_STATE_COMPLETE_ACK;
5175 ret = ANEG_TIMER_ENAB;
5176 break;
5177
5178 case ANEG_STATE_COMPLETE_ACK:
5179 if (ap->ability_match != 0 &&
5180 ap->rxconfig == 0) {
5181 ap->state = ANEG_STATE_AN_ENABLE;
5182 break;
5183 }
5184 delta = ap->cur_time - ap->link_time;
5185 if (delta > ANEG_STATE_SETTLE_TIME) {
5186 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5187 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5188 } else {
5189 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5190 !(ap->flags & MR_NP_RX)) {
5191 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5192 } else {
5193 ret = ANEG_FAILED;
5194 }
5195 }
5196 }
5197 break;
5198
5199 case ANEG_STATE_IDLE_DETECT_INIT:
5200 ap->link_time = ap->cur_time;
5201 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5202 tw32_f(MAC_MODE, tp->mac_mode);
5203 udelay(40);
5204
5205 ap->state = ANEG_STATE_IDLE_DETECT;
5206 ret = ANEG_TIMER_ENAB;
5207 break;
5208
5209 case ANEG_STATE_IDLE_DETECT:
5210 if (ap->ability_match != 0 &&
5211 ap->rxconfig == 0) {
5212 ap->state = ANEG_STATE_AN_ENABLE;
5213 break;
5214 }
5215 delta = ap->cur_time - ap->link_time;
5216 if (delta > ANEG_STATE_SETTLE_TIME) {
5217 /* XXX another gem from the Broadcom driver :( */
5218 ap->state = ANEG_STATE_LINK_OK;
5219 }
5220 break;
5221
5222 case ANEG_STATE_LINK_OK:
5223 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5224 ret = ANEG_DONE;
5225 break;
5226
5227 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5228 /* ??? unimplemented */
5229 break;
5230
5231 case ANEG_STATE_NEXT_PAGE_WAIT:
5232 /* ??? unimplemented */
5233 break;
5234
5235 default:
5236 ret = ANEG_FAILED;
5237 break;
5238 }
5239
5240 return ret;
5241 }
5242
5243 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5244 {
5245 int res = 0;
5246 struct tg3_fiber_aneginfo aninfo;
5247 int status = ANEG_FAILED;
5248 unsigned int tick;
5249 u32 tmp;
5250
5251 tw32_f(MAC_TX_AUTO_NEG, 0);
5252
5253 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5254 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5255 udelay(40);
5256
5257 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5258 udelay(40);
5259
5260 memset(&aninfo, 0, sizeof(aninfo));
5261 aninfo.flags |= MR_AN_ENABLE;
5262 aninfo.state = ANEG_STATE_UNKNOWN;
5263 aninfo.cur_time = 0;
5264 tick = 0;
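/* Worst-case budget: up to 195000 iterations with a 1 us delay each,
 * i.e. roughly 195 ms for software autoneg to complete or fail.
 */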
5265 while (++tick < 195000) {
5266 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5267 if (status == ANEG_DONE || status == ANEG_FAILED)
5268 break;
5269
5270 udelay(1);
5271 }
5272
5273 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5274 tw32_f(MAC_MODE, tp->mac_mode);
5275 udelay(40);
5276
5277 *txflags = aninfo.txconfig;
5278 *rxflags = aninfo.flags;
5279
5280 if (status == ANEG_DONE &&
5281 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5282 MR_LP_ADV_FULL_DUPLEX)))
5283 res = 1;
5284
5285 return res;
5286 }
5287
5288 static void tg3_init_bcm8002(struct tg3 *tp)
5289 {
5290 u32 mac_status = tr32(MAC_STATUS);
5291 int i;
5292
5293 /* Reset when initializing for the first time or when we have a link. */
5294 if (tg3_flag(tp, INIT_COMPLETE) &&
5295 !(mac_status & MAC_STATUS_PCS_SYNCED))
5296 return;
5297
5298 /* Set PLL lock range. */
5299 tg3_writephy(tp, 0x16, 0x8007);
5300
5301 /* SW reset */
5302 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5303
5304 /* Wait for reset to complete. */
5305 /* XXX schedule_timeout() ... */
5306 for (i = 0; i < 500; i++)
5307 udelay(10);
5308
5309 /* Config mode; select PMA/Ch 1 regs. */
5310 tg3_writephy(tp, 0x10, 0x8411);
5311
5312 /* Enable auto-lock and comdet, select txclk for tx. */
5313 tg3_writephy(tp, 0x11, 0x0a10);
5314
5315 tg3_writephy(tp, 0x18, 0x00a0);
5316 tg3_writephy(tp, 0x16, 0x41ff);
5317
5318 /* Assert and deassert POR. */
5319 tg3_writephy(tp, 0x13, 0x0400);
5320 udelay(40);
5321 tg3_writephy(tp, 0x13, 0x0000);
5322
5323 tg3_writephy(tp, 0x11, 0x0a50);
5324 udelay(40);
5325 tg3_writephy(tp, 0x11, 0x0a10);
5326
5327 /* Wait for signal to stabilize */
5328 /* XXX schedule_timeout() ... */
5329 for (i = 0; i < 15000; i++)
5330 udelay(10);
5331
5332 /* Deselect the channel register so we can read the PHYID
5333 * later.
5334 */
5335 tg3_writephy(tp, 0x10, 0x8011);
5336 }
5337
5338 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5339 {
5340 u16 flowctrl;
5341 bool current_link_up;
5342 u32 sg_dig_ctrl, sg_dig_status;
5343 u32 serdes_cfg, expected_sg_dig_ctrl;
5344 int workaround, port_a;
5345
5346 serdes_cfg = 0;
5347 expected_sg_dig_ctrl = 0;
5348 workaround = 0;
5349 port_a = 1;
5350 current_link_up = false;
5351
5352 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5353 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5354 workaround = 1;
5355 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5356 port_a = 0;
5357
5358 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5359 /* preserve bits 20-23 for voltage regulator */
5360 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5361 }
5362
5363 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5364
5365 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5366 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5367 if (workaround) {
5368 u32 val = serdes_cfg;
5369
5370 if (port_a)
5371 val |= 0xc010000;
5372 else
5373 val |= 0x4010000;
5374 tw32_f(MAC_SERDES_CFG, val);
5375 }
5376
5377 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5378 }
5379 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5380 tg3_setup_flow_control(tp, 0, 0);
5381 current_link_up = true;
5382 }
5383 goto out;
5384 }
5385
5386 /* Want auto-negotiation. */
5387 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5388
5389 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5390 if (flowctrl & ADVERTISE_1000XPAUSE)
5391 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5392 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5393 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5394
5395 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5396 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5397 tp->serdes_counter &&
5398 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5399 MAC_STATUS_RCVD_CFG)) ==
5400 MAC_STATUS_PCS_SYNCED)) {
5401 tp->serdes_counter--;
5402 current_link_up = true;
5403 goto out;
5404 }
5405 restart_autoneg:
5406 if (workaround)
5407 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5408 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5409 udelay(5);
5410 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5411
5412 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5413 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5414 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5415 MAC_STATUS_SIGNAL_DET)) {
5416 sg_dig_status = tr32(SG_DIG_STATUS);
5417 mac_status = tr32(MAC_STATUS);
5418
5419 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5420 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5421 u32 local_adv = 0, remote_adv = 0;
5422
5423 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5424 local_adv |= ADVERTISE_1000XPAUSE;
5425 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5426 local_adv |= ADVERTISE_1000XPSE_ASYM;
5427
5428 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5429 remote_adv |= LPA_1000XPAUSE;
5430 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5431 remote_adv |= LPA_1000XPAUSE_ASYM;
5432
5433 tp->link_config.rmt_adv =
5434 mii_adv_to_ethtool_adv_x(remote_adv);
5435
5436 tg3_setup_flow_control(tp, local_adv, remote_adv);
5437 current_link_up = true;
5438 tp->serdes_counter = 0;
5439 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5440 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5441 if (tp->serdes_counter)
5442 tp->serdes_counter--;
5443 else {
5444 if (workaround) {
5445 u32 val = serdes_cfg;
5446
5447 if (port_a)
5448 val |= 0xc010000;
5449 else
5450 val |= 0x4010000;
5451
5452 tw32_f(MAC_SERDES_CFG, val);
5453 }
5454
5455 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5456 udelay(40);
5457
5458 /* Link parallel detection - link is up only
5459 * if we have PCS_SYNC and are not
5460 * receiving config code words. */
5461 mac_status = tr32(MAC_STATUS);
5462 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5463 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5464 tg3_setup_flow_control(tp, 0, 0);
5465 current_link_up = true;
5466 tp->phy_flags |=
5467 TG3_PHYFLG_PARALLEL_DETECT;
5468 tp->serdes_counter =
5469 SERDES_PARALLEL_DET_TIMEOUT;
5470 } else
5471 goto restart_autoneg;
5472 }
5473 }
5474 } else {
5475 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5476 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5477 }
5478
5479 out:
5480 return current_link_up;
5481 }
5482
5483 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5484 {
5485 bool current_link_up = false;
5486
5487 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5488 goto out;
5489
5490 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5491 u32 txflags, rxflags;
5492 int i;
5493
5494 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5495 u32 local_adv = 0, remote_adv = 0;
5496
5497 if (txflags & ANEG_CFG_PS1)
5498 local_adv |= ADVERTISE_1000XPAUSE;
5499 if (txflags & ANEG_CFG_PS2)
5500 local_adv |= ADVERTISE_1000XPSE_ASYM;
5501
5502 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5503 remote_adv |= LPA_1000XPAUSE;
5504 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5505 remote_adv |= LPA_1000XPAUSE_ASYM;
5506
5507 tp->link_config.rmt_adv =
5508 mii_adv_to_ethtool_adv_x(remote_adv);
5509
5510 tg3_setup_flow_control(tp, local_adv, remote_adv);
5511
5512 current_link_up = true;
5513 }
5514 for (i = 0; i < 30; i++) {
5515 udelay(20);
5516 tw32_f(MAC_STATUS,
5517 (MAC_STATUS_SYNC_CHANGED |
5518 MAC_STATUS_CFG_CHANGED));
5519 udelay(40);
5520 if ((tr32(MAC_STATUS) &
5521 (MAC_STATUS_SYNC_CHANGED |
5522 MAC_STATUS_CFG_CHANGED)) == 0)
5523 break;
5524 }
5525
5526 mac_status = tr32(MAC_STATUS);
5527 if (!current_link_up &&
5528 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5529 !(mac_status & MAC_STATUS_RCVD_CFG))
5530 current_link_up = true;
5531 } else {
5532 tg3_setup_flow_control(tp, 0, 0);
5533
5534 /* Forcing 1000FD link up. */
5535 current_link_up = true;
5536
5537 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5538 udelay(40);
5539
5540 tw32_f(MAC_MODE, tp->mac_mode);
5541 udelay(40);
5542 }
5543
5544 out:
5545 return current_link_up;
5546 }
5547
5548 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5549 {
5550 u32 orig_pause_cfg;
5551 u16 orig_active_speed;
5552 u8 orig_active_duplex;
5553 u32 mac_status;
5554 bool current_link_up;
5555 int i;
5556
5557 orig_pause_cfg = tp->link_config.active_flowctrl;
5558 orig_active_speed = tp->link_config.active_speed;
5559 orig_active_duplex = tp->link_config.active_duplex;
5560
5561 if (!tg3_flag(tp, HW_AUTONEG) &&
5562 tp->link_up &&
5563 tg3_flag(tp, INIT_COMPLETE)) {
5564 mac_status = tr32(MAC_STATUS);
5565 mac_status &= (MAC_STATUS_PCS_SYNCED |
5566 MAC_STATUS_SIGNAL_DET |
5567 MAC_STATUS_CFG_CHANGED |
5568 MAC_STATUS_RCVD_CFG);
5569 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5570 MAC_STATUS_SIGNAL_DET)) {
5571 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5572 MAC_STATUS_CFG_CHANGED));
5573 return 0;
5574 }
5575 }
5576
5577 tw32_f(MAC_TX_AUTO_NEG, 0);
5578
5579 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5580 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5581 tw32_f(MAC_MODE, tp->mac_mode);
5582 udelay(40);
5583
5584 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5585 tg3_init_bcm8002(tp);
5586
5587 /* Enable the link change event even when polling the serdes. */
5588 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5589 udelay(40);
5590
5591 current_link_up = false;
5592 tp->link_config.rmt_adv = 0;
5593 mac_status = tr32(MAC_STATUS);
5594
5595 if (tg3_flag(tp, HW_AUTONEG))
5596 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5597 else
5598 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5599
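/* Ack any latched link-change indication in the status block while
 * preserving the other status bits (and marking the block updated).
 */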
5600 tp->napi[0].hw_status->status =
5601 (SD_STATUS_UPDATED |
5602 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5603
5604 for (i = 0; i < 100; i++) {
5605 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5606 MAC_STATUS_CFG_CHANGED));
5607 udelay(5);
5608 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5609 MAC_STATUS_CFG_CHANGED |
5610 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5611 break;
5612 }
5613
5614 mac_status = tr32(MAC_STATUS);
5615 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5616 current_link_up = false;
5617 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5618 tp->serdes_counter == 0) {
5619 tw32_f(MAC_MODE, (tp->mac_mode |
5620 MAC_MODE_SEND_CONFIGS));
5621 udelay(1);
5622 tw32_f(MAC_MODE, tp->mac_mode);
5623 }
5624 }
5625
5626 if (current_link_up) {
5627 tp->link_config.active_speed = SPEED_1000;
5628 tp->link_config.active_duplex = DUPLEX_FULL;
5629 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5630 LED_CTRL_LNKLED_OVERRIDE |
5631 LED_CTRL_1000MBPS_ON));
5632 } else {
5633 tp->link_config.active_speed = SPEED_UNKNOWN;
5634 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5635 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5636 LED_CTRL_LNKLED_OVERRIDE |
5637 LED_CTRL_TRAFFIC_OVERRIDE));
5638 }
5639
5640 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5641 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5642 if (orig_pause_cfg != now_pause_cfg ||
5643 orig_active_speed != tp->link_config.active_speed ||
5644 orig_active_duplex != tp->link_config.active_duplex)
5645 tg3_link_report(tp);
5646 }
5647
5648 return 0;
5649 }
5650
5651 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5652 {
5653 int err = 0;
5654 u32 bmsr, bmcr;
5655 u16 current_speed = SPEED_UNKNOWN;
5656 u8 current_duplex = DUPLEX_UNKNOWN;
5657 bool current_link_up = false;
5658 u32 local_adv, remote_adv, sgsr;
5659
5660 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5661 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5662 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5663 (sgsr & SERDES_TG3_SGMII_MODE)) {
5664
5665 if (force_reset)
5666 tg3_phy_reset(tp);
5667
5668 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5669
5670 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5671 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5672 } else {
5673 current_link_up = true;
5674 if (sgsr & SERDES_TG3_SPEED_1000) {
5675 current_speed = SPEED_1000;
5676 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5677 } else if (sgsr & SERDES_TG3_SPEED_100) {
5678 current_speed = SPEED_100;
5679 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5680 } else {
5681 current_speed = SPEED_10;
5682 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5683 }
5684
5685 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5686 current_duplex = DUPLEX_FULL;
5687 else
5688 current_duplex = DUPLEX_HALF;
5689 }
5690
5691 tw32_f(MAC_MODE, tp->mac_mode);
5692 udelay(40);
5693
5694 tg3_clear_mac_status(tp);
5695
5696 goto fiber_setup_done;
5697 }
5698
5699 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5700 tw32_f(MAC_MODE, tp->mac_mode);
5701 udelay(40);
5702
5703 tg3_clear_mac_status(tp);
5704
5705 if (force_reset)
5706 tg3_phy_reset(tp);
5707
5708 tp->link_config.rmt_adv = 0;
5709
5710 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5711 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5712 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5713 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5714 bmsr |= BMSR_LSTATUS;
5715 else
5716 bmsr &= ~BMSR_LSTATUS;
5717 }
5718
5719 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5720
5721 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5722 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5723 /* do nothing, just check for link up at the end */
5724 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5725 u32 adv, newadv;
5726
5727 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5728 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5729 ADVERTISE_1000XPAUSE |
5730 ADVERTISE_1000XPSE_ASYM |
5731 ADVERTISE_SLCT);
5732
5733 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5734 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5735
5736 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5737 tg3_writephy(tp, MII_ADVERTISE, newadv);
5738 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5739 tg3_writephy(tp, MII_BMCR, bmcr);
5740
5741 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5742 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5743 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5744
5745 return err;
5746 }
5747 } else {
5748 u32 new_bmcr;
5749
5750 bmcr &= ~BMCR_SPEED1000;
5751 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5752
5753 if (tp->link_config.duplex == DUPLEX_FULL)
5754 new_bmcr |= BMCR_FULLDPLX;
5755
5756 if (new_bmcr != bmcr) {
5757 /* BMCR_SPEED1000 is a reserved bit that needs
5758 * to be set on write.
5759 */
5760 new_bmcr |= BMCR_SPEED1000;
5761
5762 /* Force a linkdown */
5763 if (tp->link_up) {
5764 u32 adv;
5765
5766 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5767 adv &= ~(ADVERTISE_1000XFULL |
5768 ADVERTISE_1000XHALF |
5769 ADVERTISE_SLCT);
5770 tg3_writephy(tp, MII_ADVERTISE, adv);
5771 tg3_writephy(tp, MII_BMCR, bmcr |
5772 BMCR_ANRESTART |
5773 BMCR_ANENABLE);
5774 udelay(10);
5775 tg3_carrier_off(tp);
5776 }
5777 tg3_writephy(tp, MII_BMCR, new_bmcr);
5778 bmcr = new_bmcr;
5779 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5780 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5781 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5782 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5783 bmsr |= BMSR_LSTATUS;
5784 else
5785 bmsr &= ~BMSR_LSTATUS;
5786 }
5787 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5788 }
5789 }
5790
5791 if (bmsr & BMSR_LSTATUS) {
5792 current_speed = SPEED_1000;
5793 current_link_up = true;
5794 if (bmcr & BMCR_FULLDPLX)
5795 current_duplex = DUPLEX_FULL;
5796 else
5797 current_duplex = DUPLEX_HALF;
5798
5799 local_adv = 0;
5800 remote_adv = 0;
5801
5802 if (bmcr & BMCR_ANENABLE) {
5803 u32 common;
5804
5805 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5806 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5807 common = local_adv & remote_adv;
5808 if (common & (ADVERTISE_1000XHALF |
5809 ADVERTISE_1000XFULL)) {
5810 if (common & ADVERTISE_1000XFULL)
5811 current_duplex = DUPLEX_FULL;
5812 else
5813 current_duplex = DUPLEX_HALF;
5814
5815 tp->link_config.rmt_adv =
5816 mii_adv_to_ethtool_adv_x(remote_adv);
5817 } else if (!tg3_flag(tp, 5780_CLASS)) {
5818 /* Link is up via parallel detect */
5819 } else {
5820 current_link_up = false;
5821 }
5822 }
5823 }
5824
5825 fiber_setup_done:
5826 if (current_link_up && current_duplex == DUPLEX_FULL)
5827 tg3_setup_flow_control(tp, local_adv, remote_adv);
5828
5829 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5830 if (tp->link_config.active_duplex == DUPLEX_HALF)
5831 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5832
5833 tw32_f(MAC_MODE, tp->mac_mode);
5834 udelay(40);
5835
5836 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5837
5838 tp->link_config.active_speed = current_speed;
5839 tp->link_config.active_duplex = current_duplex;
5840
5841 tg3_test_and_report_link_chg(tp, current_link_up);
5842 return err;
5843 }
5844
5845 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5846 {
5847 if (tp->serdes_counter) {
5848 /* Give autoneg time to complete. */
5849 tp->serdes_counter--;
5850 return;
5851 }
5852
5853 if (!tp->link_up &&
5854 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5855 u32 bmcr;
5856
5857 tg3_readphy(tp, MII_BMCR, &bmcr);
5858 if (bmcr & BMCR_ANENABLE) {
5859 u32 phy1, phy2;
5860
5861 /* Select shadow register 0x1f */
5862 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5863 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5864
5865 /* Select expansion interrupt status register */
5866 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5867 MII_TG3_DSP_EXP1_INT_STAT);
5868 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5869 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5870
5871 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5872 /* We have signal detect and are not receiving
5873 * config code words, so the link is up by parallel
5874 * detection.
5875 */
5876
5877 bmcr &= ~BMCR_ANENABLE;
5878 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5879 tg3_writephy(tp, MII_BMCR, bmcr);
5880 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5881 }
5882 }
5883 } else if (tp->link_up &&
5884 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5885 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5886 u32 phy2;
5887
5888 /* Select expansion interrupt status register */
5889 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5890 MII_TG3_DSP_EXP1_INT_STAT);
5891 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5892 if (phy2 & 0x20) {
5893 u32 bmcr;
5894
5895 /* Config code words received, turn on autoneg. */
5896 tg3_readphy(tp, MII_BMCR, &bmcr);
5897 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5898
5899 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5900
5901 }
5902 }
5903 }
5904
5905 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5906 {
5907 u32 val;
5908 int err;
5909
5910 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5911 err = tg3_setup_fiber_phy(tp, force_reset);
5912 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5913 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5914 else
5915 err = tg3_setup_copper_phy(tp, force_reset);
5916
5917 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5918 u32 scale;
5919
5920 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5921 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5922 scale = 65;
5923 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5924 scale = 6;
5925 else
5926 scale = 12;
5927
5928 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5929 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5930 tw32(GRC_MISC_CFG, val);
5931 }
5932
5933 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5934 (6 << TX_LENGTHS_IPG_SHIFT);
5935 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5936 tg3_asic_rev(tp) == ASIC_REV_5762)
5937 val |= tr32(MAC_TX_LENGTHS) &
5938 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5939 TX_LENGTHS_CNT_DWN_VAL_MSK);
5940
5941 if (tp->link_config.active_speed == SPEED_1000 &&
5942 tp->link_config.active_duplex == DUPLEX_HALF)
5943 tw32(MAC_TX_LENGTHS, val |
5944 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5945 else
5946 tw32(MAC_TX_LENGTHS, val |
5947 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5948
5949 if (!tg3_flag(tp, 5705_PLUS)) {
5950 if (tp->link_up) {
5951 tw32(HOSTCC_STAT_COAL_TICKS,
5952 tp->coal.stats_block_coalesce_usecs);
5953 } else {
5954 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5955 }
5956 }
5957
5958 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5959 val = tr32(PCIE_PWR_MGMT_THRESH);
5960 if (!tp->link_up)
5961 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5962 tp->pwrmgmt_thresh;
5963 else
5964 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5965 tw32(PCIE_PWR_MGMT_THRESH, val);
5966 }
5967
5968 return err;
5969 }
5970
5971 /* tp->lock must be held */
5972 static u64 tg3_refclk_read(struct tg3 *tp)
5973 {
5974 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5975 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5976 }
5977
5978 /* tp->lock must be held */
5979 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5980 {
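/* Sketch of the intent, inferred from the STOP/RESUME control bits
 * used below: the counter is stopped around the two 32-bit register
 * writes so the 64-bit value cannot be loaded torn, then restarted.
 */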
5981 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5982 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5983 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5984 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5985 }
5986
5987 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5988 static inline void tg3_full_unlock(struct tg3 *tp);
5989 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5990 {
5991 struct tg3 *tp = netdev_priv(dev);
5992
5993 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5994 SOF_TIMESTAMPING_RX_SOFTWARE |
5995 SOF_TIMESTAMPING_SOFTWARE |
5996 SOF_TIMESTAMPING_TX_HARDWARE |
5997 SOF_TIMESTAMPING_RX_HARDWARE |
5998 SOF_TIMESTAMPING_RAW_HARDWARE;
5999
6000 if (tp->ptp_clock)
6001 info->phc_index = ptp_clock_index(tp->ptp_clock);
6002 else
6003 info->phc_index = -1;
6004
6005 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6006
6007 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6008 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6009 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6010 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6011 return 0;
6012 }
6013
6014 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6015 {
6016 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6017 bool neg_adj = false;
6018 u32 correction = 0;
6019
6020 if (ppb < 0) {
6021 neg_adj = true;
6022 ppb = -ppb;
6023 }
6024
6025 	/* Frequency adjustment is performed using hardware with a 24-bit
6026 	 * accumulator and a programmable correction value. On each clock
6027 	 * cycle, the correction value is added to the accumulator and, when
6028 	 * it overflows, the time counter is incremented/decremented.
6029 *
6030 * So conversion from ppb to correction value is
6031 * ppb * (1 << 24) / 1000000000
6032 */
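	/* Illustrative arithmetic (example values, not from any spec):
	 * a requested slew of +1000 ppb gives
	 * correction = 1000 * (1 << 24) / 1000000000
	 *            = 16777216000 / 1000000000, which truncates to 16.
	 */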
6033 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6034 TG3_EAV_REF_CLK_CORRECT_MASK;
6035
6036 tg3_full_lock(tp, 0);
6037
6038 if (correction)
6039 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6040 TG3_EAV_REF_CLK_CORRECT_EN |
6041 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6042 else
6043 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6044
6045 tg3_full_unlock(tp);
6046
6047 return 0;
6048 }
6049
6050 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6051 {
6052 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6053
6054 tg3_full_lock(tp, 0);
6055 tp->ptp_adjust += delta;
6056 tg3_full_unlock(tp);
6057
6058 return 0;
6059 }
6060
6061 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6062 {
6063 u64 ns;
6064 u32 remainder;
6065 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6066
6067 tg3_full_lock(tp, 0);
6068 ns = tg3_refclk_read(tp);
6069 ns += tp->ptp_adjust;
6070 tg3_full_unlock(tp);
6071
6072 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6073 ts->tv_nsec = remainder;
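	/* e.g. ns = 1234567890123 splits into tv_sec = 1234 and
	 * tv_nsec = 567890123.
	 */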
6074
6075 return 0;
6076 }
6077
6078 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6079 const struct timespec *ts)
6080 {
6081 u64 ns;
6082 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6083
6084 ns = timespec_to_ns(ts);
6085
6086 tg3_full_lock(tp, 0);
6087 tg3_refclk_write(tp, ns);
6088 tp->ptp_adjust = 0;
6089 tg3_full_unlock(tp);
6090
6091 return 0;
6092 }
6093
6094 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6095 struct ptp_clock_request *rq, int on)
6096 {
6097 return -EOPNOTSUPP;
6098 }
6099
6100 static const struct ptp_clock_info tg3_ptp_caps = {
6101 .owner = THIS_MODULE,
6102 .name = "tg3 clock",
6103 .max_adj = 250000000,
6104 .n_alarm = 0,
6105 .n_ext_ts = 0,
6106 .n_per_out = 0,
6107 .pps = 0,
6108 .adjfreq = tg3_ptp_adjfreq,
6109 .adjtime = tg3_ptp_adjtime,
6110 .gettime = tg3_ptp_gettime,
6111 .settime = tg3_ptp_settime,
6112 .enable = tg3_ptp_enable,
6113 };
6114
6115 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6116 struct skb_shared_hwtstamps *timestamp)
6117 {
6118 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6119 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6120 tp->ptp_adjust);
6121 }
6122
6123 /* tp->lock must be held */
6124 static void tg3_ptp_init(struct tg3 *tp)
6125 {
6126 if (!tg3_flag(tp, PTP_CAPABLE))
6127 return;
6128
6129 /* Initialize the hardware clock to the system time. */
6130 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6131 tp->ptp_adjust = 0;
6132 tp->ptp_info = tg3_ptp_caps;
6133 }
6134
6135 /* tp->lock must be held */
6136 static void tg3_ptp_resume(struct tg3 *tp)
6137 {
6138 if (!tg3_flag(tp, PTP_CAPABLE))
6139 return;
6140
6141 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6142 tp->ptp_adjust = 0;
6143 }
6144
6145 static void tg3_ptp_fini(struct tg3 *tp)
6146 {
6147 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6148 return;
6149
6150 ptp_clock_unregister(tp->ptp_clock);
6151 tp->ptp_clock = NULL;
6152 tp->ptp_adjust = 0;
6153 }
6154
6155 static inline int tg3_irq_sync(struct tg3 *tp)
6156 {
6157 return tp->irq_sync;
6158 }
6159
6160 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6161 {
6162 int i;
6163
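	/* Advance dst by the same byte offset as the register window so
	 * that the word read from register off + i lands at byte offset
	 * off + i within the caller's buffer.
	 */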
6164 dst = (u32 *)((u8 *)dst + off);
6165 for (i = 0; i < len; i += sizeof(u32))
6166 *dst++ = tr32(off + i);
6167 }
6168
6169 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6170 {
6171 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6172 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6173 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6174 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6175 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6176 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6177 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6178 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6179 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6180 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6181 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6182 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6183 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6184 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6185 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6186 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6187 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6188 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6189 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6190
6191 if (tg3_flag(tp, SUPPORT_MSIX))
6192 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6193
6194 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6195 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6196 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6197 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6198 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6199 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6200 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6201 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6202
6203 if (!tg3_flag(tp, 5705_PLUS)) {
6204 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6205 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6206 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6207 }
6208
6209 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6210 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6211 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6212 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6213 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6214
6215 if (tg3_flag(tp, NVRAM))
6216 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6217 }
6218
6219 static void tg3_dump_state(struct tg3 *tp)
6220 {
6221 int i;
6222 u32 *regs;
6223
6224 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6225 if (!regs)
6226 return;
6227
6228 if (tg3_flag(tp, PCI_EXPRESS)) {
6229 /* Read up to but not including private PCI registers */
6230 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6231 regs[i / sizeof(u32)] = tr32(i);
6232 } else
6233 tg3_dump_legacy_regs(tp, regs);
6234
6235 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6236 if (!regs[i + 0] && !regs[i + 1] &&
6237 !regs[i + 2] && !regs[i + 3])
6238 continue;
6239
6240 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6241 i * 4,
6242 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6243 }
6244
6245 kfree(regs);
6246
6247 for (i = 0; i < tp->irq_cnt; i++) {
6248 struct tg3_napi *tnapi = &tp->napi[i];
6249
6250 /* SW status block */
6251 netdev_err(tp->dev,
6252 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6253 i,
6254 tnapi->hw_status->status,
6255 tnapi->hw_status->status_tag,
6256 tnapi->hw_status->rx_jumbo_consumer,
6257 tnapi->hw_status->rx_consumer,
6258 tnapi->hw_status->rx_mini_consumer,
6259 tnapi->hw_status->idx[0].rx_producer,
6260 tnapi->hw_status->idx[0].tx_consumer);
6261
6262 netdev_err(tp->dev,
6263 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6264 i,
6265 tnapi->last_tag, tnapi->last_irq_tag,
6266 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6267 tnapi->rx_rcb_ptr,
6268 tnapi->prodring.rx_std_prod_idx,
6269 tnapi->prodring.rx_std_cons_idx,
6270 tnapi->prodring.rx_jmb_prod_idx,
6271 tnapi->prodring.rx_jmb_cons_idx);
6272 }
6273 }
6274
6275 /* This is called whenever we suspect that the system chipset is re-
6276 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6277 * is bogus tx completions. We try to recover by setting the
6278 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6279 * in the workqueue.
6280 */
6281 static void tg3_tx_recover(struct tg3 *tp)
6282 {
6283 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6284 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6285
6286 netdev_warn(tp->dev,
6287 "The system may be re-ordering memory-mapped I/O "
6288 "cycles to the network device, attempting to recover. "
6289 "Please report the problem to the driver maintainer "
6290 "and include system chipset information.\n");
6291
6292 spin_lock(&tp->lock);
6293 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6294 spin_unlock(&tp->lock);
6295 }
6296
6297 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6298 {
6299 /* Tell compiler to fetch tx indices from memory. */
6300 barrier();
6301 return tnapi->tx_pending -
6302 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6303 }
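/* Illustrative occupancy math (assuming the usual 512-entry TX ring):
 * with tx_prod = 10 and tx_cons = 500, (10 - 500) & 511 = 22 BDs are
 * still in flight, so tg3_tx_avail() returns tx_pending - 22.
 */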
6304
6305 /* Tigon3 never reports partial packet sends. So we do not
6306 * need special logic to handle SKBs that have not had all
6307 * of their frags sent yet, like SunGEM does.
6308 */
6309 static void tg3_tx(struct tg3_napi *tnapi)
6310 {
6311 struct tg3 *tp = tnapi->tp;
6312 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6313 u32 sw_idx = tnapi->tx_cons;
6314 struct netdev_queue *txq;
6315 int index = tnapi - tp->napi;
6316 unsigned int pkts_compl = 0, bytes_compl = 0;
6317
6318 if (tg3_flag(tp, ENABLE_TSS))
6319 index--;
6320
6321 txq = netdev_get_tx_queue(tp->dev, index);
6322
6323 while (sw_idx != hw_idx) {
6324 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6325 struct sk_buff *skb = ri->skb;
6326 int i, tx_bug = 0;
6327
6328 if (unlikely(skb == NULL)) {
6329 tg3_tx_recover(tp);
6330 return;
6331 }
6332
6333 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6334 struct skb_shared_hwtstamps timestamp;
6335 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6336 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6337
6338 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6339
6340 skb_tstamp_tx(skb, &timestamp);
6341 }
6342
6343 pci_unmap_single(tp->pdev,
6344 dma_unmap_addr(ri, mapping),
6345 skb_headlen(skb),
6346 PCI_DMA_TODEVICE);
6347
6348 ri->skb = NULL;
6349
6350 while (ri->fragmented) {
6351 ri->fragmented = false;
6352 sw_idx = NEXT_TX(sw_idx);
6353 ri = &tnapi->tx_buffers[sw_idx];
6354 }
6355
6356 sw_idx = NEXT_TX(sw_idx);
6357
6358 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6359 ri = &tnapi->tx_buffers[sw_idx];
6360 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6361 tx_bug = 1;
6362
6363 pci_unmap_page(tp->pdev,
6364 dma_unmap_addr(ri, mapping),
6365 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6366 PCI_DMA_TODEVICE);
6367
6368 while (ri->fragmented) {
6369 ri->fragmented = false;
6370 sw_idx = NEXT_TX(sw_idx);
6371 ri = &tnapi->tx_buffers[sw_idx];
6372 }
6373
6374 sw_idx = NEXT_TX(sw_idx);
6375 }
6376
6377 pkts_compl++;
6378 bytes_compl += skb->len;
6379
6380 dev_kfree_skb(skb);
6381
6382 if (unlikely(tx_bug)) {
6383 tg3_tx_recover(tp);
6384 return;
6385 }
6386 }
6387
6388 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6389
6390 tnapi->tx_cons = sw_idx;
6391
6392 /* Need to make the tx_cons update visible to tg3_start_xmit()
6393 * before checking for netif_queue_stopped(). Without the
6394 * memory barrier, there is a small possibility that tg3_start_xmit()
6395 * will miss it and cause the queue to be stopped forever.
6396 */
6397 smp_mb();
6398
6399 if (unlikely(netif_tx_queue_stopped(txq) &&
6400 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6401 __netif_tx_lock(txq, smp_processor_id());
6402 if (netif_tx_queue_stopped(txq) &&
6403 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6404 netif_tx_wake_queue(txq);
6405 __netif_tx_unlock(txq);
6406 }
6407 }
6408
6409 static void tg3_frag_free(bool is_frag, void *data)
6410 {
6411 if (is_frag)
6412 put_page(virt_to_head_page(data));
6413 else
6414 kfree(data);
6415 }
6416
6417 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6418 {
6419 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6420 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6421
6422 if (!ri->data)
6423 return;
6424
6425 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6426 map_sz, PCI_DMA_FROMDEVICE);
6427 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6428 ri->data = NULL;
6429 }
6430
6431
6432 /* Returns size of skb allocated or < 0 on error.
6433 *
6434 * We only need to fill in the address because the other members
6435 * of the RX descriptor are invariant, see tg3_init_rings.
6436 *
6437  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6438 * posting buffers we only dirty the first cache line of the RX
6439 * descriptor (containing the address). Whereas for the RX status
6440 * buffers the cpu only reads the last cacheline of the RX descriptor
6441 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6442 */
6443 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6444 u32 opaque_key, u32 dest_idx_unmasked,
6445 unsigned int *frag_size)
6446 {
6447 struct tg3_rx_buffer_desc *desc;
6448 struct ring_info *map;
6449 u8 *data;
6450 dma_addr_t mapping;
6451 int skb_size, data_size, dest_idx;
6452
6453 switch (opaque_key) {
6454 case RXD_OPAQUE_RING_STD:
6455 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6456 desc = &tpr->rx_std[dest_idx];
6457 map = &tpr->rx_std_buffers[dest_idx];
6458 data_size = tp->rx_pkt_map_sz;
6459 break;
6460
6461 case RXD_OPAQUE_RING_JUMBO:
6462 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6463 desc = &tpr->rx_jmb[dest_idx].std;
6464 map = &tpr->rx_jmb_buffers[dest_idx];
6465 data_size = TG3_RX_JMB_MAP_SZ;
6466 break;
6467
6468 default:
6469 return -EINVAL;
6470 }
6471
6472 /* Do not overwrite any of the map or rp information
6473 * until we are sure we can commit to a new buffer.
6474 *
6475 * Callers depend upon this behavior and assume that
6476 * we leave everything unchanged if we fail.
6477 */
6478 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6479 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6480 if (skb_size <= PAGE_SIZE) {
6481 data = netdev_alloc_frag(skb_size);
6482 *frag_size = skb_size;
6483 } else {
6484 data = kmalloc(skb_size, GFP_ATOMIC);
6485 *frag_size = 0;
6486 }
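	/* On 4K-page systems the standard ~1.5K buffers stay under
	 * PAGE_SIZE and come from the page-frag allocator, while 9K jumbo
	 * buffers fall back to kmalloc; frag_size = 0 tells the free path
	 * which allocator owned the memory (see tg3_frag_free()).
	 */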
6487 if (!data)
6488 return -ENOMEM;
6489
6490 mapping = pci_map_single(tp->pdev,
6491 data + TG3_RX_OFFSET(tp),
6492 data_size,
6493 PCI_DMA_FROMDEVICE);
6494 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6495 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6496 return -EIO;
6497 }
6498
6499 map->data = data;
6500 dma_unmap_addr_set(map, mapping, mapping);
6501
6502 desc->addr_hi = ((u64)mapping >> 32);
6503 desc->addr_lo = ((u64)mapping & 0xffffffff);
6504
6505 return data_size;
6506 }
6507
6508 /* We only need to copy over the address because the other
6509 * members of the RX descriptor are invariant. See notes above
6510 * tg3_alloc_rx_data for full details.
6511 */
6512 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6513 struct tg3_rx_prodring_set *dpr,
6514 u32 opaque_key, int src_idx,
6515 u32 dest_idx_unmasked)
6516 {
6517 struct tg3 *tp = tnapi->tp;
6518 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6519 struct ring_info *src_map, *dest_map;
6520 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6521 int dest_idx;
6522
6523 switch (opaque_key) {
6524 case RXD_OPAQUE_RING_STD:
6525 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6526 dest_desc = &dpr->rx_std[dest_idx];
6527 dest_map = &dpr->rx_std_buffers[dest_idx];
6528 src_desc = &spr->rx_std[src_idx];
6529 src_map = &spr->rx_std_buffers[src_idx];
6530 break;
6531
6532 case RXD_OPAQUE_RING_JUMBO:
6533 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6534 dest_desc = &dpr->rx_jmb[dest_idx].std;
6535 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6536 src_desc = &spr->rx_jmb[src_idx].std;
6537 src_map = &spr->rx_jmb_buffers[src_idx];
6538 break;
6539
6540 default:
6541 return;
6542 }
6543
6544 dest_map->data = src_map->data;
6545 dma_unmap_addr_set(dest_map, mapping,
6546 dma_unmap_addr(src_map, mapping));
6547 dest_desc->addr_hi = src_desc->addr_hi;
6548 dest_desc->addr_lo = src_desc->addr_lo;
6549
6550 /* Ensure that the update to the skb happens after the physical
6551 * addresses have been transferred to the new BD location.
6552 */
6553 smp_wmb();
6554
6555 src_map->data = NULL;
6556 }
6557
6558 /* The RX ring scheme is composed of multiple rings which post fresh
6559 * buffers to the chip, and one special ring the chip uses to report
6560 * status back to the host.
6561 *
6562 * The special ring reports the status of received packets to the
6563 * host. The chip does not write into the original descriptor the
6564 * RX buffer was obtained from. The chip simply takes the original
6565 * descriptor as provided by the host, updates the status and length
6566 * field, then writes this into the next status ring entry.
6567 *
6568 * Each ring the host uses to post buffers to the chip is described
6569  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6570  * it is first placed into on-chip RAM.  Once the packet's length is
6571  * known, the chip walks down the TG3_BDINFO entries to select the ring:
6572  * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6573  * whose MAXLEN covers the new packet's length is chosen.
6574 *
6575 * The "separate ring for rx status" scheme may sound queer, but it makes
6576 * sense from a cache coherency perspective. If only the host writes
6577 * to the buffer post rings, and only the chip writes to the rx status
6578 * rings, then cache lines never move beyond shared-modified state.
6579 * If both the host and chip were to write into the same ring, cache line
6580 * eviction could occur since both entities want it in an exclusive state.
6581 */
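/* A rough sketch of the flow described above:
 *
 *   host  --- posts empty buffers --->  std/jumbo producer rings
 *   chip  --- DMA packet + status --->  rx return (status) ring
 *   host  --- consumes in tg3_rx() -->  recycles/refills producer rings
 */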
6582 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6583 {
6584 struct tg3 *tp = tnapi->tp;
6585 u32 work_mask, rx_std_posted = 0;
6586 u32 std_prod_idx, jmb_prod_idx;
6587 u32 sw_idx = tnapi->rx_rcb_ptr;
6588 u16 hw_idx;
6589 int received;
6590 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6591
6592 hw_idx = *(tnapi->rx_rcb_prod_idx);
6593 /*
6594 * We need to order the read of hw_idx and the read of
6595 * the opaque cookie.
6596 */
6597 rmb();
6598 work_mask = 0;
6599 received = 0;
6600 std_prod_idx = tpr->rx_std_prod_idx;
6601 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6602 while (sw_idx != hw_idx && budget > 0) {
6603 struct ring_info *ri;
6604 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6605 unsigned int len;
6606 struct sk_buff *skb;
6607 dma_addr_t dma_addr;
6608 u32 opaque_key, desc_idx, *post_ptr;
6609 u8 *data;
6610 u64 tstamp = 0;
6611
6612 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6613 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6614 if (opaque_key == RXD_OPAQUE_RING_STD) {
6615 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6616 dma_addr = dma_unmap_addr(ri, mapping);
6617 data = ri->data;
6618 post_ptr = &std_prod_idx;
6619 rx_std_posted++;
6620 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6621 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6622 dma_addr = dma_unmap_addr(ri, mapping);
6623 data = ri->data;
6624 post_ptr = &jmb_prod_idx;
6625 } else
6626 goto next_pkt_nopost;
6627
6628 work_mask |= opaque_key;
6629
6630 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6631 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6632 drop_it:
6633 tg3_recycle_rx(tnapi, tpr, opaque_key,
6634 desc_idx, *post_ptr);
6635 drop_it_no_recycle:
6636 /* Other statistics kept track of by card. */
6637 tp->rx_dropped++;
6638 goto next_pkt;
6639 }
6640
6641 prefetch(data + TG3_RX_OFFSET(tp));
6642 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6643 ETH_FCS_LEN;
6644
6645 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6646 RXD_FLAG_PTPSTAT_PTPV1 ||
6647 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6648 RXD_FLAG_PTPSTAT_PTPV2) {
6649 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6650 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6651 }
6652
6653 if (len > TG3_RX_COPY_THRESH(tp)) {
6654 int skb_size;
6655 unsigned int frag_size;
6656
6657 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6658 *post_ptr, &frag_size);
6659 if (skb_size < 0)
6660 goto drop_it;
6661
6662 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6663 PCI_DMA_FROMDEVICE);
6664
6665 skb = build_skb(data, frag_size);
6666 if (!skb) {
6667 tg3_frag_free(frag_size != 0, data);
6668 goto drop_it_no_recycle;
6669 }
6670 skb_reserve(skb, TG3_RX_OFFSET(tp));
6671 /* Ensure that the update to the data happens
6672 * after the usage of the old DMA mapping.
6673 */
6674 smp_wmb();
6675
6676 ri->data = NULL;
6677
6678 } else {
6679 tg3_recycle_rx(tnapi, tpr, opaque_key,
6680 desc_idx, *post_ptr);
6681
6682 skb = netdev_alloc_skb(tp->dev,
6683 len + TG3_RAW_IP_ALIGN);
6684 if (skb == NULL)
6685 goto drop_it_no_recycle;
6686
6687 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6688 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6689 memcpy(skb->data,
6690 data + TG3_RX_OFFSET(tp),
6691 len);
6692 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6693 }
6694
6695 skb_put(skb, len);
6696 if (tstamp)
6697 tg3_hwclock_to_timestamp(tp, tstamp,
6698 skb_hwtstamps(skb));
6699
6700 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6701 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6702 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6703 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6704 skb->ip_summed = CHECKSUM_UNNECESSARY;
6705 else
6706 skb_checksum_none_assert(skb);
6707
6708 skb->protocol = eth_type_trans(skb, tp->dev);
6709
6710 if (len > (tp->dev->mtu + ETH_HLEN) &&
6711 skb->protocol != htons(ETH_P_8021Q)) {
6712 dev_kfree_skb(skb);
6713 goto drop_it_no_recycle;
6714 }
6715
6716 if (desc->type_flags & RXD_FLAG_VLAN &&
6717 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6718 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6719 desc->err_vlan & RXD_VLAN_MASK);
6720
6721 napi_gro_receive(&tnapi->napi, skb);
6722
6723 received++;
6724 budget--;
6725
6726 next_pkt:
6727 (*post_ptr)++;
6728
6729 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6730 tpr->rx_std_prod_idx = std_prod_idx &
6731 tp->rx_std_ring_mask;
6732 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6733 tpr->rx_std_prod_idx);
6734 work_mask &= ~RXD_OPAQUE_RING_STD;
6735 rx_std_posted = 0;
6736 }
6737 next_pkt_nopost:
6738 sw_idx++;
6739 sw_idx &= tp->rx_ret_ring_mask;
6740
6741 /* Refresh hw_idx to see if there is new work */
6742 if (sw_idx == hw_idx) {
6743 hw_idx = *(tnapi->rx_rcb_prod_idx);
6744 rmb();
6745 }
6746 }
6747
6748 /* ACK the status ring. */
6749 tnapi->rx_rcb_ptr = sw_idx;
6750 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6751
6752 /* Refill RX ring(s). */
6753 if (!tg3_flag(tp, ENABLE_RSS)) {
6754 /* Sync BD data before updating mailbox */
6755 wmb();
6756
6757 if (work_mask & RXD_OPAQUE_RING_STD) {
6758 tpr->rx_std_prod_idx = std_prod_idx &
6759 tp->rx_std_ring_mask;
6760 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6761 tpr->rx_std_prod_idx);
6762 }
6763 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6764 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6765 tp->rx_jmb_ring_mask;
6766 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6767 tpr->rx_jmb_prod_idx);
6768 }
6769 mmiowb();
6770 } else if (work_mask) {
6771 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6772 * updated before the producer indices can be updated.
6773 */
6774 smp_wmb();
6775
6776 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6777 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6778
6779 if (tnapi != &tp->napi[1]) {
6780 tp->rx_refill = true;
6781 napi_schedule(&tp->napi[1].napi);
6782 }
6783 }
6784
6785 return received;
6786 }
6787
6788 static void tg3_poll_link(struct tg3 *tp)
6789 {
6790 /* handle link change and other phy events */
6791 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6792 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6793
6794 if (sblk->status & SD_STATUS_LINK_CHG) {
6795 sblk->status = SD_STATUS_UPDATED |
6796 (sblk->status & ~SD_STATUS_LINK_CHG);
6797 spin_lock(&tp->lock);
6798 if (tg3_flag(tp, USE_PHYLIB)) {
6799 tw32_f(MAC_STATUS,
6800 (MAC_STATUS_SYNC_CHANGED |
6801 MAC_STATUS_CFG_CHANGED |
6802 MAC_STATUS_MI_COMPLETION |
6803 MAC_STATUS_LNKSTATE_CHANGED));
6804 udelay(40);
6805 } else
6806 tg3_setup_phy(tp, false);
6807 spin_unlock(&tp->lock);
6808 }
6809 }
6810 }
6811
6812 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6813 struct tg3_rx_prodring_set *dpr,
6814 struct tg3_rx_prodring_set *spr)
6815 {
6816 u32 si, di, cpycnt, src_prod_idx;
6817 int i, err = 0;
6818
6819 while (1) {
6820 src_prod_idx = spr->rx_std_prod_idx;
6821
6822 /* Make sure updates to the rx_std_buffers[] entries and the
6823 * standard producer index are seen in the correct order.
6824 */
6825 smp_rmb();
6826
6827 if (spr->rx_std_cons_idx == src_prod_idx)
6828 break;
6829
6830 if (spr->rx_std_cons_idx < src_prod_idx)
6831 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6832 else
6833 cpycnt = tp->rx_std_ring_mask + 1 -
6834 spr->rx_std_cons_idx;
6835
6836 cpycnt = min(cpycnt,
6837 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
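		/* Worked example (assuming a 512-entry ring, mask 511):
		 * cons_idx = 500 and prod_idx = 10 copies to the end of
		 * the ring first (cpycnt = 512 - 500 = 12); the wrapped
		 * remainder is handled on the next loop iteration.
		 */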
6838
6839 si = spr->rx_std_cons_idx;
6840 di = dpr->rx_std_prod_idx;
6841
6842 for (i = di; i < di + cpycnt; i++) {
6843 if (dpr->rx_std_buffers[i].data) {
6844 cpycnt = i - di;
6845 err = -ENOSPC;
6846 break;
6847 }
6848 }
6849
6850 if (!cpycnt)
6851 break;
6852
6853 /* Ensure that updates to the rx_std_buffers ring and the
6854 * shadowed hardware producer ring from tg3_recycle_skb() are
6855 * ordered correctly WRT the skb check above.
6856 */
6857 smp_rmb();
6858
6859 memcpy(&dpr->rx_std_buffers[di],
6860 &spr->rx_std_buffers[si],
6861 cpycnt * sizeof(struct ring_info));
6862
6863 for (i = 0; i < cpycnt; i++, di++, si++) {
6864 struct tg3_rx_buffer_desc *sbd, *dbd;
6865 sbd = &spr->rx_std[si];
6866 dbd = &dpr->rx_std[di];
6867 dbd->addr_hi = sbd->addr_hi;
6868 dbd->addr_lo = sbd->addr_lo;
6869 }
6870
6871 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6872 tp->rx_std_ring_mask;
6873 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6874 tp->rx_std_ring_mask;
6875 }
6876
6877 while (1) {
6878 src_prod_idx = spr->rx_jmb_prod_idx;
6879
6880 /* Make sure updates to the rx_jmb_buffers[] entries and
6881 * the jumbo producer index are seen in the correct order.
6882 */
6883 smp_rmb();
6884
6885 if (spr->rx_jmb_cons_idx == src_prod_idx)
6886 break;
6887
6888 if (spr->rx_jmb_cons_idx < src_prod_idx)
6889 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6890 else
6891 cpycnt = tp->rx_jmb_ring_mask + 1 -
6892 spr->rx_jmb_cons_idx;
6893
6894 cpycnt = min(cpycnt,
6895 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6896
6897 si = spr->rx_jmb_cons_idx;
6898 di = dpr->rx_jmb_prod_idx;
6899
6900 for (i = di; i < di + cpycnt; i++) {
6901 if (dpr->rx_jmb_buffers[i].data) {
6902 cpycnt = i - di;
6903 err = -ENOSPC;
6904 break;
6905 }
6906 }
6907
6908 if (!cpycnt)
6909 break;
6910
6911 /* Ensure that updates to the rx_jmb_buffers ring and the
6912 * shadowed hardware producer ring from tg3_recycle_skb() are
6913 * ordered correctly WRT the skb check above.
6914 */
6915 smp_rmb();
6916
6917 memcpy(&dpr->rx_jmb_buffers[di],
6918 &spr->rx_jmb_buffers[si],
6919 cpycnt * sizeof(struct ring_info));
6920
6921 for (i = 0; i < cpycnt; i++, di++, si++) {
6922 struct tg3_rx_buffer_desc *sbd, *dbd;
6923 sbd = &spr->rx_jmb[si].std;
6924 dbd = &dpr->rx_jmb[di].std;
6925 dbd->addr_hi = sbd->addr_hi;
6926 dbd->addr_lo = sbd->addr_lo;
6927 }
6928
6929 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6930 tp->rx_jmb_ring_mask;
6931 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6932 tp->rx_jmb_ring_mask;
6933 }
6934
6935 return err;
6936 }
6937
6938 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6939 {
6940 struct tg3 *tp = tnapi->tp;
6941
6942 /* run TX completion thread */
6943 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6944 tg3_tx(tnapi);
6945 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6946 return work_done;
6947 }
6948
6949 if (!tnapi->rx_rcb_prod_idx)
6950 return work_done;
6951
6952 /* run RX thread, within the bounds set by NAPI.
6953 * All RX "locking" is done by ensuring outside
6954 * code synchronizes with tg3->napi.poll()
6955 */
6956 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6957 work_done += tg3_rx(tnapi, budget - work_done);
6958
6959 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6960 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6961 int i, err = 0;
6962 u32 std_prod_idx = dpr->rx_std_prod_idx;
6963 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6964
6965 tp->rx_refill = false;
6966 for (i = 1; i <= tp->rxq_cnt; i++)
6967 err |= tg3_rx_prodring_xfer(tp, dpr,
6968 &tp->napi[i].prodring);
6969
6970 wmb();
6971
6972 if (std_prod_idx != dpr->rx_std_prod_idx)
6973 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6974 dpr->rx_std_prod_idx);
6975
6976 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6977 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6978 dpr->rx_jmb_prod_idx);
6979
6980 mmiowb();
6981
6982 if (err)
6983 tw32_f(HOSTCC_MODE, tp->coal_now);
6984 }
6985
6986 return work_done;
6987 }
6988
6989 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6990 {
6991 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6992 schedule_work(&tp->reset_task);
6993 }
6994
6995 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6996 {
6997 cancel_work_sync(&tp->reset_task);
6998 tg3_flag_clear(tp, RESET_TASK_PENDING);
6999 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7000 }
7001
7002 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7003 {
7004 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7005 struct tg3 *tp = tnapi->tp;
7006 int work_done = 0;
7007 struct tg3_hw_status *sblk = tnapi->hw_status;
7008
7009 while (1) {
7010 work_done = tg3_poll_work(tnapi, work_done, budget);
7011
7012 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7013 goto tx_recovery;
7014
7015 if (unlikely(work_done >= budget))
7016 break;
7017
7018 /* tp->last_tag is used in tg3_int_reenable() below
7019 * to tell the hw how much work has been processed,
7020 * so we must read it before checking for more work.
7021 */
7022 tnapi->last_tag = sblk->status_tag;
7023 tnapi->last_irq_tag = tnapi->last_tag;
7024 rmb();
7025
7026 /* check for RX/TX work to do */
7027 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7028 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7029
7030 			/* This test is not race-free, but it reduces
7031 			 * the number of interrupts by looping again.
7032 */
7033 if (tnapi == &tp->napi[1] && tp->rx_refill)
7034 continue;
7035
7036 napi_complete(napi);
7037 /* Reenable interrupts. */
7038 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7039
7040 /* This test here is synchronized by napi_schedule()
7041 * and napi_complete() to close the race condition.
7042 */
7043 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7044 tw32(HOSTCC_MODE, tp->coalesce_mode |
7045 HOSTCC_MODE_ENABLE |
7046 tnapi->coal_now);
7047 }
7048 mmiowb();
7049 break;
7050 }
7051 }
7052
7053 return work_done;
7054
7055 tx_recovery:
7056 /* work_done is guaranteed to be less than budget. */
7057 napi_complete(napi);
7058 tg3_reset_task_schedule(tp);
7059 return work_done;
7060 }
7061
7062 static void tg3_process_error(struct tg3 *tp)
7063 {
7064 u32 val;
7065 bool real_error = false;
7066
7067 if (tg3_flag(tp, ERROR_PROCESSED))
7068 return;
7069
7070 /* Check Flow Attention register */
7071 val = tr32(HOSTCC_FLOW_ATTN);
7072 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7073 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7074 real_error = true;
7075 }
7076
7077 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7078 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7079 real_error = true;
7080 }
7081
7082 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7083 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7084 real_error = true;
7085 }
7086
7087 if (!real_error)
7088 return;
7089
7090 tg3_dump_state(tp);
7091
7092 tg3_flag_set(tp, ERROR_PROCESSED);
7093 tg3_reset_task_schedule(tp);
7094 }
7095
7096 static int tg3_poll(struct napi_struct *napi, int budget)
7097 {
7098 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7099 struct tg3 *tp = tnapi->tp;
7100 int work_done = 0;
7101 struct tg3_hw_status *sblk = tnapi->hw_status;
7102
7103 while (1) {
7104 if (sblk->status & SD_STATUS_ERROR)
7105 tg3_process_error(tp);
7106
7107 tg3_poll_link(tp);
7108
7109 work_done = tg3_poll_work(tnapi, work_done, budget);
7110
7111 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7112 goto tx_recovery;
7113
7114 if (unlikely(work_done >= budget))
7115 break;
7116
7117 if (tg3_flag(tp, TAGGED_STATUS)) {
7118 /* tp->last_tag is used in tg3_int_reenable() below
7119 * to tell the hw how much work has been processed,
7120 * so we must read it before checking for more work.
7121 */
7122 tnapi->last_tag = sblk->status_tag;
7123 tnapi->last_irq_tag = tnapi->last_tag;
7124 rmb();
7125 } else
7126 sblk->status &= ~SD_STATUS_UPDATED;
7127
7128 if (likely(!tg3_has_work(tnapi))) {
7129 napi_complete(napi);
7130 tg3_int_reenable(tnapi);
7131 break;
7132 }
7133 }
7134
7135 return work_done;
7136
7137 tx_recovery:
7138 /* work_done is guaranteed to be less than budget. */
7139 napi_complete(napi);
7140 tg3_reset_task_schedule(tp);
7141 return work_done;
7142 }
7143
7144 static void tg3_napi_disable(struct tg3 *tp)
7145 {
7146 int i;
7147
7148 for (i = tp->irq_cnt - 1; i >= 0; i--)
7149 napi_disable(&tp->napi[i].napi);
7150 }
7151
7152 static void tg3_napi_enable(struct tg3 *tp)
7153 {
7154 int i;
7155
7156 for (i = 0; i < tp->irq_cnt; i++)
7157 napi_enable(&tp->napi[i].napi);
7158 }
7159
7160 static void tg3_napi_init(struct tg3 *tp)
7161 {
7162 int i;
7163
7164 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7165 for (i = 1; i < tp->irq_cnt; i++)
7166 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7167 }
7168
7169 static void tg3_napi_fini(struct tg3 *tp)
7170 {
7171 int i;
7172
7173 for (i = 0; i < tp->irq_cnt; i++)
7174 netif_napi_del(&tp->napi[i].napi);
7175 }
7176
7177 static inline void tg3_netif_stop(struct tg3 *tp)
7178 {
7179 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7180 tg3_napi_disable(tp);
7181 netif_carrier_off(tp->dev);
7182 netif_tx_disable(tp->dev);
7183 }
7184
7185 /* tp->lock must be held */
7186 static inline void tg3_netif_start(struct tg3 *tp)
7187 {
7188 tg3_ptp_resume(tp);
7189
7190 /* NOTE: unconditional netif_tx_wake_all_queues is only
7191 * appropriate so long as all callers are assured to
7192 * have free tx slots (such as after tg3_init_hw)
7193 */
7194 netif_tx_wake_all_queues(tp->dev);
7195
7196 if (tp->link_up)
7197 netif_carrier_on(tp->dev);
7198
7199 tg3_napi_enable(tp);
7200 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7201 tg3_enable_ints(tp);
7202 }
7203
7204 static void tg3_irq_quiesce(struct tg3 *tp)
7205 {
7206 int i;
7207
7208 BUG_ON(tp->irq_sync);
7209
7210 tp->irq_sync = 1;
7211 smp_mb();
7212
7213 for (i = 0; i < tp->irq_cnt; i++)
7214 synchronize_irq(tp->napi[i].irq_vec);
7215 }
7216
7217 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7218 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7219 * with as well. Most of the time, this is not necessary except when
7220 * shutting down the device.
7221 */
7222 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7223 {
7224 spin_lock_bh(&tp->lock);
7225 if (irq_sync)
7226 tg3_irq_quiesce(tp);
7227 }
7228
7229 static inline void tg3_full_unlock(struct tg3 *tp)
7230 {
7231 spin_unlock_bh(&tp->lock);
7232 }
7233
7234 /* One-shot MSI handler - Chip automatically disables interrupt
7235 * after sending MSI so driver doesn't have to do it.
7236 */
7237 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7238 {
7239 struct tg3_napi *tnapi = dev_id;
7240 struct tg3 *tp = tnapi->tp;
7241
7242 prefetch(tnapi->hw_status);
7243 if (tnapi->rx_rcb)
7244 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7245
7246 if (likely(!tg3_irq_sync(tp)))
7247 napi_schedule(&tnapi->napi);
7248
7249 return IRQ_HANDLED;
7250 }
7251
7252 /* MSI ISR - No need to check for interrupt sharing and no need to
7253 * flush status block and interrupt mailbox. PCI ordering rules
7254 * guarantee that MSI will arrive after the status block.
7255 */
7256 static irqreturn_t tg3_msi(int irq, void *dev_id)
7257 {
7258 struct tg3_napi *tnapi = dev_id;
7259 struct tg3 *tp = tnapi->tp;
7260
7261 prefetch(tnapi->hw_status);
7262 if (tnapi->rx_rcb)
7263 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7264 /*
7265 * Writing any value to intr-mbox-0 clears PCI INTA# and
7266 * chip-internal interrupt pending events.
7267 	 * Writing non-zero to intr-mbox-0 additionally tells the
7268 * NIC to stop sending us irqs, engaging "in-intr-handler"
7269 * event coalescing.
7270 */
7271 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7272 if (likely(!tg3_irq_sync(tp)))
7273 napi_schedule(&tnapi->napi);
7274
7275 return IRQ_RETVAL(1);
7276 }
7277
7278 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7279 {
7280 struct tg3_napi *tnapi = dev_id;
7281 struct tg3 *tp = tnapi->tp;
7282 struct tg3_hw_status *sblk = tnapi->hw_status;
7283 unsigned int handled = 1;
7284
7285 	/* In INTx mode, the interrupt can arrive at the CPU before the
7286 	 * status block write posted prior to the interrupt reaches memory.
7287 	 * Reading the PCI State register will confirm whether the
7288 	 * interrupt is ours and will flush the status block.
7289 */
7290 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7291 if (tg3_flag(tp, CHIP_RESETTING) ||
7292 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7293 handled = 0;
7294 goto out;
7295 }
7296 }
7297
7298 /*
7299 * Writing any value to intr-mbox-0 clears PCI INTA# and
7300 * chip-internal interrupt pending events.
7301 	 * Writing non-zero to intr-mbox-0 additionally tells the
7302 * NIC to stop sending us irqs, engaging "in-intr-handler"
7303 * event coalescing.
7304 *
7305 * Flush the mailbox to de-assert the IRQ immediately to prevent
7306 * spurious interrupts. The flush impacts performance but
7307 * excessive spurious interrupts can be worse in some cases.
7308 */
7309 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7310 if (tg3_irq_sync(tp))
7311 goto out;
7312 sblk->status &= ~SD_STATUS_UPDATED;
7313 if (likely(tg3_has_work(tnapi))) {
7314 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7315 napi_schedule(&tnapi->napi);
7316 } else {
7317 /* No work, shared interrupt perhaps? re-enable
7318 * interrupts, and flush that PCI write
7319 */
7320 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7321 0x00000000);
7322 }
7323 out:
7324 return IRQ_RETVAL(handled);
7325 }
7326
7327 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7328 {
7329 struct tg3_napi *tnapi = dev_id;
7330 struct tg3 *tp = tnapi->tp;
7331 struct tg3_hw_status *sblk = tnapi->hw_status;
7332 unsigned int handled = 1;
7333
7334 	/* In INTx mode, the interrupt can arrive at the CPU before the
7335 	 * status block write posted prior to the interrupt reaches memory.
7336 	 * Reading the PCI State register will confirm whether the
7337 	 * interrupt is ours and will flush the status block.
7338 */
7339 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7340 if (tg3_flag(tp, CHIP_RESETTING) ||
7341 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7342 handled = 0;
7343 goto out;
7344 }
7345 }
7346
7347 /*
7348 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7349 	 * chip-internal interrupt pending events.
7350 	 * Writing non-zero to intr-mbox-0 additionally tells the
7351 * NIC to stop sending us irqs, engaging "in-intr-handler"
7352 * event coalescing.
7353 *
7354 * Flush the mailbox to de-assert the IRQ immediately to prevent
7355 * spurious interrupts. The flush impacts performance but
7356 * excessive spurious interrupts can be worse in some cases.
7357 */
7358 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7359
7360 /*
7361 * In a shared interrupt configuration, sometimes other devices'
7362 * interrupts will scream. We record the current status tag here
7363 * so that the above check can report that the screaming interrupts
7364 * are unhandled. Eventually they will be silenced.
7365 */
7366 tnapi->last_irq_tag = sblk->status_tag;
7367
7368 if (tg3_irq_sync(tp))
7369 goto out;
7370
7371 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7372
7373 napi_schedule(&tnapi->napi);
7374
7375 out:
7376 return IRQ_RETVAL(handled);
7377 }
7378
7379 /* ISR for interrupt test */
7380 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7381 {
7382 struct tg3_napi *tnapi = dev_id;
7383 struct tg3 *tp = tnapi->tp;
7384 struct tg3_hw_status *sblk = tnapi->hw_status;
7385
7386 if ((sblk->status & SD_STATUS_UPDATED) ||
7387 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7388 tg3_disable_ints(tp);
7389 return IRQ_RETVAL(1);
7390 }
7391 return IRQ_RETVAL(0);
7392 }
7393
7394 #ifdef CONFIG_NET_POLL_CONTROLLER
7395 static void tg3_poll_controller(struct net_device *dev)
7396 {
7397 int i;
7398 struct tg3 *tp = netdev_priv(dev);
7399
7400 if (tg3_irq_sync(tp))
7401 return;
7402
7403 for (i = 0; i < tp->irq_cnt; i++)
7404 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7405 }
7406 #endif
7407
7408 static void tg3_tx_timeout(struct net_device *dev)
7409 {
7410 struct tg3 *tp = netdev_priv(dev);
7411
7412 if (netif_msg_tx_err(tp)) {
7413 netdev_err(dev, "transmit timed out, resetting\n");
7414 tg3_dump_state(tp);
7415 }
7416
7417 tg3_reset_task_schedule(tp);
7418 }
7419
7420 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7421 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7422 {
7423 u32 base = (u32) mapping & 0xffffffff;
7424
7425 return (base > 0xffffdcc0) && (base + len + 8 < base);
7426 }
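/* Example of the wrap check above: base = 0xffffff00 with len = 0x200
 * gives base + len + 8 = 0x100000108, which truncates to 0x108 < base,
 * so the buffer straddles a 4GB boundary and needs the workaround.
 */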
7427
7428 /* Test for DMA addresses > 40-bit */
7429 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7430 int len)
7431 {
7432 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7433 if (tg3_flag(tp, 40BIT_DMA_BUG))
7434 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7435 return 0;
7436 #else
7437 return 0;
7438 #endif
7439 }
7440
7441 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7442 dma_addr_t mapping, u32 len, u32 flags,
7443 u32 mss, u32 vlan)
7444 {
7445 txbd->addr_hi = ((u64) mapping >> 32);
7446 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7447 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7448 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7449 }
7450
7451 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7452 dma_addr_t map, u32 len, u32 flags,
7453 u32 mss, u32 vlan)
7454 {
7455 struct tg3 *tp = tnapi->tp;
7456 bool hwbug = false;
7457
7458 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7459 hwbug = true;
7460
7461 if (tg3_4g_overflow_test(map, len))
7462 hwbug = true;
7463
7464 if (tg3_40bit_overflow_test(tp, map, len))
7465 hwbug = true;
7466
7467 if (tp->dma_limit) {
7468 u32 prvidx = *entry;
7469 u32 tmp_flag = flags & ~TXD_FLAG_END;
7470 while (len > tp->dma_limit && *budget) {
7471 u32 frag_len = tp->dma_limit;
7472 len -= tp->dma_limit;
7473
7474 			/* Avoid the 8-byte DMA problem */
7475 if (len <= 8) {
7476 len += tp->dma_limit / 2;
7477 frag_len = tp->dma_limit / 2;
7478 }
7479
7480 tnapi->tx_buffers[*entry].fragmented = true;
7481
7482 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7483 frag_len, tmp_flag, mss, vlan);
7484 *budget -= 1;
7485 prvidx = *entry;
7486 *entry = NEXT_TX(*entry);
7487
7488 map += frag_len;
7489 }
7490
7491 if (len) {
7492 if (*budget) {
7493 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7494 len, flags, mss, vlan);
7495 *budget -= 1;
7496 *entry = NEXT_TX(*entry);
7497 } else {
7498 hwbug = true;
7499 tnapi->tx_buffers[prvidx].fragmented = false;
7500 }
7501 }
7502 } else {
7503 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7504 len, flags, mss, vlan);
7505 *entry = NEXT_TX(*entry);
7506 }
7507
7508 return hwbug;
7509 }
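/* Illustrative split (dma_limit and length assumed): with
 * tp->dma_limit = 4096 and len = 4100, a full 4096-byte fragment would
 * leave a 4-byte tail that trips the short-DMA bug, so the loop emits
 * a 2048-byte fragment instead and leaves 2052 bytes for the final BD.
 */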
7510
7511 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7512 {
7513 int i;
7514 struct sk_buff *skb;
7515 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7516
7517 skb = txb->skb;
7518 txb->skb = NULL;
7519
7520 pci_unmap_single(tnapi->tp->pdev,
7521 dma_unmap_addr(txb, mapping),
7522 skb_headlen(skb),
7523 PCI_DMA_TODEVICE);
7524
7525 while (txb->fragmented) {
7526 txb->fragmented = false;
7527 entry = NEXT_TX(entry);
7528 txb = &tnapi->tx_buffers[entry];
7529 }
7530
7531 for (i = 0; i <= last; i++) {
7532 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7533
7534 entry = NEXT_TX(entry);
7535 txb = &tnapi->tx_buffers[entry];
7536
7537 pci_unmap_page(tnapi->tp->pdev,
7538 dma_unmap_addr(txb, mapping),
7539 skb_frag_size(frag), PCI_DMA_TODEVICE);
7540
7541 while (txb->fragmented) {
7542 txb->fragmented = false;
7543 entry = NEXT_TX(entry);
7544 txb = &tnapi->tx_buffers[entry];
7545 }
7546 }
7547 }
7548
7549 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7550 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7551 struct sk_buff **pskb,
7552 u32 *entry, u32 *budget,
7553 u32 base_flags, u32 mss, u32 vlan)
7554 {
7555 struct tg3 *tp = tnapi->tp;
7556 struct sk_buff *new_skb, *skb = *pskb;
7557 dma_addr_t new_addr = 0;
7558 int ret = 0;
7559
7560 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7561 new_skb = skb_copy(skb, GFP_ATOMIC);
7562 else {
7563 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7564
7565 new_skb = skb_copy_expand(skb,
7566 skb_headroom(skb) + more_headroom,
7567 skb_tailroom(skb), GFP_ATOMIC);
7568 }
7569
7570 if (!new_skb) {
7571 ret = -1;
7572 } else {
7573 /* New SKB is guaranteed to be linear. */
7574 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7575 PCI_DMA_TODEVICE);
7576 /* Make sure the mapping succeeded */
7577 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7578 dev_kfree_skb(new_skb);
7579 ret = -1;
7580 } else {
7581 u32 save_entry = *entry;
7582
7583 base_flags |= TXD_FLAG_END;
7584
7585 tnapi->tx_buffers[*entry].skb = new_skb;
7586 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7587 mapping, new_addr);
7588
7589 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7590 new_skb->len, base_flags,
7591 mss, vlan)) {
7592 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7593 dev_kfree_skb(new_skb);
7594 ret = -1;
7595 }
7596 }
7597 }
7598
7599 dev_kfree_skb(skb);
7600 *pskb = new_skb;
7601 return ret;
7602 }
7603
7604 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7605
7606 /* Use GSO to work around a rare TSO bug that may be triggered when the
7607 * TSO header is greater than 80 bytes.
7608 */
7609 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7610 {
7611 struct sk_buff *segs, *nskb;
7612 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
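	/* The factor of 3 is a worst-case heuristic: assume up to three
	 * TX BDs per resulting GSO segment, e.g. gso_segs = 10 reserves
	 * room for 30 descriptors before segmenting.
	 */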
7613
7614 /* Estimate the number of fragments in the worst case */
7615 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7616 netif_stop_queue(tp->dev);
7617
7618 		/* netif_tx_stop_queue() must be done before checking
7619 		 * tx index in tg3_tx_avail() below, because in
7620 * tg3_tx(), we update tx index before checking for
7621 * netif_tx_queue_stopped().
7622 */
7623 smp_mb();
7624 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7625 return NETDEV_TX_BUSY;
7626
7627 netif_wake_queue(tp->dev);
7628 }
7629
7630 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7631 if (IS_ERR(segs))
7632 goto tg3_tso_bug_end;
7633
7634 do {
7635 nskb = segs;
7636 segs = segs->next;
7637 nskb->next = NULL;
7638 tg3_start_xmit(nskb, tp->dev);
7639 } while (segs);
7640
7641 tg3_tso_bug_end:
7642 dev_kfree_skb(skb);
7643
7644 return NETDEV_TX_OK;
7645 }
7646
7647 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7648 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7649 */
7650 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7651 {
7652 struct tg3 *tp = netdev_priv(dev);
7653 u32 len, entry, base_flags, mss, vlan = 0;
7654 u32 budget;
7655 int i = -1, would_hit_hwbug;
7656 dma_addr_t mapping;
7657 struct tg3_napi *tnapi;
7658 struct netdev_queue *txq;
7659 unsigned int last;
7660
7661 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7662 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7663 if (tg3_flag(tp, ENABLE_TSS))
7664 tnapi++;
7665
7666 budget = tg3_tx_avail(tnapi);
7667
7668 	/* We are running in BH-disabled context with netif_tx_lock
7669 * and TX reclaim runs via tp->napi.poll inside of a software
7670 * interrupt. Furthermore, IRQ processing runs lockless so we have
7671 * no IRQ context deadlocks to worry about either. Rejoice!
7672 */
7673 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7674 if (!netif_tx_queue_stopped(txq)) {
7675 netif_tx_stop_queue(txq);
7676
7677 /* This is a hard error, log it. */
7678 netdev_err(dev,
7679 "BUG! Tx Ring full when queue awake!\n");
7680 }
7681 return NETDEV_TX_BUSY;
7682 }
7683
7684 entry = tnapi->tx_prod;
7685 base_flags = 0;
7686 if (skb->ip_summed == CHECKSUM_PARTIAL)
7687 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7688
7689 mss = skb_shinfo(skb)->gso_size;
7690 if (mss) {
7691 struct iphdr *iph;
7692 u32 tcp_opt_len, hdr_len;
7693
7694 if (skb_header_cloned(skb) &&
7695 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7696 goto drop;
7697
7698 iph = ip_hdr(skb);
7699 tcp_opt_len = tcp_optlen(skb);
7700
7701 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7702
7703 if (!skb_is_gso_v6(skb)) {
7704 iph->check = 0;
7705 iph->tot_len = htons(mss + hdr_len);
7706 }
7707
7708 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7709 tg3_flag(tp, TSO_BUG))
7710 return tg3_tso_bug(tp, skb);
7711
7712 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7713 TXD_FLAG_CPU_POST_DMA);
7714
7715 if (tg3_flag(tp, HW_TSO_1) ||
7716 tg3_flag(tp, HW_TSO_2) ||
7717 tg3_flag(tp, HW_TSO_3)) {
7718 tcp_hdr(skb)->check = 0;
7719 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7720 } else
7721 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7722 iph->daddr, 0,
7723 IPPROTO_TCP,
7724 0);
7725
7726 if (tg3_flag(tp, HW_TSO_3)) {
7727 mss |= (hdr_len & 0xc) << 12;
7728 if (hdr_len & 0x10)
7729 base_flags |= 0x00000010;
7730 base_flags |= (hdr_len & 0x3e0) << 5;
7731 } else if (tg3_flag(tp, HW_TSO_2))
7732 mss |= hdr_len << 9;
7733 else if (tg3_flag(tp, HW_TSO_1) ||
7734 tg3_asic_rev(tp) == ASIC_REV_5705) {
7735 if (tcp_opt_len || iph->ihl > 5) {
7736 int tsflags;
7737
7738 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7739 mss |= (tsflags << 11);
7740 }
7741 } else {
7742 if (tcp_opt_len || iph->ihl > 5) {
7743 int tsflags;
7744
7745 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7746 base_flags |= tsflags << 12;
7747 }
7748 }
7749 }
7750
7751 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7752 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7753 base_flags |= TXD_FLAG_JMB_PKT;
7754
7755 if (vlan_tx_tag_present(skb)) {
7756 base_flags |= TXD_FLAG_VLAN;
7757 vlan = vlan_tx_tag_get(skb);
7758 }
7759
7760 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7761 tg3_flag(tp, TX_TSTAMP_EN)) {
7762 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7763 base_flags |= TXD_FLAG_HWTSTAMP;
7764 }
7765
7766 len = skb_headlen(skb);
7767
7768 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7769 if (pci_dma_mapping_error(tp->pdev, mapping))
7770 goto drop;
7771 
7773 tnapi->tx_buffers[entry].skb = skb;
7774 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7775
7776 would_hit_hwbug = 0;
7777
7778 if (tg3_flag(tp, 5701_DMA_BUG))
7779 would_hit_hwbug = 1;
7780
7781 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7782 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7783 mss, vlan)) {
7784 would_hit_hwbug = 1;
7785 } else if (skb_shinfo(skb)->nr_frags > 0) {
7786 u32 tmp_mss = mss;
7787
7788 if (!tg3_flag(tp, HW_TSO_1) &&
7789 !tg3_flag(tp, HW_TSO_2) &&
7790 !tg3_flag(tp, HW_TSO_3))
7791 tmp_mss = 0;
7792
7793 /* Now loop through additional data
7794 * fragments, and queue them.
7795 */
7796 last = skb_shinfo(skb)->nr_frags - 1;
7797 for (i = 0; i <= last; i++) {
7798 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7799
7800 len = skb_frag_size(frag);
7801 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7802 len, DMA_TO_DEVICE);
7803
7804 tnapi->tx_buffers[entry].skb = NULL;
7805 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7806 mapping);
7807 if (dma_mapping_error(&tp->pdev->dev, mapping))
7808 goto dma_error;
7809
7810 if (!budget ||
7811 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7812 len, base_flags |
7813 ((i == last) ? TXD_FLAG_END : 0),
7814 tmp_mss, vlan)) {
7815 would_hit_hwbug = 1;
7816 break;
7817 }
7818 }
7819 }
7820
7821 if (would_hit_hwbug) {
7822 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7823
7824 /* If the workaround fails due to memory/mapping
7825 * failure, silently drop this packet.
7826 */
7827 entry = tnapi->tx_prod;
7828 budget = tg3_tx_avail(tnapi);
7829 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7830 base_flags, mss, vlan))
7831 goto drop_nofree;
7832 }
7833
7834 skb_tx_timestamp(skb);
7835 netdev_tx_sent_queue(txq, skb->len);
7836
7837 /* Sync BD data before updating mailbox */
7838 wmb();
7839
7840 /* Packets are ready, update Tx producer idx local and on card. */
7841 tw32_tx_mbox(tnapi->prodmbox, entry);
7842
7843 tnapi->tx_prod = entry;
7844 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7845 netif_tx_stop_queue(txq);
7846
7847 		/* netif_tx_stop_queue() must be done before checking
7848 		 * tx index in tg3_tx_avail() below, because in
7849 * tg3_tx(), we update tx index before checking for
7850 * netif_tx_queue_stopped().
7851 */
7852 smp_mb();
7853 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7854 netif_tx_wake_queue(txq);
7855 }
7856
7857 mmiowb();
7858 return NETDEV_TX_OK;
7859
7860 dma_error:
7861 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7862 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7863 drop:
7864 dev_kfree_skb(skb);
7865 drop_nofree:
7866 tp->tx_dropped++;
7867 return NETDEV_TX_OK;
7868 }
7869
7870 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7871 {
7872 if (enable) {
7873 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7874 MAC_MODE_PORT_MODE_MASK);
7875
7876 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7877
7878 if (!tg3_flag(tp, 5705_PLUS))
7879 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7880
7881 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7882 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7883 else
7884 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7885 } else {
7886 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7887
7888 if (tg3_flag(tp, 5705_PLUS) ||
7889 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7890 tg3_asic_rev(tp) == ASIC_REV_5700)
7891 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7892 }
7893
7894 tw32(MAC_MODE, tp->mac_mode);
7895 udelay(40);
7896 }
7897
7898 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7899 {
7900 u32 val, bmcr, mac_mode, ptest = 0;
7901
7902 tg3_phy_toggle_apd(tp, false);
7903 tg3_phy_toggle_automdix(tp, false);
7904
7905 if (extlpbk && tg3_phy_set_extloopbk(tp))
7906 return -EIO;
7907
7908 bmcr = BMCR_FULLDPLX;
7909 switch (speed) {
7910 case SPEED_10:
7911 break;
7912 case SPEED_100:
7913 bmcr |= BMCR_SPEED100;
7914 break;
7915 case SPEED_1000:
7916 default:
7917 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7918 speed = SPEED_100;
7919 bmcr |= BMCR_SPEED100;
7920 } else {
7921 speed = SPEED_1000;
7922 bmcr |= BMCR_SPEED1000;
7923 }
7924 }
7925
7926 if (extlpbk) {
7927 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7928 tg3_readphy(tp, MII_CTRL1000, &val);
7929 val |= CTL1000_AS_MASTER |
7930 CTL1000_ENABLE_MASTER;
7931 tg3_writephy(tp, MII_CTRL1000, val);
7932 } else {
7933 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7934 MII_TG3_FET_PTEST_TRIM_2;
7935 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7936 }
7937 } else
7938 bmcr |= BMCR_LOOPBACK;
7939
7940 tg3_writephy(tp, MII_BMCR, bmcr);
7941
7942 /* The write needs to be flushed for the FETs */
7943 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7944 tg3_readphy(tp, MII_BMCR, &bmcr);
7945
7946 udelay(40);
7947
7948 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7949 tg3_asic_rev(tp) == ASIC_REV_5785) {
7950 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7951 MII_TG3_FET_PTEST_FRC_TX_LINK |
7952 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7953
7954 /* The write needs to be flushed for the AC131 */
7955 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7956 }
7957
7958 /* Reset to prevent losing 1st rx packet intermittently */
7959 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7960 tg3_flag(tp, 5780_CLASS)) {
7961 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7962 udelay(10);
7963 tw32_f(MAC_RX_MODE, tp->rx_mode);
7964 }
7965
7966 mac_mode = tp->mac_mode &
7967 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7968 if (speed == SPEED_1000)
7969 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7970 else
7971 mac_mode |= MAC_MODE_PORT_MODE_MII;
7972
7973 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7974 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7975
7976 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7977 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7978 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7979 mac_mode |= MAC_MODE_LINK_POLARITY;
7980
7981 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7982 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7983 }
7984
7985 tw32(MAC_MODE, mac_mode);
7986 udelay(40);
7987
7988 return 0;
7989 }
7990
7991 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7992 {
7993 struct tg3 *tp = netdev_priv(dev);
7994
7995 if (features & NETIF_F_LOOPBACK) {
7996 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7997 return;
7998
7999 spin_lock_bh(&tp->lock);
8000 tg3_mac_loopback(tp, true);
8001 netif_carrier_on(tp->dev);
8002 spin_unlock_bh(&tp->lock);
8003 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8004 } else {
8005 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8006 return;
8007
8008 spin_lock_bh(&tp->lock);
8009 tg3_mac_loopback(tp, false);
8010 /* Force link status check */
8011 tg3_setup_phy(tp, true);
8012 spin_unlock_bh(&tp->lock);
8013 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8014 }
8015 }
8016
8017 static netdev_features_t tg3_fix_features(struct net_device *dev,
8018 netdev_features_t features)
8019 {
8020 struct tg3 *tp = netdev_priv(dev);
8021
8022 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8023 features &= ~NETIF_F_ALL_TSO;
8024
8025 return features;
8026 }
8027
8028 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8029 {
8030 netdev_features_t changed = dev->features ^ features;
8031
8032 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8033 tg3_set_loopback(dev, features);
8034
8035 return 0;
8036 }
8037
8038 static void tg3_rx_prodring_free(struct tg3 *tp,
8039 struct tg3_rx_prodring_set *tpr)
8040 {
8041 int i;
8042
8043 if (tpr != &tp->napi[0].prodring) {
8044 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8045 i = (i + 1) & tp->rx_std_ring_mask)
8046 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8047 tp->rx_pkt_map_sz);
8048
8049 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8050 for (i = tpr->rx_jmb_cons_idx;
8051 i != tpr->rx_jmb_prod_idx;
8052 i = (i + 1) & tp->rx_jmb_ring_mask) {
8053 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8054 TG3_RX_JMB_MAP_SZ);
8055 }
8056 }
8057
8058 return;
8059 }
8060
8061 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8062 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8063 tp->rx_pkt_map_sz);
8064
8065 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8066 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8067 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8068 TG3_RX_JMB_MAP_SZ);
8069 }
8070 }
8071
8072 /* Initialize rx rings for packet processing.
8073 *
8074 * The chip has been shut down and the driver detached from
8075 * the networking stack, so no interrupts or new tx packets will
8076 * end up in the driver. tp->{tx,}lock are held and thus
8077 * we may not sleep.
8078 */
8079 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8080 struct tg3_rx_prodring_set *tpr)
8081 {
8082 u32 i, rx_pkt_dma_sz;
8083
8084 tpr->rx_std_cons_idx = 0;
8085 tpr->rx_std_prod_idx = 0;
8086 tpr->rx_jmb_cons_idx = 0;
8087 tpr->rx_jmb_prod_idx = 0;
8088
8089 if (tpr != &tp->napi[0].prodring) {
8090 memset(&tpr->rx_std_buffers[0], 0,
8091 TG3_RX_STD_BUFF_RING_SIZE(tp));
8092 if (tpr->rx_jmb_buffers)
8093 memset(&tpr->rx_jmb_buffers[0], 0,
8094 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8095 goto done;
8096 }
8097
8098 /* Zero out all descriptors. */
8099 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8100
8101 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8102 if (tg3_flag(tp, 5780_CLASS) &&
8103 tp->dev->mtu > ETH_DATA_LEN)
8104 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8105 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8106
8107 /* Initialize invariants of the rings; we only set this
8108 * stuff once. This works because the card does not
8109 * write into the rx buffer posting rings.
8110 */
8111 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8112 struct tg3_rx_buffer_desc *rxd;
8113
8114 rxd = &tpr->rx_std[i];
8115 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8116 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8117 rxd->opaque = (RXD_OPAQUE_RING_STD |
8118 (i << RXD_OPAQUE_INDEX_SHIFT));
8119 }
8120
8121 /* Now allocate fresh SKBs for each rx ring. */
8122 for (i = 0; i < tp->rx_pending; i++) {
8123 unsigned int frag_size;
8124
8125 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8126 &frag_size) < 0) {
8127 netdev_warn(tp->dev,
8128 "Using a smaller RX standard ring. Only "
8129 "%d out of %d buffers were allocated "
8130 "successfully\n", i, tp->rx_pending);
8131 if (i == 0)
8132 goto initfail;
8133 tp->rx_pending = i;
8134 break;
8135 }
8136 }
8137
8138 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8139 goto done;
8140
8141 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8142
8143 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8144 goto done;
8145
8146 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8147 struct tg3_rx_buffer_desc *rxd;
8148
8149 rxd = &tpr->rx_jmb[i].std;
8150 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8151 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8152 RXD_FLAG_JUMBO;
8153 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8154 (i << RXD_OPAQUE_INDEX_SHIFT));
8155 }
8156
8157 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8158 unsigned int frag_size;
8159
8160 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8161 &frag_size) < 0) {
8162 netdev_warn(tp->dev,
8163 "Using a smaller RX jumbo ring. Only %d "
8164 "out of %d buffers were allocated "
8165 "successfully\n", i, tp->rx_jumbo_pending);
8166 if (i == 0)
8167 goto initfail;
8168 tp->rx_jumbo_pending = i;
8169 break;
8170 }
8171 }
8172
8173 done:
8174 return 0;
8175
8176 initfail:
8177 tg3_rx_prodring_free(tp, tpr);
8178 return -ENOMEM;
8179 }
8180
8181 static void tg3_rx_prodring_fini(struct tg3 *tp,
8182 struct tg3_rx_prodring_set *tpr)
8183 {
8184 kfree(tpr->rx_std_buffers);
8185 tpr->rx_std_buffers = NULL;
8186 kfree(tpr->rx_jmb_buffers);
8187 tpr->rx_jmb_buffers = NULL;
8188 if (tpr->rx_std) {
8189 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8190 tpr->rx_std, tpr->rx_std_mapping);
8191 tpr->rx_std = NULL;
8192 }
8193 if (tpr->rx_jmb) {
8194 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8195 tpr->rx_jmb, tpr->rx_jmb_mapping);
8196 tpr->rx_jmb = NULL;
8197 }
8198 }
8199
8200 static int tg3_rx_prodring_init(struct tg3 *tp,
8201 struct tg3_rx_prodring_set *tpr)
8202 {
8203 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8204 GFP_KERNEL);
8205 if (!tpr->rx_std_buffers)
8206 return -ENOMEM;
8207
8208 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8209 TG3_RX_STD_RING_BYTES(tp),
8210 &tpr->rx_std_mapping,
8211 GFP_KERNEL);
8212 if (!tpr->rx_std)
8213 goto err_out;
8214
8215 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8216 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8217 GFP_KERNEL);
8218 if (!tpr->rx_jmb_buffers)
8219 goto err_out;
8220
8221 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8222 TG3_RX_JMB_RING_BYTES(tp),
8223 &tpr->rx_jmb_mapping,
8224 GFP_KERNEL);
8225 if (!tpr->rx_jmb)
8226 goto err_out;
8227 }
8228
8229 return 0;
8230
8231 err_out:
8232 tg3_rx_prodring_fini(tp, tpr);
8233 return -ENOMEM;
8234 }
8235
8236 /* Free up pending packets in all rx/tx rings.
8237 *
8238 * The chip has been shut down and the driver detached from
8239 * the networking stack, so no interrupts or new tx packets will
8240 * end up in the driver. tp->{tx,}lock is not held and we are not
8241 * in an interrupt context and thus may sleep.
8242 */
8243 static void tg3_free_rings(struct tg3 *tp)
8244 {
8245 int i, j;
8246
8247 for (j = 0; j < tp->irq_cnt; j++) {
8248 struct tg3_napi *tnapi = &tp->napi[j];
8249
8250 tg3_rx_prodring_free(tp, &tnapi->prodring);
8251
8252 if (!tnapi->tx_buffers)
8253 continue;
8254
8255 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8256 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8257
8258 if (!skb)
8259 continue;
8260
8261 tg3_tx_skb_unmap(tnapi, i,
8262 skb_shinfo(skb)->nr_frags - 1);
8263
8264 dev_kfree_skb_any(skb);
8265 }
8266 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8267 }
8268 }
8269
8270 /* Initialize tx/rx rings for packet processing.
8271 *
8272 * The chip has been shut down and the driver detached from
8273 * the networking stack, so no interrupts or new tx packets will
8274 * end up in the driver. tp->{tx,}lock are held and thus
8275 * we may not sleep.
8276 */
8277 static int tg3_init_rings(struct tg3 *tp)
8278 {
8279 int i;
8280
8281 /* Free up all the SKBs. */
8282 tg3_free_rings(tp);
8283
8284 for (i = 0; i < tp->irq_cnt; i++) {
8285 struct tg3_napi *tnapi = &tp->napi[i];
8286
8287 tnapi->last_tag = 0;
8288 tnapi->last_irq_tag = 0;
8289 tnapi->hw_status->status = 0;
8290 tnapi->hw_status->status_tag = 0;
8291 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8292
8293 tnapi->tx_prod = 0;
8294 tnapi->tx_cons = 0;
8295 if (tnapi->tx_ring)
8296 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8297
8298 tnapi->rx_rcb_ptr = 0;
8299 if (tnapi->rx_rcb)
8300 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8301
8302 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8303 tg3_free_rings(tp);
8304 return -ENOMEM;
8305 }
8306 }
8307
8308 return 0;
8309 }
8310
8311 static void tg3_mem_tx_release(struct tg3 *tp)
8312 {
8313 int i;
8314
8315 for (i = 0; i < tp->irq_max; i++) {
8316 struct tg3_napi *tnapi = &tp->napi[i];
8317
8318 if (tnapi->tx_ring) {
8319 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8320 tnapi->tx_ring, tnapi->tx_desc_mapping);
8321 tnapi->tx_ring = NULL;
8322 }
8323
8324 kfree(tnapi->tx_buffers);
8325 tnapi->tx_buffers = NULL;
8326 }
8327 }
8328
8329 static int tg3_mem_tx_acquire(struct tg3 *tp)
8330 {
8331 int i;
8332 struct tg3_napi *tnapi = &tp->napi[0];
8333
8334 /* If multivector TSS is enabled, vector 0 does not handle
8335 * tx interrupts. Don't allocate any resources for it.
8336 */
8337 if (tg3_flag(tp, ENABLE_TSS))
8338 tnapi++;
8339
8340 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8341 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8342 TG3_TX_RING_SIZE, GFP_KERNEL);
8343 if (!tnapi->tx_buffers)
8344 goto err_out;
8345
8346 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8347 TG3_TX_RING_BYTES,
8348 &tnapi->tx_desc_mapping,
8349 GFP_KERNEL);
8350 if (!tnapi->tx_ring)
8351 goto err_out;
8352 }
8353
8354 return 0;
8355
8356 err_out:
8357 tg3_mem_tx_release(tp);
8358 return -ENOMEM;
8359 }
8360
8361 static void tg3_mem_rx_release(struct tg3 *tp)
8362 {
8363 int i;
8364
8365 for (i = 0; i < tp->irq_max; i++) {
8366 struct tg3_napi *tnapi = &tp->napi[i];
8367
8368 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8369
8370 if (!tnapi->rx_rcb)
8371 continue;
8372
8373 dma_free_coherent(&tp->pdev->dev,
8374 TG3_RX_RCB_RING_BYTES(tp),
8375 tnapi->rx_rcb,
8376 tnapi->rx_rcb_mapping);
8377 tnapi->rx_rcb = NULL;
8378 }
8379 }
8380
8381 static int tg3_mem_rx_acquire(struct tg3 *tp)
8382 {
8383 unsigned int i, limit;
8384
8385 limit = tp->rxq_cnt;
8386
8387 /* If RSS is enabled, we need a (dummy) producer ring
8388 * set on vector zero. This is the true hw prodring.
8389 */
8390 if (tg3_flag(tp, ENABLE_RSS))
8391 limit++;
8392
8393 for (i = 0; i < limit; i++) {
8394 struct tg3_napi *tnapi = &tp->napi[i];
8395
8396 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8397 goto err_out;
8398
8399 /* If multivector RSS is enabled, vector 0
8400 * does not handle rx or tx interrupts.
8401 * Don't allocate any resources for it.
8402 */
8403 if (!i && tg3_flag(tp, ENABLE_RSS))
8404 continue;
8405
8406 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8407 TG3_RX_RCB_RING_BYTES(tp),
8408 &tnapi->rx_rcb_mapping,
8409 GFP_KERNEL | __GFP_ZERO);
8410 if (!tnapi->rx_rcb)
8411 goto err_out;
8412 }
8413
8414 return 0;
8415
8416 err_out:
8417 tg3_mem_rx_release(tp);
8418 return -ENOMEM;
8419 }
8420
8421 /*
8422 * Must be invoked only after interrupt sources have been disabled
8423 * and the hardware has been shut down.
8424 */
8425 static void tg3_free_consistent(struct tg3 *tp)
8426 {
8427 int i;
8428
8429 for (i = 0; i < tp->irq_cnt; i++) {
8430 struct tg3_napi *tnapi = &tp->napi[i];
8431
8432 if (tnapi->hw_status) {
8433 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8434 tnapi->hw_status,
8435 tnapi->status_mapping);
8436 tnapi->hw_status = NULL;
8437 }
8438 }
8439
8440 tg3_mem_rx_release(tp);
8441 tg3_mem_tx_release(tp);
8442
8443 if (tp->hw_stats) {
8444 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8445 tp->hw_stats, tp->stats_mapping);
8446 tp->hw_stats = NULL;
8447 }
8448 }
8449
8450 /*
8451 * Must be invoked only after interrupt sources have been disabled
8452 * and the hardware has been shut down. Can sleep.
8453 */
8454 static int tg3_alloc_consistent(struct tg3 *tp)
8455 {
8456 int i;
8457
8458 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8459 sizeof(struct tg3_hw_stats),
8460 &tp->stats_mapping,
8461 GFP_KERNEL | __GFP_ZERO);
8462 if (!tp->hw_stats)
8463 goto err_out;
8464
8465 for (i = 0; i < tp->irq_cnt; i++) {
8466 struct tg3_napi *tnapi = &tp->napi[i];
8467 struct tg3_hw_status *sblk;
8468
8469 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8470 TG3_HW_STATUS_SIZE,
8471 &tnapi->status_mapping,
8472 GFP_KERNEL | __GFP_ZERO);
8473 if (!tnapi->hw_status)
8474 goto err_out;
8475
8476 sblk = tnapi->hw_status;
8477
8478 if (tg3_flag(tp, ENABLE_RSS)) {
8479 u16 *prodptr = NULL;
8480
8481 /*
8482 * When RSS is enabled, the status block format changes
8483 * slightly. The "rx_jumbo_consumer", "reserved",
8484 * and "rx_mini_consumer" members get mapped to the
8485 * other three rx return ring producer indexes.
8486 */
8487 switch (i) {
8488 case 1:
8489 prodptr = &sblk->idx[0].rx_producer;
8490 break;
8491 case 2:
8492 prodptr = &sblk->rx_jumbo_consumer;
8493 break;
8494 case 3:
8495 prodptr = &sblk->reserved;
8496 break;
8497 case 4:
8498 prodptr = &sblk->rx_mini_consumer;
8499 break;
8500 }
8501 tnapi->rx_rcb_prod_idx = prodptr;
8502 } else {
8503 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8504 }
8505 }
8506
8507 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8508 goto err_out;
8509
8510 return 0;
8511
8512 err_out:
8513 tg3_free_consistent(tp);
8514 return -ENOMEM;
8515 }
8516
8517 #define MAX_WAIT_CNT 1000
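/* With udelay(100) per iteration, MAX_WAIT_CNT bounds each polling loop
 * below at roughly 1000 * 100us = 100ms.
 */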
8518
8519 /* To stop a block, clear the enable bit and poll until it
8520 * clears. tp->lock is held.
8521 */
8522 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8523 {
8524 unsigned int i;
8525 u32 val;
8526
8527 if (tg3_flag(tp, 5705_PLUS)) {
8528 switch (ofs) {
8529 case RCVLSC_MODE:
8530 case DMAC_MODE:
8531 case MBFREE_MODE:
8532 case BUFMGR_MODE:
8533 case MEMARB_MODE:
8534 /* We can't enable/disable these bits of the
8535 * 5705/5750, so just say success.
8536 */
8537 return 0;
8538
8539 default:
8540 break;
8541 }
8542 }
8543
8544 val = tr32(ofs);
8545 val &= ~enable_bit;
8546 tw32_f(ofs, val);
8547
8548 for (i = 0; i < MAX_WAIT_CNT; i++) {
8549 udelay(100);
8550 val = tr32(ofs);
8551 if ((val & enable_bit) == 0)
8552 break;
8553 }
8554
8555 if (i == MAX_WAIT_CNT && !silent) {
8556 dev_err(&tp->pdev->dev,
8557 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8558 ofs, enable_bit);
8559 return -ENODEV;
8560 }
8561
8562 return 0;
8563 }
8564
8565 /* tp->lock is held. */
8566 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8567 {
8568 int i, err;
8569
8570 tg3_disable_ints(tp);
8571
8572 tp->rx_mode &= ~RX_MODE_ENABLE;
8573 tw32_f(MAC_RX_MODE, tp->rx_mode);
8574 udelay(10);
8575
8576 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8577 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8578 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8579 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8580 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8581 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8582
8583 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8584 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8585 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8586 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8587 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8588 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8589 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8590
8591 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8592 tw32_f(MAC_MODE, tp->mac_mode);
8593 udelay(40);
8594
8595 tp->tx_mode &= ~TX_MODE_ENABLE;
8596 tw32_f(MAC_TX_MODE, tp->tx_mode);
8597
8598 for (i = 0; i < MAX_WAIT_CNT; i++) {
8599 udelay(100);
8600 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8601 break;
8602 }
8603 if (i >= MAX_WAIT_CNT) {
8604 dev_err(&tp->pdev->dev,
8605 "%s timed out, TX_MODE_ENABLE will not clear "
8606 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8607 err |= -ENODEV;
8608 }
8609
8610 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8611 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8612 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8613
8614 tw32(FTQ_RESET, 0xffffffff);
8615 tw32(FTQ_RESET, 0x00000000);
8616
8617 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8618 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8619
8620 for (i = 0; i < tp->irq_cnt; i++) {
8621 struct tg3_napi *tnapi = &tp->napi[i];
8622 if (tnapi->hw_status)
8623 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8624 }
8625
8626 return err;
8627 }
8628
8629 /* Save PCI command register before chip reset */
8630 static void tg3_save_pci_state(struct tg3 *tp)
8631 {
8632 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8633 }
8634
8635 /* Restore PCI state after chip reset */
8636 static void tg3_restore_pci_state(struct tg3 *tp)
8637 {
8638 u32 val;
8639
8640 /* Re-enable indirect register accesses. */
8641 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8642 tp->misc_host_ctrl);
8643
8644 /* Set MAX PCI retry to zero. */
8645 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8646 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8647 tg3_flag(tp, PCIX_MODE))
8648 val |= PCISTATE_RETRY_SAME_DMA;
8649 /* Allow reads and writes to the APE register and memory space. */
8650 if (tg3_flag(tp, ENABLE_APE))
8651 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8652 PCISTATE_ALLOW_APE_SHMEM_WR |
8653 PCISTATE_ALLOW_APE_PSPACE_WR;
8654 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8655
8656 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8657
8658 if (!tg3_flag(tp, PCI_EXPRESS)) {
8659 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8660 tp->pci_cacheline_sz);
8661 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8662 tp->pci_lat_timer);
8663 }
8664
8665 /* Make sure PCI-X relaxed ordering bit is clear. */
8666 if (tg3_flag(tp, PCIX_MODE)) {
8667 u16 pcix_cmd;
8668
8669 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8670 &pcix_cmd);
8671 pcix_cmd &= ~PCI_X_CMD_ERO;
8672 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8673 pcix_cmd);
8674 }
8675
8676 if (tg3_flag(tp, 5780_CLASS)) {
8677
8678 /* Chip reset on 5780 will reset MSI enable bit,
8679 * so we need to restore it.
8680 */
8681 if (tg3_flag(tp, USING_MSI)) {
8682 u16 ctrl;
8683
8684 pci_read_config_word(tp->pdev,
8685 tp->msi_cap + PCI_MSI_FLAGS,
8686 &ctrl);
8687 pci_write_config_word(tp->pdev,
8688 tp->msi_cap + PCI_MSI_FLAGS,
8689 ctrl | PCI_MSI_FLAGS_ENABLE);
8690 val = tr32(MSGINT_MODE);
8691 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8692 }
8693 }
8694 }
8695
8696 /* tp->lock is held. */
8697 static int tg3_chip_reset(struct tg3 *tp)
8698 {
8699 u32 val;
8700 void (*write_op)(struct tg3 *, u32, u32);
8701 int i, err;
8702
8703 tg3_nvram_lock(tp);
8704
8705 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8706
8707 /* No matching tg3_nvram_unlock() after this because
8708 * chip reset below will undo the nvram lock.
8709 */
8710 tp->nvram_lock_cnt = 0;
8711
8712 /* GRC_MISC_CFG core clock reset will clear the memory
8713 * enable bit in PCI register 4 and the MSI enable bit
8714 * on some chips, so we save relevant registers here.
8715 */
8716 tg3_save_pci_state(tp);
8717
8718 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8719 tg3_flag(tp, 5755_PLUS))
8720 tw32(GRC_FASTBOOT_PC, 0);
8721
8722 /*
8723 * We must avoid the readl() that normally takes place.
8724 * It locks machines, causes machine checks, and other
8725 * fun things. So, temporarily disable the 5701
8726 * hardware workaround, while we do the reset.
8727 */
8728 write_op = tp->write32;
8729 if (write_op == tg3_write_flush_reg32)
8730 tp->write32 = tg3_write32;
8731
8732 /* Prevent the irq handler from reading or writing PCI registers
8733 * during chip reset when the memory enable bit in the PCI command
8734 * register may be cleared. The chip does not generate interrupt
8735 * at this time, but the irq handler may still be called due to irq
8736 * sharing or irqpoll.
8737 */
8738 tg3_flag_set(tp, CHIP_RESETTING);
8739 for (i = 0; i < tp->irq_cnt; i++) {
8740 struct tg3_napi *tnapi = &tp->napi[i];
8741 if (tnapi->hw_status) {
8742 tnapi->hw_status->status = 0;
8743 tnapi->hw_status->status_tag = 0;
8744 }
8745 tnapi->last_tag = 0;
8746 tnapi->last_irq_tag = 0;
8747 }
8748 smp_mb();
8749
8750 for (i = 0; i < tp->irq_cnt; i++)
8751 synchronize_irq(tp->napi[i].irq_vec);
8752
8753 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8754 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8755 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8756 }
8757
8758 /* do the reset */
8759 val = GRC_MISC_CFG_CORECLK_RESET;
8760
8761 if (tg3_flag(tp, PCI_EXPRESS)) {
8762 /* Force PCIe 1.0a mode */
8763 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8764 !tg3_flag(tp, 57765_PLUS) &&
8765 tr32(TG3_PCIE_PHY_TSTCTL) ==
8766 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8767 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8768
8769 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8770 tw32(GRC_MISC_CFG, (1 << 29));
8771 val |= (1 << 29);
8772 }
8773 }
8774
8775 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8776 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8777 tw32(GRC_VCPU_EXT_CTRL,
8778 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8779 }
8780
8781 /* Manage gphy power for all CPMU-absent PCIe devices. */
8782 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8783 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8784
8785 tw32(GRC_MISC_CFG, val);
8786
8787 /* restore 5701 hardware bug workaround write method */
8788 tp->write32 = write_op;
8789
8790 /* Unfortunately, we have to delay before the PCI read back.
8791 * Some 575X chips will not even respond to a PCI cfg access
8792 * when the reset command is given to the chip.
8793 *
8794 * How do these hardware designers expect things to work
8795 * properly if the PCI write is posted for a long period
8796 * of time? It is always necessary to have some method by
8797 * which a register read back can occur to push the write
8798 * out which does the reset.
8799 *
8800 * For most tg3 variants the trick below works.
8801 * Ho hum...
8802 */
8803 udelay(120);
8804
8805 /* Flush PCI posted writes. The normal MMIO registers
8806 * are inaccessible at this time so this is the only
8807 * way to do this reliably (actually, this is no longer
8808 * the case, see above). I tried to use indirect
8809 * register read/write but this upset some 5701 variants.
8810 */
8811 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8812
8813 udelay(120);
8814
8815 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8816 u16 val16;
8817
8818 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8819 int j;
8820 u32 cfg_val;
8821
8822 /* Wait for link training to complete. */
8823 for (j = 0; j < 5000; j++)
8824 udelay(100);
8825
8826 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8827 pci_write_config_dword(tp->pdev, 0xc4,
8828 cfg_val | (1 << 15));
8829 }
8830
8831 /* Clear the "no snoop" and "relaxed ordering" bits. */
8832 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8833 /*
8834 * Older PCIe devices only support the 128-byte
8835 * MPS setting. Enforce the restriction.
8836 */
8837 if (!tg3_flag(tp, CPMU_PRESENT))
8838 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8839 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8840
8841 /* Clear error status */
8842 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8843 PCI_EXP_DEVSTA_CED |
8844 PCI_EXP_DEVSTA_NFED |
8845 PCI_EXP_DEVSTA_FED |
8846 PCI_EXP_DEVSTA_URD);
8847 }
8848
8849 tg3_restore_pci_state(tp);
8850
8851 tg3_flag_clear(tp, CHIP_RESETTING);
8852 tg3_flag_clear(tp, ERROR_PROCESSED);
8853
8854 val = 0;
8855 if (tg3_flag(tp, 5780_CLASS))
8856 val = tr32(MEMARB_MODE);
8857 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8858
8859 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8860 tg3_stop_fw(tp);
8861 tw32(0x5000, 0x400);
8862 }
8863
8864 if (tg3_flag(tp, IS_SSB_CORE)) {
8865 /*
8866 * BCM4785: In order to avoid repercussions from using
8867 * potentially defective internal ROM, stop the Rx RISC CPU,
8868 * which is not required for normal operation.
8869 */
8870 tg3_stop_fw(tp);
8871 tg3_halt_cpu(tp, RX_CPU_BASE);
8872 }
8873
8874 tw32(GRC_MODE, tp->grc_mode);
8875
8876 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8877 val = tr32(0xc4);
8878
8879 tw32(0xc4, val | (1 << 15));
8880 }
8881
8882 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8883 tg3_asic_rev(tp) == ASIC_REV_5705) {
8884 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8885 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8886 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8887 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8888 }
8889
8890 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8891 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8892 val = tp->mac_mode;
8893 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8894 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8895 val = tp->mac_mode;
8896 } else
8897 val = 0;
8898
8899 tw32_f(MAC_MODE, val);
8900 udelay(40);
8901
8902 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8903
8904 err = tg3_poll_fw(tp);
8905 if (err)
8906 return err;
8907
8908 tg3_mdio_start(tp);
8909
8910 if (tg3_flag(tp, PCI_EXPRESS) &&
8911 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8912 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8913 !tg3_flag(tp, 57765_PLUS)) {
8914 val = tr32(0x7c00);
8915
8916 tw32(0x7c00, val | (1 << 25));
8917 }
8918
8919 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8920 val = tr32(TG3_CPMU_CLCK_ORIDE);
8921 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8922 }
8923
8924 /* Reprobe ASF enable state. */
8925 tg3_flag_clear(tp, ENABLE_ASF);
8926 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8927 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8928
8929 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8930 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8931 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8932 u32 nic_cfg;
8933
8934 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8935 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8936 tg3_flag_set(tp, ENABLE_ASF);
8937 tp->last_event_jiffies = jiffies;
8938 if (tg3_flag(tp, 5750_PLUS))
8939 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8940
8941 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8942 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8943 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8944 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8945 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8946 }
8947 }
8948
8949 return 0;
8950 }
8951
8952 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8953 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8954
8955 /* tp->lock is held. */
8956 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
8957 {
8958 int err;
8959
8960 tg3_stop_fw(tp);
8961
8962 tg3_write_sig_pre_reset(tp, kind);
8963
8964 tg3_abort_hw(tp, silent);
8965 err = tg3_chip_reset(tp);
8966
8967 __tg3_set_mac_addr(tp, false);
8968
8969 tg3_write_sig_legacy(tp, kind);
8970 tg3_write_sig_post_reset(tp, kind);
8971
8972 if (tp->hw_stats) {
8973 /* Save the stats across chip resets... */
8974 tg3_get_nstats(tp, &tp->net_stats_prev);
8975 tg3_get_estats(tp, &tp->estats_prev);
8976
8977 /* And make sure the next sample is new data */
8978 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8979 }
8980
8981 if (err)
8982 return err;
8983
8984 return 0;
8985 }
8986
8987 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8988 {
8989 struct tg3 *tp = netdev_priv(dev);
8990 struct sockaddr *addr = p;
8991 int err = 0;
8992 bool skip_mac_1 = false;
8993
8994 if (!is_valid_ether_addr(addr->sa_data))
8995 return -EADDRNOTAVAIL;
8996
8997 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8998
8999 if (!netif_running(dev))
9000 return 0;
9001
9002 if (tg3_flag(tp, ENABLE_ASF)) {
9003 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9004
9005 addr0_high = tr32(MAC_ADDR_0_HIGH);
9006 addr0_low = tr32(MAC_ADDR_0_LOW);
9007 addr1_high = tr32(MAC_ADDR_1_HIGH);
9008 addr1_low = tr32(MAC_ADDR_1_LOW);
9009
9010 /* Skip MAC addr 1 if ASF is using it. */
9011 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9012 !(addr1_high == 0 && addr1_low == 0))
9013 skip_mac_1 = true;
9014 }
9015 spin_lock_bh(&tp->lock);
9016 __tg3_set_mac_addr(tp, skip_mac_1);
9017 spin_unlock_bh(&tp->lock);
9018
9019 return err;
9020 }
9021
9022 /* tp->lock is held. */
9023 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9024 dma_addr_t mapping, u32 maxlen_flags,
9025 u32 nic_addr)
9026 {
9027 tg3_write_mem(tp,
9028 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9029 ((u64) mapping >> 32));
9030 tg3_write_mem(tp,
9031 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9032 ((u64) mapping & 0xffffffff));
9033 tg3_write_mem(tp,
9034 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9035 maxlen_flags);
9036
9037 if (!tg3_flag(tp, 5705_PLUS))
9038 tg3_write_mem(tp,
9039 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9040 nic_addr);
9041 }
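/* As a reading aid: each TG3_BDINFO block in NIC SRAM is four 32-bit
 * words, written above in order as host DMA address (high, then low),
 * a maxlen/flags word, and, on chips that are not 5705_PLUS, the
 * NIC-side descriptor address.
 */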
9042
9043
9044 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9045 {
9046 int i = 0;
9047
9048 if (!tg3_flag(tp, ENABLE_TSS)) {
9049 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9050 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9051 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9052 } else {
9053 tw32(HOSTCC_TXCOL_TICKS, 0);
9054 tw32(HOSTCC_TXMAX_FRAMES, 0);
9055 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9056
9057 for (; i < tp->txq_cnt; i++) {
9058 u32 reg;
9059
9060 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9061 tw32(reg, ec->tx_coalesce_usecs);
9062 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9063 tw32(reg, ec->tx_max_coalesced_frames);
9064 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9065 tw32(reg, ec->tx_max_coalesced_frames_irq);
9066 }
9067 }
9068
9069 for (; i < tp->irq_max - 1; i++) {
9070 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9071 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9072 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9073 }
9074 }
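/* The per-vector coalescing registers sit at a fixed 0x18-byte stride,
 * so vector n's tx ticks register is HOSTCC_TXCOL_TICKS_VEC1 +
 * (n - 1) * 0x18, which is the layout the loops above rely on.
 */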
9075
9076 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9077 {
9078 int i = 0;
9079 u32 limit = tp->rxq_cnt;
9080
9081 if (!tg3_flag(tp, ENABLE_RSS)) {
9082 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9083 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9084 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9085 limit--;
9086 } else {
9087 tw32(HOSTCC_RXCOL_TICKS, 0);
9088 tw32(HOSTCC_RXMAX_FRAMES, 0);
9089 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9090 }
9091
9092 for (; i < limit; i++) {
9093 u32 reg;
9094
9095 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9096 tw32(reg, ec->rx_coalesce_usecs);
9097 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9098 tw32(reg, ec->rx_max_coalesced_frames);
9099 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9100 tw32(reg, ec->rx_max_coalesced_frames_irq);
9101 }
9102
9103 for (; i < tp->irq_max - 1; i++) {
9104 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9105 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9106 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9107 }
9108 }
9109
9110 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9111 {
9112 tg3_coal_tx_init(tp, ec);
9113 tg3_coal_rx_init(tp, ec);
9114
9115 if (!tg3_flag(tp, 5705_PLUS)) {
9116 u32 val = ec->stats_block_coalesce_usecs;
9117
9118 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9119 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9120
9121 if (!tp->link_up)
9122 val = 0;
9123
9124 tw32(HOSTCC_STAT_COAL_TICKS, val);
9125 }
9126 }
9127
9128 /* tp->lock is held. */
9129 static void tg3_rings_reset(struct tg3 *tp)
9130 {
9131 int i;
9132 u32 stblk, txrcb, rxrcb, limit;
9133 struct tg3_napi *tnapi = &tp->napi[0];
9134
9135 /* Disable all transmit rings but the first. */
9136 if (!tg3_flag(tp, 5705_PLUS))
9137 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9138 else if (tg3_flag(tp, 5717_PLUS))
9139 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9140 else if (tg3_flag(tp, 57765_CLASS) ||
9141 tg3_asic_rev(tp) == ASIC_REV_5762)
9142 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9143 else
9144 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9145
9146 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9147 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9148 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9149 BDINFO_FLAGS_DISABLED);
9150
9151
9152 /* Disable all receive return rings but the first. */
9153 if (tg3_flag(tp, 5717_PLUS))
9154 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9155 else if (!tg3_flag(tp, 5705_PLUS))
9156 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9157 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9158 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9159 tg3_flag(tp, 57765_CLASS))
9160 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9161 else
9162 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9163
9164 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9165 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9166 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9167 BDINFO_FLAGS_DISABLED);
9168
9169 /* Disable interrupts */
9170 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9171 tp->napi[0].chk_msi_cnt = 0;
9172 tp->napi[0].last_rx_cons = 0;
9173 tp->napi[0].last_tx_cons = 0;
9174
9175 /* Zero mailbox registers. */
9176 if (tg3_flag(tp, SUPPORT_MSIX)) {
9177 for (i = 1; i < tp->irq_max; i++) {
9178 tp->napi[i].tx_prod = 0;
9179 tp->napi[i].tx_cons = 0;
9180 if (tg3_flag(tp, ENABLE_TSS))
9181 tw32_mailbox(tp->napi[i].prodmbox, 0);
9182 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9183 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9184 tp->napi[i].chk_msi_cnt = 0;
9185 tp->napi[i].last_rx_cons = 0;
9186 tp->napi[i].last_tx_cons = 0;
9187 }
9188 if (!tg3_flag(tp, ENABLE_TSS))
9189 tw32_mailbox(tp->napi[0].prodmbox, 0);
9190 } else {
9191 tp->napi[0].tx_prod = 0;
9192 tp->napi[0].tx_cons = 0;
9193 tw32_mailbox(tp->napi[0].prodmbox, 0);
9194 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9195 }
9196
9197 /* Make sure the NIC-based send BD rings are disabled. */
9198 if (!tg3_flag(tp, 5705_PLUS)) {
9199 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9200 for (i = 0; i < 16; i++)
9201 tw32_tx_mbox(mbox + i * 8, 0);
9202 }
9203
9204 txrcb = NIC_SRAM_SEND_RCB;
9205 rxrcb = NIC_SRAM_RCV_RET_RCB;
9206
9207 /* Clear status block in ram. */
9208 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9209
9210 /* Set status block DMA address */
9211 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9212 ((u64) tnapi->status_mapping >> 32));
9213 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9214 ((u64) tnapi->status_mapping & 0xffffffff));
9215
9216 if (tnapi->tx_ring) {
9217 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9218 (TG3_TX_RING_SIZE <<
9219 BDINFO_FLAGS_MAXLEN_SHIFT),
9220 NIC_SRAM_TX_BUFFER_DESC);
9221 txrcb += TG3_BDINFO_SIZE;
9222 }
9223
9224 if (tnapi->rx_rcb) {
9225 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9226 (tp->rx_ret_ring_mask + 1) <<
9227 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9228 rxrcb += TG3_BDINFO_SIZE;
9229 }
9230
9231 stblk = HOSTCC_STATBLCK_RING1;
9232
9233 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9234 u64 mapping = (u64)tnapi->status_mapping;
9235 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9236 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9237
9238 /* Clear status block in ram. */
9239 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9240
9241 if (tnapi->tx_ring) {
9242 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9243 (TG3_TX_RING_SIZE <<
9244 BDINFO_FLAGS_MAXLEN_SHIFT),
9245 NIC_SRAM_TX_BUFFER_DESC);
9246 txrcb += TG3_BDINFO_SIZE;
9247 }
9248
9249 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9250 ((tp->rx_ret_ring_mask + 1) <<
9251 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9252
9253 stblk += 8;
9254 rxrcb += TG3_BDINFO_SIZE;
9255 }
9256 }
9257
9258 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9259 {
9260 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9261
9262 if (!tg3_flag(tp, 5750_PLUS) ||
9263 tg3_flag(tp, 5780_CLASS) ||
9264 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9265 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9266 tg3_flag(tp, 57765_PLUS))
9267 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9268 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9269 tg3_asic_rev(tp) == ASIC_REV_5787)
9270 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9271 else
9272 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9273
9274 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9275 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9276
9277 val = min(nic_rep_thresh, host_rep_thresh);
9278 tw32(RCVBDI_STD_THRESH, val);
9279
9280 if (tg3_flag(tp, 57765_PLUS))
9281 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9282
9283 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9284 return;
9285
9286 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9287
9288 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9289
9290 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9291 tw32(RCVBDI_JUMBO_THRESH, val);
9292
9293 if (tg3_flag(tp, 57765_PLUS))
9294 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9295 }
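/* Worked example of the thresholds above, assuming the default
 * tp->rx_pending of 200: host_rep_thresh = max(200 / 8, 1) = 25, and
 * RCVBDI_STD_THRESH becomes min(25, nic_rep_thresh).
 */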
9296
9297 static inline u32 calc_crc(unsigned char *buf, int len)
9298 {
9299 u32 reg;
9300 u32 tmp;
9301 int j, k;
9302
9303 reg = 0xffffffff;
9304
9305 for (j = 0; j < len; j++) {
9306 reg ^= buf[j];
9307
9308 for (k = 0; k < 8; k++) {
9309 tmp = reg & 0x01;
9310
9311 reg >>= 1;
9312
9313 if (tmp)
9314 reg ^= 0xedb88320;
9315 }
9316 }
9317
9318 return ~reg;
9319 }
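/* calc_crc() is the standard bit-reflected Ethernet CRC-32
 * (polynomial 0xedb88320, LSB first).  __tg3_set_rx_mode() below takes
 * the low 7 bits of the inverted CRC to select one of the 128 hash
 * bits spread across the four MAC_HASH_REG_* registers.
 */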
9320
9321 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9322 {
9323 /* accept or reject all multicast frames */
9324 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9325 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9326 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9327 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9328 }
9329
9330 static void __tg3_set_rx_mode(struct net_device *dev)
9331 {
9332 struct tg3 *tp = netdev_priv(dev);
9333 u32 rx_mode;
9334
9335 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9336 RX_MODE_KEEP_VLAN_TAG);
9337
9338 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9339 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9340 * flag clear.
9341 */
9342 if (!tg3_flag(tp, ENABLE_ASF))
9343 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9344 #endif
9345
9346 if (dev->flags & IFF_PROMISC) {
9347 /* Promiscuous mode. */
9348 rx_mode |= RX_MODE_PROMISC;
9349 } else if (dev->flags & IFF_ALLMULTI) {
9350 /* Accept all multicast. */
9351 tg3_set_multi(tp, 1);
9352 } else if (netdev_mc_empty(dev)) {
9353 /* Reject all multicast. */
9354 tg3_set_multi(tp, 0);
9355 } else {
9356 /* Accept one or more multicast(s). */
9357 struct netdev_hw_addr *ha;
9358 u32 mc_filter[4] = { 0, };
9359 u32 regidx;
9360 u32 bit;
9361 u32 crc;
9362
9363 netdev_for_each_mc_addr(ha, dev) {
9364 crc = calc_crc(ha->addr, ETH_ALEN);
9365 bit = ~crc & 0x7f;
9366 regidx = (bit & 0x60) >> 5;
9367 bit &= 0x1f;
9368 mc_filter[regidx] |= (1 << bit);
9369 }
9370
9371 tw32(MAC_HASH_REG_0, mc_filter[0]);
9372 tw32(MAC_HASH_REG_1, mc_filter[1]);
9373 tw32(MAC_HASH_REG_2, mc_filter[2]);
9374 tw32(MAC_HASH_REG_3, mc_filter[3]);
9375 }
9376
9377 if (rx_mode != tp->rx_mode) {
9378 tp->rx_mode = rx_mode;
9379 tw32_f(MAC_RX_MODE, rx_mode);
9380 udelay(10);
9381 }
9382 }
9383
9384 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9385 {
9386 int i;
9387
9388 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9389 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9390 }
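/* ethtool_rxfh_indir_default(i, qcnt) returns i % qcnt, so the default
 * table simply spreads the TG3_RSS_INDIR_TBL_SIZE slots round-robin
 * across the rx queues.
 */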
9391
9392 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9393 {
9394 int i;
9395
9396 if (!tg3_flag(tp, SUPPORT_MSIX))
9397 return;
9398
9399 if (tp->rxq_cnt == 1) {
9400 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9401 return;
9402 }
9403
9404 /* Validate table against current IRQ count */
9405 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9406 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9407 break;
9408 }
9409
9410 if (i != TG3_RSS_INDIR_TBL_SIZE)
9411 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9412 }
9413
9414 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9415 {
9416 int i = 0;
9417 u32 reg = MAC_RSS_INDIR_TBL_0;
9418
9419 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9420 u32 val = tp->rss_ind_tbl[i];
9421 i++;
9422 for (; i % 8; i++) {
9423 val <<= 4;
9424 val |= tp->rss_ind_tbl[i];
9425 }
9426 tw32(reg, val);
9427 reg += 4;
9428 }
9429 }
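/* Each 32-bit MAC_RSS_INDIR_TBL_* register packs eight 4-bit entries,
 * most significant nibble first.  For example, with four rx queues and
 * the default round-robin table (0, 1, 2, 3, 0, 1, 2, 3, ...) the
 * first register works out to 0x01230123.
 */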
9430
9431 /* tp->lock is held. */
9432 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9433 {
9434 u32 val, rdmac_mode;
9435 int i, err, limit;
9436 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9437
9438 tg3_disable_ints(tp);
9439
9440 tg3_stop_fw(tp);
9441
9442 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9443
9444 if (tg3_flag(tp, INIT_COMPLETE))
9445 tg3_abort_hw(tp, 1);
9446
9447 /* Enable MAC control of LPI */
9448 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9449 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9450 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9451 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9452 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9453
9454 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9455
9456 tw32_f(TG3_CPMU_EEE_CTRL,
9457 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9458
9459 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9460 TG3_CPMU_EEEMD_LPI_IN_TX |
9461 TG3_CPMU_EEEMD_LPI_IN_RX |
9462 TG3_CPMU_EEEMD_EEE_ENABLE;
9463
9464 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9465 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9466
9467 if (tg3_flag(tp, ENABLE_APE))
9468 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9469
9470 tw32_f(TG3_CPMU_EEE_MODE, val);
9471
9472 tw32_f(TG3_CPMU_EEE_DBTMR1,
9473 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9474 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9475
9476 tw32_f(TG3_CPMU_EEE_DBTMR2,
9477 TG3_CPMU_DBTMR2_APE_TX_2047US |
9478 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9479 }
9480
9481 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9482 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9483 tg3_phy_pull_config(tp);
9484 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9485 }
9486
9487 if (reset_phy)
9488 tg3_phy_reset(tp);
9489
9490 err = tg3_chip_reset(tp);
9491 if (err)
9492 return err;
9493
9494 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9495
9496 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9497 val = tr32(TG3_CPMU_CTRL);
9498 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9499 tw32(TG3_CPMU_CTRL, val);
9500
9501 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9502 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9503 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9504 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9505
9506 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9507 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9508 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9509 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9510
9511 val = tr32(TG3_CPMU_HST_ACC);
9512 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9513 val |= CPMU_HST_ACC_MACCLK_6_25;
9514 tw32(TG3_CPMU_HST_ACC, val);
9515 }
9516
9517 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9518 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9519 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9520 PCIE_PWR_MGMT_L1_THRESH_4MS;
9521 tw32(PCIE_PWR_MGMT_THRESH, val);
9522
9523 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9524 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9525
9526 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9527
9528 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9529 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9530 }
9531
9532 if (tg3_flag(tp, L1PLLPD_EN)) {
9533 u32 grc_mode = tr32(GRC_MODE);
9534
9535 /* Access the lower 1K of PL PCIE block registers. */
9536 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9537 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9538
9539 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9540 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9541 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9542
9543 tw32(GRC_MODE, grc_mode);
9544 }
9545
9546 if (tg3_flag(tp, 57765_CLASS)) {
9547 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9548 u32 grc_mode = tr32(GRC_MODE);
9549
9550 /* Access the lower 1K of PL PCIE block registers. */
9551 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9552 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9553
9554 val = tr32(TG3_PCIE_TLDLPL_PORT +
9555 TG3_PCIE_PL_LO_PHYCTL5);
9556 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9557 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9558
9559 tw32(GRC_MODE, grc_mode);
9560 }
9561
9562 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9563 u32 grc_mode;
9564
9565 /* Fix transmit hangs */
9566 val = tr32(TG3_CPMU_PADRNG_CTL);
9567 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9568 tw32(TG3_CPMU_PADRNG_CTL, val);
9569
9570 grc_mode = tr32(GRC_MODE);
9571
9572 /* Access the lower 1K of DL PCIE block registers. */
9573 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9574 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9575
9576 val = tr32(TG3_PCIE_TLDLPL_PORT +
9577 TG3_PCIE_DL_LO_FTSMAX);
9578 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9579 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9580 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9581
9582 tw32(GRC_MODE, grc_mode);
9583 }
9584
9585 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9586 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9587 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9588 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9589 }
9590
9591 /* This works around an issue with Athlon chipsets on
9592 * B3 tigon3 silicon. This bit has no effect on any
9593 * other revision. But do not set this on PCI Express
9594 * chips and don't even touch the clocks if the CPMU is present.
9595 */
9596 if (!tg3_flag(tp, CPMU_PRESENT)) {
9597 if (!tg3_flag(tp, PCI_EXPRESS))
9598 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9599 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9600 }
9601
9602 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9603 tg3_flag(tp, PCIX_MODE)) {
9604 val = tr32(TG3PCI_PCISTATE);
9605 val |= PCISTATE_RETRY_SAME_DMA;
9606 tw32(TG3PCI_PCISTATE, val);
9607 }
9608
9609 if (tg3_flag(tp, ENABLE_APE)) {
9610 /* Allow reads and writes to the
9611 * APE register and memory space.
9612 */
9613 val = tr32(TG3PCI_PCISTATE);
9614 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9615 PCISTATE_ALLOW_APE_SHMEM_WR |
9616 PCISTATE_ALLOW_APE_PSPACE_WR;
9617 tw32(TG3PCI_PCISTATE, val);
9618 }
9619
9620 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9621 /* Enable some hw fixes. */
9622 val = tr32(TG3PCI_MSI_DATA);
9623 val |= (1 << 26) | (1 << 28) | (1 << 29);
9624 tw32(TG3PCI_MSI_DATA, val);
9625 }
9626
9627 /* Descriptor ring init may make accesses to the
9628 * NIC SRAM area to set up the TX descriptors, so we
9629 * can only do this after the hardware has been
9630 * successfully reset.
9631 */
9632 err = tg3_init_rings(tp);
9633 if (err)
9634 return err;
9635
9636 if (tg3_flag(tp, 57765_PLUS)) {
9637 val = tr32(TG3PCI_DMA_RW_CTRL) &
9638 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9639 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9640 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9641 if (!tg3_flag(tp, 57765_CLASS) &&
9642 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9643 tg3_asic_rev(tp) != ASIC_REV_5762)
9644 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9645 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9646 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9647 tg3_asic_rev(tp) != ASIC_REV_5761) {
9648 /* This value is determined during the probe-time DMA
9649 * engine test, tg3_test_dma.
9650 */
9651 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9652 }
9653
9654 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9655 GRC_MODE_4X_NIC_SEND_RINGS |
9656 GRC_MODE_NO_TX_PHDR_CSUM |
9657 GRC_MODE_NO_RX_PHDR_CSUM);
9658 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9659
9660 /* Pseudo-header checksum is done by hardware logic and not
9661 * the offload processors, so make the chip do the pseudo-
9662 * header checksums on receive. For transmit it is more
9663 * convenient to do the pseudo-header checksum in software
9664 * as Linux does that on transmit for us in all cases.
9665 */
9666 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9667
9668 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9669 if (tp->rxptpctl)
9670 tw32(TG3_RX_PTP_CTL,
9671 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9672
9673 if (tg3_flag(tp, PTP_CAPABLE))
9674 val |= GRC_MODE_TIME_SYNC_ENABLE;
9675
9676 tw32(GRC_MODE, tp->grc_mode | val);
9677
9678 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9679 val = tr32(GRC_MISC_CFG);
9680 val &= ~0xff;
9681 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9682 tw32(GRC_MISC_CFG, val);
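/* A prescaler value of 65 presumably divides the fixed 66 MHz clock by
 * (65 + 1) = 66, giving the internal timers a 1 MHz (1 us) tick.
 */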
9683
9684 /* Initialize MBUF/DESC pool. */
9685 if (tg3_flag(tp, 5750_PLUS)) {
9686 /* Do nothing. */
9687 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9688 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9689 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9690 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9691 else
9692 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9693 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9694 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9695 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9696 int fw_len;
9697
9698 fw_len = tp->fw_len;
9699 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9700 tw32(BUFMGR_MB_POOL_ADDR,
9701 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9702 tw32(BUFMGR_MB_POOL_SIZE,
9703 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9704 }
9705
9706 if (tp->dev->mtu <= ETH_DATA_LEN) {
9707 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9708 tp->bufmgr_config.mbuf_read_dma_low_water);
9709 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9710 tp->bufmgr_config.mbuf_mac_rx_low_water);
9711 tw32(BUFMGR_MB_HIGH_WATER,
9712 tp->bufmgr_config.mbuf_high_water);
9713 } else {
9714 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9715 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9716 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9717 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9718 tw32(BUFMGR_MB_HIGH_WATER,
9719 tp->bufmgr_config.mbuf_high_water_jumbo);
9720 }
9721 tw32(BUFMGR_DMA_LOW_WATER,
9722 tp->bufmgr_config.dma_low_water);
9723 tw32(BUFMGR_DMA_HIGH_WATER,
9724 tp->bufmgr_config.dma_high_water);
9725
9726 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9727 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9728 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9729 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9730 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9731 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9732 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9733 tw32(BUFMGR_MODE, val);
9734 for (i = 0; i < 2000; i++) {
9735 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9736 break;
9737 udelay(10);
9738 }
9739 if (i >= 2000) {
9740 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9741 return -ENODEV;
9742 }
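/* 2000 iterations of udelay(10) bound the buffer-manager enable wait
 * above at roughly 20 ms.
 */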
9743
9744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9745 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9746
9747 tg3_setup_rxbd_thresholds(tp);
9748
9749 /* Initialize TG3_BDINFO's at:
9750 * RCVDBDI_STD_BD: standard eth size rx ring
9751 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9752 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9753 *
9754 * like so:
9755 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9756 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9757 * ring attribute flags
9758 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9759 *
9760 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9761 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9762 *
9763 * The size of each ring is fixed in the firmware, but the location is
9764 * configurable.
9765 */
9766 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9767 ((u64) tpr->rx_std_mapping >> 32));
9768 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9769 ((u64) tpr->rx_std_mapping & 0xffffffff));
9770 if (!tg3_flag(tp, 5717_PLUS))
9771 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9772 NIC_SRAM_RX_BUFFER_DESC);
9773
9774 /* Disable the mini ring */
9775 if (!tg3_flag(tp, 5705_PLUS))
9776 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9777 BDINFO_FLAGS_DISABLED);
9778
9779 /* Program the jumbo buffer descriptor ring control
9780 * blocks on those devices that have them.
9781 */
9782 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9783 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9784
9785 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9786 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9787 ((u64) tpr->rx_jmb_mapping >> 32));
9788 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9789 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9790 val = TG3_RX_JMB_RING_SIZE(tp) <<
9791 BDINFO_FLAGS_MAXLEN_SHIFT;
9792 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9793 val | BDINFO_FLAGS_USE_EXT_RECV);
9794 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9795 tg3_flag(tp, 57765_CLASS) ||
9796 tg3_asic_rev(tp) == ASIC_REV_5762)
9797 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9798 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9799 } else {
9800 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9801 BDINFO_FLAGS_DISABLED);
9802 }
9803
9804 if (tg3_flag(tp, 57765_PLUS)) {
9805 val = TG3_RX_STD_RING_SIZE(tp);
9806 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9807 val |= (TG3_RX_STD_DMA_SZ << 2);
9808 } else
9809 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9810 } else
9811 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9812
9813 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9814
9815 tpr->rx_std_prod_idx = tp->rx_pending;
9816 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9817
9818 tpr->rx_jmb_prod_idx =
9819 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9820 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9821
9822 tg3_rings_reset(tp);
9823
9824 /* Initialize MAC address and backoff seed. */
9825 __tg3_set_mac_addr(tp, false);
9826
9827 /* MTU + ethernet header + FCS + optional VLAN tag */
9828 tw32(MAC_RX_MTU_SIZE,
9829 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
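/* For the standard 1500-byte MTU this is 1500 + 14 + 4 + 4 = 1522
 * bytes, the classic maximum VLAN-tagged frame size.
 */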
9830
9831 /* The slot time is changed by tg3_setup_phy if we
9832 * run at gigabit with half duplex.
9833 */
9834 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9835 (6 << TX_LENGTHS_IPG_SHIFT) |
9836 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9837
9838 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9839 tg3_asic_rev(tp) == ASIC_REV_5762)
9840 val |= tr32(MAC_TX_LENGTHS) &
9841 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9842 TX_LENGTHS_CNT_DWN_VAL_MSK);
9843
9844 tw32(MAC_TX_LENGTHS, val);
9845
9846 /* Receive rules. */
9847 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9848 tw32(RCVLPC_CONFIG, 0x0181);
9849
9850 /* Calculate RDMAC_MODE setting early, we need it to determine
9851 * the RCVLPC_STATE_ENABLE mask.
9852 */
9853 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9854 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9855 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9856 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9857 RDMAC_MODE_LNGREAD_ENAB);
9858
9859 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9860 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9861
9862 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9863 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9864 tg3_asic_rev(tp) == ASIC_REV_57780)
9865 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9866 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9867 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9868
9869 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9870 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9871 if (tg3_flag(tp, TSO_CAPABLE) &&
9872 tg3_asic_rev(tp) == ASIC_REV_5705) {
9873 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9874 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9875 !tg3_flag(tp, IS_5788)) {
9876 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9877 }
9878 }
9879
9880 if (tg3_flag(tp, PCI_EXPRESS))
9881 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9882
9883 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9884 tp->dma_limit = 0;
9885 if (tp->dev->mtu <= ETH_DATA_LEN) {
9886 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9887 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9888 }
9889 }
9890
9891 if (tg3_flag(tp, HW_TSO_1) ||
9892 tg3_flag(tp, HW_TSO_2) ||
9893 tg3_flag(tp, HW_TSO_3))
9894 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9895
9896 if (tg3_flag(tp, 57765_PLUS) ||
9897 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9898 tg3_asic_rev(tp) == ASIC_REV_57780)
9899 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9900
9901 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9902 tg3_asic_rev(tp) == ASIC_REV_5762)
9903 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9904
9905 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9906 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9907 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9908 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9909 tg3_flag(tp, 57765_PLUS)) {
9910 u32 tgtreg;
9911
9912 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9913 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9914 else
9915 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9916
9917 val = tr32(tgtreg);
9918 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9919 tg3_asic_rev(tp) == ASIC_REV_5762) {
9920 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9921 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9922 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9923 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9924 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9925 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9926 }
9927 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9928 }
9929
9930 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9931 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9932 tg3_asic_rev(tp) == ASIC_REV_5762) {
9933 u32 tgtreg;
9934
9935 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9936 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9937 else
9938 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9939
9940 val = tr32(tgtreg);
9941 tw32(tgtreg, val |
9942 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9943 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9944 }
9945
9946 /* Receive/send statistics. */
9947 if (tg3_flag(tp, 5750_PLUS)) {
9948 val = tr32(RCVLPC_STATS_ENABLE);
9949 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9950 tw32(RCVLPC_STATS_ENABLE, val);
9951 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9952 tg3_flag(tp, TSO_CAPABLE)) {
9953 val = tr32(RCVLPC_STATS_ENABLE);
9954 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9955 tw32(RCVLPC_STATS_ENABLE, val);
9956 } else {
9957 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9958 }
9959 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9960 tw32(SNDDATAI_STATSENAB, 0xffffff);
9961 tw32(SNDDATAI_STATSCTRL,
9962 (SNDDATAI_SCTRL_ENABLE |
9963 SNDDATAI_SCTRL_FASTUPD));
9964
9965 /* Setup host coalescing engine. */
9966 tw32(HOSTCC_MODE, 0);
9967 for (i = 0; i < 2000; i++) {
9968 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9969 break;
9970 udelay(10);
9971 }
9972
9973 __tg3_set_coalesce(tp, &tp->coal);
9974
9975 if (!tg3_flag(tp, 5705_PLUS)) {
9976 /* Status/statistics block address. See tg3_timer,
9977 * the tg3_periodic_fetch_stats call there, and
9978 * tg3_get_stats to see how this works for 5705/5750 chips.
9979 */
9980 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9981 ((u64) tp->stats_mapping >> 32));
9982 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9983 ((u64) tp->stats_mapping & 0xffffffff));
9984 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9985
9986 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9987
9988 /* Clear statistics and status block memory areas */
9989 for (i = NIC_SRAM_STATS_BLK;
9990 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9991 i += sizeof(u32)) {
9992 tg3_write_mem(tp, i, 0);
9993 udelay(40);
9994 }
9995 }
9996
9997 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9998
9999 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10000 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10001 if (!tg3_flag(tp, 5705_PLUS))
10002 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10003
10004 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10005 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10006 /* reset to prevent losing 1st rx packet intermittently */
10007 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10008 udelay(10);
10009 }
10010
10011 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10012 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10013 MAC_MODE_FHDE_ENABLE;
10014 if (tg3_flag(tp, ENABLE_APE))
10015 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10016 if (!tg3_flag(tp, 5705_PLUS) &&
10017 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10018 tg3_asic_rev(tp) != ASIC_REV_5700)
10019 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10020 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10021 udelay(40);
10022
10023 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10024 * If TG3_FLAG_IS_NIC is zero, we should read the
10025 * register to preserve the GPIO settings for LOMs. The GPIOs,
10026 * whether used as inputs or outputs, are set by boot code after
10027 * reset.
10028 */
10029 if (!tg3_flag(tp, IS_NIC)) {
10030 u32 gpio_mask;
10031
10032 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10033 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10034 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10035
10036 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10037 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10038 GRC_LCLCTRL_GPIO_OUTPUT3;
10039
10040 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10041 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10042
10043 tp->grc_local_ctrl &= ~gpio_mask;
10044 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10045
10046 /* GPIO1 must be driven high for eeprom write protect */
10047 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10048 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10049 GRC_LCLCTRL_GPIO_OUTPUT1);
10050 }
10051 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10052 udelay(100);
10053
10054 if (tg3_flag(tp, USING_MSIX)) {
10055 val = tr32(MSGINT_MODE);
10056 val |= MSGINT_MODE_ENABLE;
10057 if (tp->irq_cnt > 1)
10058 val |= MSGINT_MODE_MULTIVEC_EN;
10059 if (!tg3_flag(tp, 1SHOT_MSI))
10060 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10061 tw32(MSGINT_MODE, val);
10062 }
10063
10064 if (!tg3_flag(tp, 5705_PLUS)) {
10065 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10066 udelay(40);
10067 }
10068
10069 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10070 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10071 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10072 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10073 WDMAC_MODE_LNGREAD_ENAB);
10074
10075 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10076 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10077 if (tg3_flag(tp, TSO_CAPABLE) &&
10078 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10079 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10080 /* nothing */
10081 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10082 !tg3_flag(tp, IS_5788)) {
10083 val |= WDMAC_MODE_RX_ACCEL;
10084 }
10085 }
10086
10087 /* Enable host coalescing bug fix */
10088 if (tg3_flag(tp, 5755_PLUS))
10089 val |= WDMAC_MODE_STATUS_TAG_FIX;
10090
10091 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10092 val |= WDMAC_MODE_BURST_ALL_DATA;
10093
10094 tw32_f(WDMAC_MODE, val);
10095 udelay(40);
10096
10097 if (tg3_flag(tp, PCIX_MODE)) {
10098 u16 pcix_cmd;
10099
10100 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10101 &pcix_cmd);
10102 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10103 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10104 pcix_cmd |= PCI_X_CMD_READ_2K;
10105 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10106 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10107 pcix_cmd |= PCI_X_CMD_READ_2K;
10108 }
10109 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10110 pcix_cmd);
10111 }
10112
10113 tw32_f(RDMAC_MODE, rdmac_mode);
10114 udelay(40);
10115
10116 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
10117 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10118 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10119 break;
10120 }
10121 if (i < TG3_NUM_RDMA_CHANNELS) {
10122 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10123 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
10124 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10125 tg3_flag_set(tp, 5719_RDMA_BUG);
10126 }
10127 }
10128
10129 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10130 if (!tg3_flag(tp, 5705_PLUS))
10131 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10132
10133 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10134 tw32(SNDDATAC_MODE,
10135 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10136 else
10137 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10138
10139 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10140 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10141 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10142 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10143 val |= RCVDBDI_MODE_LRG_RING_SZ;
10144 tw32(RCVDBDI_MODE, val);
10145 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10146 if (tg3_flag(tp, HW_TSO_1) ||
10147 tg3_flag(tp, HW_TSO_2) ||
10148 tg3_flag(tp, HW_TSO_3))
10149 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10150 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10151 if (tg3_flag(tp, ENABLE_TSS))
10152 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10153 tw32(SNDBDI_MODE, val);
10154 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10155
10156 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10157 err = tg3_load_5701_a0_firmware_fix(tp);
10158 if (err)
10159 return err;
10160 }
10161
10162 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10163 /* Ignore any errors for the firmware download. If download
10164 * fails, the device will operate with EEE disabled.
10165 */
10166 tg3_load_57766_firmware(tp);
10167 }
10168
10169 if (tg3_flag(tp, TSO_CAPABLE)) {
10170 err = tg3_load_tso_firmware(tp);
10171 if (err)
10172 return err;
10173 }
10174
10175 tp->tx_mode = TX_MODE_ENABLE;
10176
10177 if (tg3_flag(tp, 5755_PLUS) ||
10178 tg3_asic_rev(tp) == ASIC_REV_5906)
10179 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10180
10181 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10182 tg3_asic_rev(tp) == ASIC_REV_5762) {
10183 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10184 tp->tx_mode &= ~val;
10185 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10186 }
10187
10188 tw32_f(MAC_TX_MODE, tp->tx_mode);
10189 udelay(100);
10190
10191 if (tg3_flag(tp, ENABLE_RSS)) {
10192 tg3_rss_write_indir_tbl(tp);
10193
10194 /* Setup the "secret" hash key. */
10195 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10196 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10197 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10198 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10199 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10200 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10201 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10202 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10203 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10204 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10205 }
10206
10207 tp->rx_mode = RX_MODE_ENABLE;
10208 if (tg3_flag(tp, 5755_PLUS))
10209 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10210
10211 if (tg3_flag(tp, ENABLE_RSS))
10212 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10213 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10214 RX_MODE_RSS_IPV6_HASH_EN |
10215 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10216 RX_MODE_RSS_IPV4_HASH_EN |
10217 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10218
10219 tw32_f(MAC_RX_MODE, tp->rx_mode);
10220 udelay(10);
10221
10222 tw32(MAC_LED_CTRL, tp->led_ctrl);
10223
10224 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10225 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10226 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10227 udelay(10);
10228 }
10229 tw32_f(MAC_RX_MODE, tp->rx_mode);
10230 udelay(10);
10231
10232 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10233 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10234 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10235 /* Set drive transmission level to 1.2V */
10236 /* only if the signal pre-emphasis bit is not set */
10237 val = tr32(MAC_SERDES_CFG);
10238 val &= 0xfffff000;
10239 val |= 0x880;
10240 tw32(MAC_SERDES_CFG, val);
10241 }
10242 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10243 tw32(MAC_SERDES_CFG, 0x616000);
10244 }
10245
10246 /* Prevent chip from dropping frames when flow control
10247 * is enabled.
10248 */
10249 if (tg3_flag(tp, 57765_CLASS))
10250 val = 1;
10251 else
10252 val = 2;
10253 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10254
10255 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10256 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10257 /* Use hardware link auto-negotiation */
10258 tg3_flag_set(tp, HW_AUTONEG);
10259 }
10260
10261 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10262 tg3_asic_rev(tp) == ASIC_REV_5714) {
10263 u32 tmp;
10264
10265 tmp = tr32(SERDES_RX_CTRL);
10266 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10267 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10268 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10269 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10270 }
10271
10272 if (!tg3_flag(tp, USE_PHYLIB)) {
10273 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10274 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10275
10276 err = tg3_setup_phy(tp, false);
10277 if (err)
10278 return err;
10279
10280 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10281 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10282 u32 tmp;
10283
10284 /* Clear CRC stats. */
10285 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10286 tg3_writephy(tp, MII_TG3_TEST1,
10287 tmp | MII_TG3_TEST1_CRC_EN);
10288 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10289 }
10290 }
10291 }
10292
10293 __tg3_set_rx_mode(tp->dev);
10294
10295 /* Initialize receive rules. */
10296 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10297 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10298 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10299 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10300
10301 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10302 limit = 8;
10303 else
10304 limit = 16;
10305 if (tg3_flag(tp, ENABLE_ASF))
10306 limit -= 4;
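/* The switch below relies on deliberate fall-through: starting at the
 * computed limit, each case clears one rule/value pair and control
 * falls through to clear every lower-numbered pair down to rule 4.
 * Rules 2 and 3 are intentionally skipped (note the commented-out
 * writes) and rules 0 and 1 were programmed just above.
 */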
10307 switch (limit) {
10308 case 16:
10309 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10310 case 15:
10311 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10312 case 14:
10313 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10314 case 13:
10315 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10316 case 12:
10317 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10318 case 11:
10319 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10320 case 10:
10321 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10322 case 9:
10323 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10324 case 8:
10325 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10326 case 7:
10327 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10328 case 6:
10329 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10330 case 5:
10331 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10332 case 4:
10333 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10334 case 3:
10335 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10336 case 2:
10337 case 1:
10338
10339 default:
10340 break;
10341 }
10342
10343 if (tg3_flag(tp, ENABLE_APE))
10344 /* Write our heartbeat update interval to APE. */
10345 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10346 APE_HOST_HEARTBEAT_INT_DISABLE);
10347
10348 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10349
10350 return 0;
10351 }
10352
10353 /* Called at device open time to get the chip ready for
10354 * packet processing. Invoked with tp->lock held.
10355 */
10356 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10357 {
10358 tg3_switch_clocks(tp);
10359
10360 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10361
10362 return tg3_reset_hw(tp, reset_phy);
10363 }
10364
10365 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10366 {
10367 int i;
10368
10369 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10370 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10371
10372 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10373 off += len;
10374
10375 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10376 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10377 memset(ocir, 0, TG3_OCIR_LEN);
10378 }
10379 }
10380
10381 /* sysfs attributes for hwmon */
10382 static ssize_t tg3_show_temp(struct device *dev,
10383 struct device_attribute *devattr, char *buf)
10384 {
10385 struct pci_dev *pdev = to_pci_dev(dev);
10386 struct net_device *netdev = pci_get_drvdata(pdev);
10387 struct tg3 *tp = netdev_priv(netdev);
10388 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10389 u32 temperature;
10390
10391 spin_lock_bh(&tp->lock);
10392 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10393 sizeof(temperature));
10394 spin_unlock_bh(&tp->lock);
10395 return sprintf(buf, "%u\n", temperature);
10396 }
10397
10398
10399 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10400 TG3_TEMP_SENSOR_OFFSET);
10401 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10402 TG3_TEMP_CAUTION_OFFSET);
10403 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10404 TG3_TEMP_MAX_OFFSET);
10405
10406 static struct attribute *tg3_attributes[] = {
10407 &sensor_dev_attr_temp1_input.dev_attr.attr,
10408 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10409 &sensor_dev_attr_temp1_max.dev_attr.attr,
10410 NULL
10411 };
10412
10413 static const struct attribute_group tg3_group = {
10414 .attrs = tg3_attributes,
10415 };
10416
10417 static void tg3_hwmon_close(struct tg3 *tp)
10418 {
10419 if (tp->hwmon_dev) {
10420 hwmon_device_unregister(tp->hwmon_dev);
10421 tp->hwmon_dev = NULL;
10422 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10423 }
10424 }
10425
10426 static void tg3_hwmon_open(struct tg3 *tp)
10427 {
10428 int i, err;
10429 u32 size = 0;
10430 struct pci_dev *pdev = tp->pdev;
10431 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10432
10433 tg3_sd_scan_scratchpad(tp, ocirs);
10434
10435 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10436 if (!ocirs[i].src_data_length)
10437 continue;
10438
10439 size += ocirs[i].src_hdr_length;
10440 size += ocirs[i].src_data_length;
10441 }
10442
10443 if (!size)
10444 return;
10445
10446 /* Register hwmon sysfs hooks */
10447 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10448 if (err) {
10449 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10450 return;
10451 }
10452
10453 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10454 if (IS_ERR(tp->hwmon_dev)) {
10455 tp->hwmon_dev = NULL;
10456 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10457 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10458 }
10459 }
10460
10461
10462 #define TG3_STAT_ADD32(PSTAT, REG) \
10463 do { u32 __val = tr32(REG); \
10464 (PSTAT)->low += __val; \
10465 if ((PSTAT)->low < __val) \
10466 (PSTAT)->high += 1; \
10467 } while (0)
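/* Worked example of the carry detection above: the hardware counters
 * are only 32 bits wide, so each sample is accumulated into a 64-bit
 * {high, low} pair.  If low is 0xffffff00 and the register reads
 * 0x200, then low += 0x200 wraps to 0x100; because the unsigned
 * addition overflowed, the result is less than the addend
 * (0x100 < 0x200), so high is incremented to preserve the full
 * 64-bit count.
 */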
10468
10469 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10470 {
10471 struct tg3_hw_stats *sp = tp->hw_stats;
10472
10473 if (!tp->link_up)
10474 return;
10475
10476 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10477 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10478 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10479 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10480 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10481 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10482 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10483 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10484 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10485 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10486 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10487 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10488 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10489 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10490 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10491 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10492 u32 val;
10493
10494 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10495 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10496 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10497 tg3_flag_clear(tp, 5719_RDMA_BUG);
10498 }
10499
10500 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10501 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10502 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10503 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10504 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10505 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10506 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10507 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10508 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10509 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10510 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10511 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10512 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10513 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10514
10515 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10516 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10517 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10518 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10519 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10520 } else {
10521 u32 val = tr32(HOSTCC_FLOW_ATTN);
10522 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10523 if (val) {
10524 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10525 sp->rx_discards.low += val;
10526 if (sp->rx_discards.low < val)
10527 sp->rx_discards.high += 1;
10528 }
10529 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10530 }
10531 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10532 }
10533
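/* Watch for lost MSIs.  If a NAPI context still has work pending but
 * neither its rx nor tx consumer index has advanced since the last
 * timer tick, assume the interrupt was missed and call tg3_msi() by
 * hand.  chk_msi_cnt grants one grace tick before forcing the call.
 */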
10534 static void tg3_chk_missed_msi(struct tg3 *tp)
10535 {
10536 u32 i;
10537
10538 for (i = 0; i < tp->irq_cnt; i++) {
10539 struct tg3_napi *tnapi = &tp->napi[i];
10540
10541 if (tg3_has_work(tnapi)) {
10542 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10543 tnapi->last_tx_cons == tnapi->tx_cons) {
10544 if (tnapi->chk_msi_cnt < 1) {
10545 tnapi->chk_msi_cnt++;
10546 return;
10547 }
10548 tg3_msi(0, tnapi);
10549 }
10550 }
10551 tnapi->chk_msi_cnt = 0;
10552 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10553 tnapi->last_tx_cons = tnapi->tx_cons;
10554 }
10555 }
10556
10557 static void tg3_timer(unsigned long __opaque)
10558 {
10559 struct tg3 *tp = (struct tg3 *) __opaque;
10560
10561 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10562 goto restart_timer;
10563
10564 spin_lock(&tp->lock);
10565
10566 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10567 tg3_flag(tp, 57765_CLASS))
10568 tg3_chk_missed_msi(tp);
10569
10570 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10571 /* BCM4785: Flush posted writes from GbE to host memory. */
10572 tr32(HOSTCC_MODE);
10573 }
10574
10575 if (!tg3_flag(tp, TAGGED_STATUS)) {
10576 /* All of this garbage is because, when using non-tagged
10577 * IRQ status, the mailbox/status_block protocol the chip
10578 * uses with the CPU is race prone.
10579 */
10580 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10581 tw32(GRC_LOCAL_CTRL,
10582 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10583 } else {
10584 tw32(HOSTCC_MODE, tp->coalesce_mode |
10585 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10586 }
10587
10588 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10589 spin_unlock(&tp->lock);
10590 tg3_reset_task_schedule(tp);
10591 goto restart_timer;
10592 }
10593 }
10594
10595 /* This part only runs once per second. */
10596 if (!--tp->timer_counter) {
10597 if (tg3_flag(tp, 5705_PLUS))
10598 tg3_periodic_fetch_stats(tp);
10599
10600 if (tp->setlpicnt && !--tp->setlpicnt)
10601 tg3_phy_eee_enable(tp);
10602
10603 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10604 u32 mac_stat;
10605 int phy_event;
10606
10607 mac_stat = tr32(MAC_STATUS);
10608
10609 phy_event = 0;
10610 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10611 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10612 phy_event = 1;
10613 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10614 phy_event = 1;
10615
10616 if (phy_event)
10617 tg3_setup_phy(tp, false);
10618 } else if (tg3_flag(tp, POLL_SERDES)) {
10619 u32 mac_stat = tr32(MAC_STATUS);
10620 int need_setup = 0;
10621
10622 if (tp->link_up &&
10623 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10624 need_setup = 1;
10625 }
10626 if (!tp->link_up &&
10627 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10628 MAC_STATUS_SIGNAL_DET))) {
10629 need_setup = 1;
10630 }
10631 if (need_setup) {
10632 if (!tp->serdes_counter) {
10633 tw32_f(MAC_MODE,
10634 (tp->mac_mode &
10635 ~MAC_MODE_PORT_MODE_MASK));
10636 udelay(40);
10637 tw32_f(MAC_MODE, tp->mac_mode);
10638 udelay(40);
10639 }
10640 tg3_setup_phy(tp, false);
10641 }
10642 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10643 tg3_flag(tp, 5780_CLASS)) {
10644 tg3_serdes_parallel_detect(tp);
10645 }
10646
10647 tp->timer_counter = tp->timer_multiplier;
10648 }
10649
10650 /* Heartbeat is only sent once every 2 seconds.
10651 *
10652 * The heartbeat is to tell the ASF firmware that the host
10653 * driver is still alive. In the event that the OS crashes,
10654 * ASF needs to reset the hardware to free up the FIFO space
10655 * that may be filled with rx packets destined for the host.
10656 * If the FIFO is full, ASF will no longer function properly.
10657 *
10658 * Unintended resets have been reported on real time kernels
10659 * where the timer doesn't run on time. Netpoll will also have
10660 * the same problem.
10661 *
10662 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10663 * to check the ring condition when the heartbeat is expiring
10664 * before doing the reset. This will prevent most unintended
10665 * resets.
10666 */
10667 if (!--tp->asf_counter) {
10668 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10669 tg3_wait_for_event_ack(tp);
10670
10671 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10672 FWCMD_NICDRV_ALIVE3);
10673 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10674 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10675 TG3_FW_UPDATE_TIMEOUT_SEC);
10676
10677 tg3_generate_fw_event(tp);
10678 }
10679 tp->asf_counter = tp->asf_multiplier;
10680 }
10681
10682 spin_unlock(&tp->lock);
10683
10684 restart_timer:
10685 tp->timer.expires = jiffies + tp->timer_offset;
10686 add_timer(&tp->timer);
10687 }
10688
10689 static void tg3_timer_init(struct tg3 *tp)
10690 {
10691 if (tg3_flag(tp, TAGGED_STATUS) &&
10692 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10693 !tg3_flag(tp, 57765_CLASS))
10694 tp->timer_offset = HZ;
10695 else
10696 tp->timer_offset = HZ / 10;
10697
10698 BUG_ON(tp->timer_offset > HZ);
10699
10700 tp->timer_multiplier = (HZ / tp->timer_offset);
10701 tp->asf_multiplier = (HZ / tp->timer_offset) *
10702 TG3_FW_UPDATE_FREQ_SEC;
10703
10704 init_timer(&tp->timer);
10705 tp->timer.data = (unsigned long) tp;
10706 tp->timer.function = tg3_timer;
10707 }
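/* Worked example of the arithmetic above, assuming HZ=1000 and a
 * tagged-status chip outside the 5717/57765 families: timer_offset is
 * HZ, so the timer fires once per second and timer_multiplier is 1;
 * with the 2-second heartbeat interval described in tg3_timer()
 * (assuming TG3_FW_UPDATE_FREQ_SEC is 2), asf_multiplier comes out to
 * 2, i.e. the ASF heartbeat runs every other tick.  Chips needing the
 * 100 ms tick get timer_offset = HZ/10 and the multipliers scale to
 * 10 and 20.
 */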
10708
10709 static void tg3_timer_start(struct tg3 *tp)
10710 {
10711 tp->asf_counter = tp->asf_multiplier;
10712 tp->timer_counter = tp->timer_multiplier;
10713
10714 tp->timer.expires = jiffies + tp->timer_offset;
10715 add_timer(&tp->timer);
10716 }
10717
10718 static void tg3_timer_stop(struct tg3 *tp)
10719 {
10720 del_timer_sync(&tp->timer);
10721 }
10722
10723 /* Restart hardware after configuration changes, self-test, etc.
10724 * Invoked with tp->lock held.
10725 */
10726 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10727 __releases(tp->lock)
10728 __acquires(tp->lock)
10729 {
10730 int err;
10731
10732 err = tg3_init_hw(tp, reset_phy);
10733 if (err) {
10734 netdev_err(tp->dev,
10735 "Failed to re-initialize device, aborting\n");
10736 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10737 tg3_full_unlock(tp);
10738 tg3_timer_stop(tp);
10739 tp->irq_sync = 0;
10740 tg3_napi_enable(tp);
10741 dev_close(tp->dev);
10742 tg3_full_lock(tp, 0);
10743 }
10744 return err;
10745 }
10746
10747 static void tg3_reset_task(struct work_struct *work)
10748 {
10749 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10750 int err;
10751
10752 tg3_full_lock(tp, 0);
10753
10754 if (!netif_running(tp->dev)) {
10755 tg3_flag_clear(tp, RESET_TASK_PENDING);
10756 tg3_full_unlock(tp);
10757 return;
10758 }
10759
10760 tg3_full_unlock(tp);
10761
10762 tg3_phy_stop(tp);
10763
10764 tg3_netif_stop(tp);
10765
10766 tg3_full_lock(tp, 1);
10767
10768 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10769 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10770 tp->write32_rx_mbox = tg3_write_flush_reg32;
10771 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10772 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10773 }
10774
10775 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10776 err = tg3_init_hw(tp, true);
10777 if (err)
10778 goto out;
10779
10780 tg3_netif_start(tp);
10781
10782 out:
10783 tg3_full_unlock(tp);
10784
10785 if (!err)
10786 tg3_phy_start(tp);
10787
10788 tg3_flag_clear(tp, RESET_TASK_PENDING);
10789 }
10790
10791 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10792 {
10793 irq_handler_t fn;
10794 unsigned long flags;
10795 char *name;
10796 struct tg3_napi *tnapi = &tp->napi[irq_num];
10797
10798 if (tp->irq_cnt == 1)
10799 name = tp->dev->name;
10800 else {
10801 name = &tnapi->irq_lbl[0];
10802 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10803 name[IFNAMSIZ-1] = 0;
10804 }
10805
10806 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10807 fn = tg3_msi;
10808 if (tg3_flag(tp, 1SHOT_MSI))
10809 fn = tg3_msi_1shot;
10810 flags = 0;
10811 } else {
10812 fn = tg3_interrupt;
10813 if (tg3_flag(tp, TAGGED_STATUS))
10814 fn = tg3_interrupt_tagged;
10815 flags = IRQF_SHARED;
10816 }
10817
10818 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10819 }
10820
10821 static int tg3_test_interrupt(struct tg3 *tp)
10822 {
10823 struct tg3_napi *tnapi = &tp->napi[0];
10824 struct net_device *dev = tp->dev;
10825 int err, i, intr_ok = 0;
10826 u32 val;
10827
10828 if (!netif_running(dev))
10829 return -ENODEV;
10830
10831 tg3_disable_ints(tp);
10832
10833 free_irq(tnapi->irq_vec, tnapi);
10834
10835 /*
10836 * Turn off MSI one shot mode. Otherwise this test has no
10837 * observable way to know whether the interrupt was delivered.
10838 */
10839 if (tg3_flag(tp, 57765_PLUS)) {
10840 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10841 tw32(MSGINT_MODE, val);
10842 }
10843
10844 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10845 IRQF_SHARED, dev->name, tnapi);
10846 if (err)
10847 return err;
10848
10849 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10850 tg3_enable_ints(tp);
10851
10852 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10853 tnapi->coal_now);
10854
10855 for (i = 0; i < 5; i++) {
10856 u32 int_mbox, misc_host_ctrl;
10857
10858 int_mbox = tr32_mailbox(tnapi->int_mbox);
10859 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10860
10861 if ((int_mbox != 0) ||
10862 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10863 intr_ok = 1;
10864 break;
10865 }
10866
10867 if (tg3_flag(tp, 57765_PLUS) &&
10868 tnapi->hw_status->status_tag != tnapi->last_tag)
10869 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10870
10871 msleep(10);
10872 }
10873
10874 tg3_disable_ints(tp);
10875
10876 free_irq(tnapi->irq_vec, tnapi);
10877
10878 err = tg3_request_irq(tp, 0);
10879
10880 if (err)
10881 return err;
10882
10883 if (intr_ok) {
10884 /* Reenable MSI one shot mode. */
10885 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10886 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10887 tw32(MSGINT_MODE, val);
10888 }
10889 return 0;
10890 }
10891
10892 return -EIO;
10893 }
10894
10895 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
10896 * is successfully restored.
10897 */
10898 static int tg3_test_msi(struct tg3 *tp)
10899 {
10900 int err;
10901 u16 pci_cmd;
10902
10903 if (!tg3_flag(tp, USING_MSI))
10904 return 0;
10905
10906 /* Turn off SERR reporting in case MSI terminates with Master
10907 * Abort.
10908 */
10909 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10910 pci_write_config_word(tp->pdev, PCI_COMMAND,
10911 pci_cmd & ~PCI_COMMAND_SERR);
10912
10913 err = tg3_test_interrupt(tp);
10914
10915 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10916
10917 if (!err)
10918 return 0;
10919
10920 /* other failures */
10921 if (err != -EIO)
10922 return err;
10923
10924 /* MSI test failed, go back to INTx mode */
10925 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10926 "to INTx mode. Please report this failure to the PCI "
10927 "maintainer and include system chipset information\n");
10928
10929 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10930
10931 pci_disable_msi(tp->pdev);
10932
10933 tg3_flag_clear(tp, USING_MSI);
10934 tp->napi[0].irq_vec = tp->pdev->irq;
10935
10936 err = tg3_request_irq(tp, 0);
10937 if (err)
10938 return err;
10939
10940 /* Need to reset the chip because the MSI cycle may have terminated
10941 * with Master Abort.
10942 */
10943 tg3_full_lock(tp, 1);
10944
10945 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10946 err = tg3_init_hw(tp, true);
10947
10948 tg3_full_unlock(tp);
10949
10950 if (err)
10951 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10952
10953 return err;
10954 }
10955
10956 static int tg3_request_firmware(struct tg3 *tp)
10957 {
10958 const struct tg3_firmware_hdr *fw_hdr;
10959
10960 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10961 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10962 tp->fw_needed);
10963 return -ENOENT;
10964 }
10965
10966 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10967
10968 /* Firmware blob starts with version numbers, followed by
10969 * start address and _full_ length including BSS sections
10970 * (which must be longer than the actual data, of course).
10971 */
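/* A sketch of that header, assuming the tg3_firmware_hdr layout from
 * tg3.h (all fields big-endian):
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;		(unused for fragments)
 *		__be32 base_addr;	(load address in NIC memory)
 *		__be32 len;		(full length, including BSS)
 *	};
 *
 * hence the be32_to_cpu() conversion of fw_hdr->len below and the
 * comparison against tp->fw->size minus TG3_FW_HDR_LEN.
 */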
10972
10973 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10974 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10975 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10976 tp->fw_len, tp->fw_needed);
10977 release_firmware(tp->fw);
10978 tp->fw = NULL;
10979 return -EINVAL;
10980 }
10981
10982 /* We no longer need firmware; we have it. */
10983 tp->fw_needed = NULL;
10984 return 0;
10985 }
10986
10987 static u32 tg3_irq_count(struct tg3 *tp)
10988 {
10989 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10990
10991 if (irq_cnt > 1) {
10992 /* We want as many rx rings enabled as there are cpus.
10993 * In multiqueue MSI-X mode, the first MSI-X vector
10994 * only deals with link interrupts, etc, so we add
10995 * one to the number of vectors we are requesting.
10996 */
10997 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10998 }
10999
11000 return irq_cnt;
11001 }
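/* Worked example: with rxq_cnt = 4 and txq_cnt = 1, irq_cnt starts at
 * max(4, 1) = 4; since that is greater than 1, one extra vector is
 * added for the link/control interrupt, giving 5, which is then
 * clamped to tp->irq_max.  A single-queue setup stays at 1 and needs
 * no extra vector.
 */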
11002
11003 static bool tg3_enable_msix(struct tg3 *tp)
11004 {
11005 int i, rc;
11006 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11007
11008 tp->txq_cnt = tp->txq_req;
11009 tp->rxq_cnt = tp->rxq_req;
11010 if (!tp->rxq_cnt)
11011 tp->rxq_cnt = netif_get_num_default_rss_queues();
11012 if (tp->rxq_cnt > tp->rxq_max)
11013 tp->rxq_cnt = tp->rxq_max;
11014
11015 /* Disable multiple TX rings by default. Simple round-robin hardware
11016 * scheduling of the TX rings can cause starvation of rings with
11017 * small packets when other rings have TSO or jumbo packets.
11018 */
11019 if (!tp->txq_req)
11020 tp->txq_cnt = 1;
11021
11022 tp->irq_cnt = tg3_irq_count(tp);
11023
11024 for (i = 0; i < tp->irq_max; i++) {
11025 msix_ent[i].entry = i;
11026 msix_ent[i].vector = 0;
11027 }
11028
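	/* With this legacy API, pci_enable_msix() returns 0 on success, a
	 * negative errno on hard failure, or a positive count of the
	 * vectors actually available when the full request cannot be met;
	 * in that last case the call is retried with the reduced count and
	 * the queue configuration is shrunk to fit.
	 */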
11029 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11030 if (rc < 0) {
11031 return false;
11032 } else if (rc != 0) {
11033 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11034 return false;
11035 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11036 tp->irq_cnt, rc);
11037 tp->irq_cnt = rc;
11038 tp->rxq_cnt = max(rc - 1, 1);
11039 if (tp->txq_cnt)
11040 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11041 }
11042
11043 for (i = 0; i < tp->irq_max; i++)
11044 tp->napi[i].irq_vec = msix_ent[i].vector;
11045
11046 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11047 pci_disable_msix(tp->pdev);
11048 return false;
11049 }
11050
11051 if (tp->irq_cnt == 1)
11052 return true;
11053
11054 tg3_flag_set(tp, ENABLE_RSS);
11055
11056 if (tp->txq_cnt > 1)
11057 tg3_flag_set(tp, ENABLE_TSS);
11058
11059 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11060
11061 return true;
11062 }
11063
11064 static void tg3_ints_init(struct tg3 *tp)
11065 {
11066 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11067 !tg3_flag(tp, TAGGED_STATUS)) {
11068 /* All MSI-supporting chips should support tagged
11069 * status. Assert that this is the case.
11070 */
11071 netdev_warn(tp->dev,
11072 "MSI without TAGGED_STATUS? Not using MSI\n");
11073 goto defcfg;
11074 }
11075
11076 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11077 tg3_flag_set(tp, USING_MSIX);
11078 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11079 tg3_flag_set(tp, USING_MSI);
11080
11081 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11082 u32 msi_mode = tr32(MSGINT_MODE);
11083 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11084 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11085 if (!tg3_flag(tp, 1SHOT_MSI))
11086 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11087 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11088 }
11089 defcfg:
11090 if (!tg3_flag(tp, USING_MSIX)) {
11091 tp->irq_cnt = 1;
11092 tp->napi[0].irq_vec = tp->pdev->irq;
11093 }
11094
11095 if (tp->irq_cnt == 1) {
11096 tp->txq_cnt = 1;
11097 tp->rxq_cnt = 1;
11098 netif_set_real_num_tx_queues(tp->dev, 1);
11099 netif_set_real_num_rx_queues(tp->dev, 1);
11100 }
11101 }
11102
11103 static void tg3_ints_fini(struct tg3 *tp)
11104 {
11105 if (tg3_flag(tp, USING_MSIX))
11106 pci_disable_msix(tp->pdev);
11107 else if (tg3_flag(tp, USING_MSI))
11108 pci_disable_msi(tp->pdev);
11109 tg3_flag_clear(tp, USING_MSI);
11110 tg3_flag_clear(tp, USING_MSIX);
11111 tg3_flag_clear(tp, ENABLE_RSS);
11112 tg3_flag_clear(tp, ENABLE_TSS);
11113 }
11114
11115 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11116 bool init)
11117 {
11118 struct net_device *dev = tp->dev;
11119 int i, err;
11120
11121 /*
11122 * Setup interrupts first so we know how
11123 * many NAPI resources to allocate
11124 */
11125 tg3_ints_init(tp);
11126
11127 tg3_rss_check_indir_tbl(tp);
11128
11129 /* The placement of this call is tied
11130 * to the setup and use of Host TX descriptors.
11131 */
11132 err = tg3_alloc_consistent(tp);
11133 if (err)
11134 goto err_out1;
11135
11136 tg3_napi_init(tp);
11137
11138 tg3_napi_enable(tp);
11139
11140 for (i = 0; i < tp->irq_cnt; i++) {
11141 struct tg3_napi *tnapi = &tp->napi[i];
11142 err = tg3_request_irq(tp, i);
11143 if (err) {
11144 for (i--; i >= 0; i--) {
11145 tnapi = &tp->napi[i];
11146 free_irq(tnapi->irq_vec, tnapi);
11147 }
11148 goto err_out2;
11149 }
11150 }
11151
11152 tg3_full_lock(tp, 0);
11153
11154 err = tg3_init_hw(tp, reset_phy);
11155 if (err) {
11156 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11157 tg3_free_rings(tp);
11158 }
11159
11160 tg3_full_unlock(tp);
11161
11162 if (err)
11163 goto err_out3;
11164
11165 if (test_irq && tg3_flag(tp, USING_MSI)) {
11166 err = tg3_test_msi(tp);
11167
11168 if (err) {
11169 tg3_full_lock(tp, 0);
11170 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11171 tg3_free_rings(tp);
11172 tg3_full_unlock(tp);
11173
11174 goto err_out2;
11175 }
11176
11177 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11178 u32 val = tr32(PCIE_TRANSACTION_CFG);
11179
11180 tw32(PCIE_TRANSACTION_CFG,
11181 val | PCIE_TRANS_CFG_1SHOT_MSI);
11182 }
11183 }
11184
11185 tg3_phy_start(tp);
11186
11187 tg3_hwmon_open(tp);
11188
11189 tg3_full_lock(tp, 0);
11190
11191 tg3_timer_start(tp);
11192 tg3_flag_set(tp, INIT_COMPLETE);
11193 tg3_enable_ints(tp);
11194
11195 if (init)
11196 tg3_ptp_init(tp);
11197 else
11198 tg3_ptp_resume(tp);
11199
11200
11201 tg3_full_unlock(tp);
11202
11203 netif_tx_start_all_queues(dev);
11204
11205 /*
11206 * Reset the loopback feature if it was turned on while the device was
11207 * down; make sure that it's installed properly now.
11208 */
11209 if (dev->features & NETIF_F_LOOPBACK)
11210 tg3_set_loopback(dev, dev->features);
11211
11212 return 0;
11213
11214 err_out3:
11215 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11216 struct tg3_napi *tnapi = &tp->napi[i];
11217 free_irq(tnapi->irq_vec, tnapi);
11218 }
11219
11220 err_out2:
11221 tg3_napi_disable(tp);
11222 tg3_napi_fini(tp);
11223 tg3_free_consistent(tp);
11224
11225 err_out1:
11226 tg3_ints_fini(tp);
11227
11228 return err;
11229 }
11230
11231 static void tg3_stop(struct tg3 *tp)
11232 {
11233 int i;
11234
11235 tg3_reset_task_cancel(tp);
11236 tg3_netif_stop(tp);
11237
11238 tg3_timer_stop(tp);
11239
11240 tg3_hwmon_close(tp);
11241
11242 tg3_phy_stop(tp);
11243
11244 tg3_full_lock(tp, 1);
11245
11246 tg3_disable_ints(tp);
11247
11248 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11249 tg3_free_rings(tp);
11250 tg3_flag_clear(tp, INIT_COMPLETE);
11251
11252 tg3_full_unlock(tp);
11253
11254 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11255 struct tg3_napi *tnapi = &tp->napi[i];
11256 free_irq(tnapi->irq_vec, tnapi);
11257 }
11258
11259 tg3_ints_fini(tp);
11260
11261 tg3_napi_fini(tp);
11262
11263 tg3_free_consistent(tp);
11264 }
11265
11266 static int tg3_open(struct net_device *dev)
11267 {
11268 struct tg3 *tp = netdev_priv(dev);
11269 int err;
11270
11271 if (tp->fw_needed) {
11272 err = tg3_request_firmware(tp);
11273 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11274 if (err) {
11275 netdev_warn(tp->dev, "EEE capability disabled\n");
11276 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11277 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11278 netdev_warn(tp->dev, "EEE capability restored\n");
11279 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11280 }
11281 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11282 if (err)
11283 return err;
11284 } else if (err) {
11285 netdev_warn(tp->dev, "TSO capability disabled\n");
11286 tg3_flag_clear(tp, TSO_CAPABLE);
11287 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11288 netdev_notice(tp->dev, "TSO capability restored\n");
11289 tg3_flag_set(tp, TSO_CAPABLE);
11290 }
11291 }
11292
11293 tg3_carrier_off(tp);
11294
11295 err = tg3_power_up(tp);
11296 if (err)
11297 return err;
11298
11299 tg3_full_lock(tp, 0);
11300
11301 tg3_disable_ints(tp);
11302 tg3_flag_clear(tp, INIT_COMPLETE);
11303
11304 tg3_full_unlock(tp);
11305
11306 err = tg3_start(tp,
11307 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11308 true, true);
11309 if (err) {
11310 tg3_frob_aux_power(tp, false);
11311 pci_set_power_state(tp->pdev, PCI_D3hot);
11312 }
11313
11314 if (tg3_flag(tp, PTP_CAPABLE)) {
11315 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11316 &tp->pdev->dev);
11317 if (IS_ERR(tp->ptp_clock))
11318 tp->ptp_clock = NULL;
11319 }
11320
11321 return err;
11322 }
11323
11324 static int tg3_close(struct net_device *dev)
11325 {
11326 struct tg3 *tp = netdev_priv(dev);
11327
11328 tg3_ptp_fini(tp);
11329
11330 tg3_stop(tp);
11331
11332 /* Clear stats across close / open calls */
11333 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11334 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11335
11336 tg3_power_down(tp);
11337
11338 tg3_carrier_off(tp);
11339
11340 return 0;
11341 }
11342
11343 static inline u64 get_stat64(tg3_stat64_t *val)
11344 {
11345 return ((u64)val->high << 32) | ((u64)val->low);
11346 }
11347
11348 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11349 {
11350 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11351
11352 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11353 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11354 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11355 u32 val;
11356
11357 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11358 tg3_writephy(tp, MII_TG3_TEST1,
11359 val | MII_TG3_TEST1_CRC_EN);
11360 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11361 } else
11362 val = 0;
11363
11364 tp->phy_crc_errors += val;
11365
11366 return tp->phy_crc_errors;
11367 }
11368
11369 return get_stat64(&hw_stats->rx_fcs_errors);
11370 }
11371
11372 #define ESTAT_ADD(member) \
11373 estats->member = old_estats->member + \
11374 get_stat64(&hw_stats->member)
11375
11376 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11377 {
11378 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11379 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11380
11381 ESTAT_ADD(rx_octets);
11382 ESTAT_ADD(rx_fragments);
11383 ESTAT_ADD(rx_ucast_packets);
11384 ESTAT_ADD(rx_mcast_packets);
11385 ESTAT_ADD(rx_bcast_packets);
11386 ESTAT_ADD(rx_fcs_errors);
11387 ESTAT_ADD(rx_align_errors);
11388 ESTAT_ADD(rx_xon_pause_rcvd);
11389 ESTAT_ADD(rx_xoff_pause_rcvd);
11390 ESTAT_ADD(rx_mac_ctrl_rcvd);
11391 ESTAT_ADD(rx_xoff_entered);
11392 ESTAT_ADD(rx_frame_too_long_errors);
11393 ESTAT_ADD(rx_jabbers);
11394 ESTAT_ADD(rx_undersize_packets);
11395 ESTAT_ADD(rx_in_length_errors);
11396 ESTAT_ADD(rx_out_length_errors);
11397 ESTAT_ADD(rx_64_or_less_octet_packets);
11398 ESTAT_ADD(rx_65_to_127_octet_packets);
11399 ESTAT_ADD(rx_128_to_255_octet_packets);
11400 ESTAT_ADD(rx_256_to_511_octet_packets);
11401 ESTAT_ADD(rx_512_to_1023_octet_packets);
11402 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11403 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11404 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11405 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11406 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11407
11408 ESTAT_ADD(tx_octets);
11409 ESTAT_ADD(tx_collisions);
11410 ESTAT_ADD(tx_xon_sent);
11411 ESTAT_ADD(tx_xoff_sent);
11412 ESTAT_ADD(tx_flow_control);
11413 ESTAT_ADD(tx_mac_errors);
11414 ESTAT_ADD(tx_single_collisions);
11415 ESTAT_ADD(tx_mult_collisions);
11416 ESTAT_ADD(tx_deferred);
11417 ESTAT_ADD(tx_excessive_collisions);
11418 ESTAT_ADD(tx_late_collisions);
11419 ESTAT_ADD(tx_collide_2times);
11420 ESTAT_ADD(tx_collide_3times);
11421 ESTAT_ADD(tx_collide_4times);
11422 ESTAT_ADD(tx_collide_5times);
11423 ESTAT_ADD(tx_collide_6times);
11424 ESTAT_ADD(tx_collide_7times);
11425 ESTAT_ADD(tx_collide_8times);
11426 ESTAT_ADD(tx_collide_9times);
11427 ESTAT_ADD(tx_collide_10times);
11428 ESTAT_ADD(tx_collide_11times);
11429 ESTAT_ADD(tx_collide_12times);
11430 ESTAT_ADD(tx_collide_13times);
11431 ESTAT_ADD(tx_collide_14times);
11432 ESTAT_ADD(tx_collide_15times);
11433 ESTAT_ADD(tx_ucast_packets);
11434 ESTAT_ADD(tx_mcast_packets);
11435 ESTAT_ADD(tx_bcast_packets);
11436 ESTAT_ADD(tx_carrier_sense_errors);
11437 ESTAT_ADD(tx_discards);
11438 ESTAT_ADD(tx_errors);
11439
11440 ESTAT_ADD(dma_writeq_full);
11441 ESTAT_ADD(dma_write_prioq_full);
11442 ESTAT_ADD(rxbds_empty);
11443 ESTAT_ADD(rx_discards);
11444 ESTAT_ADD(rx_errors);
11445 ESTAT_ADD(rx_threshold_hit);
11446
11447 ESTAT_ADD(dma_readq_full);
11448 ESTAT_ADD(dma_read_prioq_full);
11449 ESTAT_ADD(tx_comp_queue_full);
11450
11451 ESTAT_ADD(ring_set_send_prod_index);
11452 ESTAT_ADD(ring_status_update);
11453 ESTAT_ADD(nic_irqs);
11454 ESTAT_ADD(nic_avoided_irqs);
11455 ESTAT_ADD(nic_tx_threshold_hit);
11456
11457 ESTAT_ADD(mbuf_lwm_thresh_hit);
11458 }
11459
11460 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11461 {
11462 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11463 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11464
11465 stats->rx_packets = old_stats->rx_packets +
11466 get_stat64(&hw_stats->rx_ucast_packets) +
11467 get_stat64(&hw_stats->rx_mcast_packets) +
11468 get_stat64(&hw_stats->rx_bcast_packets);
11469
11470 stats->tx_packets = old_stats->tx_packets +
11471 get_stat64(&hw_stats->tx_ucast_packets) +
11472 get_stat64(&hw_stats->tx_mcast_packets) +
11473 get_stat64(&hw_stats->tx_bcast_packets);
11474
11475 stats->rx_bytes = old_stats->rx_bytes +
11476 get_stat64(&hw_stats->rx_octets);
11477 stats->tx_bytes = old_stats->tx_bytes +
11478 get_stat64(&hw_stats->tx_octets);
11479
11480 stats->rx_errors = old_stats->rx_errors +
11481 get_stat64(&hw_stats->rx_errors);
11482 stats->tx_errors = old_stats->tx_errors +
11483 get_stat64(&hw_stats->tx_errors) +
11484 get_stat64(&hw_stats->tx_mac_errors) +
11485 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11486 get_stat64(&hw_stats->tx_discards);
11487
11488 stats->multicast = old_stats->multicast +
11489 get_stat64(&hw_stats->rx_mcast_packets);
11490 stats->collisions = old_stats->collisions +
11491 get_stat64(&hw_stats->tx_collisions);
11492
11493 stats->rx_length_errors = old_stats->rx_length_errors +
11494 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11495 get_stat64(&hw_stats->rx_undersize_packets);
11496
11497 stats->rx_over_errors = old_stats->rx_over_errors +
11498 get_stat64(&hw_stats->rxbds_empty);
11499 stats->rx_frame_errors = old_stats->rx_frame_errors +
11500 get_stat64(&hw_stats->rx_align_errors);
11501 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11502 get_stat64(&hw_stats->tx_discards);
11503 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11504 get_stat64(&hw_stats->tx_carrier_sense_errors);
11505
11506 stats->rx_crc_errors = old_stats->rx_crc_errors +
11507 tg3_calc_crc_errors(tp);
11508
11509 stats->rx_missed_errors = old_stats->rx_missed_errors +
11510 get_stat64(&hw_stats->rx_discards);
11511
11512 stats->rx_dropped = tp->rx_dropped;
11513 stats->tx_dropped = tp->tx_dropped;
11514 }
11515
11516 static int tg3_get_regs_len(struct net_device *dev)
11517 {
11518 return TG3_REG_BLK_SIZE;
11519 }
11520
11521 static void tg3_get_regs(struct net_device *dev,
11522 struct ethtool_regs *regs, void *_p)
11523 {
11524 struct tg3 *tp = netdev_priv(dev);
11525
11526 regs->version = 0;
11527
11528 memset(_p, 0, TG3_REG_BLK_SIZE);
11529
11530 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11531 return;
11532
11533 tg3_full_lock(tp, 0);
11534
11535 tg3_dump_legacy_regs(tp, (u32 *)_p);
11536
11537 tg3_full_unlock(tp);
11538 }
11539
11540 static int tg3_get_eeprom_len(struct net_device *dev)
11541 {
11542 struct tg3 *tp = netdev_priv(dev);
11543
11544 return tp->nvram_size;
11545 }
11546
11547 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11548 {
11549 struct tg3 *tp = netdev_priv(dev);
11550 int ret;
11551 u8 *pd;
11552 u32 i, offset, len, b_offset, b_count;
11553 __be32 val;
11554
11555 if (tg3_flag(tp, NO_NVRAM))
11556 return -EINVAL;
11557
11558 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11559 return -EAGAIN;
11560
11561 offset = eeprom->offset;
11562 len = eeprom->len;
11563 eeprom->len = 0;
11564
11565 eeprom->magic = TG3_EEPROM_MAGIC;
11566
11567 if (offset & 3) {
11568 /* adjustments to start on required 4 byte boundary */
11569 b_offset = offset & 3;
11570 b_count = 4 - b_offset;
11571 if (b_count > len) {
11572 /* i.e. offset=1 len=2 */
11573 b_count = len;
11574 }
11575 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11576 if (ret)
11577 return ret;
11578 memcpy(data, ((char *)&val) + b_offset, b_count);
11579 len -= b_count;
11580 offset += b_count;
11581 eeprom->len += b_count;
11582 }
11583
11584 /* read bytes up to the last 4 byte boundary */
11585 pd = &data[eeprom->len];
11586 for (i = 0; i < (len - (len & 3)); i += 4) {
11587 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11588 if (ret) {
11589 eeprom->len += i;
11590 return ret;
11591 }
11592 memcpy(pd + i, &val, 4);
11593 }
11594 eeprom->len += i;
11595
11596 if (len & 3) {
11597 /* read last bytes not ending on 4 byte boundary */
11598 pd = &data[eeprom->len];
11599 b_count = len & 3;
11600 b_offset = offset + len - b_count;
11601 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11602 if (ret)
11603 return ret;
11604 memcpy(pd, &val, b_count);
11605 eeprom->len += b_count;
11606 }
11607 return 0;
11608 }
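/* Worked example of the alignment handling in tg3_get_eeprom(): a
 * request with offset = 1 and len = 10 is served as three be32 NVRAM
 * reads -- one at offset 0 supplying bytes 1-3 (b_offset = 1,
 * b_count = 3), one aligned word covering bytes 4-7, and one at
 * offset 8 supplying bytes 8-10 -- so the NVRAM is only ever accessed
 * on 4-byte boundaries.
 */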
11609
11610 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11611 {
11612 struct tg3 *tp = netdev_priv(dev);
11613 int ret;
11614 u32 offset, len, b_offset, odd_len;
11615 u8 *buf;
11616 __be32 start, end;
11617
11618 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11619 return -EAGAIN;
11620
11621 if (tg3_flag(tp, NO_NVRAM) ||
11622 eeprom->magic != TG3_EEPROM_MAGIC)
11623 return -EINVAL;
11624
11625 offset = eeprom->offset;
11626 len = eeprom->len;
11627
11628 if ((b_offset = (offset & 3))) {
11629 /* adjustments to start on required 4 byte boundary */
11630 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11631 if (ret)
11632 return ret;
11633 len += b_offset;
11634 offset &= ~3;
11635 if (len < 4)
11636 len = 4;
11637 }
11638
11639 odd_len = 0;
11640 if (len & 3) {
11641 /* adjustments to end on required 4 byte boundary */
11642 odd_len = 1;
11643 len = (len + 3) & ~3;
11644 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11645 if (ret)
11646 return ret;
11647 }
11648
11649 buf = data;
11650 if (b_offset || odd_len) {
11651 buf = kmalloc(len, GFP_KERNEL);
11652 if (!buf)
11653 return -ENOMEM;
11654 if (b_offset)
11655 memcpy(buf, &start, 4);
11656 if (odd_len)
11657 memcpy(buf+len-4, &end, 4);
11658 memcpy(buf + b_offset, data, eeprom->len);
11659 }
11660
11661 ret = tg3_nvram_write_block(tp, offset, len, buf);
11662
11663 if (buf != data)
11664 kfree(buf);
11665
11666 return ret;
11667 }
11668
11669 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11670 {
11671 struct tg3 *tp = netdev_priv(dev);
11672
11673 if (tg3_flag(tp, USE_PHYLIB)) {
11674 struct phy_device *phydev;
11675 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11676 return -EAGAIN;
11677 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11678 return phy_ethtool_gset(phydev, cmd);
11679 }
11680
11681 cmd->supported = (SUPPORTED_Autoneg);
11682
11683 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11684 cmd->supported |= (SUPPORTED_1000baseT_Half |
11685 SUPPORTED_1000baseT_Full);
11686
11687 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11688 cmd->supported |= (SUPPORTED_100baseT_Half |
11689 SUPPORTED_100baseT_Full |
11690 SUPPORTED_10baseT_Half |
11691 SUPPORTED_10baseT_Full |
11692 SUPPORTED_TP);
11693 cmd->port = PORT_TP;
11694 } else {
11695 cmd->supported |= SUPPORTED_FIBRE;
11696 cmd->port = PORT_FIBRE;
11697 }
11698
11699 cmd->advertising = tp->link_config.advertising;
11700 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11701 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11702 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11703 cmd->advertising |= ADVERTISED_Pause;
11704 } else {
11705 cmd->advertising |= ADVERTISED_Pause |
11706 ADVERTISED_Asym_Pause;
11707 }
11708 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11709 cmd->advertising |= ADVERTISED_Asym_Pause;
11710 }
11711 }
11712 if (netif_running(dev) && tp->link_up) {
11713 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11714 cmd->duplex = tp->link_config.active_duplex;
11715 cmd->lp_advertising = tp->link_config.rmt_adv;
11716 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11717 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11718 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11719 else
11720 cmd->eth_tp_mdix = ETH_TP_MDI;
11721 }
11722 } else {
11723 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11724 cmd->duplex = DUPLEX_UNKNOWN;
11725 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11726 }
11727 cmd->phy_address = tp->phy_addr;
11728 cmd->transceiver = XCVR_INTERNAL;
11729 cmd->autoneg = tp->link_config.autoneg;
11730 cmd->maxtxpkt = 0;
11731 cmd->maxrxpkt = 0;
11732 return 0;
11733 }
11734
11735 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11736 {
11737 struct tg3 *tp = netdev_priv(dev);
11738 u32 speed = ethtool_cmd_speed(cmd);
11739
11740 if (tg3_flag(tp, USE_PHYLIB)) {
11741 struct phy_device *phydev;
11742 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11743 return -EAGAIN;
11744 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11745 return phy_ethtool_sset(phydev, cmd);
11746 }
11747
11748 if (cmd->autoneg != AUTONEG_ENABLE &&
11749 cmd->autoneg != AUTONEG_DISABLE)
11750 return -EINVAL;
11751
11752 if (cmd->autoneg == AUTONEG_DISABLE &&
11753 cmd->duplex != DUPLEX_FULL &&
11754 cmd->duplex != DUPLEX_HALF)
11755 return -EINVAL;
11756
11757 if (cmd->autoneg == AUTONEG_ENABLE) {
11758 u32 mask = ADVERTISED_Autoneg |
11759 ADVERTISED_Pause |
11760 ADVERTISED_Asym_Pause;
11761
11762 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11763 mask |= ADVERTISED_1000baseT_Half |
11764 ADVERTISED_1000baseT_Full;
11765
11766 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11767 mask |= ADVERTISED_100baseT_Half |
11768 ADVERTISED_100baseT_Full |
11769 ADVERTISED_10baseT_Half |
11770 ADVERTISED_10baseT_Full |
11771 ADVERTISED_TP;
11772 else
11773 mask |= ADVERTISED_FIBRE;
11774
11775 if (cmd->advertising & ~mask)
11776 return -EINVAL;
11777
11778 mask &= (ADVERTISED_1000baseT_Half |
11779 ADVERTISED_1000baseT_Full |
11780 ADVERTISED_100baseT_Half |
11781 ADVERTISED_100baseT_Full |
11782 ADVERTISED_10baseT_Half |
11783 ADVERTISED_10baseT_Full);
11784
11785 cmd->advertising &= mask;
11786 } else {
11787 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11788 if (speed != SPEED_1000)
11789 return -EINVAL;
11790
11791 if (cmd->duplex != DUPLEX_FULL)
11792 return -EINVAL;
11793 } else {
11794 if (speed != SPEED_100 &&
11795 speed != SPEED_10)
11796 return -EINVAL;
11797 }
11798 }
11799
11800 tg3_full_lock(tp, 0);
11801
11802 tp->link_config.autoneg = cmd->autoneg;
11803 if (cmd->autoneg == AUTONEG_ENABLE) {
11804 tp->link_config.advertising = (cmd->advertising |
11805 ADVERTISED_Autoneg);
11806 tp->link_config.speed = SPEED_UNKNOWN;
11807 tp->link_config.duplex = DUPLEX_UNKNOWN;
11808 } else {
11809 tp->link_config.advertising = 0;
11810 tp->link_config.speed = speed;
11811 tp->link_config.duplex = cmd->duplex;
11812 }
11813
11814 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11815
11816 tg3_warn_mgmt_link_flap(tp);
11817
11818 if (netif_running(dev))
11819 tg3_setup_phy(tp, true);
11820
11821 tg3_full_unlock(tp);
11822
11823 return 0;
11824 }
11825
11826 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11827 {
11828 struct tg3 *tp = netdev_priv(dev);
11829
11830 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11831 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11832 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11833 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11834 }
11835
11836 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11837 {
11838 struct tg3 *tp = netdev_priv(dev);
11839
11840 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11841 wol->supported = WAKE_MAGIC;
11842 else
11843 wol->supported = 0;
11844 wol->wolopts = 0;
11845 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11846 wol->wolopts = WAKE_MAGIC;
11847 memset(&wol->sopass, 0, sizeof(wol->sopass));
11848 }
11849
11850 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11851 {
11852 struct tg3 *tp = netdev_priv(dev);
11853 struct device *dp = &tp->pdev->dev;
11854
11855 if (wol->wolopts & ~WAKE_MAGIC)
11856 return -EINVAL;
11857 if ((wol->wolopts & WAKE_MAGIC) &&
11858 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11859 return -EINVAL;
11860
11861 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11862
11863 spin_lock_bh(&tp->lock);
11864 if (device_may_wakeup(dp))
11865 tg3_flag_set(tp, WOL_ENABLE);
11866 else
11867 tg3_flag_clear(tp, WOL_ENABLE);
11868 spin_unlock_bh(&tp->lock);
11869
11870 return 0;
11871 }
11872
11873 static u32 tg3_get_msglevel(struct net_device *dev)
11874 {
11875 struct tg3 *tp = netdev_priv(dev);
11876 return tp->msg_enable;
11877 }
11878
11879 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11880 {
11881 struct tg3 *tp = netdev_priv(dev);
11882 tp->msg_enable = value;
11883 }
11884
11885 static int tg3_nway_reset(struct net_device *dev)
11886 {
11887 struct tg3 *tp = netdev_priv(dev);
11888 int r;
11889
11890 if (!netif_running(dev))
11891 return -EAGAIN;
11892
11893 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11894 return -EINVAL;
11895
11896 tg3_warn_mgmt_link_flap(tp);
11897
11898 if (tg3_flag(tp, USE_PHYLIB)) {
11899 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11900 return -EAGAIN;
11901 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11902 } else {
11903 u32 bmcr;
11904
11905 spin_lock_bh(&tp->lock);
11906 r = -EINVAL;
11907 tg3_readphy(tp, MII_BMCR, &bmcr);	/* dummy read; the value used comes from the read below */
11908 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11909 ((bmcr & BMCR_ANENABLE) ||
11910 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11911 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11912 BMCR_ANENABLE);
11913 r = 0;
11914 }
11915 spin_unlock_bh(&tp->lock);
11916 }
11917
11918 return r;
11919 }
11920
11921 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11922 {
11923 struct tg3 *tp = netdev_priv(dev);
11924
11925 ering->rx_max_pending = tp->rx_std_ring_mask;
11926 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11927 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11928 else
11929 ering->rx_jumbo_max_pending = 0;
11930
11931 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11932
11933 ering->rx_pending = tp->rx_pending;
11934 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11935 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11936 else
11937 ering->rx_jumbo_pending = 0;
11938
11939 ering->tx_pending = tp->napi[0].tx_pending;
11940 }
11941
11942 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11943 {
11944 struct tg3 *tp = netdev_priv(dev);
11945 int i, irq_sync = 0, err = 0;
11946
11947 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11948 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11949 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11950 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11951 (tg3_flag(tp, TSO_BUG) &&
11952 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11953 return -EINVAL;
11954
11955 if (netif_running(dev)) {
11956 tg3_phy_stop(tp);
11957 tg3_netif_stop(tp);
11958 irq_sync = 1;
11959 }
11960
11961 tg3_full_lock(tp, irq_sync);
11962
11963 tp->rx_pending = ering->rx_pending;
11964
11965 if (tg3_flag(tp, MAX_RXPEND_64) &&
11966 tp->rx_pending > 63)
11967 tp->rx_pending = 63;
11968 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11969
11970 for (i = 0; i < tp->irq_max; i++)
11971 tp->napi[i].tx_pending = ering->tx_pending;
11972
11973 if (netif_running(dev)) {
11974 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11975 err = tg3_restart_hw(tp, false);
11976 if (!err)
11977 tg3_netif_start(tp);
11978 }
11979
11980 tg3_full_unlock(tp);
11981
11982 if (irq_sync && !err)
11983 tg3_phy_start(tp);
11984
11985 return err;
11986 }
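
/* Editorial sketch of the tx ring validation above: a maximally fragmented
 * skb consumes one descriptor per fragment plus one for the linear head, so
 * tx_pending must exceed MAX_SKB_FRAGS; the TSO_BUG workaround reserves
 * three times that headroom, mirroring the check in tg3_set_ringparam().
 * Hypothetical helper, not part of the driver.
 */
static inline bool tg3_example_tx_pending_ok(u32 tx_pending, bool tso_bug)
{
	u32 min_slots = tso_bug ? MAX_SKB_FRAGS * 3 : MAX_SKB_FRAGS;

	return tx_pending > min_slots &&
	       tx_pending <= TG3_TX_RING_SIZE - 1;
}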
11987
11988 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11989 {
11990 struct tg3 *tp = netdev_priv(dev);
11991
11992 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11993
11994 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11995 epause->rx_pause = 1;
11996 else
11997 epause->rx_pause = 0;
11998
11999 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12000 epause->tx_pause = 1;
12001 else
12002 epause->tx_pause = 0;
12003 }
12004
12005 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12006 {
12007 struct tg3 *tp = netdev_priv(dev);
12008 int err = 0;
12009
12010 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12011 tg3_warn_mgmt_link_flap(tp);
12012
12013 if (tg3_flag(tp, USE_PHYLIB)) {
12014 u32 newadv;
12015 struct phy_device *phydev;
12016
12017 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12018
12019 if (!(phydev->supported & SUPPORTED_Pause) ||
12020 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12021 (epause->rx_pause != epause->tx_pause)))
12022 return -EINVAL;
12023
12024 tp->link_config.flowctrl = 0;
12025 if (epause->rx_pause) {
12026 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12027
12028 if (epause->tx_pause) {
12029 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12030 newadv = ADVERTISED_Pause;
12031 } else
12032 newadv = ADVERTISED_Pause |
12033 ADVERTISED_Asym_Pause;
12034 } else if (epause->tx_pause) {
12035 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12036 newadv = ADVERTISED_Asym_Pause;
12037 } else
12038 newadv = 0;
12039
12040 if (epause->autoneg)
12041 tg3_flag_set(tp, PAUSE_AUTONEG);
12042 else
12043 tg3_flag_clear(tp, PAUSE_AUTONEG);
12044
12045 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12046 u32 oldadv = phydev->advertising &
12047 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12048 if (oldadv != newadv) {
12049 phydev->advertising &=
12050 ~(ADVERTISED_Pause |
12051 ADVERTISED_Asym_Pause);
12052 phydev->advertising |= newadv;
12053 if (phydev->autoneg) {
12054 /*
12055 * Always renegotiate the link to
12056 * inform our link partner of our
12057 * flow control settings, even if the
12058 * flow control is forced. Let
12059 * tg3_adjust_link() do the final
12060 * flow control setup.
12061 */
12062 return phy_start_aneg(phydev);
12063 }
12064 }
12065
12066 if (!epause->autoneg)
12067 tg3_setup_flow_control(tp, 0, 0);
12068 } else {
12069 tp->link_config.advertising &=
12070 ~(ADVERTISED_Pause |
12071 ADVERTISED_Asym_Pause);
12072 tp->link_config.advertising |= newadv;
12073 }
12074 } else {
12075 int irq_sync = 0;
12076
12077 if (netif_running(dev)) {
12078 tg3_netif_stop(tp);
12079 irq_sync = 1;
12080 }
12081
12082 tg3_full_lock(tp, irq_sync);
12083
12084 if (epause->autoneg)
12085 tg3_flag_set(tp, PAUSE_AUTONEG);
12086 else
12087 tg3_flag_clear(tp, PAUSE_AUTONEG);
12088 if (epause->rx_pause)
12089 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12090 else
12091 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12092 if (epause->tx_pause)
12093 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12094 else
12095 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12096
12097 if (netif_running(dev)) {
12098 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12099 err = tg3_restart_hw(tp, false);
12100 if (!err)
12101 tg3_netif_start(tp);
12102 }
12103
12104 tg3_full_unlock(tp);
12105 }
12106
12107 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12108
12109 return err;
12110 }
12111
12112 static int tg3_get_sset_count(struct net_device *dev, int sset)
12113 {
12114 switch (sset) {
12115 case ETH_SS_TEST:
12116 return TG3_NUM_TEST;
12117 case ETH_SS_STATS:
12118 return TG3_NUM_STATS;
12119 default:
12120 return -EOPNOTSUPP;
12121 }
12122 }
12123
12124 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12125 u32 *rules __always_unused)
12126 {
12127 struct tg3 *tp = netdev_priv(dev);
12128
12129 if (!tg3_flag(tp, SUPPORT_MSIX))
12130 return -EOPNOTSUPP;
12131
12132 switch (info->cmd) {
12133 case ETHTOOL_GRXRINGS:
12134 if (netif_running(tp->dev))
12135 info->data = tp->rxq_cnt;
12136 else {
12137 info->data = num_online_cpus();
12138 if (info->data > TG3_RSS_MAX_NUM_QS)
12139 info->data = TG3_RSS_MAX_NUM_QS;
12140 }
12141
12142 /* The first interrupt vector only
12143 * handles link interrupts.
12144 */
12145 info->data -= 1;
12146 return 0;
12147
12148 default:
12149 return -EOPNOTSUPP;
12150 }
12151 }
12152
12153 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12154 {
12155 u32 size = 0;
12156 struct tg3 *tp = netdev_priv(dev);
12157
12158 if (tg3_flag(tp, SUPPORT_MSIX))
12159 size = TG3_RSS_INDIR_TBL_SIZE;
12160
12161 return size;
12162 }
12163
12164 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12165 {
12166 struct tg3 *tp = netdev_priv(dev);
12167 int i;
12168
12169 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12170 indir[i] = tp->rss_ind_tbl[i];
12171
12172 return 0;
12173 }
12174
12175 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12176 {
12177 struct tg3 *tp = netdev_priv(dev);
12178 size_t i;
12179
12180 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12181 tp->rss_ind_tbl[i] = indir[i];
12182
12183 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12184 return 0;
12185
12186 /* It is legal to write the indirection
12187 * table while the device is running.
12188 */
12189 tg3_full_lock(tp, 0);
12190 tg3_rss_write_indir_tbl(tp);
12191 tg3_full_unlock(tp);
12192
12193 return 0;
12194 }
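
/* Editorial sketch: a uniform indirection table of the shape this ethtool
 * op installs, spreading the TG3_RSS_INDIR_TBL_SIZE slots round-robin
 * across the rx queues. Hypothetical helper, not part of the driver.
 */
static inline void tg3_example_fill_indir_tbl(u32 *indir, u32 nqueues)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = i % nqueues;
}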
12195
12196 static void tg3_get_channels(struct net_device *dev,
12197 struct ethtool_channels *channel)
12198 {
12199 struct tg3 *tp = netdev_priv(dev);
12200 u32 deflt_qs = netif_get_num_default_rss_queues();
12201
12202 channel->max_rx = tp->rxq_max;
12203 channel->max_tx = tp->txq_max;
12204
12205 if (netif_running(dev)) {
12206 channel->rx_count = tp->rxq_cnt;
12207 channel->tx_count = tp->txq_cnt;
12208 } else {
12209 if (tp->rxq_req)
12210 channel->rx_count = tp->rxq_req;
12211 else
12212 channel->rx_count = min(deflt_qs, tp->rxq_max);
12213
12214 if (tp->txq_req)
12215 channel->tx_count = tp->txq_req;
12216 else
12217 channel->tx_count = min(deflt_qs, tp->txq_max);
12218 }
12219 }
12220
12221 static int tg3_set_channels(struct net_device *dev,
12222 struct ethtool_channels *channel)
12223 {
12224 struct tg3 *tp = netdev_priv(dev);
12225
12226 if (!tg3_flag(tp, SUPPORT_MSIX))
12227 return -EOPNOTSUPP;
12228
12229 if (channel->rx_count > tp->rxq_max ||
12230 channel->tx_count > tp->txq_max)
12231 return -EINVAL;
12232
12233 tp->rxq_req = channel->rx_count;
12234 tp->txq_req = channel->tx_count;
12235
12236 if (!netif_running(dev))
12237 return 0;
12238
12239 tg3_stop(tp);
12240
12241 tg3_carrier_off(tp);
12242
12243 tg3_start(tp, true, false, false);
12244
12245 return 0;
12246 }
12247
12248 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12249 {
12250 switch (stringset) {
12251 case ETH_SS_STATS:
12252 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12253 break;
12254 case ETH_SS_TEST:
12255 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12256 break;
12257 default:
12258 WARN_ON(1); /* unknown stringset; should never happen */
12259 break;
12260 }
12261 }
12262
12263 static int tg3_set_phys_id(struct net_device *dev,
12264 enum ethtool_phys_id_state state)
12265 {
12266 struct tg3 *tp = netdev_priv(dev);
12267
12268 if (!netif_running(tp->dev))
12269 return -EAGAIN;
12270
12271 switch (state) {
12272 case ETHTOOL_ID_ACTIVE:
12273 return 1; /* cycle on/off once per second */
12274
12275 case ETHTOOL_ID_ON:
12276 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12277 LED_CTRL_1000MBPS_ON |
12278 LED_CTRL_100MBPS_ON |
12279 LED_CTRL_10MBPS_ON |
12280 LED_CTRL_TRAFFIC_OVERRIDE |
12281 LED_CTRL_TRAFFIC_BLINK |
12282 LED_CTRL_TRAFFIC_LED);
12283 break;
12284
12285 case ETHTOOL_ID_OFF:
12286 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12287 LED_CTRL_TRAFFIC_OVERRIDE);
12288 break;
12289
12290 case ETHTOOL_ID_INACTIVE:
12291 tw32(MAC_LED_CTRL, tp->led_ctrl);
12292 break;
12293 }
12294
12295 return 0;
12296 }
12297
12298 static void tg3_get_ethtool_stats(struct net_device *dev,
12299 struct ethtool_stats *estats, u64 *tmp_stats)
12300 {
12301 struct tg3 *tp = netdev_priv(dev);
12302
12303 if (tp->hw_stats)
12304 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12305 else
12306 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12307 }
12308
12309 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12310 {
12311 int i;
12312 __be32 *buf;
12313 u32 offset = 0, len = 0;
12314 u32 magic, val;
12315
12316 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12317 return NULL;
12318
12319 if (magic == TG3_EEPROM_MAGIC) {
12320 for (offset = TG3_NVM_DIR_START;
12321 offset < TG3_NVM_DIR_END;
12322 offset += TG3_NVM_DIRENT_SIZE) {
12323 if (tg3_nvram_read(tp, offset, &val))
12324 return NULL;
12325
12326 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12327 TG3_NVM_DIRTYPE_EXTVPD)
12328 break;
12329 }
12330
12331 if (offset != TG3_NVM_DIR_END) {
12332 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12333 if (tg3_nvram_read(tp, offset + 4, &offset))
12334 return NULL;
12335
12336 offset = tg3_nvram_logical_addr(tp, offset);
12337 }
12338 }
12339
12340 if (!offset || !len) {
12341 offset = TG3_NVM_VPD_OFF;
12342 len = TG3_NVM_VPD_LEN;
12343 }
12344
12345 buf = kmalloc(len, GFP_KERNEL);
12346 if (buf == NULL)
12347 return NULL;
12348
12349 if (magic == TG3_EEPROM_MAGIC) {
12350 for (i = 0; i < len; i += 4) {
12351 /* The data is in little-endian format in NVRAM.
12352 * Use the big-endian read routines to preserve
12353 * the byte order as it exists in NVRAM.
12354 */
12355 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12356 goto error;
12357 }
12358 } else {
12359 u8 *ptr;
12360 ssize_t cnt;
12361 unsigned int pos = 0;
12362
12363 ptr = (u8 *)&buf[0];
12364 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12365 cnt = pci_read_vpd(tp->pdev, pos,
12366 len - pos, ptr);
12367 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12368 cnt = 0;
12369 else if (cnt < 0)
12370 goto error;
12371 }
12372 if (pos != len)
12373 goto error;
12374 }
12375
12376 *vpdlen = len;
12377
12378 return buf;
12379
12380 error:
12381 kfree(buf);
12382 return NULL;
12383 }
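
/* Editorial sketch of why the big-endian reads above preserve NVRAM byte
 * order: a __be32 stores its bytes MSB-first in memory on any CPU, so the
 * buffer matches the raw NVRAM byte stream. Hypothetical helper, not part
 * of the driver.
 */
static inline void tg3_example_be32_byte_order(void)
{
	__be32 w = cpu_to_be32(0x11223344);
	u8 *p = (u8 *)&w;

	/* p[0]..p[3] are 0x11 0x22 0x33 0x44 regardless of CPU endianness */
	WARN_ON(p[0] != 0x11 || p[3] != 0x44);
}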
12384
12385 #define NVRAM_TEST_SIZE 0x100
12386 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12387 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12388 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12389 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12390 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12391 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12392 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12393 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12394
12395 static int tg3_test_nvram(struct tg3 *tp)
12396 {
12397 u32 csum, magic, len;
12398 __be32 *buf;
12399 int i, j, k, err = 0, size;
12400
12401 if (tg3_flag(tp, NO_NVRAM))
12402 return 0;
12403
12404 if (tg3_nvram_read(tp, 0, &magic) != 0)
12405 return -EIO;
12406
12407 if (magic == TG3_EEPROM_MAGIC)
12408 size = NVRAM_TEST_SIZE;
12409 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12410 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12411 TG3_EEPROM_SB_FORMAT_1) {
12412 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12413 case TG3_EEPROM_SB_REVISION_0:
12414 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12415 break;
12416 case TG3_EEPROM_SB_REVISION_2:
12417 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12418 break;
12419 case TG3_EEPROM_SB_REVISION_3:
12420 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12421 break;
12422 case TG3_EEPROM_SB_REVISION_4:
12423 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12424 break;
12425 case TG3_EEPROM_SB_REVISION_5:
12426 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12427 break;
12428 case TG3_EEPROM_SB_REVISION_6:
12429 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12430 break;
12431 default:
12432 return -EIO;
12433 }
12434 } else
12435 return 0;
12436 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12437 size = NVRAM_SELFBOOT_HW_SIZE;
12438 else
12439 return -EIO;
12440
12441 buf = kmalloc(size, GFP_KERNEL);
12442 if (buf == NULL)
12443 return -ENOMEM;
12444
12445 err = -EIO;
12446 for (i = 0, j = 0; i < size; i += 4, j++) {
12447 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12448 if (err)
12449 break;
12450 }
12451 if (i < size)
12452 goto out;
12453
12454 /* Selfboot format */
12455 magic = be32_to_cpu(buf[0]);
12456 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12457 TG3_EEPROM_MAGIC_FW) {
12458 u8 *buf8 = (u8 *) buf, csum8 = 0;
12459
12460 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12461 TG3_EEPROM_SB_REVISION_2) {
12462 /* For rev 2, the csum doesn't include the MBA. */
12463 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12464 csum8 += buf8[i];
12465 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12466 csum8 += buf8[i];
12467 } else {
12468 for (i = 0; i < size; i++)
12469 csum8 += buf8[i];
12470 }
12471
12472 if (csum8 == 0) {
12473 err = 0;
12474 goto out;
12475 }
12476
12477 err = -EIO;
12478 goto out;
12479 }
12480
12481 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12482 TG3_EEPROM_MAGIC_HW) {
12483 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12484 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12485 u8 *buf8 = (u8 *) buf;
12486
12487 /* Separate the parity bits and the data bytes. */
12488 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12489 if ((i == 0) || (i == 8)) {
12490 int l;
12491 u8 msk;
12492
12493 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12494 parity[k++] = buf8[i] & msk;
12495 i++;
12496 } else if (i == 16) {
12497 int l;
12498 u8 msk;
12499
12500 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12501 parity[k++] = buf8[i] & msk;
12502 i++;
12503
12504 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12505 parity[k++] = buf8[i] & msk;
12506 i++;
12507 }
12508 data[j++] = buf8[i];
12509 }
12510
12511 err = -EIO;
12512 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12513 u8 hw8 = hweight8(data[i]);
12514
12515 if ((hw8 & 0x1) && parity[i])
12516 goto out;
12517 else if (!(hw8 & 0x1) && !parity[i])
12518 goto out;
12519 }
12520 err = 0;
12521 goto out;
12522 }
12523
12524 err = -EIO;
12525
12526 /* Bootstrap checksum at offset 0x10 */
12527 csum = calc_crc((unsigned char *) buf, 0x10);
12528 if (csum != le32_to_cpu(buf[0x10/4]))
12529 goto out;
12530
12531 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12532 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12533 if (csum != le32_to_cpu(buf[0xfc/4]))
12534 goto out;
12535
12536 kfree(buf);
12537
12538 buf = tg3_vpd_readblock(tp, &len);
12539 if (!buf)
12540 return -ENOMEM;
12541
12542 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12543 if (i > 0) {
12544 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12545 if (j < 0)
12546 goto out;
12547
12548 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12549 goto out;
12550
12551 i += PCI_VPD_LRDT_TAG_SIZE;
12552 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12553 PCI_VPD_RO_KEYWORD_CHKSUM);
12554 if (j > 0) {
12555 u8 csum8 = 0;
12556
12557 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12558
12559 for (i = 0; i <= j; i++)
12560 csum8 += ((u8 *)buf)[i];
12561
12562 if (csum8)
12563 goto out;
12564 }
12565 }
12566
12567 err = 0;
12568
12569 out:
12570 kfree(buf);
12571 return err;
12572 }
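
/* Editorial sketch of the selfboot parity rule enforced above: each data
 * byte plus its parity bit must carry an odd number of set bits.
 * Hypothetical helper, not part of the driver.
 */
static inline bool tg3_example_odd_parity_ok(u8 data, u8 parity_bit)
{
	/* hweight8() counts the set bits in the data byte */
	return ((hweight8(data) & 1) ^ !!parity_bit) == 1;
}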
12573
12574 #define TG3_SERDES_TIMEOUT_SEC 2
12575 #define TG3_COPPER_TIMEOUT_SEC 6
12576
12577 static int tg3_test_link(struct tg3 *tp)
12578 {
12579 int i, max;
12580
12581 if (!netif_running(tp->dev))
12582 return -ENODEV;
12583
12584 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12585 max = TG3_SERDES_TIMEOUT_SEC;
12586 else
12587 max = TG3_COPPER_TIMEOUT_SEC;
12588
12589 for (i = 0; i < max; i++) {
12590 if (tp->link_up)
12591 return 0;
12592
12593 if (msleep_interruptible(1000))
12594 break;
12595 }
12596
12597 return -EIO;
12598 }
12599
12600 /* Only test the commonly used registers */
12601 static int tg3_test_registers(struct tg3 *tp)
12602 {
12603 int i, is_5705, is_5750;
12604 u32 offset, read_mask, write_mask, val, save_val, read_val;
12605 static struct {
12606 u16 offset;
12607 u16 flags;
12608 #define TG3_FL_5705 0x1
12609 #define TG3_FL_NOT_5705 0x2
12610 #define TG3_FL_NOT_5788 0x4
12611 #define TG3_FL_NOT_5750 0x8
12612 u32 read_mask;
12613 u32 write_mask;
12614 } reg_tbl[] = {
12615 /* MAC Control Registers */
12616 { MAC_MODE, TG3_FL_NOT_5705,
12617 0x00000000, 0x00ef6f8c },
12618 { MAC_MODE, TG3_FL_5705,
12619 0x00000000, 0x01ef6b8c },
12620 { MAC_STATUS, TG3_FL_NOT_5705,
12621 0x03800107, 0x00000000 },
12622 { MAC_STATUS, TG3_FL_5705,
12623 0x03800100, 0x00000000 },
12624 { MAC_ADDR_0_HIGH, 0x0000,
12625 0x00000000, 0x0000ffff },
12626 { MAC_ADDR_0_LOW, 0x0000,
12627 0x00000000, 0xffffffff },
12628 { MAC_RX_MTU_SIZE, 0x0000,
12629 0x00000000, 0x0000ffff },
12630 { MAC_TX_MODE, 0x0000,
12631 0x00000000, 0x00000070 },
12632 { MAC_TX_LENGTHS, 0x0000,
12633 0x00000000, 0x00003fff },
12634 { MAC_RX_MODE, TG3_FL_NOT_5705,
12635 0x00000000, 0x000007fc },
12636 { MAC_RX_MODE, TG3_FL_5705,
12637 0x00000000, 0x000007dc },
12638 { MAC_HASH_REG_0, 0x0000,
12639 0x00000000, 0xffffffff },
12640 { MAC_HASH_REG_1, 0x0000,
12641 0x00000000, 0xffffffff },
12642 { MAC_HASH_REG_2, 0x0000,
12643 0x00000000, 0xffffffff },
12644 { MAC_HASH_REG_3, 0x0000,
12645 0x00000000, 0xffffffff },
12646
12647 /* Receive Data and Receive BD Initiator Control Registers. */
12648 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12649 0x00000000, 0xffffffff },
12650 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12651 0x00000000, 0xffffffff },
12652 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12653 0x00000000, 0x00000003 },
12654 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12655 0x00000000, 0xffffffff },
12656 { RCVDBDI_STD_BD+0, 0x0000,
12657 0x00000000, 0xffffffff },
12658 { RCVDBDI_STD_BD+4, 0x0000,
12659 0x00000000, 0xffffffff },
12660 { RCVDBDI_STD_BD+8, 0x0000,
12661 0x00000000, 0xffff0002 },
12662 { RCVDBDI_STD_BD+0xc, 0x0000,
12663 0x00000000, 0xffffffff },
12664
12665 /* Receive BD Initiator Control Registers. */
12666 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12667 0x00000000, 0xffffffff },
12668 { RCVBDI_STD_THRESH, TG3_FL_5705,
12669 0x00000000, 0x000003ff },
12670 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12671 0x00000000, 0xffffffff },
12672
12673 /* Host Coalescing Control Registers. */
12674 { HOSTCC_MODE, TG3_FL_NOT_5705,
12675 0x00000000, 0x00000004 },
12676 { HOSTCC_MODE, TG3_FL_5705,
12677 0x00000000, 0x000000f6 },
12678 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12679 0x00000000, 0xffffffff },
12680 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12681 0x00000000, 0x000003ff },
12682 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12683 0x00000000, 0xffffffff },
12684 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12685 0x00000000, 0x000003ff },
12686 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12687 0x00000000, 0xffffffff },
12688 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12689 0x00000000, 0x000000ff },
12690 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12691 0x00000000, 0xffffffff },
12692 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12693 0x00000000, 0x000000ff },
12694 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12695 0x00000000, 0xffffffff },
12696 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12697 0x00000000, 0xffffffff },
12698 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12699 0x00000000, 0xffffffff },
12700 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12701 0x00000000, 0x000000ff },
12702 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12703 0x00000000, 0xffffffff },
12704 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12705 0x00000000, 0x000000ff },
12706 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12707 0x00000000, 0xffffffff },
12708 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12709 0x00000000, 0xffffffff },
12710 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12711 0x00000000, 0xffffffff },
12712 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12713 0x00000000, 0xffffffff },
12714 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12715 0x00000000, 0xffffffff },
12716 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12717 0xffffffff, 0x00000000 },
12718 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12719 0xffffffff, 0x00000000 },
12720
12721 /* Buffer Manager Control Registers. */
12722 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12723 0x00000000, 0x007fff80 },
12724 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12725 0x00000000, 0x007fffff },
12726 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12727 0x00000000, 0x0000003f },
12728 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12729 0x00000000, 0x000001ff },
12730 { BUFMGR_MB_HIGH_WATER, 0x0000,
12731 0x00000000, 0x000001ff },
12732 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12733 0xffffffff, 0x00000000 },
12734 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12735 0xffffffff, 0x00000000 },
12736
12737 /* Mailbox Registers */
12738 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12739 0x00000000, 0x000001ff },
12740 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12741 0x00000000, 0x000001ff },
12742 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12743 0x00000000, 0x000007ff },
12744 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12745 0x00000000, 0x000001ff },
12746
12747 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12748 };
12749
12750 is_5705 = is_5750 = 0;
12751 if (tg3_flag(tp, 5705_PLUS)) {
12752 is_5705 = 1;
12753 if (tg3_flag(tp, 5750_PLUS))
12754 is_5750 = 1;
12755 }
12756
12757 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12758 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12759 continue;
12760
12761 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12762 continue;
12763
12764 if (tg3_flag(tp, IS_5788) &&
12765 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12766 continue;
12767
12768 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12769 continue;
12770
12771 offset = (u32) reg_tbl[i].offset;
12772 read_mask = reg_tbl[i].read_mask;
12773 write_mask = reg_tbl[i].write_mask;
12774
12775 /* Save the original register content */
12776 save_val = tr32(offset);
12777
12778 /* Determine the read-only value. */
12779 read_val = save_val & read_mask;
12780
12781 /* Write zero to the register, then make sure the read-only bits
12782 * are not changed and the read/write bits are all zeros.
12783 */
12784 tw32(offset, 0);
12785
12786 val = tr32(offset);
12787
12788 /* Test the read-only and read/write bits. */
12789 if (((val & read_mask) != read_val) || (val & write_mask))
12790 goto out;
12791
12792 /* Write ones to all the bits defined by RdMask and WrMask, then
12793 * make sure the read-only bits are not changed and the
12794 * read/write bits are all ones.
12795 */
12796 tw32(offset, read_mask | write_mask);
12797
12798 val = tr32(offset);
12799
12800 /* Test the read-only bits. */
12801 if ((val & read_mask) != read_val)
12802 goto out;
12803
12804 /* Test the read/write bits. */
12805 if ((val & write_mask) != write_mask)
12806 goto out;
12807
12808 tw32(offset, save_val);
12809 }
12810
12811 return 0;
12812
12813 out:
12814 if (netif_msg_hw(tp))
12815 netdev_err(tp->dev,
12816 "Register test failed at offset %x\n", offset);
12817 tw32(offset, save_val);
12818 return -EIO;
12819 }
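
/* Editorial condensation of the two probes above: after writing zero, the
 * read-only bits must keep their saved value and the read/write bits must
 * be clear; after writing all ones, the read-only bits must still be intact
 * and every read/write bit must be set. Hypothetical helper, not part of
 * the driver.
 */
static inline bool tg3_example_reg_probe_ok(u32 after_zero, u32 after_ones,
					    u32 read_val, u32 read_mask,
					    u32 write_mask)
{
	if ((after_zero & read_mask) != read_val ||
	    (after_zero & write_mask))
		return false;

	return (after_ones & read_mask) == read_val &&
	       (after_ones & write_mask) == write_mask;
}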
12820
12821 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12822 {
12823 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12824 int i;
12825 u32 j;
12826
12827 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12828 for (j = 0; j < len; j += 4) {
12829 u32 val;
12830
12831 tg3_write_mem(tp, offset + j, test_pattern[i]);
12832 tg3_read_mem(tp, offset + j, &val);
12833 if (val != test_pattern[i])
12834 return -EIO;
12835 }
12836 }
12837 return 0;
12838 }
12839
12840 static int tg3_test_memory(struct tg3 *tp)
12841 {
12842 static struct mem_entry {
12843 u32 offset;
12844 u32 len;
12845 } mem_tbl_570x[] = {
12846 { 0x00000000, 0x00b50},
12847 { 0x00002000, 0x1c000},
12848 { 0xffffffff, 0x00000}
12849 }, mem_tbl_5705[] = {
12850 { 0x00000100, 0x0000c},
12851 { 0x00000200, 0x00008},
12852 { 0x00004000, 0x00800},
12853 { 0x00006000, 0x01000},
12854 { 0x00008000, 0x02000},
12855 { 0x00010000, 0x0e000},
12856 { 0xffffffff, 0x00000}
12857 }, mem_tbl_5755[] = {
12858 { 0x00000200, 0x00008},
12859 { 0x00004000, 0x00800},
12860 { 0x00006000, 0x00800},
12861 { 0x00008000, 0x02000},
12862 { 0x00010000, 0x0c000},
12863 { 0xffffffff, 0x00000}
12864 }, mem_tbl_5906[] = {
12865 { 0x00000200, 0x00008},
12866 { 0x00004000, 0x00400},
12867 { 0x00006000, 0x00400},
12868 { 0x00008000, 0x01000},
12869 { 0x00010000, 0x01000},
12870 { 0xffffffff, 0x00000}
12871 }, mem_tbl_5717[] = {
12872 { 0x00000200, 0x00008},
12873 { 0x00010000, 0x0a000},
12874 { 0x00020000, 0x13c00},
12875 { 0xffffffff, 0x00000}
12876 }, mem_tbl_57765[] = {
12877 { 0x00000200, 0x00008},
12878 { 0x00004000, 0x00800},
12879 { 0x00006000, 0x09800},
12880 { 0x00010000, 0x0a000},
12881 { 0xffffffff, 0x00000}
12882 };
12883 struct mem_entry *mem_tbl;
12884 int err = 0;
12885 int i;
12886
12887 if (tg3_flag(tp, 5717_PLUS))
12888 mem_tbl = mem_tbl_5717;
12889 else if (tg3_flag(tp, 57765_CLASS) ||
12890 tg3_asic_rev(tp) == ASIC_REV_5762)
12891 mem_tbl = mem_tbl_57765;
12892 else if (tg3_flag(tp, 5755_PLUS))
12893 mem_tbl = mem_tbl_5755;
12894 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12895 mem_tbl = mem_tbl_5906;
12896 else if (tg3_flag(tp, 5705_PLUS))
12897 mem_tbl = mem_tbl_5705;
12898 else
12899 mem_tbl = mem_tbl_570x;
12900
12901 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12902 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12903 if (err)
12904 break;
12905 }
12906
12907 return err;
12908 }
12909
12910 #define TG3_TSO_MSS 500
12911
12912 #define TG3_TSO_IP_HDR_LEN 20
12913 #define TG3_TSO_TCP_HDR_LEN 20
12914 #define TG3_TSO_TCP_OPT_LEN 12
12915
12916 static const u8 tg3_tso_header[] = {
12917 0x08, 0x00,
12918 0x45, 0x00, 0x00, 0x00,
12919 0x00, 0x00, 0x40, 0x00,
12920 0x40, 0x06, 0x00, 0x00,
12921 0x0a, 0x00, 0x00, 0x01,
12922 0x0a, 0x00, 0x00, 0x02,
12923 0x0d, 0x00, 0xe0, 0x00,
12924 0x00, 0x00, 0x01, 0x00,
12925 0x00, 0x00, 0x02, 0x00,
12926 0x80, 0x10, 0x10, 0x00,
12927 0x14, 0x09, 0x00, 0x00,
12928 0x01, 0x01, 0x08, 0x0a,
12929 0x11, 0x11, 0x11, 0x11,
12930 0x11, 0x11, 0x11, 0x11,
12931 };
12932
12933 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12934 {
12935 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12936 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12937 u32 budget;
12938 struct sk_buff *skb;
12939 u8 *tx_data, *rx_data;
12940 dma_addr_t map;
12941 int num_pkts, tx_len, rx_len, i, err;
12942 struct tg3_rx_buffer_desc *desc;
12943 struct tg3_napi *tnapi, *rnapi;
12944 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12945
12946 tnapi = &tp->napi[0];
12947 rnapi = &tp->napi[0];
12948 if (tp->irq_cnt > 1) {
12949 if (tg3_flag(tp, ENABLE_RSS))
12950 rnapi = &tp->napi[1];
12951 if (tg3_flag(tp, ENABLE_TSS))
12952 tnapi = &tp->napi[1];
12953 }
12954 coal_now = tnapi->coal_now | rnapi->coal_now;
12955
12956 err = -EIO;
12957
12958 tx_len = pktsz;
12959 skb = netdev_alloc_skb(tp->dev, tx_len);
12960 if (!skb)
12961 return -ENOMEM;
12962
12963 tx_data = skb_put(skb, tx_len);
12964 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
12965 memset(tx_data + ETH_ALEN, 0x0, 8);
12966
12967 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12968
12969 if (tso_loopback) {
12970 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12971
12972 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12973 TG3_TSO_TCP_OPT_LEN;
12974
12975 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12976 sizeof(tg3_tso_header));
12977 mss = TG3_TSO_MSS;
12978
12979 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12980 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12981
12982 /* Set the total length field in the IP header */
12983 iph->tot_len = htons((u16)(mss + hdr_len));
12984
12985 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12986 TXD_FLAG_CPU_POST_DMA);
12987
12988 if (tg3_flag(tp, HW_TSO_1) ||
12989 tg3_flag(tp, HW_TSO_2) ||
12990 tg3_flag(tp, HW_TSO_3)) {
12991 struct tcphdr *th;
12992 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12993 th = (struct tcphdr *)&tx_data[val];
12994 th->check = 0;
12995 } else
12996 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12997
12998 if (tg3_flag(tp, HW_TSO_3)) {
12999 mss |= (hdr_len & 0xc) << 12;
13000 if (hdr_len & 0x10)
13001 base_flags |= 0x00000010;
13002 base_flags |= (hdr_len & 0x3e0) << 5;
13003 } else if (tg3_flag(tp, HW_TSO_2))
13004 mss |= hdr_len << 9;
13005 else if (tg3_flag(tp, HW_TSO_1) ||
13006 tg3_asic_rev(tp) == ASIC_REV_5705) {
13007 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13008 } else {
13009 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13010 }
13011
13012 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13013 } else {
13014 num_pkts = 1;
13015 data_off = ETH_HLEN;
13016
13017 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13018 tx_len > VLAN_ETH_FRAME_LEN)
13019 base_flags |= TXD_FLAG_JMB_PKT;
13020 }
13021
13022 for (i = data_off; i < tx_len; i++)
13023 tx_data[i] = (u8) (i & 0xff);
13024
13025 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13026 if (pci_dma_mapping_error(tp->pdev, map)) {
13027 dev_kfree_skb(skb);
13028 return -EIO;
13029 }
13030
13031 val = tnapi->tx_prod;
13032 tnapi->tx_buffers[val].skb = skb;
13033 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13034
13035 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13036 rnapi->coal_now);
13037
13038 udelay(10);
13039
13040 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13041
13042 budget = tg3_tx_avail(tnapi);
13043 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13044 base_flags | TXD_FLAG_END, mss, 0)) {
13045 tnapi->tx_buffers[val].skb = NULL;
13046 dev_kfree_skb(skb);
13047 return -EIO;
13048 }
13049
13050 tnapi->tx_prod++;
13051
13052 /* Sync BD data before updating mailbox */
13053 wmb();
13054
13055 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13056 tr32_mailbox(tnapi->prodmbox);
13057
13058 udelay(10);
13059
13060 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13061 for (i = 0; i < 35; i++) {
13062 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13063 coal_now);
13064
13065 udelay(10);
13066
13067 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13068 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13069 if ((tx_idx == tnapi->tx_prod) &&
13070 (rx_idx == (rx_start_idx + num_pkts)))
13071 break;
13072 }
13073
13074 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13075 dev_kfree_skb(skb);
13076
13077 if (tx_idx != tnapi->tx_prod)
13078 goto out;
13079
13080 if (rx_idx != rx_start_idx + num_pkts)
13081 goto out;
13082
13083 val = data_off;
13084 while (rx_idx != rx_start_idx) {
13085 desc = &rnapi->rx_rcb[rx_start_idx++];
13086 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13087 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13088
13089 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13090 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13091 goto out;
13092
13093 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13094 - ETH_FCS_LEN;
13095
13096 if (!tso_loopback) {
13097 if (rx_len != tx_len)
13098 goto out;
13099
13100 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13101 if (opaque_key != RXD_OPAQUE_RING_STD)
13102 goto out;
13103 } else {
13104 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13105 goto out;
13106 }
13107 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13108 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13109 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13110 goto out;
13111 }
13112
13113 if (opaque_key == RXD_OPAQUE_RING_STD) {
13114 rx_data = tpr->rx_std_buffers[desc_idx].data;
13115 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13116 mapping);
13117 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13118 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13119 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13120 mapping);
13121 } else
13122 goto out;
13123
13124 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13125 PCI_DMA_FROMDEVICE);
13126
13127 rx_data += TG3_RX_OFFSET(tp);
13128 for (i = data_off; i < rx_len; i++, val++) {
13129 if (*(rx_data + i) != (u8) (val & 0xff))
13130 goto out;
13131 }
13132 }
13133
13134 err = 0;
13135
13136 /* tg3_free_rings will unmap and free the rx_data */
13137 out:
13138 return err;
13139 }
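
/* Editorial worked example of the HW_TSO_3 header-length encoding above,
 * using the 52-byte header built from tg3_tso_header (20B IP + 20B TCP +
 * 12B options): hdr_len = 0x34, so bits 2-3 land in mss bits 14-15
 * (mss |= 0x4000), bit 4 sets base_flags bit 4, and bits 5-9 land in
 * base_flags bits 10-14 (base_flags |= 0x400). Hypothetical helper, not
 * part of the driver.
 */
static inline void tg3_example_hw_tso3_encode(u32 hdr_len, u32 *mss,
					      u32 *base_flags)
{
	*mss |= (hdr_len & 0xc) << 12;
	if (hdr_len & 0x10)
		*base_flags |= 0x00000010;
	*base_flags |= (hdr_len & 0x3e0) << 5;
}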
13140
13141 #define TG3_STD_LOOPBACK_FAILED 1
13142 #define TG3_JMB_LOOPBACK_FAILED 2
13143 #define TG3_TSO_LOOPBACK_FAILED 4
13144 #define TG3_LOOPBACK_FAILED \
13145 (TG3_STD_LOOPBACK_FAILED | \
13146 TG3_JMB_LOOPBACK_FAILED | \
13147 TG3_TSO_LOOPBACK_FAILED)
13148
13149 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13150 {
13151 int err = -EIO;
13152 u32 eee_cap;
13153 u32 jmb_pkt_sz = 9000;
13154
13155 if (tp->dma_limit)
13156 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13157
13158 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13159 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13160
13161 if (!netif_running(tp->dev)) {
13162 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13163 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13164 if (do_extlpbk)
13165 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13166 goto done;
13167 }
13168
13169 err = tg3_reset_hw(tp, true);
13170 if (err) {
13171 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13172 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13173 if (do_extlpbk)
13174 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13175 goto done;
13176 }
13177
13178 if (tg3_flag(tp, ENABLE_RSS)) {
13179 int i;
13180
13181 /* Reroute all rx packets to the 1st queue */
13182 for (i = MAC_RSS_INDIR_TBL_0;
13183 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13184 tw32(i, 0x0);
13185 }
13186
13187 /* HW errata - mac loopback fails in some cases on 5780.
13188 * Normal traffic and PHY loopback are not affected by
13189 * errata. Also, the MAC loopback test is deprecated for
13190 * all newer ASIC revisions.
13191 */
13192 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13193 !tg3_flag(tp, CPMU_PRESENT)) {
13194 tg3_mac_loopback(tp, true);
13195
13196 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13197 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13198
13199 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13200 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13201 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13202
13203 tg3_mac_loopback(tp, false);
13204 }
13205
13206 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13207 !tg3_flag(tp, USE_PHYLIB)) {
13208 int i;
13209
13210 tg3_phy_lpbk_set(tp, 0, false);
13211
13212 /* Wait for link */
13213 for (i = 0; i < 100; i++) {
13214 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13215 break;
13216 mdelay(1);
13217 }
13218
13219 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13220 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13221 if (tg3_flag(tp, TSO_CAPABLE) &&
13222 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13223 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13224 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13225 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13226 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13227
13228 if (do_extlpbk) {
13229 tg3_phy_lpbk_set(tp, 0, true);
13230
13231 /* All link indications report up, but the hardware
13232 * isn't really ready for about 20 msec. Double it
13233 * to be sure.
13234 */
13235 mdelay(40);
13236
13237 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13238 data[TG3_EXT_LOOPB_TEST] |=
13239 TG3_STD_LOOPBACK_FAILED;
13240 if (tg3_flag(tp, TSO_CAPABLE) &&
13241 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13242 data[TG3_EXT_LOOPB_TEST] |=
13243 TG3_TSO_LOOPBACK_FAILED;
13244 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13245 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13246 data[TG3_EXT_LOOPB_TEST] |=
13247 TG3_JMB_LOOPBACK_FAILED;
13248 }
13249
13250 /* Re-enable gphy autopowerdown. */
13251 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13252 tg3_phy_toggle_apd(tp, true);
13253 }
13254
13255 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13256 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13257
13258 done:
13259 tp->phy_flags |= eee_cap;
13260
13261 return err;
13262 }
13263
13264 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13265 u64 *data)
13266 {
13267 struct tg3 *tp = netdev_priv(dev);
13268 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13269
13270 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13271 tg3_power_up(tp)) {
13272 etest->flags |= ETH_TEST_FL_FAILED;
13273 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13274 return;
13275 }
13276
13277 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13278
13279 if (tg3_test_nvram(tp) != 0) {
13280 etest->flags |= ETH_TEST_FL_FAILED;
13281 data[TG3_NVRAM_TEST] = 1;
13282 }
13283 if (!doextlpbk && tg3_test_link(tp)) {
13284 etest->flags |= ETH_TEST_FL_FAILED;
13285 data[TG3_LINK_TEST] = 1;
13286 }
13287 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13288 int err, err2 = 0, irq_sync = 0;
13289
13290 if (netif_running(dev)) {
13291 tg3_phy_stop(tp);
13292 tg3_netif_stop(tp);
13293 irq_sync = 1;
13294 }
13295
13296 tg3_full_lock(tp, irq_sync);
13297 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13298 err = tg3_nvram_lock(tp);
13299 tg3_halt_cpu(tp, RX_CPU_BASE);
13300 if (!tg3_flag(tp, 5705_PLUS))
13301 tg3_halt_cpu(tp, TX_CPU_BASE);
13302 if (!err)
13303 tg3_nvram_unlock(tp);
13304
13305 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13306 tg3_phy_reset(tp);
13307
13308 if (tg3_test_registers(tp) != 0) {
13309 etest->flags |= ETH_TEST_FL_FAILED;
13310 data[TG3_REGISTER_TEST] = 1;
13311 }
13312
13313 if (tg3_test_memory(tp) != 0) {
13314 etest->flags |= ETH_TEST_FL_FAILED;
13315 data[TG3_MEMORY_TEST] = 1;
13316 }
13317
13318 if (doextlpbk)
13319 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13320
13321 if (tg3_test_loopback(tp, data, doextlpbk))
13322 etest->flags |= ETH_TEST_FL_FAILED;
13323
13324 tg3_full_unlock(tp);
13325
13326 if (tg3_test_interrupt(tp) != 0) {
13327 etest->flags |= ETH_TEST_FL_FAILED;
13328 data[TG3_INTERRUPT_TEST] = 1;
13329 }
13330
13331 tg3_full_lock(tp, 0);
13332
13333 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13334 if (netif_running(dev)) {
13335 tg3_flag_set(tp, INIT_COMPLETE);
13336 err2 = tg3_restart_hw(tp, true);
13337 if (!err2)
13338 tg3_netif_start(tp);
13339 }
13340
13341 tg3_full_unlock(tp);
13342
13343 if (irq_sync && !err2)
13344 tg3_phy_start(tp);
13345 }
13346 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13347 tg3_power_down(tp);
13348
13349 }
13350
13351 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13352 struct ifreq *ifr, int cmd)
13353 {
13354 struct tg3 *tp = netdev_priv(dev);
13355 struct hwtstamp_config stmpconf;
13356
13357 if (!tg3_flag(tp, PTP_CAPABLE))
13358 return -EINVAL;
13359
13360 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13361 return -EFAULT;
13362
13363 if (stmpconf.flags)
13364 return -EINVAL;
13365
13366 switch (stmpconf.tx_type) {
13367 case HWTSTAMP_TX_ON:
13368 tg3_flag_set(tp, TX_TSTAMP_EN);
13369 break;
13370 case HWTSTAMP_TX_OFF:
13371 tg3_flag_clear(tp, TX_TSTAMP_EN);
13372 break;
13373 default:
13374 return -ERANGE;
13375 }
13376
13377 switch (stmpconf.rx_filter) {
13378 case HWTSTAMP_FILTER_NONE:
13379 tp->rxptpctl = 0;
13380 break;
13381 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13382 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13383 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13384 break;
13385 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13386 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13387 TG3_RX_PTP_CTL_SYNC_EVNT;
13388 break;
13389 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13390 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13391 TG3_RX_PTP_CTL_DELAY_REQ;
13392 break;
13393 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13394 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13395 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13396 break;
13397 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13398 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13399 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13400 break;
13401 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13402 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13403 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13404 break;
13405 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13406 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13407 TG3_RX_PTP_CTL_SYNC_EVNT;
13408 break;
13409 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13410 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13411 TG3_RX_PTP_CTL_SYNC_EVNT;
13412 break;
13413 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13414 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13415 TG3_RX_PTP_CTL_SYNC_EVNT;
13416 break;
13417 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13418 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13419 TG3_RX_PTP_CTL_DELAY_REQ;
13420 break;
13421 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13422 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13423 TG3_RX_PTP_CTL_DELAY_REQ;
13424 break;
13425 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13426 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13427 TG3_RX_PTP_CTL_DELAY_REQ;
13428 break;
13429 default:
13430 return -ERANGE;
13431 }
13432
13433 if (netif_running(dev) && tp->rxptpctl)
13434 tw32(TG3_RX_PTP_CTL,
13435 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13436
13437 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13438 -EFAULT : 0;
13439 }
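
/* Editorial userspace sketch of driving this ioctl (interface name and fd
 * are hypothetical; error handling elided):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */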
13440
13441 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13442 {
13443 struct mii_ioctl_data *data = if_mii(ifr);
13444 struct tg3 *tp = netdev_priv(dev);
13445 int err;
13446
13447 if (tg3_flag(tp, USE_PHYLIB)) {
13448 struct phy_device *phydev;
13449 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13450 return -EAGAIN;
13451 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13452 return phy_mii_ioctl(phydev, ifr, cmd);
13453 }
13454
13455 switch (cmd) {
13456 case SIOCGMIIPHY:
13457 data->phy_id = tp->phy_addr;
13458
13459 /* fallthru */
13460 case SIOCGMIIREG: {
13461 u32 mii_regval;
13462
13463 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13464 break; /* We have no PHY */
13465
13466 if (!netif_running(dev))
13467 return -EAGAIN;
13468
13469 spin_lock_bh(&tp->lock);
13470 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13471 data->reg_num & 0x1f, &mii_regval);
13472 spin_unlock_bh(&tp->lock);
13473
13474 data->val_out = mii_regval;
13475
13476 return err;
13477 }
13478
13479 case SIOCSMIIREG:
13480 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13481 break; /* We have no PHY */
13482
13483 if (!netif_running(dev))
13484 return -EAGAIN;
13485
13486 spin_lock_bh(&tp->lock);
13487 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13488 data->reg_num & 0x1f, data->val_in);
13489 spin_unlock_bh(&tp->lock);
13490
13491 return err;
13492
13493 case SIOCSHWTSTAMP:
13494 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13495
13496 default:
13497 /* do nothing */
13498 break;
13499 }
13500 return -EOPNOTSUPP;
13501 }
13502
13503 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13504 {
13505 struct tg3 *tp = netdev_priv(dev);
13506
13507 memcpy(ec, &tp->coal, sizeof(*ec));
13508 return 0;
13509 }
13510
13511 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13512 {
13513 struct tg3 *tp = netdev_priv(dev);
13514 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13515 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13516
13517 if (!tg3_flag(tp, 5705_PLUS)) {
13518 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13519 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13520 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13521 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13522 }
13523
13524 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13525 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13526 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13527 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13528 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13529 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13530 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13531 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13532 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13533 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13534 return -EINVAL;
13535
13536 /* No rx interrupts will be generated if both are zero */
13537 if ((ec->rx_coalesce_usecs == 0) &&
13538 (ec->rx_max_coalesced_frames == 0))
13539 return -EINVAL;
13540
13541 /* No tx interrupts will be generated if both are zero */
13542 if ((ec->tx_coalesce_usecs == 0) &&
13543 (ec->tx_max_coalesced_frames == 0))
13544 return -EINVAL;
13545
13546 /* Only copy relevant parameters, ignore all others. */
13547 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13548 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13549 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13550 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13551 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13552 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13553 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13554 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13555 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13556
13557 if (netif_running(dev)) {
13558 tg3_full_lock(tp, 0);
13559 __tg3_set_coalesce(tp, &tp->coal);
13560 tg3_full_unlock(tp);
13561 }
13562 return 0;
13563 }
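
/* Editorial usage sketch: the validation above requires a nonzero usecs or
 * frames trigger in each direction, e.g. (interface name hypothetical):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * while "ethtool -C eth0 rx-usecs 0 rx-frames 0" is rejected with -EINVAL.
 */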
13564
13565 static const struct ethtool_ops tg3_ethtool_ops = {
13566 .get_settings = tg3_get_settings,
13567 .set_settings = tg3_set_settings,
13568 .get_drvinfo = tg3_get_drvinfo,
13569 .get_regs_len = tg3_get_regs_len,
13570 .get_regs = tg3_get_regs,
13571 .get_wol = tg3_get_wol,
13572 .set_wol = tg3_set_wol,
13573 .get_msglevel = tg3_get_msglevel,
13574 .set_msglevel = tg3_set_msglevel,
13575 .nway_reset = tg3_nway_reset,
13576 .get_link = ethtool_op_get_link,
13577 .get_eeprom_len = tg3_get_eeprom_len,
13578 .get_eeprom = tg3_get_eeprom,
13579 .set_eeprom = tg3_set_eeprom,
13580 .get_ringparam = tg3_get_ringparam,
13581 .set_ringparam = tg3_set_ringparam,
13582 .get_pauseparam = tg3_get_pauseparam,
13583 .set_pauseparam = tg3_set_pauseparam,
13584 .self_test = tg3_self_test,
13585 .get_strings = tg3_get_strings,
13586 .set_phys_id = tg3_set_phys_id,
13587 .get_ethtool_stats = tg3_get_ethtool_stats,
13588 .get_coalesce = tg3_get_coalesce,
13589 .set_coalesce = tg3_set_coalesce,
13590 .get_sset_count = tg3_get_sset_count,
13591 .get_rxnfc = tg3_get_rxnfc,
13592 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13593 .get_rxfh_indir = tg3_get_rxfh_indir,
13594 .set_rxfh_indir = tg3_set_rxfh_indir,
13595 .get_channels = tg3_get_channels,
13596 .set_channels = tg3_set_channels,
13597 .get_ts_info = tg3_get_ts_info,
13598 };
13599
13600 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13601 struct rtnl_link_stats64 *stats)
13602 {
13603 struct tg3 *tp = netdev_priv(dev);
13604
13605 spin_lock_bh(&tp->lock);
13606 if (!tp->hw_stats) {
13607 spin_unlock_bh(&tp->lock);
13608 return &tp->net_stats_prev;
13609 }
13610
13611 tg3_get_nstats(tp, stats);
13612 spin_unlock_bh(&tp->lock);
13613
13614 return stats;
13615 }
13616
13617 static void tg3_set_rx_mode(struct net_device *dev)
13618 {
13619 struct tg3 *tp = netdev_priv(dev);
13620
13621 if (!netif_running(dev))
13622 return;
13623
13624 tg3_full_lock(tp, 0);
13625 __tg3_set_rx_mode(dev);
13626 tg3_full_unlock(tp);
13627 }
13628
13629 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13630 int new_mtu)
13631 {
13632 dev->mtu = new_mtu;
13633
13634 if (new_mtu > ETH_DATA_LEN) {
13635 if (tg3_flag(tp, 5780_CLASS)) {
13636 netdev_update_features(dev);
13637 tg3_flag_clear(tp, TSO_CAPABLE);
13638 } else {
13639 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13640 }
13641 } else {
13642 if (tg3_flag(tp, 5780_CLASS)) {
13643 tg3_flag_set(tp, TSO_CAPABLE);
13644 netdev_update_features(dev);
13645 }
13646 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13647 }
13648 }
13649
13650 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13651 {
13652 struct tg3 *tp = netdev_priv(dev);
13653 int err;
13654 bool reset_phy = false;
13655
13656 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13657 return -EINVAL;
13658
13659 if (!netif_running(dev)) {
13660 /* We'll just catch it later when the
13661 * device is brought up.
13662 */
13663 tg3_set_mtu(dev, tp, new_mtu);
13664 return 0;
13665 }
13666
13667 tg3_phy_stop(tp);
13668
13669 tg3_netif_stop(tp);
13670
13671 tg3_full_lock(tp, 1);
13672
13673 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13674
13675 tg3_set_mtu(dev, tp, new_mtu);
13676
13677 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13678 * breaks all requests to 256 bytes.
13679 */
13680 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13681 reset_phy = true;
13682
13683 err = tg3_restart_hw(tp, reset_phy);
13684
13685 if (!err)
13686 tg3_netif_start(tp);
13687
13688 tg3_full_unlock(tp);
13689
13690 if (!err)
13691 tg3_phy_start(tp);
13692
13693 return err;
13694 }
13695
13696 static const struct net_device_ops tg3_netdev_ops = {
13697 .ndo_open = tg3_open,
13698 .ndo_stop = tg3_close,
13699 .ndo_start_xmit = tg3_start_xmit,
13700 .ndo_get_stats64 = tg3_get_stats64,
13701 .ndo_validate_addr = eth_validate_addr,
13702 .ndo_set_rx_mode = tg3_set_rx_mode,
13703 .ndo_set_mac_address = tg3_set_mac_addr,
13704 .ndo_do_ioctl = tg3_ioctl,
13705 .ndo_tx_timeout = tg3_tx_timeout,
13706 .ndo_change_mtu = tg3_change_mtu,
13707 .ndo_fix_features = tg3_fix_features,
13708 .ndo_set_features = tg3_set_features,
13709 #ifdef CONFIG_NET_POLL_CONTROLLER
13710 .ndo_poll_controller = tg3_poll_controller,
13711 #endif
13712 };
13713
13714 static void tg3_get_eeprom_size(struct tg3 *tp)
13715 {
13716 u32 cursize, val, magic;
13717
13718 tp->nvram_size = EEPROM_CHIP_SIZE;
13719
13720 if (tg3_nvram_read(tp, 0, &magic) != 0)
13721 return;
13722
13723 if ((magic != TG3_EEPROM_MAGIC) &&
13724 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13725 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13726 return;
13727
13728 /*
13729 * Size the chip by reading offsets at increasing powers of two.
13730 * When we encounter our validation signature, we know the addressing
13731 * has wrapped around, and thus have our chip size.
13732 */
13733 cursize = 0x10;
13734
13735 while (cursize < tp->nvram_size) {
13736 if (tg3_nvram_read(tp, cursize, &val) != 0)
13737 return;
13738
13739 if (val == magic)
13740 break;
13741
13742 cursize <<= 1;
13743 }
13744
13745 tp->nvram_size = cursize;
13746 }
13747
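/* Determine the NVRAM size.  Selfboot-format images are sized by
 * probing (tg3_get_eeprom_size()); bootcode images store a 16-bit
 * size-in-KB count at offset 0xf2, so e.g. a stored count of 512
 * yields a 512KB nvram_size; anything else falls back to 512KB.
 */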
13748 static void tg3_get_nvram_size(struct tg3 *tp)
13749 {
13750 u32 val;
13751
13752 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13753 return;
13754
13755 /* Selfboot format */
13756 if (val != TG3_EEPROM_MAGIC) {
13757 tg3_get_eeprom_size(tp);
13758 return;
13759 }
13760
13761 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13762 if (val != 0) {
13763 /* This is confusing. We want to operate on the
13764 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13765 * call will read from NVRAM and byteswap the data
13766 * according to the byteswapping settings for all
13767 * other register accesses. This ensures the data we
13768 * want will always reside in the lower 16-bits.
13769 * However, the data in NVRAM is in LE format, which
13770 * means the data from the NVRAM read will always be
13771 * opposite the endianness of the CPU. The 16-bit
13772 * byteswap then brings the data to CPU endianness.
13773 */
13774 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13775 return;
13776 }
13777 }
13778 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13779 }
13780
13781 static void tg3_get_nvram_info(struct tg3 *tp)
13782 {
13783 u32 nvcfg1;
13784
13785 nvcfg1 = tr32(NVRAM_CFG1);
13786 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13787 tg3_flag_set(tp, FLASH);
13788 } else {
13789 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13790 tw32(NVRAM_CFG1, nvcfg1);
13791 }
13792
13793 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13794 tg3_flag(tp, 5780_CLASS)) {
13795 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13796 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13797 tp->nvram_jedecnum = JEDEC_ATMEL;
13798 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13799 tg3_flag_set(tp, NVRAM_BUFFERED);
13800 break;
13801 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13802 tp->nvram_jedecnum = JEDEC_ATMEL;
13803 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13804 break;
13805 case FLASH_VENDOR_ATMEL_EEPROM:
13806 tp->nvram_jedecnum = JEDEC_ATMEL;
13807 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13808 tg3_flag_set(tp, NVRAM_BUFFERED);
13809 break;
13810 case FLASH_VENDOR_ST:
13811 tp->nvram_jedecnum = JEDEC_ST;
13812 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13813 tg3_flag_set(tp, NVRAM_BUFFERED);
13814 break;
13815 case FLASH_VENDOR_SAIFUN:
13816 tp->nvram_jedecnum = JEDEC_SAIFUN;
13817 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13818 break;
13819 case FLASH_VENDOR_SST_SMALL:
13820 case FLASH_VENDOR_SST_LARGE:
13821 tp->nvram_jedecnum = JEDEC_SST;
13822 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13823 break;
13824 }
13825 } else {
13826 tp->nvram_jedecnum = JEDEC_ATMEL;
13827 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13828 tg3_flag_set(tp, NVRAM_BUFFERED);
13829 }
13830 }
13831
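/* Decode the 5752-style page-size strap in NVRAM_CFG1 into bytes.
 * The 264- and 528-byte values are Atmel DataFlash page sizes; the
 * callers use them to decide whether NVRAM address translation is
 * required.
 */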
13832 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13833 {
13834 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13835 case FLASH_5752PAGE_SIZE_256:
13836 tp->nvram_pagesize = 256;
13837 break;
13838 case FLASH_5752PAGE_SIZE_512:
13839 tp->nvram_pagesize = 512;
13840 break;
13841 case FLASH_5752PAGE_SIZE_1K:
13842 tp->nvram_pagesize = 1024;
13843 break;
13844 case FLASH_5752PAGE_SIZE_2K:
13845 tp->nvram_pagesize = 2048;
13846 break;
13847 case FLASH_5752PAGE_SIZE_4K:
13848 tp->nvram_pagesize = 4096;
13849 break;
13850 case FLASH_5752PAGE_SIZE_264:
13851 tp->nvram_pagesize = 264;
13852 break;
13853 case FLASH_5752PAGE_SIZE_528:
13854 tp->nvram_pagesize = 528;
13855 break;
13856 }
13857 }
13858
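/* The tg3_get_*_nvram_info() routines below decode the per-ASIC
 * strapping bits in NVRAM_CFG1 to identify the flash vendor, page
 * size and, where encoded, the total NVRAM size.
 */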
13859 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13860 {
13861 u32 nvcfg1;
13862
13863 nvcfg1 = tr32(NVRAM_CFG1);
13864
13865 /* NVRAM protection for TPM */
13866 if (nvcfg1 & (1 << 27))
13867 tg3_flag_set(tp, PROTECTED_NVRAM);
13868
13869 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13870 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13871 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13872 tp->nvram_jedecnum = JEDEC_ATMEL;
13873 tg3_flag_set(tp, NVRAM_BUFFERED);
13874 break;
13875 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13876 tp->nvram_jedecnum = JEDEC_ATMEL;
13877 tg3_flag_set(tp, NVRAM_BUFFERED);
13878 tg3_flag_set(tp, FLASH);
13879 break;
13880 case FLASH_5752VENDOR_ST_M45PE10:
13881 case FLASH_5752VENDOR_ST_M45PE20:
13882 case FLASH_5752VENDOR_ST_M45PE40:
13883 tp->nvram_jedecnum = JEDEC_ST;
13884 tg3_flag_set(tp, NVRAM_BUFFERED);
13885 tg3_flag_set(tp, FLASH);
13886 break;
13887 }
13888
13889 if (tg3_flag(tp, FLASH)) {
13890 tg3_nvram_get_pagesize(tp, nvcfg1);
13891 } else {
13892 		/* For EEPROM, set the pagesize to the maximum EEPROM size */
13893 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13894
13895 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13896 tw32(NVRAM_CFG1, nvcfg1);
13897 }
13898 }
13899
13900 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13901 {
13902 u32 nvcfg1, protect = 0;
13903
13904 nvcfg1 = tr32(NVRAM_CFG1);
13905
13906 /* NVRAM protection for TPM */
13907 if (nvcfg1 & (1 << 27)) {
13908 tg3_flag_set(tp, PROTECTED_NVRAM);
13909 protect = 1;
13910 }
13911
13912 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13913 switch (nvcfg1) {
13914 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13915 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13916 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13917 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13918 tp->nvram_jedecnum = JEDEC_ATMEL;
13919 tg3_flag_set(tp, NVRAM_BUFFERED);
13920 tg3_flag_set(tp, FLASH);
13921 tp->nvram_pagesize = 264;
13922 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13923 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13924 tp->nvram_size = (protect ? 0x3e200 :
13925 TG3_NVRAM_SIZE_512KB);
13926 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13927 tp->nvram_size = (protect ? 0x1f200 :
13928 TG3_NVRAM_SIZE_256KB);
13929 else
13930 tp->nvram_size = (protect ? 0x1f200 :
13931 TG3_NVRAM_SIZE_128KB);
13932 break;
13933 case FLASH_5752VENDOR_ST_M45PE10:
13934 case FLASH_5752VENDOR_ST_M45PE20:
13935 case FLASH_5752VENDOR_ST_M45PE40:
13936 tp->nvram_jedecnum = JEDEC_ST;
13937 tg3_flag_set(tp, NVRAM_BUFFERED);
13938 tg3_flag_set(tp, FLASH);
13939 tp->nvram_pagesize = 256;
13940 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13941 tp->nvram_size = (protect ?
13942 TG3_NVRAM_SIZE_64KB :
13943 TG3_NVRAM_SIZE_128KB);
13944 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13945 tp->nvram_size = (protect ?
13946 TG3_NVRAM_SIZE_64KB :
13947 TG3_NVRAM_SIZE_256KB);
13948 else
13949 tp->nvram_size = (protect ?
13950 TG3_NVRAM_SIZE_128KB :
13951 TG3_NVRAM_SIZE_512KB);
13952 break;
13953 }
13954 }
13955
13956 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13957 {
13958 u32 nvcfg1;
13959
13960 nvcfg1 = tr32(NVRAM_CFG1);
13961
13962 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13963 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13964 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13965 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13966 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13967 tp->nvram_jedecnum = JEDEC_ATMEL;
13968 tg3_flag_set(tp, NVRAM_BUFFERED);
13969 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13970
13971 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13972 tw32(NVRAM_CFG1, nvcfg1);
13973 break;
13974 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13975 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13976 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13977 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13978 tp->nvram_jedecnum = JEDEC_ATMEL;
13979 tg3_flag_set(tp, NVRAM_BUFFERED);
13980 tg3_flag_set(tp, FLASH);
13981 tp->nvram_pagesize = 264;
13982 break;
13983 case FLASH_5752VENDOR_ST_M45PE10:
13984 case FLASH_5752VENDOR_ST_M45PE20:
13985 case FLASH_5752VENDOR_ST_M45PE40:
13986 tp->nvram_jedecnum = JEDEC_ST;
13987 tg3_flag_set(tp, NVRAM_BUFFERED);
13988 tg3_flag_set(tp, FLASH);
13989 tp->nvram_pagesize = 256;
13990 break;
13991 }
13992 }
13993
13994 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13995 {
13996 u32 nvcfg1, protect = 0;
13997
13998 nvcfg1 = tr32(NVRAM_CFG1);
13999
14000 /* NVRAM protection for TPM */
14001 if (nvcfg1 & (1 << 27)) {
14002 tg3_flag_set(tp, PROTECTED_NVRAM);
14003 protect = 1;
14004 }
14005
14006 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14007 switch (nvcfg1) {
14008 case FLASH_5761VENDOR_ATMEL_ADB021D:
14009 case FLASH_5761VENDOR_ATMEL_ADB041D:
14010 case FLASH_5761VENDOR_ATMEL_ADB081D:
14011 case FLASH_5761VENDOR_ATMEL_ADB161D:
14012 case FLASH_5761VENDOR_ATMEL_MDB021D:
14013 case FLASH_5761VENDOR_ATMEL_MDB041D:
14014 case FLASH_5761VENDOR_ATMEL_MDB081D:
14015 case FLASH_5761VENDOR_ATMEL_MDB161D:
14016 tp->nvram_jedecnum = JEDEC_ATMEL;
14017 tg3_flag_set(tp, NVRAM_BUFFERED);
14018 tg3_flag_set(tp, FLASH);
14019 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14020 tp->nvram_pagesize = 256;
14021 break;
14022 case FLASH_5761VENDOR_ST_A_M45PE20:
14023 case FLASH_5761VENDOR_ST_A_M45PE40:
14024 case FLASH_5761VENDOR_ST_A_M45PE80:
14025 case FLASH_5761VENDOR_ST_A_M45PE16:
14026 case FLASH_5761VENDOR_ST_M_M45PE20:
14027 case FLASH_5761VENDOR_ST_M_M45PE40:
14028 case FLASH_5761VENDOR_ST_M_M45PE80:
14029 case FLASH_5761VENDOR_ST_M_M45PE16:
14030 tp->nvram_jedecnum = JEDEC_ST;
14031 tg3_flag_set(tp, NVRAM_BUFFERED);
14032 tg3_flag_set(tp, FLASH);
14033 tp->nvram_pagesize = 256;
14034 break;
14035 }
14036
14037 if (protect) {
14038 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14039 } else {
14040 switch (nvcfg1) {
14041 case FLASH_5761VENDOR_ATMEL_ADB161D:
14042 case FLASH_5761VENDOR_ATMEL_MDB161D:
14043 case FLASH_5761VENDOR_ST_A_M45PE16:
14044 case FLASH_5761VENDOR_ST_M_M45PE16:
14045 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14046 break;
14047 case FLASH_5761VENDOR_ATMEL_ADB081D:
14048 case FLASH_5761VENDOR_ATMEL_MDB081D:
14049 case FLASH_5761VENDOR_ST_A_M45PE80:
14050 case FLASH_5761VENDOR_ST_M_M45PE80:
14051 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14052 break;
14053 case FLASH_5761VENDOR_ATMEL_ADB041D:
14054 case FLASH_5761VENDOR_ATMEL_MDB041D:
14055 case FLASH_5761VENDOR_ST_A_M45PE40:
14056 case FLASH_5761VENDOR_ST_M_M45PE40:
14057 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14058 break;
14059 case FLASH_5761VENDOR_ATMEL_ADB021D:
14060 case FLASH_5761VENDOR_ATMEL_MDB021D:
14061 case FLASH_5761VENDOR_ST_A_M45PE20:
14062 case FLASH_5761VENDOR_ST_M_M45PE20:
14063 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14064 break;
14065 }
14066 }
14067 }
14068
14069 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14070 {
14071 tp->nvram_jedecnum = JEDEC_ATMEL;
14072 tg3_flag_set(tp, NVRAM_BUFFERED);
14073 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14074 }
14075
14076 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14077 {
14078 u32 nvcfg1;
14079
14080 nvcfg1 = tr32(NVRAM_CFG1);
14081
14082 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14083 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14084 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14085 tp->nvram_jedecnum = JEDEC_ATMEL;
14086 tg3_flag_set(tp, NVRAM_BUFFERED);
14087 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14088
14089 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14090 tw32(NVRAM_CFG1, nvcfg1);
14091 return;
14092 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14093 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14094 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14095 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14096 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14097 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14098 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14099 tp->nvram_jedecnum = JEDEC_ATMEL;
14100 tg3_flag_set(tp, NVRAM_BUFFERED);
14101 tg3_flag_set(tp, FLASH);
14102
14103 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14104 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14105 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14106 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14107 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14108 break;
14109 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14110 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14111 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14112 break;
14113 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14114 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14115 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14116 break;
14117 }
14118 break;
14119 case FLASH_5752VENDOR_ST_M45PE10:
14120 case FLASH_5752VENDOR_ST_M45PE20:
14121 case FLASH_5752VENDOR_ST_M45PE40:
14122 tp->nvram_jedecnum = JEDEC_ST;
14123 tg3_flag_set(tp, NVRAM_BUFFERED);
14124 tg3_flag_set(tp, FLASH);
14125
14126 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14127 case FLASH_5752VENDOR_ST_M45PE10:
14128 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14129 break;
14130 case FLASH_5752VENDOR_ST_M45PE20:
14131 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14132 break;
14133 case FLASH_5752VENDOR_ST_M45PE40:
14134 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14135 break;
14136 }
14137 break;
14138 default:
14139 tg3_flag_set(tp, NO_NVRAM);
14140 return;
14141 }
14142
14143 tg3_nvram_get_pagesize(tp, nvcfg1);
14144 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14145 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14146 }
14147
14149 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14150 {
14151 u32 nvcfg1;
14152
14153 nvcfg1 = tr32(NVRAM_CFG1);
14154
14155 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14156 case FLASH_5717VENDOR_ATMEL_EEPROM:
14157 case FLASH_5717VENDOR_MICRO_EEPROM:
14158 tp->nvram_jedecnum = JEDEC_ATMEL;
14159 tg3_flag_set(tp, NVRAM_BUFFERED);
14160 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14161
14162 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14163 tw32(NVRAM_CFG1, nvcfg1);
14164 return;
14165 case FLASH_5717VENDOR_ATMEL_MDB011D:
14166 case FLASH_5717VENDOR_ATMEL_ADB011B:
14167 case FLASH_5717VENDOR_ATMEL_ADB011D:
14168 case FLASH_5717VENDOR_ATMEL_MDB021D:
14169 case FLASH_5717VENDOR_ATMEL_ADB021B:
14170 case FLASH_5717VENDOR_ATMEL_ADB021D:
14171 case FLASH_5717VENDOR_ATMEL_45USPT:
14172 tp->nvram_jedecnum = JEDEC_ATMEL;
14173 tg3_flag_set(tp, NVRAM_BUFFERED);
14174 tg3_flag_set(tp, FLASH);
14175
14176 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14177 case FLASH_5717VENDOR_ATMEL_MDB021D:
14178 /* Detect size with tg3_nvram_get_size() */
14179 break;
14180 case FLASH_5717VENDOR_ATMEL_ADB021B:
14181 case FLASH_5717VENDOR_ATMEL_ADB021D:
14182 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14183 break;
14184 default:
14185 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14186 break;
14187 }
14188 break;
14189 case FLASH_5717VENDOR_ST_M_M25PE10:
14190 case FLASH_5717VENDOR_ST_A_M25PE10:
14191 case FLASH_5717VENDOR_ST_M_M45PE10:
14192 case FLASH_5717VENDOR_ST_A_M45PE10:
14193 case FLASH_5717VENDOR_ST_M_M25PE20:
14194 case FLASH_5717VENDOR_ST_A_M25PE20:
14195 case FLASH_5717VENDOR_ST_M_M45PE20:
14196 case FLASH_5717VENDOR_ST_A_M45PE20:
14197 case FLASH_5717VENDOR_ST_25USPT:
14198 case FLASH_5717VENDOR_ST_45USPT:
14199 tp->nvram_jedecnum = JEDEC_ST;
14200 tg3_flag_set(tp, NVRAM_BUFFERED);
14201 tg3_flag_set(tp, FLASH);
14202
14203 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14204 case FLASH_5717VENDOR_ST_M_M25PE20:
14205 case FLASH_5717VENDOR_ST_M_M45PE20:
14206 /* Detect size with tg3_nvram_get_size() */
14207 break;
14208 case FLASH_5717VENDOR_ST_A_M25PE20:
14209 case FLASH_5717VENDOR_ST_A_M45PE20:
14210 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14211 break;
14212 default:
14213 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14214 break;
14215 }
14216 break;
14217 default:
14218 tg3_flag_set(tp, NO_NVRAM);
14219 return;
14220 }
14221
14222 tg3_nvram_get_pagesize(tp, nvcfg1);
14223 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14224 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14225 }
14226
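/* The 5762 shares this routine but encodes its pinstraps differently,
 * so they are first remapped onto their 5720 equivalents before the
 * common decode below.
 */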
14227 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14228 {
14229 u32 nvcfg1, nvmpinstrp;
14230
14231 nvcfg1 = tr32(NVRAM_CFG1);
14232 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14233
14234 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14235 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14236 tg3_flag_set(tp, NO_NVRAM);
14237 return;
14238 }
14239
14240 switch (nvmpinstrp) {
14241 case FLASH_5762_EEPROM_HD:
14242 nvmpinstrp = FLASH_5720_EEPROM_HD;
14243 break;
14244 case FLASH_5762_EEPROM_LD:
14245 nvmpinstrp = FLASH_5720_EEPROM_LD;
14246 break;
14247 case FLASH_5720VENDOR_M_ST_M45PE20:
14248 /* This pinstrap supports multiple sizes, so force it
14249 * to read the actual size from location 0xf0.
14250 */
14251 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14252 break;
14253 }
14254 }
14255
14256 switch (nvmpinstrp) {
14257 case FLASH_5720_EEPROM_HD:
14258 case FLASH_5720_EEPROM_LD:
14259 tp->nvram_jedecnum = JEDEC_ATMEL;
14260 tg3_flag_set(tp, NVRAM_BUFFERED);
14261
14262 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14263 tw32(NVRAM_CFG1, nvcfg1);
14264 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14265 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14266 else
14267 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14268 return;
14269 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14270 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14271 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14272 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14273 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14274 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14275 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14276 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14277 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14278 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14279 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14280 case FLASH_5720VENDOR_ATMEL_45USPT:
14281 tp->nvram_jedecnum = JEDEC_ATMEL;
14282 tg3_flag_set(tp, NVRAM_BUFFERED);
14283 tg3_flag_set(tp, FLASH);
14284
14285 switch (nvmpinstrp) {
14286 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14287 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14288 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14289 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14290 break;
14291 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14292 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14293 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14294 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14295 break;
14296 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14297 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14298 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14299 break;
14300 default:
14301 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14302 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14303 break;
14304 }
14305 break;
14306 case FLASH_5720VENDOR_M_ST_M25PE10:
14307 case FLASH_5720VENDOR_M_ST_M45PE10:
14308 case FLASH_5720VENDOR_A_ST_M25PE10:
14309 case FLASH_5720VENDOR_A_ST_M45PE10:
14310 case FLASH_5720VENDOR_M_ST_M25PE20:
14311 case FLASH_5720VENDOR_M_ST_M45PE20:
14312 case FLASH_5720VENDOR_A_ST_M25PE20:
14313 case FLASH_5720VENDOR_A_ST_M45PE20:
14314 case FLASH_5720VENDOR_M_ST_M25PE40:
14315 case FLASH_5720VENDOR_M_ST_M45PE40:
14316 case FLASH_5720VENDOR_A_ST_M25PE40:
14317 case FLASH_5720VENDOR_A_ST_M45PE40:
14318 case FLASH_5720VENDOR_M_ST_M25PE80:
14319 case FLASH_5720VENDOR_M_ST_M45PE80:
14320 case FLASH_5720VENDOR_A_ST_M25PE80:
14321 case FLASH_5720VENDOR_A_ST_M45PE80:
14322 case FLASH_5720VENDOR_ST_25USPT:
14323 case FLASH_5720VENDOR_ST_45USPT:
14324 tp->nvram_jedecnum = JEDEC_ST;
14325 tg3_flag_set(tp, NVRAM_BUFFERED);
14326 tg3_flag_set(tp, FLASH);
14327
14328 switch (nvmpinstrp) {
14329 case FLASH_5720VENDOR_M_ST_M25PE20:
14330 case FLASH_5720VENDOR_M_ST_M45PE20:
14331 case FLASH_5720VENDOR_A_ST_M25PE20:
14332 case FLASH_5720VENDOR_A_ST_M45PE20:
14333 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14334 break;
14335 case FLASH_5720VENDOR_M_ST_M25PE40:
14336 case FLASH_5720VENDOR_M_ST_M45PE40:
14337 case FLASH_5720VENDOR_A_ST_M25PE40:
14338 case FLASH_5720VENDOR_A_ST_M45PE40:
14339 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14340 break;
14341 case FLASH_5720VENDOR_M_ST_M25PE80:
14342 case FLASH_5720VENDOR_M_ST_M45PE80:
14343 case FLASH_5720VENDOR_A_ST_M25PE80:
14344 case FLASH_5720VENDOR_A_ST_M45PE80:
14345 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14346 break;
14347 default:
14348 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14349 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14350 break;
14351 }
14352 break;
14353 default:
14354 tg3_flag_set(tp, NO_NVRAM);
14355 return;
14356 }
14357
14358 tg3_nvram_get_pagesize(tp, nvcfg1);
14359 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14360 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14361
14362 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14363 u32 val;
14364
14365 if (tg3_nvram_read(tp, 0, &val))
14366 return;
14367
14368 if (val != TG3_EEPROM_MAGIC &&
14369 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14370 tg3_flag_set(tp, NO_NVRAM);
14371 }
14372 }
14373
14374 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14375 static void tg3_nvram_init(struct tg3 *tp)
14376 {
14377 if (tg3_flag(tp, IS_SSB_CORE)) {
14378 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14379 tg3_flag_clear(tp, NVRAM);
14380 tg3_flag_clear(tp, NVRAM_BUFFERED);
14381 tg3_flag_set(tp, NO_NVRAM);
14382 return;
14383 }
14384
14385 tw32_f(GRC_EEPROM_ADDR,
14386 (EEPROM_ADDR_FSM_RESET |
14387 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14388 EEPROM_ADDR_CLKPERD_SHIFT)));
14389
14390 msleep(1);
14391
14392 /* Enable seeprom accesses. */
14393 tw32_f(GRC_LOCAL_CTRL,
14394 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14395 udelay(100);
14396
14397 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14398 tg3_asic_rev(tp) != ASIC_REV_5701) {
14399 tg3_flag_set(tp, NVRAM);
14400
14401 if (tg3_nvram_lock(tp)) {
14402 netdev_warn(tp->dev,
14403 "Cannot get nvram lock, %s failed\n",
14404 __func__);
14405 return;
14406 }
14407 tg3_enable_nvram_access(tp);
14408
14409 tp->nvram_size = 0;
14410
14411 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14412 tg3_get_5752_nvram_info(tp);
14413 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14414 tg3_get_5755_nvram_info(tp);
14415 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14416 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14417 tg3_asic_rev(tp) == ASIC_REV_5785)
14418 tg3_get_5787_nvram_info(tp);
14419 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14420 tg3_get_5761_nvram_info(tp);
14421 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14422 tg3_get_5906_nvram_info(tp);
14423 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14424 tg3_flag(tp, 57765_CLASS))
14425 tg3_get_57780_nvram_info(tp);
14426 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14427 tg3_asic_rev(tp) == ASIC_REV_5719)
14428 tg3_get_5717_nvram_info(tp);
14429 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14430 tg3_asic_rev(tp) == ASIC_REV_5762)
14431 tg3_get_5720_nvram_info(tp);
14432 else
14433 tg3_get_nvram_info(tp);
14434
14435 if (tp->nvram_size == 0)
14436 tg3_get_nvram_size(tp);
14437
14438 tg3_disable_nvram_access(tp);
14439 tg3_nvram_unlock(tp);
14440
14441 } else {
14442 tg3_flag_clear(tp, NVRAM);
14443 tg3_flag_clear(tp, NVRAM_BUFFERED);
14444
14445 tg3_get_eeprom_size(tp);
14446 }
14447 }
14448
14449 struct subsys_tbl_ent {
14450 u16 subsys_vendor, subsys_devid;
14451 u32 phy_id;
14452 };
14453
14454 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14455 /* Broadcom boards. */
14456 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14457 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14458 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14459 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14460 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14461 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14462 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14463 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14464 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14465 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14466 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14467 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14468 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14469 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14470 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14471 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14472 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14473 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14474 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14475 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14476 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14477 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14478
14479 /* 3com boards. */
14480 { TG3PCI_SUBVENDOR_ID_3COM,
14481 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14482 { TG3PCI_SUBVENDOR_ID_3COM,
14483 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14484 { TG3PCI_SUBVENDOR_ID_3COM,
14485 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14486 { TG3PCI_SUBVENDOR_ID_3COM,
14487 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14488 { TG3PCI_SUBVENDOR_ID_3COM,
14489 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14490
14491 /* DELL boards. */
14492 { TG3PCI_SUBVENDOR_ID_DELL,
14493 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14494 { TG3PCI_SUBVENDOR_ID_DELL,
14495 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14496 { TG3PCI_SUBVENDOR_ID_DELL,
14497 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14498 { TG3PCI_SUBVENDOR_ID_DELL,
14499 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14500
14501 /* Compaq boards. */
14502 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14503 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14504 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14505 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14506 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14507 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14508 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14509 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14510 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14511 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14512
14513 /* IBM boards. */
14514 { TG3PCI_SUBVENDOR_ID_IBM,
14515 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14516 };
14517
14518 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14519 {
14520 int i;
14521
14522 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14523 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14524 tp->pdev->subsystem_vendor) &&
14525 (subsys_id_to_phy_id[i].subsys_devid ==
14526 tp->pdev->subsystem_device))
14527 return &subsys_id_to_phy_id[i];
14528 }
14529 return NULL;
14530 }
14531
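/* Parse the NIC_SRAM_DATA_* configuration words that the bootcode
 * leaves in NIC SRAM: PHY ID and type, LED mode, and the WOL, ASF
 * and APE capability flags.
 */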
14532 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14533 {
14534 u32 val;
14535
14536 tp->phy_id = TG3_PHY_ID_INVALID;
14537 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14538
14539 /* Assume an onboard device and WOL capable by default. */
14540 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14541 tg3_flag_set(tp, WOL_CAP);
14542
14543 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14544 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14545 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14546 tg3_flag_set(tp, IS_NIC);
14547 }
14548 val = tr32(VCPU_CFGSHDW);
14549 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14550 tg3_flag_set(tp, ASPM_WORKAROUND);
14551 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14552 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14553 tg3_flag_set(tp, WOL_ENABLE);
14554 device_set_wakeup_enable(&tp->pdev->dev, true);
14555 }
14556 goto done;
14557 }
14558
14559 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14560 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14561 u32 nic_cfg, led_cfg;
14562 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14563 int eeprom_phy_serdes = 0;
14564
14565 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14566 tp->nic_sram_data_cfg = nic_cfg;
14567
14568 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14569 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14570 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14571 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14572 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14573 (ver > 0) && (ver < 0x100))
14574 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14575
14576 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14577 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14578
14579 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14580 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14581 eeprom_phy_serdes = 1;
14582
14583 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14584 if (nic_phy_id != 0) {
14585 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14586 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14587
14588 eeprom_phy_id = (id1 >> 16) << 10;
14589 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14590 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14591 } else
14592 eeprom_phy_id = 0;
14593
14594 tp->phy_id = eeprom_phy_id;
14595 if (eeprom_phy_serdes) {
14596 if (!tg3_flag(tp, 5705_PLUS))
14597 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14598 else
14599 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14600 }
14601
14602 if (tg3_flag(tp, 5750_PLUS))
14603 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14604 SHASTA_EXT_LED_MODE_MASK);
14605 else
14606 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14607
14608 switch (led_cfg) {
14609 default:
14610 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14611 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14612 break;
14613
14614 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14615 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14616 break;
14617
14618 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14619 tp->led_ctrl = LED_CTRL_MODE_MAC;
14620
14621 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14622 * read on some older 5700/5701 bootcode.
14623 */
14624 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14625 tg3_asic_rev(tp) == ASIC_REV_5701)
14626 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14627
14628 break;
14629
14630 case SHASTA_EXT_LED_SHARED:
14631 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14632 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14633 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14634 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14635 LED_CTRL_MODE_PHY_2);
14636 break;
14637
14638 case SHASTA_EXT_LED_MAC:
14639 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14640 break;
14641
14642 case SHASTA_EXT_LED_COMBO:
14643 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14644 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14645 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14646 LED_CTRL_MODE_PHY_2);
14647 break;
14648
14649 }
14650
14651 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14652 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14653 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14654 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14655
14656 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14657 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14658
14659 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14660 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14661 if ((tp->pdev->subsystem_vendor ==
14662 PCI_VENDOR_ID_ARIMA) &&
14663 (tp->pdev->subsystem_device == 0x205a ||
14664 tp->pdev->subsystem_device == 0x2063))
14665 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14666 } else {
14667 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14668 tg3_flag_set(tp, IS_NIC);
14669 }
14670
14671 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14672 tg3_flag_set(tp, ENABLE_ASF);
14673 if (tg3_flag(tp, 5750_PLUS))
14674 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14675 }
14676
14677 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14678 tg3_flag(tp, 5750_PLUS))
14679 tg3_flag_set(tp, ENABLE_APE);
14680
14681 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14682 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14683 tg3_flag_clear(tp, WOL_CAP);
14684
14685 if (tg3_flag(tp, WOL_CAP) &&
14686 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14687 tg3_flag_set(tp, WOL_ENABLE);
14688 device_set_wakeup_enable(&tp->pdev->dev, true);
14689 }
14690
14691 if (cfg2 & (1 << 17))
14692 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14693
14692 		/* serdes signal pre-emphasis in register 0x590 is set by
14693 		 * the bootcode if bit 18 is set */
14696 if (cfg2 & (1 << 18))
14697 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14698
14699 if ((tg3_flag(tp, 57765_PLUS) ||
14700 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14701 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14702 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14703 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14704
14705 if (tg3_flag(tp, PCI_EXPRESS)) {
14706 u32 cfg3;
14707
14708 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14709 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14710 !tg3_flag(tp, 57765_PLUS) &&
14711 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14712 tg3_flag_set(tp, ASPM_WORKAROUND);
14713 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14714 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14715 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14716 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14717 }
14718
14719 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14720 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14721 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14722 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14723 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14724 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14725 }
14726 done:
14727 if (tg3_flag(tp, WOL_CAP))
14728 device_set_wakeup_enable(&tp->pdev->dev,
14729 tg3_flag(tp, WOL_ENABLE));
14730 else
14731 device_set_wakeup_capable(&tp->pdev->dev, false);
14732 }
14733
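/* Read one 32-bit word from the OTP region via the APE.  The OTP
 * address is the word offset scaled by 8; the read command is then
 * polled for completion for up to 1ms.
 */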
14734 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14735 {
14736 int i, err;
14737 u32 val2, off = offset * 8;
14738
14739 err = tg3_nvram_lock(tp);
14740 if (err)
14741 return err;
14742
14743 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14744 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14745 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14746 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14747 udelay(10);
14748
14749 for (i = 0; i < 100; i++) {
14750 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14751 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14752 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14753 break;
14754 }
14755 udelay(10);
14756 }
14757
14758 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14759
14760 tg3_nvram_unlock(tp);
14761 if (val2 & APE_OTP_STATUS_CMD_DONE)
14762 return 0;
14763
14764 return -EBUSY;
14765 }
14766
14767 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14768 {
14769 int i;
14770 u32 val;
14771
14772 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14773 tw32(OTP_CTRL, cmd);
14774
14775 /* Wait for up to 1 ms for command to execute. */
14776 for (i = 0; i < 100; i++) {
14777 val = tr32(OTP_STATUS);
14778 if (val & OTP_STATUS_CMD_DONE)
14779 break;
14780 udelay(10);
14781 }
14782
14783 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14784 }
14785
14786 /* Read the gphy configuration from the OTP region of the chip. The gphy
14787 * configuration is a 32-bit value that straddles the alignment boundary.
14788 * We do two 32-bit reads and then shift and merge the results.
14789 */
14790 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14791 {
14792 u32 bhalf_otp, thalf_otp;
14793
14794 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14795
14796 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14797 return 0;
14798
14799 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14800
14801 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14802 return 0;
14803
14804 thalf_otp = tr32(OTP_READ_DATA);
14805
14806 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14807
14808 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14809 return 0;
14810
14811 bhalf_otp = tr32(OTP_READ_DATA);
14812
14813 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14814 }
14815
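/* Seed the link configuration with a full autoneg advertisement,
 * trimmed down for 10/100-only PHYs and for serdes (fibre) devices.
 */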
14816 static void tg3_phy_init_link_config(struct tg3 *tp)
14817 {
14818 u32 adv = ADVERTISED_Autoneg;
14819
14820 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14821 adv |= ADVERTISED_1000baseT_Half |
14822 ADVERTISED_1000baseT_Full;
14823
14824 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14825 adv |= ADVERTISED_100baseT_Half |
14826 ADVERTISED_100baseT_Full |
14827 ADVERTISED_10baseT_Half |
14828 ADVERTISED_10baseT_Full |
14829 ADVERTISED_TP;
14830 else
14831 adv |= ADVERTISED_FIBRE;
14832
14833 tp->link_config.advertising = adv;
14834 tp->link_config.speed = SPEED_UNKNOWN;
14835 tp->link_config.duplex = DUPLEX_UNKNOWN;
14836 tp->link_config.autoneg = AUTONEG_ENABLE;
14837 tp->link_config.active_speed = SPEED_UNKNOWN;
14838 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14839
14840 tp->old_link = -1;
14841 }
14842
14843 static int tg3_phy_probe(struct tg3 *tp)
14844 {
14845 u32 hw_phy_id_1, hw_phy_id_2;
14846 u32 hw_phy_id, hw_phy_id_masked;
14847 int err;
14848
14849 /* flow control autonegotiation is default behavior */
14850 tg3_flag_set(tp, PAUSE_AUTONEG);
14851 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14852
14853 if (tg3_flag(tp, ENABLE_APE)) {
14854 switch (tp->pci_fn) {
14855 case 0:
14856 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14857 break;
14858 case 1:
14859 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14860 break;
14861 case 2:
14862 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14863 break;
14864 case 3:
14865 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14866 break;
14867 }
14868 }
14869
14870 if (!tg3_flag(tp, ENABLE_ASF) &&
14871 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14872 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14873 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14874 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14875
14876 if (tg3_flag(tp, USE_PHYLIB))
14877 return tg3_phy_init(tp);
14878
14879 /* Reading the PHY ID register can conflict with ASF
14880 * firmware access to the PHY hardware.
14881 */
14882 err = 0;
14883 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14884 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14885 } else {
14886 /* Now read the physical PHY_ID from the chip and verify
14887 * that it is sane. If it doesn't look good, we fall back
14888 		 * first to the PHY_ID found in the eeprom area, and failing
14889 		 * that to the hard-coded subsystem-ID table.
14890 */
14891 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14892 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14893
14894 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14895 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14896 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14897
14898 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14899 }
14900
14901 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14902 tp->phy_id = hw_phy_id;
14903 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14904 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14905 else
14906 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14907 } else {
14908 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14909 /* Do nothing, phy ID already set up in
14910 * tg3_get_eeprom_hw_cfg().
14911 */
14912 } else {
14913 struct subsys_tbl_ent *p;
14914
14915 /* No eeprom signature? Try the hardcoded
14916 * subsys device table.
14917 */
14918 p = tg3_lookup_by_subsys(tp);
14919 if (p) {
14920 tp->phy_id = p->phy_id;
14921 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14922 				/* So far we have seen the IDs 0xbc050cd0,
14923 				 * 0xbc050f80 and 0xbc050c30 on devices
14924 				 * connected to a BCM4785, and there are
14925 				 * probably more. For now, just assume that
14926 				 * the phy is supported when it is connected
14927 				 * to an SSB core.
14928 				 */
14929 return -ENODEV;
14930 }
14931
14932 if (!tp->phy_id ||
14933 tp->phy_id == TG3_PHY_ID_BCM8002)
14934 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14935 }
14936 }
14937
14938 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14939 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14940 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14941 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14942 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14943 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14944 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14945 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14946 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14947 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14948
14949 tg3_phy_init_link_config(tp);
14950
14951 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
14952 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14953 !tg3_flag(tp, ENABLE_APE) &&
14954 !tg3_flag(tp, ENABLE_ASF)) {
14955 u32 bmsr, dummy;
14956
14957 tg3_readphy(tp, MII_BMSR, &bmsr);
14958 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14959 (bmsr & BMSR_LSTATUS))
14960 goto skip_phy_reset;
14961
14962 err = tg3_phy_reset(tp);
14963 if (err)
14964 return err;
14965
14966 tg3_phy_set_wirespeed(tp);
14967
14968 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14969 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14970 tp->link_config.flowctrl);
14971
14972 tg3_writephy(tp, MII_BMCR,
14973 BMCR_ANENABLE | BMCR_ANRESTART);
14974 }
14975 }
14976
14977 skip_phy_reset:
14978 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14979 err = tg3_init_5401phy_dsp(tp);
14980 if (err)
14981 return err;
14982
14983 err = tg3_init_5401phy_dsp(tp);
14984 }
14985
14986 return err;
14987 }
14988
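/* Walk the PCI VPD read-only section.  On Dell boards (MFR_ID
 * "1028") the VENDOR0 keyword carries a firmware version string;
 * the PARTNO keyword supplies the board part number, with per-ASIC
 * defaults used when no VPD is present.
 */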
14989 static void tg3_read_vpd(struct tg3 *tp)
14990 {
14991 u8 *vpd_data;
14992 unsigned int block_end, rosize, len;
14993 u32 vpdlen;
14994 int j, i = 0;
14995
14996 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14997 if (!vpd_data)
14998 goto out_no_vpd;
14999
15000 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15001 if (i < 0)
15002 goto out_not_found;
15003
15004 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15005 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15006 i += PCI_VPD_LRDT_TAG_SIZE;
15007
15008 if (block_end > vpdlen)
15009 goto out_not_found;
15010
15011 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15012 PCI_VPD_RO_KEYWORD_MFR_ID);
15013 if (j > 0) {
15014 len = pci_vpd_info_field_size(&vpd_data[j]);
15015
15016 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15017 if (j + len > block_end || len != 4 ||
15018 memcmp(&vpd_data[j], "1028", 4))
15019 goto partno;
15020
15021 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15022 PCI_VPD_RO_KEYWORD_VENDOR0);
15023 if (j < 0)
15024 goto partno;
15025
15026 len = pci_vpd_info_field_size(&vpd_data[j]);
15027
15028 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15029 if (j + len > block_end)
15030 goto partno;
15031
15032 if (len >= sizeof(tp->fw_ver))
15033 len = sizeof(tp->fw_ver) - 1;
15034 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15035 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15036 &vpd_data[j]);
15037 }
15038
15039 partno:
15040 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15041 PCI_VPD_RO_KEYWORD_PARTNO);
15042 if (i < 0)
15043 goto out_not_found;
15044
15045 len = pci_vpd_info_field_size(&vpd_data[i]);
15046
15047 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15048 if (len > TG3_BPN_SIZE ||
15049 (len + i) > vpdlen)
15050 goto out_not_found;
15051
15052 memcpy(tp->board_part_number, &vpd_data[i], len);
15053
15054 out_not_found:
15055 kfree(vpd_data);
15056 if (tp->board_part_number[0])
15057 return;
15058
15059 out_no_vpd:
15060 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15061 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15062 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15063 strcpy(tp->board_part_number, "BCM5717");
15064 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15065 strcpy(tp->board_part_number, "BCM5718");
15066 else
15067 goto nomatch;
15068 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15069 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15070 strcpy(tp->board_part_number, "BCM57780");
15071 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15072 strcpy(tp->board_part_number, "BCM57760");
15073 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15074 strcpy(tp->board_part_number, "BCM57790");
15075 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15076 strcpy(tp->board_part_number, "BCM57788");
15077 else
15078 goto nomatch;
15079 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15080 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15081 strcpy(tp->board_part_number, "BCM57761");
15082 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15083 strcpy(tp->board_part_number, "BCM57765");
15084 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15085 strcpy(tp->board_part_number, "BCM57781");
15086 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15087 strcpy(tp->board_part_number, "BCM57785");
15088 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15089 strcpy(tp->board_part_number, "BCM57791");
15090 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15091 strcpy(tp->board_part_number, "BCM57795");
15092 else
15093 goto nomatch;
15094 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15095 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15096 strcpy(tp->board_part_number, "BCM57762");
15097 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15098 strcpy(tp->board_part_number, "BCM57766");
15099 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15100 strcpy(tp->board_part_number, "BCM57782");
15101 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15102 strcpy(tp->board_part_number, "BCM57786");
15103 else
15104 goto nomatch;
15105 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15106 strcpy(tp->board_part_number, "BCM95906");
15107 } else {
15108 nomatch:
15109 strcpy(tp->board_part_number, "none");
15110 }
15111 }
15112
15113 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15114 {
15115 u32 val;
15116
15117 if (tg3_nvram_read(tp, offset, &val) ||
15118 (val & 0xfc000000) != 0x0c000000 ||
15119 tg3_nvram_read(tp, offset + 4, &val) ||
15120 val != 0)
15121 return 0;
15122
15123 return 1;
15124 }
15125
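/* Extract the bootcode version.  Newer images embed a 16-byte
 * version string whose NVRAM location is found via the image header;
 * older images only provide major/minor numbers in the NVM directory.
 */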
15126 static void tg3_read_bc_ver(struct tg3 *tp)
15127 {
15128 u32 val, offset, start, ver_offset;
15129 int i, dst_off;
15130 bool newver = false;
15131
15132 if (tg3_nvram_read(tp, 0xc, &offset) ||
15133 tg3_nvram_read(tp, 0x4, &start))
15134 return;
15135
15136 offset = tg3_nvram_logical_addr(tp, offset);
15137
15138 if (tg3_nvram_read(tp, offset, &val))
15139 return;
15140
15141 if ((val & 0xfc000000) == 0x0c000000) {
15142 if (tg3_nvram_read(tp, offset + 4, &val))
15143 return;
15144
15145 if (val == 0)
15146 newver = true;
15147 }
15148
15149 dst_off = strlen(tp->fw_ver);
15150
15151 if (newver) {
15152 if (TG3_VER_SIZE - dst_off < 16 ||
15153 tg3_nvram_read(tp, offset + 8, &ver_offset))
15154 return;
15155
15156 offset = offset + ver_offset - start;
15157 for (i = 0; i < 16; i += 4) {
15158 __be32 v;
15159 if (tg3_nvram_read_be32(tp, offset + i, &v))
15160 return;
15161
15162 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15163 }
15164 } else {
15165 u32 major, minor;
15166
15167 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15168 return;
15169
15170 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15171 TG3_NVM_BCVER_MAJSFT;
15172 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15173 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15174 "v%d.%02d", major, minor);
15175 }
15176 }
15177
15178 static void tg3_read_hwsb_ver(struct tg3 *tp)
15179 {
15180 u32 val, major, minor;
15181
15182 /* Use native endian representation */
15183 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15184 return;
15185
15186 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15187 TG3_NVM_HWSB_CFG1_MAJSFT;
15188 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15189 TG3_NVM_HWSB_CFG1_MINSFT;
15190
15191 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15192 }
15193
15194 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15195 {
15196 u32 offset, major, minor, build;
15197
15198 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15199
15200 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15201 return;
15202
15203 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15204 case TG3_EEPROM_SB_REVISION_0:
15205 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15206 break;
15207 case TG3_EEPROM_SB_REVISION_2:
15208 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15209 break;
15210 case TG3_EEPROM_SB_REVISION_3:
15211 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15212 break;
15213 case TG3_EEPROM_SB_REVISION_4:
15214 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15215 break;
15216 case TG3_EEPROM_SB_REVISION_5:
15217 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15218 break;
15219 case TG3_EEPROM_SB_REVISION_6:
15220 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15221 break;
15222 default:
15223 return;
15224 }
15225
15226 if (tg3_nvram_read(tp, offset, &val))
15227 return;
15228
15229 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15230 TG3_EEPROM_SB_EDH_BLD_SHFT;
15231 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15232 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15233 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15234
15235 if (minor > 99 || build > 26)
15236 return;
15237
15238 offset = strlen(tp->fw_ver);
15239 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15240 " v%d.%02d", major, minor);
15241
15242 if (build > 0) {
15243 offset = strlen(tp->fw_ver);
15244 if (offset < TG3_VER_SIZE - 1)
15245 tp->fw_ver[offset] = 'a' + build - 1;
15246 }
15247 }
15248
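/* Locate the ASF management firmware image by scanning the NVM
 * directory for an ASFINI entry, validate it, and append its
 * version string to tp->fw_ver.
 */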
15249 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15250 {
15251 u32 val, offset, start;
15252 int i, vlen;
15253
15254 for (offset = TG3_NVM_DIR_START;
15255 offset < TG3_NVM_DIR_END;
15256 offset += TG3_NVM_DIRENT_SIZE) {
15257 if (tg3_nvram_read(tp, offset, &val))
15258 return;
15259
15260 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15261 break;
15262 }
15263
15264 if (offset == TG3_NVM_DIR_END)
15265 return;
15266
15267 if (!tg3_flag(tp, 5705_PLUS))
15268 start = 0x08000000;
15269 else if (tg3_nvram_read(tp, offset - 4, &start))
15270 return;
15271
15272 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15273 !tg3_fw_img_is_valid(tp, offset) ||
15274 tg3_nvram_read(tp, offset + 8, &val))
15275 return;
15276
15277 offset += val - start;
15278
15279 vlen = strlen(tp->fw_ver);
15280
15281 tp->fw_ver[vlen++] = ',';
15282 tp->fw_ver[vlen++] = ' ';
15283
15284 for (i = 0; i < 4; i++) {
15285 __be32 v;
15286 if (tg3_nvram_read_be32(tp, offset, &v))
15287 return;
15288
15289 offset += sizeof(v);
15290
15291 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15292 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15293 break;
15294 }
15295
15296 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15297 vlen += sizeof(v);
15298 }
15299 }
15300
15301 static void tg3_probe_ncsi(struct tg3 *tp)
15302 {
15303 u32 apedata;
15304
15305 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15306 if (apedata != APE_SEG_SIG_MAGIC)
15307 return;
15308
15309 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15310 if (!(apedata & APE_FW_STATUS_READY))
15311 return;
15312
15313 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15314 tg3_flag_set(tp, APE_HAS_NCSI);
15315 }
15316
15317 static void tg3_read_dash_ver(struct tg3 *tp)
15318 {
15319 int vlen;
15320 u32 apedata;
15321 char *fwtype;
15322
15323 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15324
15325 if (tg3_flag(tp, APE_HAS_NCSI))
15326 fwtype = "NCSI";
15327 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15328 fwtype = "SMASH";
15329 else
15330 fwtype = "DASH";
15331
15332 vlen = strlen(tp->fw_ver);
15333
15334 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15335 fwtype,
15336 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15337 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15338 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15339 (apedata & APE_FW_VERSION_BLDMSK));
15340 }
15341
15342 static void tg3_read_otp_ver(struct tg3 *tp)
15343 {
15344 u32 val, val2;
15345
15346 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15347 return;
15348
15349 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15350 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15351 TG3_OTP_MAGIC0_VALID(val)) {
15352 u64 val64 = (u64) val << 32 | val2;
15353 u32 ver = 0;
15354 int i, vlen;
15355
15356 for (i = 0; i < 7; i++) {
15357 if ((val64 & 0xff) == 0)
15358 break;
15359 ver = val64 & 0xff;
15360 val64 >>= 8;
15361 }
15362 vlen = strlen(tp->fw_ver);
15363 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15364 }
15365 }
15366
15367 static void tg3_read_fw_ver(struct tg3 *tp)
15368 {
15369 u32 val;
15370 bool vpd_vers = false;
15371
15372 if (tp->fw_ver[0] != 0)
15373 vpd_vers = true;
15374
15375 if (tg3_flag(tp, NO_NVRAM)) {
15376 strcat(tp->fw_ver, "sb");
15377 tg3_read_otp_ver(tp);
15378 return;
15379 }
15380
15381 if (tg3_nvram_read(tp, 0, &val))
15382 return;
15383
15384 if (val == TG3_EEPROM_MAGIC)
15385 tg3_read_bc_ver(tp);
15386 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15387 tg3_read_sb_ver(tp, val);
15388 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15389 tg3_read_hwsb_ver(tp);
15390
15391 if (tg3_flag(tp, ENABLE_ASF)) {
15392 if (tg3_flag(tp, ENABLE_APE)) {
15393 tg3_probe_ncsi(tp);
15394 if (!vpd_vers)
15395 tg3_read_dash_ver(tp);
15396 } else if (!vpd_vers) {
15397 tg3_read_mgmtfw_ver(tp);
15398 }
15399 }
15400
15401 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15402 }
15403
15404 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15405 {
15406 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15407 return TG3_RX_RET_MAX_SIZE_5717;
15408 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15409 return TG3_RX_RET_MAX_SIZE_5700;
15410 else
15411 return TG3_RX_RET_MAX_SIZE_5705;
15412 }
15413
15414 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15415 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15416 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15417 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15418 { },
15419 };
15420
15421 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15422 {
15423 struct pci_dev *peer;
15424 unsigned int func, devnr = tp->pdev->devfn & ~7;
15425
15426 for (func = 0; func < 8; func++) {
15427 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15428 if (peer && peer != tp->pdev)
15429 break;
15430 pci_dev_put(peer);
15431 }
15432 	/* The 5704 can be configured in single-port mode; set peer
15433 	 * to tp->pdev in that case.
15434 */
15435 if (!peer) {
15436 peer = tp->pdev;
15437 return peer;
15438 }
15439
15440 /*
15441 * We don't need to keep the refcount elevated; there's no way
15442 	 * to remove one half of this device without removing the other.
15443 */
15444 pci_dev_put(peer);
15445
15446 return peer;
15447 }
15448
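/* Derive the chip revision, either from MISC_HOST_CTRL or, on newer
 * parts, from a product-ID register, and then set the cumulative
 * family flags.  Note the flags nest: 5717_PLUS implies 57765_PLUS,
 * which implies 5755_PLUS, which implies 5750_PLUS, which implies
 * 5705_PLUS.
 */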
15449 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15450 {
15451 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15452 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15453 u32 reg;
15454
15455 /* All devices that use the alternate
15456 * ASIC REV location have a CPMU.
15457 */
15458 tg3_flag_set(tp, CPMU_PRESENT);
15459
15460 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15461 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15462 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15463 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15464 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15465 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15466 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15467 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15468 reg = TG3PCI_GEN2_PRODID_ASICREV;
15469 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15470 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15471 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15472 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15473 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15474 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15475 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15476 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15477 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15478 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15479 reg = TG3PCI_GEN15_PRODID_ASICREV;
15480 else
15481 reg = TG3PCI_PRODID_ASICREV;
15482
15483 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15484 }
15485
15486 /* Wrong chip ID in 5752 A0. This code can be removed later
15487 * as A0 is not in production.
15488 */
15489 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15490 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15491
15492 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15493 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15494
15495 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15496 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15497 tg3_asic_rev(tp) == ASIC_REV_5720)
15498 tg3_flag_set(tp, 5717_PLUS);
15499
15500 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15501 tg3_asic_rev(tp) == ASIC_REV_57766)
15502 tg3_flag_set(tp, 57765_CLASS);
15503
15504 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15505 tg3_asic_rev(tp) == ASIC_REV_5762)
15506 tg3_flag_set(tp, 57765_PLUS);
15507
15508 /* Intentionally exclude ASIC_REV_5906 */
15509 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15510 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15511 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15512 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15513 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15514 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15515 tg3_flag(tp, 57765_PLUS))
15516 tg3_flag_set(tp, 5755_PLUS);
15517
15518 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15519 tg3_asic_rev(tp) == ASIC_REV_5714)
15520 tg3_flag_set(tp, 5780_CLASS);
15521
15522 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15523 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15524 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15525 tg3_flag(tp, 5755_PLUS) ||
15526 tg3_flag(tp, 5780_CLASS))
15527 tg3_flag_set(tp, 5750_PLUS);
15528
15529 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15530 tg3_flag(tp, 5750_PLUS))
15531 tg3_flag_set(tp, 5705_PLUS);
15532 }
15533
15534 static bool tg3_10_100_only_device(struct tg3 *tp,
15535 const struct pci_device_id *ent)
15536 {
15537 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15538
15539 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15540 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15541 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15542 return true;
15543
15544 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15545 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15546 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15547 return true;
15548 } else {
15549 return true;
15550 }
15551 }
15552
15553 return false;
15554 }
15555
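/* Gather chip revision, bus type, and the long tail of per-chip
 * bug workarounds at probe time. This sets tg3 flags and the
 * register-access method pointers, and must settle the PCI-X
 * workaround decision before the first fast-path MMIO access.
 */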
15556 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15557 {
15558 u32 misc_ctrl_reg;
15559 u32 pci_state_reg, grc_misc_cfg;
15560 u32 val;
15561 u16 pci_cmd;
15562 int err;
15563
15564 /* Force memory write invalidate off. If we leave it on,
15565 * then on 5700_BX chips we have to enable a workaround.
15566 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15567	 * to match the cacheline size.  The Broadcom driver has this
15568	 * workaround but turns MWI off all the time, so it never uses
15569 * it. This seems to suggest that the workaround is insufficient.
15570 */
15571 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15572 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15573 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15574
15575 /* Important! -- Make sure register accesses are byteswapped
15576 * correctly. Also, for those chips that require it, make
15577 * sure that indirect register accesses are enabled before
15578 * the first operation.
15579 */
15580 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15581 &misc_ctrl_reg);
15582 tp->misc_host_ctrl |= (misc_ctrl_reg &
15583 MISC_HOST_CTRL_CHIPREV);
15584 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15585 tp->misc_host_ctrl);
15586
15587 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15588
15589 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15590 * we need to disable memory and use config. cycles
15591 * only to access all registers. The 5702/03 chips
15592 * can mistakenly decode the special cycles from the
15593 * ICH chipsets as memory write cycles, causing corruption
15594 * of register and memory space. Only certain ICH bridges
15595 * will drive special cycles with non-zero data during the
15596 * address phase which can fall within the 5703's address
15597 * range. This is not an ICH bug as the PCI spec allows
15598	 * a non-zero address during special cycles.  However, only
15599 * these ICH bridges are known to drive non-zero addresses
15600 * during special cycles.
15601 *
15602 * Since special cycles do not cross PCI bridges, we only
15603 * enable this workaround if the 5703 is on the secondary
15604 * bus of these ICH bridges.
15605 */
15606 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15607 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15608 static struct tg3_dev_id {
15609 u32 vendor;
15610 u32 device;
15611 u32 rev;
15612 } ich_chipsets[] = {
15613 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15614 PCI_ANY_ID },
15615 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15616 PCI_ANY_ID },
15617 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15618 0xa },
15619 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15620 PCI_ANY_ID },
15621 { },
15622 };
15623 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15624 struct pci_dev *bridge = NULL;
15625
15626 while (pci_id->vendor != 0) {
15627 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15628 bridge);
15629 if (!bridge) {
15630 pci_id++;
15631 continue;
15632 }
15633 if (pci_id->rev != PCI_ANY_ID) {
15634 if (bridge->revision > pci_id->rev)
15635 continue;
15636 }
15637 if (bridge->subordinate &&
15638 (bridge->subordinate->number ==
15639 tp->pdev->bus->number)) {
15640 tg3_flag_set(tp, ICH_WORKAROUND);
15641 pci_dev_put(bridge);
15642 break;
15643 }
15644 }
15645 }
15646
15647 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15648 static struct tg3_dev_id {
15649 u32 vendor;
15650 u32 device;
15651 } bridge_chipsets[] = {
15652 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15653 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15654 { },
15655 };
15656 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15657 struct pci_dev *bridge = NULL;
15658
15659 while (pci_id->vendor != 0) {
15660 bridge = pci_get_device(pci_id->vendor,
15661 pci_id->device,
15662 bridge);
15663 if (!bridge) {
15664 pci_id++;
15665 continue;
15666 }
15667 if (bridge->subordinate &&
15668 (bridge->subordinate->number <=
15669 tp->pdev->bus->number) &&
15670 (bridge->subordinate->busn_res.end >=
15671 tp->pdev->bus->number)) {
15672 tg3_flag_set(tp, 5701_DMA_BUG);
15673 pci_dev_put(bridge);
15674 break;
15675 }
15676 }
15677 }
15678
15679 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15680	 * DMA addresses > 40-bit.  This bridge may have additional
15681 * 57xx devices behind it in some 4-port NIC designs for example.
15682 * Any tg3 device found behind the bridge will also need the 40-bit
15683 * DMA workaround.
15684 */
15685 if (tg3_flag(tp, 5780_CLASS)) {
15686 tg3_flag_set(tp, 40BIT_DMA_BUG);
15687 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15688 } else {
15689 struct pci_dev *bridge = NULL;
15690
15691 do {
15692 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15693 PCI_DEVICE_ID_SERVERWORKS_EPB,
15694 bridge);
15695 if (bridge && bridge->subordinate &&
15696 (bridge->subordinate->number <=
15697 tp->pdev->bus->number) &&
15698 (bridge->subordinate->busn_res.end >=
15699 tp->pdev->bus->number)) {
15700 tg3_flag_set(tp, 40BIT_DMA_BUG);
15701 pci_dev_put(bridge);
15702 break;
15703 }
15704 } while (bridge);
15705 }
15706
15707 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15708 tg3_asic_rev(tp) == ASIC_REV_5714)
15709 tp->pdev_peer = tg3_find_peer(tp);
15710
15711 /* Determine TSO capabilities */
15712 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15713 ; /* Do nothing. HW bug. */
15714 else if (tg3_flag(tp, 57765_PLUS))
15715 tg3_flag_set(tp, HW_TSO_3);
15716 else if (tg3_flag(tp, 5755_PLUS) ||
15717 tg3_asic_rev(tp) == ASIC_REV_5906)
15718 tg3_flag_set(tp, HW_TSO_2);
15719 else if (tg3_flag(tp, 5750_PLUS)) {
15720 tg3_flag_set(tp, HW_TSO_1);
15721 tg3_flag_set(tp, TSO_BUG);
15722 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15723 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15724 tg3_flag_clear(tp, TSO_BUG);
15725 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15726 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15727 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15728 tg3_flag_set(tp, FW_TSO);
15729 tg3_flag_set(tp, TSO_BUG);
15730 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15731 tp->fw_needed = FIRMWARE_TG3TSO5;
15732 else
15733 tp->fw_needed = FIRMWARE_TG3TSO;
15734 }
15735
15736 /* Selectively allow TSO based on operating conditions */
15737 if (tg3_flag(tp, HW_TSO_1) ||
15738 tg3_flag(tp, HW_TSO_2) ||
15739 tg3_flag(tp, HW_TSO_3) ||
15740 tg3_flag(tp, FW_TSO)) {
15741 /* For firmware TSO, assume ASF is disabled.
15742 * We'll disable TSO later if we discover ASF
15743 * is enabled in tg3_get_eeprom_hw_cfg().
15744 */
15745 tg3_flag_set(tp, TSO_CAPABLE);
15746 } else {
15747 tg3_flag_clear(tp, TSO_CAPABLE);
15748 tg3_flag_clear(tp, TSO_BUG);
15749 tp->fw_needed = NULL;
15750 }
15751
15752 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15753 tp->fw_needed = FIRMWARE_TG3;
15754
15755 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15756 tp->fw_needed = FIRMWARE_TG357766;
15757
15758 tp->irq_max = 1;
15759
15760 if (tg3_flag(tp, 5750_PLUS)) {
15761 tg3_flag_set(tp, SUPPORT_MSI);
15762 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15763 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15764 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15765 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15766 tp->pdev_peer == tp->pdev))
15767 tg3_flag_clear(tp, SUPPORT_MSI);
15768
15769 if (tg3_flag(tp, 5755_PLUS) ||
15770 tg3_asic_rev(tp) == ASIC_REV_5906) {
15771 tg3_flag_set(tp, 1SHOT_MSI);
15772 }
15773
15774 if (tg3_flag(tp, 57765_PLUS)) {
15775 tg3_flag_set(tp, SUPPORT_MSIX);
15776 tp->irq_max = TG3_IRQ_MAX_VECS;
15777 }
15778 }
15779
15780 tp->txq_max = 1;
15781 tp->rxq_max = 1;
15782 if (tp->irq_max > 1) {
15783 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15784 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15785
15786 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15787 tg3_asic_rev(tp) == ASIC_REV_5720)
15788 tp->txq_max = tp->irq_max - 1;
15789 }
15790
15791 if (tg3_flag(tp, 5755_PLUS) ||
15792 tg3_asic_rev(tp) == ASIC_REV_5906)
15793 tg3_flag_set(tp, SHORT_DMA_BUG);
15794
15795 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15796 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15797
15798 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15799 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15800 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15801 tg3_asic_rev(tp) == ASIC_REV_5762)
15802 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15803
15804 if (tg3_flag(tp, 57765_PLUS) &&
15805 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15806 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15807
15808 if (!tg3_flag(tp, 5705_PLUS) ||
15809 tg3_flag(tp, 5780_CLASS) ||
15810 tg3_flag(tp, USE_JUMBO_BDFLAG))
15811 tg3_flag_set(tp, JUMBO_CAPABLE);
15812
15813 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15814 &pci_state_reg);
15815
15816 if (pci_is_pcie(tp->pdev)) {
15817 u16 lnkctl;
15818
15819 tg3_flag_set(tp, PCI_EXPRESS);
15820
15821 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15822 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15823 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15824 tg3_flag_clear(tp, HW_TSO_2);
15825 tg3_flag_clear(tp, TSO_CAPABLE);
15826 }
15827 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15828 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15829 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15830 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15831 tg3_flag_set(tp, CLKREQ_BUG);
15832 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15833 tg3_flag_set(tp, L1PLLPD_EN);
15834 }
15835 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15836 /* BCM5785 devices are effectively PCIe devices, and should
15837 * follow PCIe codepaths, but do not have a PCIe capabilities
15838 * section.
15839 */
15840 tg3_flag_set(tp, PCI_EXPRESS);
15841 } else if (!tg3_flag(tp, 5705_PLUS) ||
15842 tg3_flag(tp, 5780_CLASS)) {
15843 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15844 if (!tp->pcix_cap) {
15845 dev_err(&tp->pdev->dev,
15846 "Cannot find PCI-X capability, aborting\n");
15847 return -EIO;
15848 }
15849
15850 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15851 tg3_flag_set(tp, PCIX_MODE);
15852 }
15853
15854 /* If we have an AMD 762 or VIA K8T800 chipset, write
15855 * reordering to the mailbox registers done by the host
15856 * controller can cause major troubles. We read back from
15857 * every mailbox register write to force the writes to be
15858 * posted to the chip in order.
15859 */
15860 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15861 !tg3_flag(tp, PCI_EXPRESS))
15862 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15863
15864 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15865 &tp->pci_cacheline_sz);
15866 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15867 &tp->pci_lat_timer);
15868 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15869 tp->pci_lat_timer < 64) {
15870 tp->pci_lat_timer = 64;
15871 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15872 tp->pci_lat_timer);
15873 }
15874
15875 /* Important! -- It is critical that the PCI-X hw workaround
15876 * situation is decided before the first MMIO register access.
15877 */
15878 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15879 /* 5700 BX chips need to have their TX producer index
15880 * mailboxes written twice to workaround a bug.
15881 */
15882 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15883
15884 /* If we are in PCI-X mode, enable register write workaround.
15885 *
15886 * The workaround is to use indirect register accesses
15887	 * for all chip writes except those to mailbox registers.
15888 */
15889 if (tg3_flag(tp, PCIX_MODE)) {
15890 u32 pm_reg;
15891
15892 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15893
15894	 /* The chip can have its power management PCI config
15895 * space registers clobbered due to this bug.
15896 * So explicitly force the chip into D0 here.
15897 */
15898 pci_read_config_dword(tp->pdev,
15899 tp->pm_cap + PCI_PM_CTRL,
15900 &pm_reg);
15901 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15902 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15903 pci_write_config_dword(tp->pdev,
15904 tp->pm_cap + PCI_PM_CTRL,
15905 pm_reg);
15906
15907 /* Also, force SERR#/PERR# in PCI command. */
15908 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15909 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15910 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15911 }
15912 }
15913
15914 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15915 tg3_flag_set(tp, PCI_HIGH_SPEED);
15916 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15917 tg3_flag_set(tp, PCI_32BIT);
15918
15919 /* Chip-specific fixup from Broadcom driver */
15920 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15921 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15922 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15923 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15924 }
15925
15926 /* Default fast path register access methods */
15927 tp->read32 = tg3_read32;
15928 tp->write32 = tg3_write32;
15929 tp->read32_mbox = tg3_read32;
15930 tp->write32_mbox = tg3_write32;
15931 tp->write32_tx_mbox = tg3_write32;
15932 tp->write32_rx_mbox = tg3_write32;
15933
15934 /* Various workaround register access methods */
15935 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15936 tp->write32 = tg3_write_indirect_reg32;
15937 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15938 (tg3_flag(tp, PCI_EXPRESS) &&
15939 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15940 /*
15941 * Back to back register writes can cause problems on these
15942	 * chips; the workaround is to read back all reg writes
15943 * except those to mailbox regs.
15944 *
15945 * See tg3_write_indirect_reg32().
15946 */
15947 tp->write32 = tg3_write_flush_reg32;
15948 }
15949
15950 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15951 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15952 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15953 tp->write32_rx_mbox = tg3_write_flush_reg32;
15954 }
15955
15956 if (tg3_flag(tp, ICH_WORKAROUND)) {
15957 tp->read32 = tg3_read_indirect_reg32;
15958 tp->write32 = tg3_write_indirect_reg32;
15959 tp->read32_mbox = tg3_read_indirect_mbox;
15960 tp->write32_mbox = tg3_write_indirect_mbox;
15961 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15962 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15963
15964 iounmap(tp->regs);
15965 tp->regs = NULL;
15966
15967 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15968 pci_cmd &= ~PCI_COMMAND_MEMORY;
15969 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15970 }
15971 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15972 tp->read32_mbox = tg3_read32_mbox_5906;
15973 tp->write32_mbox = tg3_write32_mbox_5906;
15974 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15975 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15976 }
15977
15978 if (tp->write32 == tg3_write_indirect_reg32 ||
15979 (tg3_flag(tp, PCIX_MODE) &&
15980 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15981 tg3_asic_rev(tp) == ASIC_REV_5701)))
15982 tg3_flag_set(tp, SRAM_USE_CONFIG);
15983
15984 /* The memory arbiter has to be enabled in order for SRAM accesses
15985 * to succeed. Normally on powerup the tg3 chip firmware will make
15986 * sure it is enabled, but other entities such as system netboot
15987 * code might disable it.
15988 */
15989 val = tr32(MEMARB_MODE);
15990 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15991
15992 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15993 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15994 tg3_flag(tp, 5780_CLASS)) {
15995 if (tg3_flag(tp, PCIX_MODE)) {
15996 pci_read_config_dword(tp->pdev,
15997 tp->pcix_cap + PCI_X_STATUS,
15998 &val);
15999 tp->pci_fn = val & 0x7;
16000 }
16001 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16002 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16003 tg3_asic_rev(tp) == ASIC_REV_5720) {
16004 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16005 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16006 val = tr32(TG3_CPMU_STATUS);
16007
16008 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16009 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16010 else
16011 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16012 TG3_CPMU_STATUS_FSHFT_5719;
16013 }
16014
16015 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16016 tp->write32_tx_mbox = tg3_write_flush_reg32;
16017 tp->write32_rx_mbox = tg3_write_flush_reg32;
16018 }
16019
16020 /* Get eeprom hw config before calling tg3_set_power_state().
16021 * In particular, the TG3_FLAG_IS_NIC flag must be
16022 * determined before calling tg3_set_power_state() so that
16023 * we know whether or not to switch out of Vaux power.
16024 * When the flag is set, it means that GPIO1 is used for eeprom
16025 * write protect and also implies that it is a LOM where GPIOs
16026 * are not used to switch power.
16027 */
16028 tg3_get_eeprom_hw_cfg(tp);
16029
16030 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16031 tg3_flag_clear(tp, TSO_CAPABLE);
16032 tg3_flag_clear(tp, TSO_BUG);
16033 tp->fw_needed = NULL;
16034 }
16035
16036 if (tg3_flag(tp, ENABLE_APE)) {
16037 /* Allow reads and writes to the
16038 * APE register and memory space.
16039 */
16040 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16041 PCISTATE_ALLOW_APE_SHMEM_WR |
16042 PCISTATE_ALLOW_APE_PSPACE_WR;
16043 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16044 pci_state_reg);
16045
16046 tg3_ape_lock_init(tp);
16047 }
16048
16049 /* Set up tp->grc_local_ctrl before calling
16050 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16051 * will bring 5700's external PHY out of reset.
16052 * It is also used as eeprom write protect on LOMs.
16053 */
16054 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16055 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16056 tg3_flag(tp, EEPROM_WRITE_PROT))
16057 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16058 GRC_LCLCTRL_GPIO_OUTPUT1);
16059 /* Unused GPIO3 must be driven as output on 5752 because there
16060 * are no pull-up resistors on unused GPIO pins.
16061 */
16062 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16063 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16064
16065 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16066 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16067 tg3_flag(tp, 57765_CLASS))
16068 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16069
16070 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16071 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16072 /* Turn off the debug UART. */
16073 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16074 if (tg3_flag(tp, IS_NIC))
16075 /* Keep VMain power. */
16076 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16077 GRC_LCLCTRL_GPIO_OUTPUT0;
16078 }
16079
16080 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16081 tp->grc_local_ctrl |=
16082 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16083
16084 /* Switch out of Vaux if it is a NIC */
16085 tg3_pwrsrc_switch_to_vmain(tp);
16086
16087 /* Derive initial jumbo mode from MTU assigned in
16088 * ether_setup() via the alloc_etherdev() call
16089 */
16090 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16091 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16092
16093 /* Determine WakeOnLan speed to use. */
16094 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16095 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16096 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16097 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16098 tg3_flag_clear(tp, WOL_SPEED_100MB);
16099 } else {
16100 tg3_flag_set(tp, WOL_SPEED_100MB);
16101 }
16102
16103 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16104 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16105
16106 /* A few boards don't want Ethernet@WireSpeed phy feature */
16107 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16108 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16109 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16110 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16111 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16112 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16113 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16114
16115 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16116 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16117 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16118 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16119 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16120
16121 if (tg3_flag(tp, 5705_PLUS) &&
16122 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16123 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16124 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16125 !tg3_flag(tp, 57765_PLUS)) {
16126 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16127 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16128 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16129 tg3_asic_rev(tp) == ASIC_REV_5761) {
16130 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16131 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16132 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16133 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16134 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16135 } else
16136 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16137 }
16138
16139 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16140 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16141 tp->phy_otp = tg3_read_otp_phycfg(tp);
16142 if (tp->phy_otp == 0)
16143 tp->phy_otp = TG3_OTP_DEFAULT;
16144 }
16145
16146 if (tg3_flag(tp, CPMU_PRESENT))
16147 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16148 else
16149 tp->mi_mode = MAC_MI_MODE_BASE;
16150
16151 tp->coalesce_mode = 0;
16152 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16153 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16154 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16155
16156 /* Set these bits to enable statistics workaround. */
16157 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16158 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16159 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16160 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16161 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16162 }
16163
16164 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16165 tg3_asic_rev(tp) == ASIC_REV_57780)
16166 tg3_flag_set(tp, USE_PHYLIB);
16167
16168 err = tg3_mdio_init(tp);
16169 if (err)
16170 return err;
16171
16172 /* Initialize data/descriptor byte/word swapping. */
16173 val = tr32(GRC_MODE);
16174 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16175 tg3_asic_rev(tp) == ASIC_REV_5762)
16176 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16177 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16178 GRC_MODE_B2HRX_ENABLE |
16179 GRC_MODE_HTX2B_ENABLE |
16180 GRC_MODE_HOST_STACKUP);
16181 else
16182 val &= GRC_MODE_HOST_STACKUP;
16183
16184 tw32(GRC_MODE, val | tp->grc_mode);
16185
16186 tg3_switch_clocks(tp);
16187
16188 /* Clear this out for sanity. */
16189 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16190
16191 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16192 &pci_state_reg);
16193 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16194 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16195 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16196 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16197 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16198 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16199 void __iomem *sram_base;
16200
16201 /* Write some dummy words into the SRAM status block
16202	 * area and see if they read back correctly.  If the return
16203 * value is bad, force enable the PCIX workaround.
16204 */
16205 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16206
16207 writel(0x00000000, sram_base);
16208 writel(0x00000000, sram_base + 4);
16209 writel(0xffffffff, sram_base + 4);
16210 if (readl(sram_base) != 0x00000000)
16211 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16212 }
16213 }
16214
16215 udelay(50);
16216 tg3_nvram_init(tp);
16217
16218 /* If the device has an NVRAM, no need to load patch firmware */
16219 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16220 !tg3_flag(tp, NO_NVRAM))
16221 tp->fw_needed = NULL;
16222
16223 grc_misc_cfg = tr32(GRC_MISC_CFG);
16224 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16225
16226 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16227 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16228 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16229 tg3_flag_set(tp, IS_5788);
16230
16231 if (!tg3_flag(tp, IS_5788) &&
16232 tg3_asic_rev(tp) != ASIC_REV_5700)
16233 tg3_flag_set(tp, TAGGED_STATUS);
16234 if (tg3_flag(tp, TAGGED_STATUS)) {
16235 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16236 HOSTCC_MODE_CLRTICK_TXBD);
16237
16238 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16239 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16240 tp->misc_host_ctrl);
16241 }
16242
16243 /* Preserve the APE MAC_MODE bits */
16244 if (tg3_flag(tp, ENABLE_APE))
16245 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16246 else
16247 tp->mac_mode = 0;
16248
16249 if (tg3_10_100_only_device(tp, ent))
16250 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16251
16252 err = tg3_phy_probe(tp);
16253 if (err) {
16254 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16255 /* ... but do not return immediately ... */
16256 tg3_mdio_fini(tp);
16257 }
16258
16259 tg3_read_vpd(tp);
16260 tg3_read_fw_ver(tp);
16261
16262 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16263 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16264 } else {
16265 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16266 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16267 else
16268 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16269 }
16270
16271 /* 5700 {AX,BX} chips have a broken status block link
16272 * change bit implementation, so we must use the
16273 * status register in those cases.
16274 */
16275 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16276 tg3_flag_set(tp, USE_LINKCHG_REG);
16277 else
16278 tg3_flag_clear(tp, USE_LINKCHG_REG);
16279
16280	 /* The led_ctrl is set during tg3_phy_probe; here we might
16281 * have to force the link status polling mechanism based
16282 * upon subsystem IDs.
16283 */
16284 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16285 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16286 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16287 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16288 tg3_flag_set(tp, USE_LINKCHG_REG);
16289 }
16290
16291 /* For all SERDES we poll the MAC status register. */
16292 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16293 tg3_flag_set(tp, POLL_SERDES);
16294 else
16295 tg3_flag_clear(tp, POLL_SERDES);
16296
16297 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16298 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16299 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16300 tg3_flag(tp, PCIX_MODE)) {
16301 tp->rx_offset = NET_SKB_PAD;
16302 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16303 tp->rx_copy_thresh = ~(u16)0;
16304 #endif
16305 }
16306
16307 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16308 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16309 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16310
16311 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16312
16313 /* Increment the rx prod index on the rx std ring by at most
16314 * 8 for these chips to workaround hw errata.
16315 */
16316 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16317 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16318 tg3_asic_rev(tp) == ASIC_REV_5755)
16319 tp->rx_std_max_post = 8;
16320
16321 if (tg3_flag(tp, ASPM_WORKAROUND))
16322 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16323 PCIE_PWR_MGMT_L1_THRESH_MSK;
16324
16325 return err;
16326 }
16327
16328 #ifdef CONFIG_SPARC
16329 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16330 {
16331 struct net_device *dev = tp->dev;
16332 struct pci_dev *pdev = tp->pdev;
16333 struct device_node *dp = pci_device_to_OF_node(pdev);
16334 const unsigned char *addr;
16335 int len;
16336
16337 addr = of_get_property(dp, "local-mac-address", &len);
16338 if (addr && len == 6) {
16339 memcpy(dev->dev_addr, addr, 6);
16340 return 0;
16341 }
16342 return -ENODEV;
16343 }
16344
16345 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16346 {
16347 struct net_device *dev = tp->dev;
16348
16349 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16350 return 0;
16351 }
16352 #endif
16353
16354 static int tg3_get_device_address(struct tg3 *tp)
16355 {
16356 struct net_device *dev = tp->dev;
16357 u32 hi, lo, mac_offset;
16358 int addr_ok = 0;
16359 int err;
16360
16361 #ifdef CONFIG_SPARC
16362 if (!tg3_get_macaddr_sparc(tp))
16363 return 0;
16364 #endif
16365
16366 if (tg3_flag(tp, IS_SSB_CORE)) {
16367 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16368 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16369 return 0;
16370 }
16371
16372 mac_offset = 0x7c;
16373 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16374 tg3_flag(tp, 5780_CLASS)) {
16375 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16376 mac_offset = 0xcc;
16377 if (tg3_nvram_lock(tp))
16378 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16379 else
16380 tg3_nvram_unlock(tp);
16381 } else if (tg3_flag(tp, 5717_PLUS)) {
16382 if (tp->pci_fn & 1)
16383 mac_offset = 0xcc;
16384 if (tp->pci_fn > 1)
16385 mac_offset += 0x18c;
16386 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16387 mac_offset = 0x10;
16388
16389 /* First try to get it from MAC address mailbox. */
16390 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
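/* 0x484b is ASCII "HK"; bootcode presumably writes this signature
 * to the high mailbox word when a valid MAC address is present.
 */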
16391 if ((hi >> 16) == 0x484b) {
16392 dev->dev_addr[0] = (hi >> 8) & 0xff;
16393 dev->dev_addr[1] = (hi >> 0) & 0xff;
16394
16395 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16396 dev->dev_addr[2] = (lo >> 24) & 0xff;
16397 dev->dev_addr[3] = (lo >> 16) & 0xff;
16398 dev->dev_addr[4] = (lo >> 8) & 0xff;
16399 dev->dev_addr[5] = (lo >> 0) & 0xff;
16400
16401 /* Some old bootcode may report a 0 MAC address in SRAM */
16402 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16403 }
16404 if (!addr_ok) {
16405 /* Next, try NVRAM. */
16406 if (!tg3_flag(tp, NO_NVRAM) &&
16407 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16408 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
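/* Both words are big-endian: the first two octets of the
 * address are the last two bytes of hi, and lo carries the
 * remaining four.
 */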
16409 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16410 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16411 }
16412 /* Finally just fetch it out of the MAC control regs. */
16413 else {
16414 hi = tr32(MAC_ADDR_0_HIGH);
16415 lo = tr32(MAC_ADDR_0_LOW);
16416
16417 dev->dev_addr[5] = lo & 0xff;
16418 dev->dev_addr[4] = (lo >> 8) & 0xff;
16419 dev->dev_addr[3] = (lo >> 16) & 0xff;
16420 dev->dev_addr[2] = (lo >> 24) & 0xff;
16421 dev->dev_addr[1] = hi & 0xff;
16422 dev->dev_addr[0] = (hi >> 8) & 0xff;
16423 }
16424 }
16425
16426 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16427 #ifdef CONFIG_SPARC
16428 if (!tg3_get_default_macaddr_sparc(tp))
16429 return 0;
16430 #endif
16431 return -EINVAL;
16432 }
16433 return 0;
16434 }
16435
16436 #define BOUNDARY_SINGLE_CACHELINE 1
16437 #define BOUNDARY_MULTI_CACHELINE 2
16438
16439 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16440 {
16441 int cacheline_size;
16442 u8 byte;
16443 int goal;
16444
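/* PCI_CACHE_LINE_SIZE is in units of 32-bit dwords, hence the
 * multiply by four below; a value of zero means it was never
 * programmed, so fall back to 1024 bytes.
 */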
16445 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16446 if (byte == 0)
16447 cacheline_size = 1024;
16448 else
16449 cacheline_size = (int) byte * 4;
16450
16451 /* On 5703 and later chips, the boundary bits have no
16452 * effect.
16453 */
16454 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16455 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16456 !tg3_flag(tp, PCI_EXPRESS))
16457 goto out;
16458
16459 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16460 goal = BOUNDARY_MULTI_CACHELINE;
16461 #else
16462 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16463 goal = BOUNDARY_SINGLE_CACHELINE;
16464 #else
16465 goal = 0;
16466 #endif
16467 #endif
16468
16469 if (tg3_flag(tp, 57765_PLUS)) {
16470 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16471 goto out;
16472 }
16473
16474 if (!goal)
16475 goto out;
16476
16477 /* PCI controllers on most RISC systems tend to disconnect
16478 * when a device tries to burst across a cache-line boundary.
16479 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16480 *
16481 * Unfortunately, for PCI-E there are only limited
16482 * write-side controls for this, and thus for reads
16483 * we will still get the disconnects. We'll also waste
16484 * these PCI cycles for both read and write for chips
16485 * other than 5700 and 5701 which do not implement the
16486 * boundary bits.
16487 */
16488 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16489 switch (cacheline_size) {
16490 case 16:
16491 case 32:
16492 case 64:
16493 case 128:
16494 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16495 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16496 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16497 } else {
16498 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16499 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16500 }
16501 break;
16502
16503 case 256:
16504 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16505 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16506 break;
16507
16508 default:
16509 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16510 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16511 break;
16512 }
16513 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16514 switch (cacheline_size) {
16515 case 16:
16516 case 32:
16517 case 64:
16518 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16519 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16520 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16521 break;
16522 }
16523 /* fallthrough */
16524 case 128:
16525 default:
16526 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16527 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16528 break;
16529 }
16530 } else {
16531 switch (cacheline_size) {
16532 case 16:
16533 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16534 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16535 DMA_RWCTRL_WRITE_BNDRY_16);
16536 break;
16537 }
16538 /* fallthrough */
16539 case 32:
16540 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16541 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16542 DMA_RWCTRL_WRITE_BNDRY_32);
16543 break;
16544 }
16545 /* fallthrough */
16546 case 64:
16547 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16548 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16549 DMA_RWCTRL_WRITE_BNDRY_64);
16550 break;
16551 }
16552 /* fallthrough */
16553 case 128:
16554 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16555 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16556 DMA_RWCTRL_WRITE_BNDRY_128);
16557 break;
16558 }
16559 /* fallthrough */
16560 case 256:
16561 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16562 DMA_RWCTRL_WRITE_BNDRY_256);
16563 break;
16564 case 512:
16565 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16566 DMA_RWCTRL_WRITE_BNDRY_512);
16567 break;
16568 case 1024:
16569 default:
16570 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16571 DMA_RWCTRL_WRITE_BNDRY_1024);
16572 break;
16573 }
16574 }
16575
16576 out:
16577 return val;
16578 }
16579
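/* Run one host<->device DMA transfer of 'size' bytes through an
 * internal buffer descriptor staged in NIC SRAM, then poll the
 * matching completion FIFO for up to ~4ms (40 polls, 100us apart).
 */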
16580 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16581 int size, bool to_device)
16582 {
16583 struct tg3_internal_buffer_desc test_desc;
16584 u32 sram_dma_descs;
16585 int i, ret;
16586
16587 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16588
16589 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16590 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16591 tw32(RDMAC_STATUS, 0);
16592 tw32(WDMAC_STATUS, 0);
16593
16594 tw32(BUFMGR_MODE, 0);
16595 tw32(FTQ_RESET, 0);
16596
16597 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16598 test_desc.addr_lo = buf_dma & 0xffffffff;
16599 test_desc.nic_mbuf = 0x00002100;
16600 test_desc.len = size;
16601
16602 /*
16603	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16604 * the *second* time the tg3 driver was getting loaded after an
16605 * initial scan.
16606 *
16607 * Broadcom tells me:
16608 * ...the DMA engine is connected to the GRC block and a DMA
16609 * reset may affect the GRC block in some unpredictable way...
16610 * The behavior of resets to individual blocks has not been tested.
16611 *
16612 * Broadcom noted the GRC reset will also reset all sub-components.
16613 */
16614 if (to_device) {
16615 test_desc.cqid_sqid = (13 << 8) | 2;
16616
16617 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16618 udelay(40);
16619 } else {
16620 test_desc.cqid_sqid = (16 << 8) | 7;
16621
16622 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16623 udelay(40);
16624 }
16625 test_desc.flags = 0x00000005;
16626
16627 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16628 u32 val;
16629
16630 val = *(((u32 *)&test_desc) + i);
16631 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16632 sram_dma_descs + (i * sizeof(u32)));
16633 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16634 }
16635 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16636
16637 if (to_device)
16638 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16639 else
16640 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16641
16642 ret = -ENODEV;
16643 for (i = 0; i < 40; i++) {
16644 u32 val;
16645
16646 if (to_device)
16647 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16648 else
16649 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16650 if ((val & 0xffff) == sram_dma_descs) {
16651 ret = 0;
16652 break;
16653 }
16654
16655 udelay(100);
16656 }
16657
16658 return ret;
16659 }
16660
16661 #define TEST_BUFFER_SIZE 0x2000
16662
16663 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16664 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16665 { },
16666 };
16667
16668 static int tg3_test_dma(struct tg3 *tp)
16669 {
16670 dma_addr_t buf_dma;
16671 u32 *buf, saved_dma_rwctrl;
16672 int ret = 0;
16673
16674 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16675 &buf_dma, GFP_KERNEL);
16676 if (!buf) {
16677 ret = -ENOMEM;
16678 goto out_nofree;
16679 }
16680
16681 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16682 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16683
16684 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16685
16686 if (tg3_flag(tp, 57765_PLUS))
16687 goto out;
16688
16689 if (tg3_flag(tp, PCI_EXPRESS)) {
16690 /* DMA read watermark not used on PCIE */
16691 tp->dma_rwctrl |= 0x00180000;
16692 } else if (!tg3_flag(tp, PCIX_MODE)) {
16693 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16694 tg3_asic_rev(tp) == ASIC_REV_5750)
16695 tp->dma_rwctrl |= 0x003f0000;
16696 else
16697 tp->dma_rwctrl |= 0x003f000f;
16698 } else {
16699 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16700 tg3_asic_rev(tp) == ASIC_REV_5704) {
16701 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16702 u32 read_water = 0x7;
16703
16704 /* If the 5704 is behind the EPB bridge, we can
16705 * do the less restrictive ONE_DMA workaround for
16706 * better performance.
16707 */
16708 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16709 tg3_asic_rev(tp) == ASIC_REV_5704)
16710 tp->dma_rwctrl |= 0x8000;
16711 else if (ccval == 0x6 || ccval == 0x7)
16712 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16713
16714 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16715 read_water = 4;
16716 /* Set bit 23 to enable PCIX hw bug fix */
16717 tp->dma_rwctrl |=
16718 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16719 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16720 (1 << 23);
16721 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16722 /* 5780 always in PCIX mode */
16723 tp->dma_rwctrl |= 0x00144000;
16724 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16725 /* 5714 always in PCIX mode */
16726 tp->dma_rwctrl |= 0x00148000;
16727 } else {
16728 tp->dma_rwctrl |= 0x001b000f;
16729 }
16730 }
16731 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16732 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16733
16734 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16735 tg3_asic_rev(tp) == ASIC_REV_5704)
16736 tp->dma_rwctrl &= 0xfffffff0;
16737
16738 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16739 tg3_asic_rev(tp) == ASIC_REV_5701) {
16740 /* Remove this if it causes problems for some boards. */
16741 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16742
16743 /* On 5700/5701 chips, we need to set this bit.
16744 * Otherwise the chip will issue cacheline transactions
16745 * to streamable DMA memory with not all the byte
16746 * enables turned on. This is an error on several
16747 * RISC PCI controllers, in particular sparc64.
16748 *
16749 * On 5703/5704 chips, this bit has been reassigned
16750 * a different meaning. In particular, it is used
16751 * on those chips to enable a PCI-X workaround.
16752 */
16753 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16754 }
16755
16756 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16757
16758 #if 0
16759 /* Unneeded, already done by tg3_get_invariants. */
16760 tg3_switch_clocks(tp);
16761 #endif
16762
16763 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16764 tg3_asic_rev(tp) != ASIC_REV_5701)
16765 goto out;
16766
16767	 /* It is best to perform the DMA test with maximum write burst size
16768 * to expose the 5700/5701 write DMA bug.
16769 */
16770 saved_dma_rwctrl = tp->dma_rwctrl;
16771 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16772 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16773
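/* Pattern test: write the buffer to the chip, read it back, and
 * compare.  On corruption, drop once to a 16-byte write boundary
 * and retry; fail with -ENODEV only if corruption persists.
 */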
16774 while (1) {
16775 u32 *p = buf, i;
16776
16777 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16778 p[i] = i;
16779
16780 /* Send the buffer to the chip. */
16781 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16782 if (ret) {
16783 dev_err(&tp->pdev->dev,
16784 "%s: Buffer write failed. err = %d\n",
16785 __func__, ret);
16786 break;
16787 }
16788
16789 #if 0
16790 /* validate data reached card RAM correctly. */
16791 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16792 u32 val;
16793 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16794 if (le32_to_cpu(val) != p[i]) {
16795 dev_err(&tp->pdev->dev,
16796 "%s: Buffer corrupted on device! "
16797 "(%d != %d)\n", __func__, val, i);
16798 /* ret = -ENODEV here? */
16799 }
16800 p[i] = 0;
16801 }
16802 #endif
16803 /* Now read it back. */
16804 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16805 if (ret) {
16806 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16807 "err = %d\n", __func__, ret);
16808 break;
16809 }
16810
16811 /* Verify it. */
16812 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16813 if (p[i] == i)
16814 continue;
16815
16816 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16817 DMA_RWCTRL_WRITE_BNDRY_16) {
16818 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16819 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16820 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16821 break;
16822 } else {
16823 dev_err(&tp->pdev->dev,
16824 "%s: Buffer corrupted on read back! "
16825 "(%d != %d)\n", __func__, p[i], i);
16826 ret = -ENODEV;
16827 goto out;
16828 }
16829 }
16830
16831 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16832 /* Success. */
16833 ret = 0;
16834 break;
16835 }
16836 }
16837 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16838 DMA_RWCTRL_WRITE_BNDRY_16) {
16839	 /* DMA test passed without adjusting the DMA boundary;
16840 * now look for chipsets that are known to expose the
16841 * DMA bug without failing the test.
16842 */
16843 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16844 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16845 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16846 } else {
16847 /* Safe to use the calculated DMA boundary. */
16848 tp->dma_rwctrl = saved_dma_rwctrl;
16849 }
16850
16851 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16852 }
16853
16854 out:
16855 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16856 out_nofree:
16857 return ret;
16858 }
16859
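/* Choose buffer manager watermark defaults by chip family; the
 * _jumbo variants apply when the jumbo ring is in use.
 */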
16860 static void tg3_init_bufmgr_config(struct tg3 *tp)
16861 {
16862 if (tg3_flag(tp, 57765_PLUS)) {
16863 tp->bufmgr_config.mbuf_read_dma_low_water =
16864 DEFAULT_MB_RDMA_LOW_WATER_5705;
16865 tp->bufmgr_config.mbuf_mac_rx_low_water =
16866 DEFAULT_MB_MACRX_LOW_WATER_57765;
16867 tp->bufmgr_config.mbuf_high_water =
16868 DEFAULT_MB_HIGH_WATER_57765;
16869
16870 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16871 DEFAULT_MB_RDMA_LOW_WATER_5705;
16872 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16873 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16874 tp->bufmgr_config.mbuf_high_water_jumbo =
16875 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16876 } else if (tg3_flag(tp, 5705_PLUS)) {
16877 tp->bufmgr_config.mbuf_read_dma_low_water =
16878 DEFAULT_MB_RDMA_LOW_WATER_5705;
16879 tp->bufmgr_config.mbuf_mac_rx_low_water =
16880 DEFAULT_MB_MACRX_LOW_WATER_5705;
16881 tp->bufmgr_config.mbuf_high_water =
16882 DEFAULT_MB_HIGH_WATER_5705;
16883 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16884 tp->bufmgr_config.mbuf_mac_rx_low_water =
16885 DEFAULT_MB_MACRX_LOW_WATER_5906;
16886 tp->bufmgr_config.mbuf_high_water =
16887 DEFAULT_MB_HIGH_WATER_5906;
16888 }
16889
16890 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16891 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16892 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16893 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16894 tp->bufmgr_config.mbuf_high_water_jumbo =
16895 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16896 } else {
16897 tp->bufmgr_config.mbuf_read_dma_low_water =
16898 DEFAULT_MB_RDMA_LOW_WATER;
16899 tp->bufmgr_config.mbuf_mac_rx_low_water =
16900 DEFAULT_MB_MACRX_LOW_WATER;
16901 tp->bufmgr_config.mbuf_high_water =
16902 DEFAULT_MB_HIGH_WATER;
16903
16904 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16905 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16906 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16907 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16908 tp->bufmgr_config.mbuf_high_water_jumbo =
16909 DEFAULT_MB_HIGH_WATER_JUMBO;
16910 }
16911
16912 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16913 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16914 }
16915
16916 static char *tg3_phy_string(struct tg3 *tp)
16917 {
16918 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16919 case TG3_PHY_ID_BCM5400: return "5400";
16920 case TG3_PHY_ID_BCM5401: return "5401";
16921 case TG3_PHY_ID_BCM5411: return "5411";
16922 case TG3_PHY_ID_BCM5701: return "5701";
16923 case TG3_PHY_ID_BCM5703: return "5703";
16924 case TG3_PHY_ID_BCM5704: return "5704";
16925 case TG3_PHY_ID_BCM5705: return "5705";
16926 case TG3_PHY_ID_BCM5750: return "5750";
16927 case TG3_PHY_ID_BCM5752: return "5752";
16928 case TG3_PHY_ID_BCM5714: return "5714";
16929 case TG3_PHY_ID_BCM5780: return "5780";
16930 case TG3_PHY_ID_BCM5755: return "5755";
16931 case TG3_PHY_ID_BCM5787: return "5787";
16932 case TG3_PHY_ID_BCM5784: return "5784";
16933 case TG3_PHY_ID_BCM5756: return "5722/5756";
16934 case TG3_PHY_ID_BCM5906: return "5906";
16935 case TG3_PHY_ID_BCM5761: return "5761";
16936 case TG3_PHY_ID_BCM5718C: return "5718C";
16937 case TG3_PHY_ID_BCM5718S: return "5718S";
16938 case TG3_PHY_ID_BCM57765: return "57765";
16939 case TG3_PHY_ID_BCM5719C: return "5719C";
16940 case TG3_PHY_ID_BCM5720C: return "5720C";
16941 case TG3_PHY_ID_BCM5762: return "5762C";
16942 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16943 case 0: return "serdes";
16944 default: return "unknown";
16945 }
16946 }
16947
16948 static char *tg3_bus_string(struct tg3 *tp, char *str)
16949 {
16950 if (tg3_flag(tp, PCI_EXPRESS)) {
16951 strcpy(str, "PCI Express");
16952 return str;
16953 } else if (tg3_flag(tp, PCIX_MODE)) {
16954 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16955
16956 strcpy(str, "PCIX:");
16957
16958 if ((clock_ctrl == 7) ||
16959 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16960 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16961 strcat(str, "133MHz");
16962 else if (clock_ctrl == 0)
16963 strcat(str, "33MHz");
16964 else if (clock_ctrl == 2)
16965 strcat(str, "50MHz");
16966 else if (clock_ctrl == 4)
16967 strcat(str, "66MHz");
16968 else if (clock_ctrl == 6)
16969 strcat(str, "100MHz");
16970 } else {
16971 strcpy(str, "PCI:");
16972 if (tg3_flag(tp, PCI_HIGH_SPEED))
16973 strcat(str, "66MHz");
16974 else
16975 strcat(str, "33MHz");
16976 }
16977 if (tg3_flag(tp, PCI_32BIT))
16978 strcat(str, ":32-bit");
16979 else
16980 strcat(str, ":64-bit");
16981 return str;
16982 }
16983
16984 static void tg3_init_coal(struct tg3 *tp)
16985 {
16986 struct ethtool_coalesce *ec = &tp->coal;
16987
16988 memset(ec, 0, sizeof(*ec));
16989 ec->cmd = ETHTOOL_GCOALESCE;
16990 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16991 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16992 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16993 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16994 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16995 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16996 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16997 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16998 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16999
17000 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17001 HOSTCC_MODE_CLRTICK_TXBD)) {
17002 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17003 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17004 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17005 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17006 }
17007
17008 if (tg3_flag(tp, 5705_PLUS)) {
17009 ec->rx_coalesce_usecs_irq = 0;
17010 ec->tx_coalesce_usecs_irq = 0;
17011 ec->stats_block_coalesce_usecs = 0;
17012 }
17013 }
17014
17015 static int tg3_init_one(struct pci_dev *pdev,
17016 const struct pci_device_id *ent)
17017 {
17018 struct net_device *dev;
17019 struct tg3 *tp;
17020 int i, err, pm_cap;
17021 u32 sndmbx, rcvmbx, intmbx;
17022 char str[40];
17023 u64 dma_mask, persist_dma_mask;
17024 netdev_features_t features = 0;
17025
17026 printk_once(KERN_INFO "%s\n", version);
17027
17028 err = pci_enable_device(pdev);
17029 if (err) {
17030 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17031 return err;
17032 }
17033
17034 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17035 if (err) {
17036 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17037 goto err_out_disable_pdev;
17038 }
17039
17040 pci_set_master(pdev);
17041
17042 /* Find power-management capability. */
17043 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17044 if (pm_cap == 0) {
17045 dev_err(&pdev->dev,
17046 "Cannot find Power Management capability, aborting\n");
17047 err = -EIO;
17048 goto err_out_free_res;
17049 }
17050
17051 err = pci_set_power_state(pdev, PCI_D0);
17052 if (err) {
17053 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17054 goto err_out_free_res;
17055 }
17056
17057 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17058 if (!dev) {
17059 err = -ENOMEM;
17060 goto err_out_power_down;
17061 }
17062
17063 SET_NETDEV_DEV(dev, &pdev->dev);
17064
17065 tp = netdev_priv(dev);
17066 tp->pdev = pdev;
17067 tp->dev = dev;
17068 tp->pm_cap = pm_cap;
17069 tp->rx_mode = TG3_DEF_RX_MODE;
17070 tp->tx_mode = TG3_DEF_TX_MODE;
17071 tp->irq_sync = 1;
17072
17073 if (tg3_debug > 0)
17074 tp->msg_enable = tg3_debug;
17075 else
17076 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17077
17078 if (pdev_is_ssb_gige_core(pdev)) {
17079 tg3_flag_set(tp, IS_SSB_CORE);
17080 if (ssb_gige_must_flush_posted_writes(pdev))
17081 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17082 if (ssb_gige_one_dma_at_once(pdev))
17083 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17084 if (ssb_gige_have_roboswitch(pdev))
17085 tg3_flag_set(tp, ROBOSWITCH);
17086 if (ssb_gige_is_rgmii(pdev))
17087 tg3_flag_set(tp, RGMII_MODE);
17088 }
17089
17090 /* The word/byte swap controls here control register access byte
17091 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17092 * setting below.
17093 */
17094 tp->misc_host_ctrl =
17095 MISC_HOST_CTRL_MASK_PCI_INT |
17096 MISC_HOST_CTRL_WORD_SWAP |
17097 MISC_HOST_CTRL_INDIR_ACCESS |
17098 MISC_HOST_CTRL_PCISTATE_RW;
17099
17100 /* The NONFRM (non-frame) byte/word swap controls take effect
17101 * on descriptor entries, anything which isn't packet data.
17102 *
17103 * The StrongARM chips on the board (one for tx, one for rx)
17104 * are running in big-endian mode.
17105 */
17106 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17107 GRC_MODE_WSWAP_NONFRM_DATA);
17108 #ifdef __BIG_ENDIAN
17109 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17110 #endif
17111 spin_lock_init(&tp->lock);
17112 spin_lock_init(&tp->indirect_lock);
17113 INIT_WORK(&tp->reset_task, tg3_reset_task);
17114
17115 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17116 if (!tp->regs) {
17117 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17118 err = -ENOMEM;
17119 goto err_out_free_dev;
17120 }
17121
17122 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17123 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17124 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17125 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17126 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17127 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17128 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17129 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17130 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17131 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17132 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17133 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17134 tg3_flag_set(tp, ENABLE_APE);
17135 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17136 if (!tp->aperegs) {
17137 dev_err(&pdev->dev,
17138 "Cannot map APE registers, aborting\n");
17139 err = -ENOMEM;
17140 goto err_out_iounmap;
17141 }
17142 }
17143
17144 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17145 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17146
17147 dev->ethtool_ops = &tg3_ethtool_ops;
17148 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17149 dev->netdev_ops = &tg3_netdev_ops;
17150 dev->irq = pdev->irq;
17151
17152 err = tg3_get_invariants(tp, ent);
17153 if (err) {
17154 dev_err(&pdev->dev,
17155 "Problem fetching invariants of chip, aborting\n");
17156 goto err_out_apeunmap;
17157 }
17158
17159 /* The EPB bridge inside 5714, 5715, and 5780 and any
17160 * device behind the EPB cannot support DMA addresses > 40-bit.
17161 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17162 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17163 * do DMA address check in tg3_start_xmit().
17164 */
17165 if (tg3_flag(tp, IS_5788))
17166 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17167 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17168 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17169 #ifdef CONFIG_HIGHMEM
17170 dma_mask = DMA_BIT_MASK(64);
17171 #endif
17172 } else
17173 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17174
17175 /* Configure DMA attributes. */
17176 if (dma_mask > DMA_BIT_MASK(32)) {
17177 err = pci_set_dma_mask(pdev, dma_mask);
17178 if (!err) {
17179 features |= NETIF_F_HIGHDMA;
17180 err = pci_set_consistent_dma_mask(pdev,
17181 persist_dma_mask);
17182 if (err < 0) {
17183 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17184 "DMA for consistent allocations\n");
17185 goto err_out_apeunmap;
17186 }
17187 }
17188 }
17189 if (err || dma_mask == DMA_BIT_MASK(32)) {
17190 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17191 if (err) {
17192 dev_err(&pdev->dev,
17193 "No usable DMA configuration, aborting\n");
17194 goto err_out_apeunmap;
17195 }
17196 }
17197
17198 tg3_init_bufmgr_config(tp);
17199
17200 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17201
17202 /* 5700 B0 chips do not support checksumming correctly due
17203 * to hardware bugs.
17204 */
17205 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17206 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17207
17208 if (tg3_flag(tp, 5755_PLUS))
17209 features |= NETIF_F_IPV6_CSUM;
17210 }
17211
17212 /* TSO is on by default on chips that support hardware TSO.
17213 * Firmware TSO on older chips gives lower performance, so it
17214 * is off by default, but can be enabled using ethtool.
17215 */
17216 if ((tg3_flag(tp, HW_TSO_1) ||
17217 tg3_flag(tp, HW_TSO_2) ||
17218 tg3_flag(tp, HW_TSO_3)) &&
17219 (features & NETIF_F_IP_CSUM))
17220 features |= NETIF_F_TSO;
17221 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17222 if (features & NETIF_F_IPV6_CSUM)
17223 features |= NETIF_F_TSO6;
17224 if (tg3_flag(tp, HW_TSO_3) ||
17225 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17226 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17227 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17228 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17229 tg3_asic_rev(tp) == ASIC_REV_57780)
17230 features |= NETIF_F_TSO_ECN;
17231 }
17232
17233 dev->features |= features;
17234 dev->vlan_features |= features;
17235
17236 /*
17237 * Add loopback capability only for a subset of devices that support
17238	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow INT-PHY
17239 * loopback for the remaining devices.
17240 */
17241 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17242 !tg3_flag(tp, CPMU_PRESENT))
17243 /* Add the loopback capability */
17244 features |= NETIF_F_LOOPBACK;
17245
17246 dev->hw_features |= features;
17247
17248 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17249 !tg3_flag(tp, TSO_CAPABLE) &&
17250 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17251 tg3_flag_set(tp, MAX_RXPEND_64);
17252 tp->rx_pending = 63;
17253 }
17254
17255 err = tg3_get_device_address(tp);
17256 if (err) {
17257 dev_err(&pdev->dev,
17258 "Could not obtain valid ethernet address, aborting\n");
17259 goto err_out_apeunmap;
17260 }
17261
17262 /*
17263	 * Reset chip in case UNDI or EFI driver did not shut down.
17264	 * The DMA self test will enable WDMAC and we'll see (spurious)
17265 * pending DMA on the PCI bus at that point.
17266 */
17267 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17268 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17269 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17270 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17271 }
17272
17273 err = tg3_test_dma(tp);
17274 if (err) {
17275 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17276 goto err_out_apeunmap;
17277 }
17278
17279 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17280 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17281 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17282 for (i = 0; i < tp->irq_max; i++) {
17283 struct tg3_napi *tnapi = &tp->napi[i];
17284
17285 tnapi->tp = tp;
17286 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17287
17288 tnapi->int_mbox = intmbx;
17289 if (i <= 4)
17290 intmbx += 0x8;
17291 else
17292 intmbx += 0x4;
17293
17294 tnapi->consmbox = rcvmbx;
17295 tnapi->prodmbox = sndmbx;
17296
17297 if (i)
17298 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17299 else
17300 tnapi->coal_now = HOSTCC_MODE_NOW;
17301
17302 if (!tg3_flag(tp, SUPPORT_MSIX))
17303 break;
17304
17305 /*
17306 * If we support MSIX, we'll be using RSS. If we're using
17307 * RSS, the first vector only handles link interrupts and the
17308 * remaining vectors handle rx and tx interrupts. Reuse the
17309  * mailbox values for the next iteration.  The values we set up
17310  * above are still useful for single-vector mode.
17311 */
17312 if (!i)
17313 continue;
17314
17315 rcvmbx += 0x8;
17316
17317 if (sndmbx & 0x4)
17318 sndmbx -= 0x4;
17319 else
17320 sndmbx += 0xc;
17321 }
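	/*
	 * Worked example of the mailbox arithmetic above, writing I, R
	 * and S for MAILBOX_INTERRUPT_0, MAILBOX_RCVRET_CON_IDX_0 and
	 * MAILBOX_SNDHOST_PROD_IDX_0 (TG3_64BIT_REG_LOW = 0x4):
	 *
	 *	vec	int_mbox	consmbox	prodmbox
	 *	0	I + 0x04	R + 0x04	S + 0x04
	 *	1	I + 0x0c	R + 0x04	S + 0x04
	 *	2	I + 0x14	R + 0x0c	S + 0x00
	 *	3	I + 0x1c	R + 0x14	S + 0x0c
	 *	4	I + 0x24	R + 0x1c	S + 0x08
	 *
	 * Vector 1 deliberately reuses vector 0's rx/tx mailboxes (the
	 * "if (!i) continue" above), and the -0x4/+0xc dance packs two
	 * 32-bit producer mailboxes into each 64-bit register slot.
	 */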
17322
17323 tg3_init_coal(tp);
17324
17325 pci_set_drvdata(pdev, dev);
17326
17327 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17328 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17329 tg3_asic_rev(tp) == ASIC_REV_5762)
17330 tg3_flag_set(tp, PTP_CAPABLE);
17331
17332 if (tg3_flag(tp, 5717_PLUS)) {
17333 		/* Resume from a low-power mode */
17334 tg3_frob_aux_power(tp, false);
17335 }
17336
17337 tg3_timer_init(tp);
17338
17339 tg3_carrier_off(tp);
17340
17341 err = register_netdev(dev);
17342 if (err) {
17343 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17344 goto err_out_apeunmap;
17345 }
17346
17347 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17348 tp->board_part_number,
17349 tg3_chip_rev_id(tp),
17350 tg3_bus_string(tp, str),
17351 dev->dev_addr);
17352
17353 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17354 struct phy_device *phydev;
17355 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17356 netdev_info(dev,
17357 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17358 phydev->drv->name, dev_name(&phydev->dev));
17359 } else {
17360 char *ethtype;
17361
17362 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17363 ethtype = "10/100Base-TX";
17364 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17365 ethtype = "1000Base-SX";
17366 else
17367 ethtype = "10/100/1000Base-T";
17368
17369 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17370 "(WireSpeed[%d], EEE[%d])\n",
17371 tg3_phy_string(tp), ethtype,
17372 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17373 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17374 }
17375
17376 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17377 (dev->features & NETIF_F_RXCSUM) != 0,
17378 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17379 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17380 tg3_flag(tp, ENABLE_ASF) != 0,
17381 tg3_flag(tp, TSO_CAPABLE) != 0);
17382 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17383 tp->dma_rwctrl,
17384 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17385 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17386
17387 pci_save_state(pdev);
17388
17389 return 0;
17390
17391 err_out_apeunmap:
17392 if (tp->aperegs) {
17393 iounmap(tp->aperegs);
17394 tp->aperegs = NULL;
17395 }
17396
17397 err_out_iounmap:
17398 if (tp->regs) {
17399 iounmap(tp->regs);
17400 tp->regs = NULL;
17401 }
17402
17403 err_out_free_dev:
17404 free_netdev(dev);
17405
17406 err_out_power_down:
17407 pci_set_power_state(pdev, PCI_D3hot);
17408
17409 err_out_free_res:
17410 pci_release_regions(pdev);
17411
17412 err_out_disable_pdev:
17413 pci_disable_device(pdev);
17414 pci_set_drvdata(pdev, NULL);
17415 return err;
17416 }
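/*
 * The err_out_* labels above follow the usual kernel goto-unwind idiom:
 * each label releases exactly what was acquired before the failing step,
 * in reverse order of acquisition.  A minimal sketch of the pattern
 * (hypothetical alloc_a/alloc_b helpers, not from this driver):
 *
 *	a = alloc_a();
 *	if (!a)
 *		return -ENOMEM;
 *	b = alloc_b();
 *	if (!b) {
 *		err = -ENOMEM;
 *		goto err_free_a;
 *	}
 *	return 0;
 *
 * err_free_a:
 *	free_a(a);
 *	return err;
 */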
17417
17418 static void tg3_remove_one(struct pci_dev *pdev)
17419 {
17420 struct net_device *dev = pci_get_drvdata(pdev);
17421
17422 if (dev) {
17423 struct tg3 *tp = netdev_priv(dev);
17424
17425 release_firmware(tp->fw);
17426
17427 tg3_reset_task_cancel(tp);
17428
17429 if (tg3_flag(tp, USE_PHYLIB)) {
17430 tg3_phy_fini(tp);
17431 tg3_mdio_fini(tp);
17432 }
17433
17434 unregister_netdev(dev);
17435 if (tp->aperegs) {
17436 iounmap(tp->aperegs);
17437 tp->aperegs = NULL;
17438 }
17439 if (tp->regs) {
17440 iounmap(tp->regs);
17441 tp->regs = NULL;
17442 }
17443 free_netdev(dev);
17444 pci_release_regions(pdev);
17445 pci_disable_device(pdev);
17446 pci_set_drvdata(pdev, NULL);
17447 }
17448 }
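/*
 * Teardown in tg3_remove_one() is roughly the probe path in reverse:
 * quiesce first (release firmware, cancel the reset task, detach
 * PHY/MDIO), unregister the netdev so no new I/O can start, unmap the
 * MMIO regions, free the netdev, and release/disable the PCI device
 * last.
 */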
17449
17450 #ifdef CONFIG_PM_SLEEP
17451 static int tg3_suspend(struct device *device)
17452 {
17453 struct pci_dev *pdev = to_pci_dev(device);
17454 struct net_device *dev = pci_get_drvdata(pdev);
17455 struct tg3 *tp = netdev_priv(dev);
17456 int err;
17457
17458 if (!netif_running(dev))
17459 return 0;
17460
17461 tg3_reset_task_cancel(tp);
17462 tg3_phy_stop(tp);
17463 tg3_netif_stop(tp);
17464
17465 tg3_timer_stop(tp);
17466
17467 tg3_full_lock(tp, 1);
17468 tg3_disable_ints(tp);
17469 tg3_full_unlock(tp);
17470
17471 netif_device_detach(dev);
17472
17473 tg3_full_lock(tp, 0);
17474 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17475 tg3_flag_clear(tp, INIT_COMPLETE);
17476 tg3_full_unlock(tp);
17477
17478 err = tg3_power_down_prepare(tp);
17479 if (err) {
17480 int err2;
17481
17482 tg3_full_lock(tp, 0);
17483
17484 tg3_flag_set(tp, INIT_COMPLETE);
17485 err2 = tg3_restart_hw(tp, true);
17486 if (err2)
17487 goto out;
17488
17489 tg3_timer_start(tp);
17490
17491 netif_device_attach(dev);
17492 tg3_netif_start(tp);
17493
17494 out:
17495 tg3_full_unlock(tp);
17496
17497 if (!err2)
17498 tg3_phy_start(tp);
17499 }
17500
17501 return err;
17502 }
17503
17504 static int tg3_resume(struct device *device)
17505 {
17506 struct pci_dev *pdev = to_pci_dev(device);
17507 struct net_device *dev = pci_get_drvdata(pdev);
17508 struct tg3 *tp = netdev_priv(dev);
17509 int err;
17510
17511 if (!netif_running(dev))
17512 return 0;
17513
17514 netif_device_attach(dev);
17515
17516 tg3_full_lock(tp, 0);
17517
17518 tg3_flag_set(tp, INIT_COMPLETE);
17519 err = tg3_restart_hw(tp,
17520 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17521 if (err)
17522 goto out;
17523
17524 tg3_timer_start(tp);
17525
17526 tg3_netif_start(tp);
17527
17528 out:
17529 tg3_full_unlock(tp);
17530
17531 if (!err)
17532 tg3_phy_start(tp);
17533
17534 return err;
17535 }
17536 #endif /* CONFIG_PM_SLEEP */
17537
17538 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
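/*
 * SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops whose system-sleep
 * callbacks (.suspend/.freeze/.poweroff and .resume/.thaw/.restore) all
 * route to tg3_suspend()/tg3_resume(); with CONFIG_PM_SLEEP unset those
 * fields are omitted, which is why the two callbacks themselves are
 * compiled out under the same #ifdef.
 */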
17539
17540 /**
17541 * tg3_io_error_detected - called when PCI error is detected
17542 * @pdev: Pointer to PCI device
17543 * @state: The current pci connection state
17544 *
17545 * This function is called after a PCI bus error affecting
17546 * this device has been detected.
17547 */
17548 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17549 pci_channel_state_t state)
17550 {
17551 struct net_device *netdev = pci_get_drvdata(pdev);
17552 struct tg3 *tp = netdev_priv(netdev);
17553 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17554
17555 netdev_info(netdev, "PCI I/O error detected\n");
17556
17557 rtnl_lock();
17558
17559 if (!netif_running(netdev))
17560 goto done;
17561
17562 tg3_phy_stop(tp);
17563
17564 tg3_netif_stop(tp);
17565
17566 tg3_timer_stop(tp);
17567
17568 /* Want to make sure that the reset task doesn't run */
17569 tg3_reset_task_cancel(tp);
17570
17571 netif_device_detach(netdev);
17572
17573 /* Clean up software state, even if MMIO is blocked */
17574 tg3_full_lock(tp, 0);
17575 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17576 tg3_full_unlock(tp);
17577
17578 done:
17579 if (state == pci_channel_io_perm_failure)
17580 err = PCI_ERS_RESULT_DISCONNECT;
17581 else
17582 pci_disable_device(pdev);
17583
17584 rtnl_unlock();
17585
17586 return err;
17587 }
17588
17589 /**
17590 * tg3_io_slot_reset - called after the pci bus has been reset.
17591 * @pdev: Pointer to PCI device
17592 *
17593 * Restart the card from scratch, as if from a cold-boot.
17594  * At this point, the card has experienced a hard reset,
17595 * followed by fixups by BIOS, and has its config space
17596 * set up identically to what it was at cold boot.
17597 */
17598 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17599 {
17600 struct net_device *netdev = pci_get_drvdata(pdev);
17601 struct tg3 *tp = netdev_priv(netdev);
17602 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17603 int err;
17604
17605 rtnl_lock();
17606
17607 if (pci_enable_device(pdev)) {
17608 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17609 goto done;
17610 }
17611
17612 pci_set_master(pdev);
17613 pci_restore_state(pdev);
17614 pci_save_state(pdev);
17615
17616 if (!netif_running(netdev)) {
17617 rc = PCI_ERS_RESULT_RECOVERED;
17618 goto done;
17619 }
17620
17621 err = tg3_power_up(tp);
17622 if (err)
17623 goto done;
17624
17625 rc = PCI_ERS_RESULT_RECOVERED;
17626
17627 done:
17628 rtnl_unlock();
17629
17630 return rc;
17631 }
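/*
 * Note the pci_restore_state()/pci_save_state() pair above: restore
 * reloads the config space captured at probe time, and the immediate
 * re-save keeps a fresh snapshot available so a later reset can be
 * recovered from the same way.
 */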
17632
17633 /**
17634 * tg3_io_resume - called when traffic can start flowing again.
17635 * @pdev: Pointer to PCI device
17636 *
17637 * This callback is called when the error recovery driver tells
17638  * us that it's OK to resume normal operation.
17639 */
17640 static void tg3_io_resume(struct pci_dev *pdev)
17641 {
17642 struct net_device *netdev = pci_get_drvdata(pdev);
17643 struct tg3 *tp = netdev_priv(netdev);
17644 int err;
17645
17646 rtnl_lock();
17647
17648 if (!netif_running(netdev))
17649 goto done;
17650
17651 tg3_full_lock(tp, 0);
17652 tg3_flag_set(tp, INIT_COMPLETE);
17653 err = tg3_restart_hw(tp, true);
17654 if (err) {
17655 tg3_full_unlock(tp);
17656 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17657 goto done;
17658 }
17659
17660 netif_device_attach(netdev);
17661
17662 tg3_timer_start(tp);
17663
17664 tg3_netif_start(tp);
17665
17666 tg3_full_unlock(tp);
17667
17668 tg3_phy_start(tp);
17669
17670 done:
17671 rtnl_unlock();
17672 }
17673
17674 static const struct pci_error_handlers tg3_err_handler = {
17675 .error_detected = tg3_io_error_detected,
17676 .slot_reset = tg3_io_slot_reset,
17677 .resume = tg3_io_resume
17678 };
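/*
 * For AER recovery the PCI core walks these hooks in order:
 * .error_detected (quiesce the device; return NEED_RESET, or DISCONNECT
 * on permanent failure), then .slot_reset once the link has been reset
 * (re-enable the device and restore config space; return RECOVERED or
 * DISCONNECT), and finally .resume when traffic may flow again.
 */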
17679
17680 static struct pci_driver tg3_driver = {
17681 .name = DRV_MODULE_NAME,
17682 .id_table = tg3_pci_tbl,
17683 .probe = tg3_init_one,
17684 .remove = tg3_remove_one,
17685 .err_handler = &tg3_err_handler,
17686 .driver.pm = &tg3_pm_ops,
17687 };
17688
17689 static int __init tg3_init(void)
17690 {
17691 return pci_register_driver(&tg3_driver);
17692 }
17693
17694 static void __exit tg3_cleanup(void)
17695 {
17696 pci_unregister_driver(&tg3_driver);
17697 }
17698
17699 module_init(tg3_init);
17700 module_exit(tg3_cleanup);
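/*
 * Since tg3_init()/tg3_cleanup() only register and unregister the
 * pci_driver, an equivalent shorthand would be
 * module_pci_driver(tg3_driver), which generates the same init/exit
 * boilerplate.
 */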