/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
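
/* Usage sketch (illustrative comment, not driver code): call sites name
 * flags without the TG3_FLAG_ prefix and the macros paste it back on, e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_ape_lock(tp, TG3_APE_LOCK_MEM);
 *	tg3_flag_set(tp, TAGGED_STATUS);
 *
 * Routing everything through the _tg3_flag() helpers gives the compiler a
 * chance to check that the flag argument really is an enum TG3_FLAGS.
 */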

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		131
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 09, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
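
/* Example (illustrative): because TG3_TX_RING_SIZE is a power of two,
 * the wrap-around
 *
 *	next = (cur + 1) % TG3_TX_RING_SIZE;
 *
 * reduces to the mask form NEXT_TX() uses,
 *
 *	next = (cur + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * sparing a hardware divide on every descriptor advance.
 */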

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
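
/* Usage sketch (illustrative, simplified from the rx path later in this
 * file): frames at or below the threshold are copied into a small fresh
 * skb so the mapped DMA buffer can be recycled immediately; larger
 * frames are handed up in place:
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		... unmap and pass the existing buffer up the stack ...
 *	else
 *		... memcpy the frame into a newly allocated skb ...
 */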

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
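
/* Sketch (illustrative): the tx completion path wakes a stopped queue
 * only once at least a quarter of the ring is free again, e.g.
 *
 *	if (netif_tx_queue_stopped(txq) &&
 *	    tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_tx_wake_queue(txq);
 *
 * The hysteresis keeps the queue from bouncing between the stopped and
 * awake states on every reclaimed descriptor.
 */
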
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
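
/* Usage sketch (illustrative): callers reach this helper through the
 * tw32_f() and tw32_wait_f() macros defined below, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * which guarantees at least 40 usec elapse after the write regardless
 * of whether the register is safe to read back.
 */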

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
			     usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849 if (!netif_carrier_ok(tp->dev)) {
1850 netif_info(tp, link, tp->dev, "Link is down\n");
1851 tg3_ump_link_report(tp);
1852 } else if (netif_msg_link(tp)) {
1853 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 (tp->link_config.active_speed == SPEED_1000 ?
1855 1000 :
1856 (tp->link_config.active_speed == SPEED_100 ?
1857 100 : 10)),
1858 (tp->link_config.active_duplex == DUPLEX_FULL ?
1859 "full" : "half"));
1860
1861 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863 "on" : "off",
1864 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865 "on" : "off");
1866
1867 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 netdev_info(tp->dev, "EEE is %s\n",
1869 tp->setlpicnt ? "enabled" : "disabled");
1870
1871 tg3_ump_link_report(tp);
1872 }
1873
1874 tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876
1877 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1878 {
1879 u32 flowctrl = 0;
1880
1881 if (adv & ADVERTISE_PAUSE_CAP) {
1882 flowctrl |= FLOW_CTRL_RX;
1883 if (!(adv & ADVERTISE_PAUSE_ASYM))
1884 flowctrl |= FLOW_CTRL_TX;
1885 } else if (adv & ADVERTISE_PAUSE_ASYM)
1886 flowctrl |= FLOW_CTRL_TX;
1887
1888 return flowctrl;
1889 }
1890
1891 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1892 {
1893 u16 miireg;
1894
1895 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1896 miireg = ADVERTISE_1000XPAUSE;
1897 else if (flow_ctrl & FLOW_CTRL_TX)
1898 miireg = ADVERTISE_1000XPSE_ASYM;
1899 else if (flow_ctrl & FLOW_CTRL_RX)
1900 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1901 else
1902 miireg = 0;
1903
1904 return miireg;
1905 }
1906
1907 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1908 {
1909 u32 flowctrl = 0;
1910
1911 if (adv & ADVERTISE_1000XPAUSE) {
1912 flowctrl |= FLOW_CTRL_RX;
1913 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1914 flowctrl |= FLOW_CTRL_TX;
1915 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1916 flowctrl |= FLOW_CTRL_TX;
1917
1918 return flowctrl;
1919 }
1920
1921 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1922 {
1923 u8 cap = 0;
1924
1925 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1926 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1927 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1928 if (lcladv & ADVERTISE_1000XPAUSE)
1929 cap = FLOW_CTRL_RX;
1930 if (rmtadv & ADVERTISE_1000XPAUSE)
1931 cap = FLOW_CTRL_TX;
1932 }
1933
1934 return cap;
1935 }
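/* For reference, the resolution above matches the IEEE 802.3 priority
 * table for 1000BASE-X pause negotiation, with PAUSE meaning
 * ADVERTISE_1000XPAUSE and ASYM meaning ADVERTISE_1000XPSE_ASYM:
 *
 *	local PAUSE/ASYM   remote PAUSE/ASYM   resolved capability
 *	     1    x             1    x         FLOW_CTRL_TX | FLOW_CTRL_RX
 *	     1    1             0    1         FLOW_CTRL_RX
 *	     0    1             1    1         FLOW_CTRL_TX
 *	   anything else                       none
 */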
1936
1937 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1938 {
1939 u8 autoneg;
1940 u8 flowctrl = 0;
1941 u32 old_rx_mode = tp->rx_mode;
1942 u32 old_tx_mode = tp->tx_mode;
1943
1944 if (tg3_flag(tp, USE_PHYLIB))
1945 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1946 else
1947 autoneg = tp->link_config.autoneg;
1948
1949 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1950 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1951 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1952 else
1953 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1954 } else
1955 flowctrl = tp->link_config.flowctrl;
1956
1957 tp->link_config.active_flowctrl = flowctrl;
1958
1959 if (flowctrl & FLOW_CTRL_RX)
1960 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1961 else
1962 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1963
1964 if (old_rx_mode != tp->rx_mode)
1965 tw32_f(MAC_RX_MODE, tp->rx_mode);
1966
1967 if (flowctrl & FLOW_CTRL_TX)
1968 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1969 else
1970 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1971
1972 if (old_tx_mode != tp->tx_mode)
1973 tw32_f(MAC_TX_MODE, tp->tx_mode);
1974 }
1975
1976 static void tg3_adjust_link(struct net_device *dev)
1977 {
1978 u8 oldflowctrl, linkmesg = 0;
1979 u32 mac_mode, lcl_adv, rmt_adv;
1980 struct tg3 *tp = netdev_priv(dev);
1981 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1982
1983 spin_lock_bh(&tp->lock);
1984
1985 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1986 MAC_MODE_HALF_DUPLEX);
1987
1988 oldflowctrl = tp->link_config.active_flowctrl;
1989
1990 if (phydev->link) {
1991 lcl_adv = 0;
1992 rmt_adv = 0;
1993
1994 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1995 mac_mode |= MAC_MODE_PORT_MODE_MII;
1996 else if (phydev->speed == SPEED_1000 ||
1997 tg3_asic_rev(tp) != ASIC_REV_5785)
1998 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1999 else
2000 mac_mode |= MAC_MODE_PORT_MODE_MII;
2001
2002 if (phydev->duplex == DUPLEX_HALF)
2003 mac_mode |= MAC_MODE_HALF_DUPLEX;
2004 else {
2005 lcl_adv = mii_advertise_flowctrl(
2006 tp->link_config.flowctrl);
2007
2008 if (phydev->pause)
2009 rmt_adv = LPA_PAUSE_CAP;
2010 if (phydev->asym_pause)
2011 rmt_adv |= LPA_PAUSE_ASYM;
2012 }
2013
2014 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2015 } else
2016 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2017
2018 if (mac_mode != tp->mac_mode) {
2019 tp->mac_mode = mac_mode;
2020 tw32_f(MAC_MODE, tp->mac_mode);
2021 udelay(40);
2022 }
2023
2024 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2025 if (phydev->speed == SPEED_10)
2026 tw32(MAC_MI_STAT,
2027 MAC_MI_STAT_10MBPS_MODE |
2028 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2029 else
2030 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2031 }
2032
2033 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2034 tw32(MAC_TX_LENGTHS,
2035 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2036 (6 << TX_LENGTHS_IPG_SHIFT) |
2037 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2038 else
2039 tw32(MAC_TX_LENGTHS,
2040 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2041 (6 << TX_LENGTHS_IPG_SHIFT) |
2042 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2043
2044 if (phydev->link != tp->old_link ||
2045 phydev->speed != tp->link_config.active_speed ||
2046 phydev->duplex != tp->link_config.active_duplex ||
2047 oldflowctrl != tp->link_config.active_flowctrl)
2048 linkmesg = 1;
2049
2050 tp->old_link = phydev->link;
2051 tp->link_config.active_speed = phydev->speed;
2052 tp->link_config.active_duplex = phydev->duplex;
2053
2054 spin_unlock_bh(&tp->lock);
2055
2056 if (linkmesg)
2057 tg3_link_report(tp);
2058 }
2059
2060 static int tg3_phy_init(struct tg3 *tp)
2061 {
2062 struct phy_device *phydev;
2063
2064 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2065 return 0;
2066
2067 /* Bring the PHY back to a known state. */
2068 tg3_bmcr_reset(tp);
2069
2070 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2071
2072 /* Attach the MAC to the PHY. */
2073 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2074 tg3_adjust_link, phydev->interface);
2075 if (IS_ERR(phydev)) {
2076 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2077 return PTR_ERR(phydev);
2078 }
2079
2080 /* Mask with MAC supported features. */
2081 switch (phydev->interface) {
2082 case PHY_INTERFACE_MODE_GMII:
2083 case PHY_INTERFACE_MODE_RGMII:
2084 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2085 phydev->supported &= (PHY_GBIT_FEATURES |
2086 SUPPORTED_Pause |
2087 SUPPORTED_Asym_Pause);
2088 break;
2089 }
2090 /* fallthru */
2091 case PHY_INTERFACE_MODE_MII:
2092 phydev->supported &= (PHY_BASIC_FEATURES |
2093 SUPPORTED_Pause |
2094 SUPPORTED_Asym_Pause);
2095 break;
2096 default:
2097 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2098 return -EINVAL;
2099 }
2100
2101 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2102
2103 phydev->advertising = phydev->supported;
2104
2105 return 0;
2106 }
2107
2108 static void tg3_phy_start(struct tg3 *tp)
2109 {
2110 struct phy_device *phydev;
2111
2112 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2113 return;
2114
2115 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2116
2117 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2118 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2119 phydev->speed = tp->link_config.speed;
2120 phydev->duplex = tp->link_config.duplex;
2121 phydev->autoneg = tp->link_config.autoneg;
2122 phydev->advertising = tp->link_config.advertising;
2123 }
2124
2125 phy_start(phydev);
2126
2127 phy_start_aneg(phydev);
2128 }
2129
2130 static void tg3_phy_stop(struct tg3 *tp)
2131 {
2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133 return;
2134
2135 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2136 }
2137
2138 static void tg3_phy_fini(struct tg3 *tp)
2139 {
2140 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2141 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2142 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2143 }
2144 }
2145
2146 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2147 {
2148 int err;
2149 u32 val;
2150
2151 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2152 return 0;
2153
2154 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2155 /* Cannot do read-modify-write on 5401 */
2156 err = tg3_phy_auxctl_write(tp,
2157 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2158 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2159 0x4c20);
2160 goto done;
2161 }
2162
2163 err = tg3_phy_auxctl_read(tp,
2164 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2165 if (err)
2166 return err;
2167
2168 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2169 err = tg3_phy_auxctl_write(tp,
2170 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2171
2172 done:
2173 return err;
2174 }
2175
2176 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2177 {
2178 u32 phytest;
2179
2180 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2181 u32 phy;
2182
2183 tg3_writephy(tp, MII_TG3_FET_TEST,
2184 phytest | MII_TG3_FET_SHADOW_EN);
2185 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2186 if (enable)
2187 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2188 else
2189 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2190 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2191 }
2192 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2193 }
2194 }
2195
2196 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2197 {
2198 u32 reg;
2199
2200 if (!tg3_flag(tp, 5705_PLUS) ||
2201 (tg3_flag(tp, 5717_PLUS) &&
2202 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2203 return;
2204
2205 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2206 tg3_phy_fet_toggle_apd(tp, enable);
2207 return;
2208 }
2209
2210 reg = MII_TG3_MISC_SHDW_WREN |
2211 MII_TG3_MISC_SHDW_SCR5_SEL |
2212 MII_TG3_MISC_SHDW_SCR5_LPED |
2213 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2214 MII_TG3_MISC_SHDW_SCR5_SDTL |
2215 MII_TG3_MISC_SHDW_SCR5_C125OE;
2216 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2217 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2218
2219 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2220
2221
2222 reg = MII_TG3_MISC_SHDW_WREN |
2223 MII_TG3_MISC_SHDW_APD_SEL |
2224 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2225 if (enable)
2226 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2227
2228 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2229 }
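/* "APD" above is the PHY's auto power-down facility: the first shadow
 * write programs the SCR5 shadow register with the low-power mode
 * bits, and the second arms (or disarms) auto power-down itself, using
 * an 84ms wake timer (MII_TG3_MISC_SHDW_APD_WKTM_84MS) when enabled.
 */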
2230
2231 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2232 {
2233 u32 phy;
2234
2235 if (!tg3_flag(tp, 5705_PLUS) ||
2236 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2237 return;
2238
2239 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2240 u32 ephy;
2241
2242 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2243 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2244
2245 tg3_writephy(tp, MII_TG3_FET_TEST,
2246 ephy | MII_TG3_FET_SHADOW_EN);
2247 if (!tg3_readphy(tp, reg, &phy)) {
2248 if (enable)
2249 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2250 else
2251 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2252 tg3_writephy(tp, reg, phy);
2253 }
2254 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2255 }
2256 } else {
2257 int ret;
2258
2259 ret = tg3_phy_auxctl_read(tp,
2260 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2261 if (!ret) {
2262 if (enable)
2263 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2264 else
2265 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2266 tg3_phy_auxctl_write(tp,
2267 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2268 }
2269 }
2270 }
2271
2272 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2273 {
2274 int ret;
2275 u32 val;
2276
2277 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2278 return;
2279
2280 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2281 if (!ret)
2282 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2283 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2284 }
2285
2286 static void tg3_phy_apply_otp(struct tg3 *tp)
2287 {
2288 u32 otp, phy;
2289
2290 if (!tp->phy_otp)
2291 return;
2292
2293 otp = tp->phy_otp;
2294
2295 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2296 return;
2297
2298 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2299 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2300 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2301
2302 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2303 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2304 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2305
2306 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2307 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2308 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2309
2310 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2311 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2312
2313 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2314 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2315
2316 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2317 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2318 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2319
2320 tg3_phy_toggle_auxctl_smdsp(tp, false);
2321 }
2322
2323 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2324 {
2325 u32 val;
2326 struct ethtool_eee *dest = &tp->eee;
2327
2328 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2329 return;
2330
2331 if (eee)
2332 dest = eee;
2333
2334 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2335 return;
2336
2337 /* Pull eee_active */
2338 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2339 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2340 dest->eee_active = 1;
2341 } else
2342 dest->eee_active = 0;
2343
2344 /* Pull lp advertised settings */
2345 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2346 return;
2347 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2348
2349 /* Pull advertised and eee_enabled settings */
2350 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2351 return;
2352 dest->eee_enabled = !!val;
2353 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2354
2355 /* Pull tx_lpi_enabled */
2356 val = tr32(TG3_CPMU_EEE_MODE);
2357 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2358
2359 /* Pull lpi timer value */
2360 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2361 }
2362
2363 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2364 {
2365 u32 val;
2366
2367 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2368 return;
2369
2370 tp->setlpicnt = 0;
2371
2372 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2373 current_link_up &&
2374 tp->link_config.active_duplex == DUPLEX_FULL &&
2375 (tp->link_config.active_speed == SPEED_100 ||
2376 tp->link_config.active_speed == SPEED_1000)) {
2377 u32 eeectl;
2378
2379 if (tp->link_config.active_speed == SPEED_1000)
2380 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2381 else
2382 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2383
2384 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2385
2386 tg3_eee_pull_config(tp, NULL);
2387 if (tp->eee.eee_active)
2388 tp->setlpicnt = 2;
2389 }
2390
2391 if (!tp->setlpicnt) {
2392 if (current_link_up &&
2393 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2394 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2395 tg3_phy_toggle_auxctl_smdsp(tp, false);
2396 }
2397
2398 val = tr32(TG3_CPMU_EEE_MODE);
2399 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2400 }
2401 }
2402
2403 static void tg3_phy_eee_enable(struct tg3 *tp)
2404 {
2405 u32 val;
2406
2407 if (tp->link_config.active_speed == SPEED_1000 &&
2408 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2409 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2410 tg3_flag(tp, 57765_CLASS)) &&
2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412 val = MII_TG3_DSP_TAP26_ALNOKO |
2413 MII_TG3_DSP_TAP26_RMRXSTO;
2414 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2415 tg3_phy_toggle_auxctl_smdsp(tp, false);
2416 }
2417
2418 val = tr32(TG3_CPMU_EEE_MODE);
2419 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2420 }
2421
2422 static int tg3_wait_macro_done(struct tg3 *tp)
2423 {
2424 int limit = 100;
2425
2426 while (limit--) {
2427 u32 tmp32;
2428
2429 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2430 if ((tmp32 & 0x1000) == 0)
2431 break;
2432 }
2433 }
2434 if (limit < 0)
2435 return -EBUSY;
2436
2437 return 0;
2438 }
2439
2440 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2441 {
2442 static const u32 test_pat[4][6] = {
2443 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2444 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2445 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2446 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2447 };
2448 int chan;
2449
2450 for (chan = 0; chan < 4; chan++) {
2451 int i;
2452
2453 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2454 (chan * 0x2000) | 0x0200);
2455 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2456
2457 for (i = 0; i < 6; i++)
2458 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2459 test_pat[chan][i]);
2460
2461 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2462 if (tg3_wait_macro_done(tp)) {
2463 *resetp = 1;
2464 return -EBUSY;
2465 }
2466
2467 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2468 (chan * 0x2000) | 0x0200);
2469 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2470 if (tg3_wait_macro_done(tp)) {
2471 *resetp = 1;
2472 return -EBUSY;
2473 }
2474
2475 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2476 if (tg3_wait_macro_done(tp)) {
2477 *resetp = 1;
2478 return -EBUSY;
2479 }
2480
2481 for (i = 0; i < 6; i += 2) {
2482 u32 low, high;
2483
2484 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2485 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2486 tg3_wait_macro_done(tp)) {
2487 *resetp = 1;
2488 return -EBUSY;
2489 }
2490 low &= 0x7fff;
2491 high &= 0x000f;
2492 if (low != test_pat[chan][i] ||
2493 high != test_pat[chan][i+1]) {
2494 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2495 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2496 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2497
2498 return -EBUSY;
2499 }
2500 }
2501 }
2502
2503 return 0;
2504 }
2505
2506 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2507 {
2508 int chan;
2509
2510 for (chan = 0; chan < 4; chan++) {
2511 int i;
2512
2513 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2514 (chan * 0x2000) | 0x0200);
2515 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2516 for (i = 0; i < 6; i++)
2517 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2518 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2519 if (tg3_wait_macro_done(tp))
2520 return -EBUSY;
2521 }
2522
2523 return 0;
2524 }
2525
2526 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2527 {
2528 u32 reg32, phy9_orig;
2529 int retries, do_phy_reset, err;
2530
2531 retries = 10;
2532 do_phy_reset = 1;
2533 do {
2534 if (do_phy_reset) {
2535 err = tg3_bmcr_reset(tp);
2536 if (err)
2537 return err;
2538 do_phy_reset = 0;
2539 }
2540
2541 /* Disable transmitter and interrupt. */
2542 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2543 continue;
2544
2545 reg32 |= 0x3000;
2546 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2547
2548 /* Set full-duplex, 1000 Mbps. */
2549 tg3_writephy(tp, MII_BMCR,
2550 BMCR_FULLDPLX | BMCR_SPEED1000);
2551
2552 /* Set to master mode. */
2553 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2554 continue;
2555
2556 tg3_writephy(tp, MII_CTRL1000,
2557 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2558
2559 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2560 if (err)
2561 return err;
2562
2563 /* Block the PHY control access. */
2564 tg3_phydsp_write(tp, 0x8005, 0x0800);
2565
2566 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2567 if (!err)
2568 break;
2569 } while (--retries);
2570
2571 err = tg3_phy_reset_chanpat(tp);
2572 if (err)
2573 return err;
2574
2575 tg3_phydsp_write(tp, 0x8005, 0x0000);
2576
2577 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2578 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2579
2580 tg3_phy_toggle_auxctl_smdsp(tp, false);
2581
2582 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2583
2584 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2585 reg32 &= ~0x3000;
2586 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2587 } else if (!err)
2588 err = -EBUSY;
2589
2590 return err;
2591 }
2592
2593 static void tg3_carrier_off(struct tg3 *tp)
2594 {
2595 netif_carrier_off(tp->dev);
2596 tp->link_up = false;
2597 }
2598
2599 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2600 {
2601 if (tg3_flag(tp, ENABLE_ASF))
2602 netdev_warn(tp->dev,
2603 "Management side-band traffic will be interrupted during phy settings change\n");
2604 }
2605
2606 /* Reset the tigon3 PHY and reapply the chip-specific
2607  * workarounds that a reset clears.
2608  */
2609 static int tg3_phy_reset(struct tg3 *tp)
2610 {
2611 u32 val, cpmuctrl;
2612 int err;
2613
2614 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2615 val = tr32(GRC_MISC_CFG);
2616 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2617 udelay(40);
2618 }
2619 err = tg3_readphy(tp, MII_BMSR, &val);
2620 err |= tg3_readphy(tp, MII_BMSR, &val);
2621 if (err != 0)
2622 return -EBUSY;
2623
2624 if (netif_running(tp->dev) && tp->link_up) {
2625 netif_carrier_off(tp->dev);
2626 tg3_link_report(tp);
2627 }
2628
2629 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2630 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2631 tg3_asic_rev(tp) == ASIC_REV_5705) {
2632 err = tg3_phy_reset_5703_4_5(tp);
2633 if (err)
2634 return err;
2635 goto out;
2636 }
2637
2638 cpmuctrl = 0;
2639 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2640 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2641 cpmuctrl = tr32(TG3_CPMU_CTRL);
2642 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2643 tw32(TG3_CPMU_CTRL,
2644 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2645 }
2646
2647 err = tg3_bmcr_reset(tp);
2648 if (err)
2649 return err;
2650
2651 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2652 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2653 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2654
2655 tw32(TG3_CPMU_CTRL, cpmuctrl);
2656 }
2657
2658 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2659 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2660 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2661 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2662 CPMU_LSPD_1000MB_MACCLK_12_5) {
2663 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2664 udelay(40);
2665 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2666 }
2667 }
2668
2669 if (tg3_flag(tp, 5717_PLUS) &&
2670 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2671 return 0;
2672
2673 tg3_phy_apply_otp(tp);
2674
2675 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2676 tg3_phy_toggle_apd(tp, true);
2677 else
2678 tg3_phy_toggle_apd(tp, false);
2679
2680 out:
2681 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2682 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2683 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2684 tg3_phydsp_write(tp, 0x000a, 0x0323);
2685 tg3_phy_toggle_auxctl_smdsp(tp, false);
2686 }
2687
2688 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2689 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2690 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2691 }
2692
2693 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2694 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2695 tg3_phydsp_write(tp, 0x000a, 0x310b);
2696 tg3_phydsp_write(tp, 0x201f, 0x9506);
2697 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2698 tg3_phy_toggle_auxctl_smdsp(tp, false);
2699 }
2700 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2701 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2703 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2704 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2705 tg3_writephy(tp, MII_TG3_TEST1,
2706 MII_TG3_TEST1_TRIM_EN | 0x4);
2707 } else
2708 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2709
2710 tg3_phy_toggle_auxctl_smdsp(tp, false);
2711 }
2712 }
2713
2714 /* Set the extended packet length bit (bit 14) on all chips
2715  * that support jumbo frames. */
2716 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2717 /* Cannot do read-modify-write on 5401 */
2718 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2719 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2720 /* Set bit 14 with read-modify-write to preserve other bits */
2721 err = tg3_phy_auxctl_read(tp,
2722 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2723 if (!err)
2724 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2725 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2726 }
2727
2728 /* Set PHY register 0x10, bit 0, to select high FIFO elasticity
2729  * to support transmission of jumbo frames.
2730  */
2731 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2732 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2733 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2734 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2735 }
2736
2737 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2738 /* adjust output voltage */
2739 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2740 }
2741
2742 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2743 tg3_phydsp_write(tp, 0xffb, 0x4000);
2744
2745 tg3_phy_toggle_automdix(tp, true);
2746 tg3_phy_set_wirespeed(tp);
2747 return 0;
2748 }
2749
2750 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2751 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2752 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2753 TG3_GPIO_MSG_NEED_VAUX)
2754 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2755 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2756 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2757 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2758 (TG3_GPIO_MSG_DRVR_PRES << 12))
2759
2760 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2761 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2762 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2763 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2764 (TG3_GPIO_MSG_NEED_VAUX << 12))
2765
2766 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2767 {
2768 u32 status, shift;
2769
2770 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2771 tg3_asic_rev(tp) == ASIC_REV_5719)
2772 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2773 else
2774 status = tr32(TG3_CPMU_DRV_STATUS);
2775
2776 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2777 status &= ~(TG3_GPIO_MSG_MASK << shift);
2778 status |= (newstat << shift);
2779
2780 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2781 tg3_asic_rev(tp) == ASIC_REV_5719)
2782 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2783 else
2784 tw32(TG3_CPMU_DRV_STATUS, status);
2785
2786 return status >> TG3_APE_GPIO_MSG_SHIFT;
2787 }
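/* Each PCI function owns a 4-bit slice of the status word computed
 * above, starting at TG3_APE_GPIO_MSG_SHIFT: function 0 at shift +0,
 * function 1 at +4, and so on, which is why the ALL_*_MASK macros OR
 * the DRVR_PRES/NEED_VAUX bits together at shifts 0, 4, 8 and 12.
 */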
2788
2789 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2790 {
2791 if (!tg3_flag(tp, IS_NIC))
2792 return 0;
2793
2794 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2795 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2796 tg3_asic_rev(tp) == ASIC_REV_5720) {
2797 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2798 return -EIO;
2799
2800 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2801
2802 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2803 TG3_GRC_LCLCTL_PWRSW_DELAY);
2804
2805 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2806 } else {
2807 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2808 TG3_GRC_LCLCTL_PWRSW_DELAY);
2809 }
2810
2811 return 0;
2812 }
2813
2814 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2815 {
2816 u32 grc_local_ctrl;
2817
2818 if (!tg3_flag(tp, IS_NIC) ||
2819 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2820 tg3_asic_rev(tp) == ASIC_REV_5701)
2821 return;
2822
2823 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2824
2825 tw32_wait_f(GRC_LOCAL_CTRL,
2826 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2827 TG3_GRC_LCLCTL_PWRSW_DELAY);
2828
2829 tw32_wait_f(GRC_LOCAL_CTRL,
2830 grc_local_ctrl,
2831 TG3_GRC_LCLCTL_PWRSW_DELAY);
2832
2833 tw32_wait_f(GRC_LOCAL_CTRL,
2834 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836 }
2837
2838 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2839 {
2840 if (!tg3_flag(tp, IS_NIC))
2841 return;
2842
2843 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2844 tg3_asic_rev(tp) == ASIC_REV_5701) {
2845 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2846 (GRC_LCLCTRL_GPIO_OE0 |
2847 GRC_LCLCTRL_GPIO_OE1 |
2848 GRC_LCLCTRL_GPIO_OE2 |
2849 GRC_LCLCTRL_GPIO_OUTPUT0 |
2850 GRC_LCLCTRL_GPIO_OUTPUT1),
2851 TG3_GRC_LCLCTL_PWRSW_DELAY);
2852 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2853 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2854 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2855 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2856 GRC_LCLCTRL_GPIO_OE1 |
2857 GRC_LCLCTRL_GPIO_OE2 |
2858 GRC_LCLCTRL_GPIO_OUTPUT0 |
2859 GRC_LCLCTRL_GPIO_OUTPUT1 |
2860 tp->grc_local_ctrl;
2861 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2862 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863
2864 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2865 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2866 TG3_GRC_LCLCTL_PWRSW_DELAY);
2867
2868 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2869 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 } else {
2872 u32 no_gpio2;
2873 u32 grc_local_ctrl = 0;
2874
2875 /* Workaround to prevent the part from drawing too much current. */
2876 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2877 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2878 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2879 grc_local_ctrl,
2880 TG3_GRC_LCLCTL_PWRSW_DELAY);
2881 }
2882
2883 /* On 5753 and variants, GPIO2 cannot be used. */
2884 no_gpio2 = tp->nic_sram_data_cfg &
2885 NIC_SRAM_DATA_CFG_NO_GPIO2;
2886
2887 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2888 GRC_LCLCTRL_GPIO_OE1 |
2889 GRC_LCLCTRL_GPIO_OE2 |
2890 GRC_LCLCTRL_GPIO_OUTPUT1 |
2891 GRC_LCLCTRL_GPIO_OUTPUT2;
2892 if (no_gpio2) {
2893 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2894 GRC_LCLCTRL_GPIO_OUTPUT2);
2895 }
2896 tw32_wait_f(GRC_LOCAL_CTRL,
2897 tp->grc_local_ctrl | grc_local_ctrl,
2898 TG3_GRC_LCLCTL_PWRSW_DELAY);
2899
2900 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2901
2902 tw32_wait_f(GRC_LOCAL_CTRL,
2903 tp->grc_local_ctrl | grc_local_ctrl,
2904 TG3_GRC_LCLCTL_PWRSW_DELAY);
2905
2906 if (!no_gpio2) {
2907 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2908 tw32_wait_f(GRC_LOCAL_CTRL,
2909 tp->grc_local_ctrl | grc_local_ctrl,
2910 TG3_GRC_LCLCTL_PWRSW_DELAY);
2911 }
2912 }
2913 }
2914
2915 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2916 {
2917 u32 msg = 0;
2918
2919 /* Serialize power state transitions */
2920 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2921 return;
2922
2923 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2924 msg = TG3_GPIO_MSG_NEED_VAUX;
2925
2926 msg = tg3_set_function_status(tp, msg);
2927
2928 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2929 goto done;
2930
2931 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2932 tg3_pwrsrc_switch_to_vaux(tp);
2933 else
2934 tg3_pwrsrc_die_with_vmain(tp);
2935
2936 done:
2937 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2938 }
2939
2940 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2941 {
2942 bool need_vaux = false;
2943
2944 /* The GPIOs do something completely different on 57765. */
2945 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2946 return;
2947
2948 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2949 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2950 tg3_asic_rev(tp) == ASIC_REV_5720) {
2951 tg3_frob_aux_power_5717(tp, include_wol ?
2952 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2953 return;
2954 }
2955
2956 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2957 struct net_device *dev_peer;
2958
2959 dev_peer = pci_get_drvdata(tp->pdev_peer);
2960
2961 /* remove_one() may have been run on the peer. */
2962 if (dev_peer) {
2963 struct tg3 *tp_peer = netdev_priv(dev_peer);
2964
2965 if (tg3_flag(tp_peer, INIT_COMPLETE))
2966 return;
2967
2968 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2969 tg3_flag(tp_peer, ENABLE_ASF))
2970 need_vaux = true;
2971 }
2972 }
2973
2974 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2975 tg3_flag(tp, ENABLE_ASF))
2976 need_vaux = true;
2977
2978 if (need_vaux)
2979 tg3_pwrsrc_switch_to_vaux(tp);
2980 else
2981 tg3_pwrsrc_die_with_vmain(tp);
2982 }
2983
2984 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2985 {
2986 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2987 return 1;
2988 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2989 if (speed != SPEED_10)
2990 return 1;
2991 } else if (speed == SPEED_10)
2992 return 1;
2993
2994 return 0;
2995 }
2996
2997 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2998 {
2999 u32 val;
3000
3001 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3002 return;
3003
3004 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3005 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3006 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3007 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3008
3009 sg_dig_ctrl |=
3010 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3011 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3012 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3013 }
3014 return;
3015 }
3016
3017 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3018 tg3_bmcr_reset(tp);
3019 val = tr32(GRC_MISC_CFG);
3020 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3021 udelay(40);
3022 return;
3023 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3024 u32 phytest;
3025 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3026 u32 phy;
3027
3028 tg3_writephy(tp, MII_ADVERTISE, 0);
3029 tg3_writephy(tp, MII_BMCR,
3030 BMCR_ANENABLE | BMCR_ANRESTART);
3031
3032 tg3_writephy(tp, MII_TG3_FET_TEST,
3033 phytest | MII_TG3_FET_SHADOW_EN);
3034 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3035 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3036 tg3_writephy(tp,
3037 MII_TG3_FET_SHDW_AUXMODE4,
3038 phy);
3039 }
3040 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3041 }
3042 return;
3043 } else if (do_low_power) {
3044 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3045 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3046
3047 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3048 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3049 MII_TG3_AUXCTL_PCTL_VREG_11V;
3050 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3051 }
3052
3053 /* On some chips the PHY must not be powered down because of
3054  * hardware bugs.
3055  */
3056 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3057 tg3_asic_rev(tp) == ASIC_REV_5704 ||
3058 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
3059 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
3060 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
3061 !tp->pci_fn))
3062 return;
3063
3064 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3065 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3066 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3067 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3068 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3069 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3070 }
3071
3072 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3073 }
3074
3075 /* tp->lock is held. */
3076 static int tg3_nvram_lock(struct tg3 *tp)
3077 {
3078 if (tg3_flag(tp, NVRAM)) {
3079 int i;
3080
3081 if (tp->nvram_lock_cnt == 0) {
3082 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3083 for (i = 0; i < 8000; i++) {
3084 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3085 break;
3086 udelay(20);
3087 }
3088 if (i == 8000) {
3089 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3090 return -ENODEV;
3091 }
3092 }
3093 tp->nvram_lock_cnt++;
3094 }
3095 return 0;
3096 }
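/* The NVRAM software arbitration behaves like a counting lock: only
 * the first acquisition posts SWARB_REQ_SET1 and spins for SWARB_GNT1
 * (up to 8000 * 20us = 160ms); nested acquisitions just bump
 * nvram_lock_cnt, and tg3_nvram_unlock() below releases the hardware
 * grant only when the count returns to zero.
 */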
3097
3098 /* tp->lock is held. */
3099 static void tg3_nvram_unlock(struct tg3 *tp)
3100 {
3101 if (tg3_flag(tp, NVRAM)) {
3102 if (tp->nvram_lock_cnt > 0)
3103 tp->nvram_lock_cnt--;
3104 if (tp->nvram_lock_cnt == 0)
3105 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3106 }
3107 }
3108
3109 /* tp->lock is held. */
3110 static void tg3_enable_nvram_access(struct tg3 *tp)
3111 {
3112 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3113 u32 nvaccess = tr32(NVRAM_ACCESS);
3114
3115 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3116 }
3117 }
3118
3119 /* tp->lock is held. */
3120 static void tg3_disable_nvram_access(struct tg3 *tp)
3121 {
3122 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3123 u32 nvaccess = tr32(NVRAM_ACCESS);
3124
3125 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3126 }
3127 }
3128
3129 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3130 u32 offset, u32 *val)
3131 {
3132 u32 tmp;
3133 int i;
3134
3135 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3136 return -EINVAL;
3137
3138 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3139 EEPROM_ADDR_DEVID_MASK |
3140 EEPROM_ADDR_READ);
3141 tw32(GRC_EEPROM_ADDR,
3142 tmp |
3143 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3144 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3145 EEPROM_ADDR_ADDR_MASK) |
3146 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3147
3148 for (i = 0; i < 1000; i++) {
3149 tmp = tr32(GRC_EEPROM_ADDR);
3150
3151 if (tmp & EEPROM_ADDR_COMPLETE)
3152 break;
3153 msleep(1);
3154 }
3155 if (!(tmp & EEPROM_ADDR_COMPLETE))
3156 return -EBUSY;
3157
3158 tmp = tr32(GRC_EEPROM_DATA);
3159
3160 /*
3161 * The data will always be opposite the native endian
3162 * format. Perform a blind byteswap to compensate.
3163 */
3164 *val = swab32(tmp);
3165
3166 return 0;
3167 }
3168
3169 #define NVRAM_CMD_TIMEOUT 10000
3170
3171 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3172 {
3173 int i;
3174
3175 tw32(NVRAM_CMD, nvram_cmd);
3176 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3177 udelay(10);
3178 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3179 udelay(10);
3180 break;
3181 }
3182 }
3183
3184 if (i == NVRAM_CMD_TIMEOUT)
3185 return -EBUSY;
3186
3187 return 0;
3188 }
3189
3190 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3191 {
3192 if (tg3_flag(tp, NVRAM) &&
3193 tg3_flag(tp, NVRAM_BUFFERED) &&
3194 tg3_flag(tp, FLASH) &&
3195 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3196 (tp->nvram_jedecnum == JEDEC_ATMEL))
3197
3198 addr = ((addr / tp->nvram_pagesize) <<
3199 ATMEL_AT45DB0X1B_PAGE_POS) +
3200 (addr % tp->nvram_pagesize);
3201
3202 return addr;
3203 }
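/* Worked example of the translation above, assuming the usual
 * AT45DB0X1B geometry (264-byte pages, ATMEL_AT45DB0X1B_PAGE_POS of
 * 9): linear offset 532 falls in page 2 at byte 4, giving a physical
 * address of (2 << 9) + 4 = 0x404. These parts use non-power-of-two
 * pages, hence the divide/modulo rather than simple masking.
 */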
3204
3205 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3206 {
3207 if (tg3_flag(tp, NVRAM) &&
3208 tg3_flag(tp, NVRAM_BUFFERED) &&
3209 tg3_flag(tp, FLASH) &&
3210 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3211 (tp->nvram_jedecnum == JEDEC_ATMEL))
3212
3213 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3214 tp->nvram_pagesize) +
3215 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3216
3217 return addr;
3218 }
3219
3220 /* NOTE: Data read in from NVRAM is byteswapped according to
3221 * the byteswapping settings for all other register accesses.
3222 * tg3 devices are BE devices, so on a BE machine, the data
3223 * returned will be exactly as it is seen in NVRAM. On a LE
3224 * machine, the 32-bit value will be byteswapped.
3225 */
3226 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3227 {
3228 int ret;
3229
3230 if (!tg3_flag(tp, NVRAM))
3231 return tg3_nvram_read_using_eeprom(tp, offset, val);
3232
3233 offset = tg3_nvram_phys_addr(tp, offset);
3234
3235 if (offset > NVRAM_ADDR_MSK)
3236 return -EINVAL;
3237
3238 ret = tg3_nvram_lock(tp);
3239 if (ret)
3240 return ret;
3241
3242 tg3_enable_nvram_access(tp);
3243
3244 tw32(NVRAM_ADDR, offset);
3245 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3246 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3247
3248 if (ret == 0)
3249 *val = tr32(NVRAM_RDDATA);
3250
3251 tg3_disable_nvram_access(tp);
3252
3253 tg3_nvram_unlock(tp);
3254
3255 return ret;
3256 }
3257
3258 /* Ensures NVRAM data is in bytestream format. */
3259 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3260 {
3261 u32 v;
3262 int res = tg3_nvram_read(tp, offset, &v);
3263 if (!res)
3264 *val = cpu_to_be32(v);
3265 return res;
3266 }
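/* A minimal usage sketch (hypothetical caller, in the style of the
 * ethtool EEPROM-dump path): filling a byte buffer in bytestream
 * order from a dword-aligned region, where tp, offset, len and buf
 * are the caller's own context.
 *
 *	u32 i;
 *	for (i = 0; i < len; i += 4) {
 *		__be32 v;
 *		if (tg3_nvram_read_be32(tp, offset + i, &v))
 *			break;
 *		memcpy(buf + i, &v, sizeof(v));
 *	}
 */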
3267
3268 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3269 u32 offset, u32 len, u8 *buf)
3270 {
3271 int i, j, rc = 0;
3272 u32 val;
3273
3274 for (i = 0; i < len; i += 4) {
3275 u32 addr;
3276 __be32 data;
3277
3278 addr = offset + i;
3279
3280 memcpy(&data, buf + i, 4);
3281
3282 /*
3283 * The SEEPROM interface expects the data to always be opposite
3284 * the native endian format. We accomplish this by reversing
3285 * all the operations that would have been performed on the
3286 * data from a call to tg3_nvram_read_be32().
3287 */
3288 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3289
3290 val = tr32(GRC_EEPROM_ADDR);
3291 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3292
3293 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3294 EEPROM_ADDR_READ);
3295 tw32(GRC_EEPROM_ADDR, val |
3296 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3297 (addr & EEPROM_ADDR_ADDR_MASK) |
3298 EEPROM_ADDR_START |
3299 EEPROM_ADDR_WRITE);
3300
3301 for (j = 0; j < 1000; j++) {
3302 val = tr32(GRC_EEPROM_ADDR);
3303
3304 if (val & EEPROM_ADDR_COMPLETE)
3305 break;
3306 msleep(1);
3307 }
3308 if (!(val & EEPROM_ADDR_COMPLETE)) {
3309 rc = -EBUSY;
3310 break;
3311 }
3312 }
3313
3314 return rc;
3315 }
3316
3317 /* offset and length are dword aligned */
3318 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3319 u8 *buf)
3320 {
3321 int ret = 0;
3322 u32 pagesize = tp->nvram_pagesize;
3323 u32 pagemask = pagesize - 1;
3324 u32 nvram_cmd;
3325 u8 *tmp;
3326
3327 tmp = kmalloc(pagesize, GFP_KERNEL);
3328 if (tmp == NULL)
3329 return -ENOMEM;
3330
3331 while (len) {
3332 int j;
3333 u32 phy_addr, page_off, size;
3334
3335 phy_addr = offset & ~pagemask;
3336
3337 for (j = 0; j < pagesize; j += 4) {
3338 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3339 (__be32 *) (tmp + j));
3340 if (ret)
3341 break;
3342 }
3343 if (ret)
3344 break;
3345
3346 page_off = offset & pagemask;
3347 size = pagesize;
3348 if (len < size)
3349 size = len;
3350
3351 len -= size;
3352
3353 memcpy(tmp + page_off, buf, size);
3354
3355 offset = offset + (pagesize - page_off);
3356
3357 tg3_enable_nvram_access(tp);
3358
3359 /*
3360 * Before we can erase the flash page, we need
3361 * to issue a special "write enable" command.
3362 */
3363 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3364
3365 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3366 break;
3367
3368 /* Erase the target page */
3369 tw32(NVRAM_ADDR, phy_addr);
3370
3371 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3372 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3373
3374 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3375 break;
3376
3377 /* Issue another write enable to start the write. */
3378 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3379
3380 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3381 break;
3382
3383 for (j = 0; j < pagesize; j += 4) {
3384 __be32 data;
3385
3386 data = *((__be32 *) (tmp + j));
3387
3388 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3389
3390 tw32(NVRAM_ADDR, phy_addr + j);
3391
3392 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3393 NVRAM_CMD_WR;
3394
3395 if (j == 0)
3396 nvram_cmd |= NVRAM_CMD_FIRST;
3397 else if (j == (pagesize - 4))
3398 nvram_cmd |= NVRAM_CMD_LAST;
3399
3400 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3401 if (ret)
3402 break;
3403 }
3404 if (ret)
3405 break;
3406 }
3407
3408 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3409 tg3_nvram_exec_cmd(tp, nvram_cmd);
3410
3411 kfree(tmp);
3412
3413 return ret;
3414 }
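/* To recap the unbuffered path above: every flash page touched by the
 * request is read back in full, patched in memory, erased, and
 * reprogrammed dword by dword, with a write-enable command issued
 * before both the erase and the program phase. Buffered parts (see
 * tg3_nvram_write_block_buffered() below) skip the explicit
 * read-modify-erase cycle.
 */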
3415
3416 /* offset and length are dword aligned */
3417 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3418 u8 *buf)
3419 {
3420 int i, ret = 0;
3421
3422 for (i = 0; i < len; i += 4, offset += 4) {
3423 u32 page_off, phy_addr, nvram_cmd;
3424 __be32 data;
3425
3426 memcpy(&data, buf + i, 4);
3427 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3428
3429 page_off = offset % tp->nvram_pagesize;
3430
3431 phy_addr = tg3_nvram_phys_addr(tp, offset);
3432
3433 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3434
3435 if (page_off == 0 || i == 0)
3436 nvram_cmd |= NVRAM_CMD_FIRST;
3437 if (page_off == (tp->nvram_pagesize - 4))
3438 nvram_cmd |= NVRAM_CMD_LAST;
3439
3440 if (i == (len - 4))
3441 nvram_cmd |= NVRAM_CMD_LAST;
3442
3443 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3444 !tg3_flag(tp, FLASH) ||
3445 !tg3_flag(tp, 57765_PLUS))
3446 tw32(NVRAM_ADDR, phy_addr);
3447
3448 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3449 !tg3_flag(tp, 5755_PLUS) &&
3450 (tp->nvram_jedecnum == JEDEC_ST) &&
3451 (nvram_cmd & NVRAM_CMD_FIRST)) {
3452 u32 cmd;
3453
3454 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3455 ret = tg3_nvram_exec_cmd(tp, cmd);
3456 if (ret)
3457 break;
3458 }
3459 if (!tg3_flag(tp, FLASH)) {
3460 /* We always do complete word writes to the EEPROM. */
3461 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3462 }
3463
3464 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3465 if (ret)
3466 break;
3467 }
3468 return ret;
3469 }
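/* The FIRST/LAST flags above delimit a programming burst for the
 * NVRAM state machine: FIRST marks the start of a page (or of the
 * whole transfer) and LAST the final dword of a page or of the
 * buffer, so a write that straddles a page boundary is issued as two
 * bursts.
 */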
3470
3471 /* offset and length are dword aligned */
3472 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3473 {
3474 int ret;
3475
3476 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3477 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3478 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3479 udelay(40);
3480 }
3481
3482 if (!tg3_flag(tp, NVRAM)) {
3483 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3484 } else {
3485 u32 grc_mode;
3486
3487 ret = tg3_nvram_lock(tp);
3488 if (ret)
3489 return ret;
3490
3491 tg3_enable_nvram_access(tp);
3492 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3493 tw32(NVRAM_WRITE1, 0x406);
3494
3495 grc_mode = tr32(GRC_MODE);
3496 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3497
3498 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3499 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3500 buf);
3501 } else {
3502 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3503 buf);
3504 }
3505
3506 grc_mode = tr32(GRC_MODE);
3507 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3508
3509 tg3_disable_nvram_access(tp);
3510 tg3_nvram_unlock(tp);
3511 }
3512
3513 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3514 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3515 udelay(40);
3516 }
3517
3518 return ret;
3519 }
3520
3521 #define RX_CPU_SCRATCH_BASE 0x30000
3522 #define RX_CPU_SCRATCH_SIZE 0x04000
3523 #define TX_CPU_SCRATCH_BASE 0x34000
3524 #define TX_CPU_SCRATCH_SIZE 0x04000
3525
3526 /* tp->lock is held. */
3527 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3528 {
3529 int i;
3530 const int iters = 10000;
3531
3532 for (i = 0; i < iters; i++) {
3533 tw32(cpu_base + CPU_STATE, 0xffffffff);
3534 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3535 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3536 break;
3537 }
3538
3539 return (i == iters) ? -EBUSY : 0;
3540 }
3541
3542 /* tp->lock is held. */
3543 static int tg3_rxcpu_pause(struct tg3 *tp)
3544 {
3545 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3546
3547 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3548 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3549 udelay(10);
3550
3551 return rc;
3552 }
3553
3554 /* tp->lock is held. */
3555 static int tg3_txcpu_pause(struct tg3 *tp)
3556 {
3557 return tg3_pause_cpu(tp, TX_CPU_BASE);
3558 }
3559
3560 /* tp->lock is held. */
3561 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3562 {
3563 tw32(cpu_base + CPU_STATE, 0xffffffff);
3564 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3565 }
3566
3567 /* tp->lock is held. */
3568 static void tg3_rxcpu_resume(struct tg3 *tp)
3569 {
3570 tg3_resume_cpu(tp, RX_CPU_BASE);
3571 }
3572
3573 /* tp->lock is held. */
3574 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3575 {
3576 int rc;
3577
3578 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3579
3580 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3581 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3582
3583 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3584 return 0;
3585 }
3586 if (cpu_base == RX_CPU_BASE) {
3587 rc = tg3_rxcpu_pause(tp);
3588 } else {
3589 /*
3590 * There is only an Rx CPU for the 5750 derivative in the
3591 * BCM4785.
3592 */
3593 if (tg3_flag(tp, IS_SSB_CORE))
3594 return 0;
3595
3596 rc = tg3_txcpu_pause(tp);
3597 }
3598
3599 if (rc) {
3600 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3601 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3602 return -ENODEV;
3603 }
3604
3605 /* Clear firmware's nvram arbitration. */
3606 if (tg3_flag(tp, NVRAM))
3607 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3608 return 0;
3609 }
3610
3611 static int tg3_fw_data_len(struct tg3 *tp,
3612 const struct tg3_firmware_hdr *fw_hdr)
3613 {
3614 int fw_len;
3615
3616 	/* Non-fragmented firmware has one firmware header followed by a
3617 	 * contiguous chunk of data to be written. The length field in that
3618 	 * header is not the length of the data to be written but the
3619 	 * complete length of the bss. The data length is instead derived
3620 	 * from tp->fw->size minus the headers.
3621 	 *
3622 	 * Fragmented firmware has a main header followed by multiple
3623 	 * fragments. Each fragment is identical to non-fragmented firmware:
3624 	 * a firmware header followed by a contiguous chunk of data. In
3625 	 * the main header, the length field is unused and set to 0xffffffff.
3626 	 * In each fragment header, the length is the entire size of that
3627 	 * fragment, i.e. fragment data plus header length. The data length
3628 	 * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3629 	 */
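	/* Illustratively (fields per struct tg3_firmware_hdr):
	 *
	 *   non-fragmented: [version|base_addr|len][data ...............]
	 *   fragmented:     [main hdr, len=0xffffffff]
	 *                   [frag hdr|frag data][frag hdr|frag data] ...
	 */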
3630 if (tp->fw_len == 0xffffffff)
3631 fw_len = be32_to_cpu(fw_hdr->len);
3632 else
3633 fw_len = tp->fw->size;
3634
3635 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3636 }
3637
3638 /* tp->lock is held. */
3639 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3640 u32 cpu_scratch_base, int cpu_scratch_size,
3641 const struct tg3_firmware_hdr *fw_hdr)
3642 {
3643 int err, i;
3644 void (*write_op)(struct tg3 *, u32, u32);
3645 int total_len = tp->fw->size;
3646
3647 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3648 netdev_err(tp->dev,
3649 "%s: Trying to load TX cpu firmware which is 5705\n",
3650 __func__);
3651 return -EINVAL;
3652 }
3653
3654 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3655 write_op = tg3_write_mem;
3656 else
3657 write_op = tg3_write_indirect_reg32;
3658
3659 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3660 /* It is possible that bootcode is still loading at this point.
3661 * Get the nvram lock first before halting the cpu.
3662 */
3663 int lock_err = tg3_nvram_lock(tp);
3664 err = tg3_halt_cpu(tp, cpu_base);
3665 if (!lock_err)
3666 tg3_nvram_unlock(tp);
3667 if (err)
3668 goto out;
3669
3670 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3671 write_op(tp, cpu_scratch_base + i, 0);
3672 tw32(cpu_base + CPU_STATE, 0xffffffff);
3673 tw32(cpu_base + CPU_MODE,
3674 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3675 } else {
3676 /* Subtract additional main header for fragmented firmware and
3677 * advance to the first fragment
3678 */
3679 total_len -= TG3_FW_HDR_LEN;
3680 fw_hdr++;
3681 }
3682
3683 do {
3684 u32 *fw_data = (u32 *)(fw_hdr + 1);
3685 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3686 write_op(tp, cpu_scratch_base +
3687 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3688 (i * sizeof(u32)),
3689 be32_to_cpu(fw_data[i]));
3690
3691 total_len -= be32_to_cpu(fw_hdr->len);
3692
3693 /* Advance to next fragment */
3694 fw_hdr = (struct tg3_firmware_hdr *)
3695 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3696 } while (total_len > 0);
3697
3698 err = 0;
3699
3700 out:
3701 return err;
3702 }
3703
3704 /* tp->lock is held. */
3705 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3706 {
3707 int i;
3708 const int iters = 5;
3709
3710 tw32(cpu_base + CPU_STATE, 0xffffffff);
3711 tw32_f(cpu_base + CPU_PC, pc);
3712
3713 for (i = 0; i < iters; i++) {
3714 if (tr32(cpu_base + CPU_PC) == pc)
3715 break;
3716 tw32(cpu_base + CPU_STATE, 0xffffffff);
3717 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3718 tw32_f(cpu_base + CPU_PC, pc);
3719 udelay(1000);
3720 }
3721
3722 return (i == iters) ? -EBUSY : 0;
3723 }
3724
3725 /* tp->lock is held. */
3726 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3727 {
3728 const struct tg3_firmware_hdr *fw_hdr;
3729 int err;
3730
3731 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3732
3733 	/* The firmware blob starts with version numbers, followed by the
3734 	 * start address and length. The length field covers the complete
3735 	 * image: length = end_address_of_bss - start_address_of_text.
3736 	 * The remainder is the blob to be loaded contiguously from the
3737 	 * start address. */
3738
3739 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3740 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3741 fw_hdr);
3742 if (err)
3743 return err;
3744
3745 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3746 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3747 fw_hdr);
3748 if (err)
3749 return err;
3750
3751 /* Now startup only the RX cpu. */
3752 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3753 be32_to_cpu(fw_hdr->base_addr));
3754 if (err) {
3755 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3756 "should be %08x\n", __func__,
3757 tr32(RX_CPU_BASE + CPU_PC),
3758 be32_to_cpu(fw_hdr->base_addr));
3759 return -ENODEV;
3760 }
3761
3762 tg3_rxcpu_resume(tp);
3763
3764 return 0;
3765 }
3766
3767 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3768 {
3769 const int iters = 1000;
3770 int i;
3771 u32 val;
3772
3773 /* Wait for boot code to complete initialization and enter service
3774 * loop. It is then safe to download service patches
3775 */
3776 for (i = 0; i < iters; i++) {
3777 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3778 break;
3779
3780 udelay(10);
3781 }
3782
3783 if (i == iters) {
3784 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3785 return -EBUSY;
3786 }
3787
3788 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3789 if (val & 0xff) {
3790 netdev_warn(tp->dev,
3791 "Other patches exist. Not downloading EEE patch\n");
3792 return -EEXIST;
3793 }
3794
3795 return 0;
3796 }
3797
3798 /* tp->lock is held. */
3799 static void tg3_load_57766_firmware(struct tg3 *tp)
3800 {
3801 struct tg3_firmware_hdr *fw_hdr;
3802
3803 if (!tg3_flag(tp, NO_NVRAM))
3804 return;
3805
3806 if (tg3_validate_rxcpu_state(tp))
3807 return;
3808
3809 if (!tp->fw)
3810 return;
3811
3812 	/* This firmware blob has a different format than older firmware
3813 	 * releases, as described below. The main difference is that the
3814 	 * data is fragmented and written to non-contiguous locations.
3815 	 *
3816 	 * It begins with a firmware header identical to other firmware,
3817 	 * consisting of version, base address and length. The length here
3818 	 * is unused and set to 0xffffffff.
3819 	 *
3820 	 * This is followed by a series of firmware fragments, each
3821 	 * individually identical to older firmware images: a firmware
3822 	 * header followed by the data for that fragment. The version
3823 	 * field of the individual fragment headers is unused.
3824 	 */
3825
3826 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3827 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3828 return;
3829
3830 if (tg3_rxcpu_pause(tp))
3831 return;
3832
3833 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3834 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3835
3836 tg3_rxcpu_resume(tp);
3837 }
3838
3839 /* tp->lock is held. */
3840 static int tg3_load_tso_firmware(struct tg3 *tp)
3841 {
3842 const struct tg3_firmware_hdr *fw_hdr;
3843 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3844 int err;
3845
3846 if (!tg3_flag(tp, FW_TSO))
3847 return 0;
3848
3849 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3850
3851 	/* The firmware blob starts with version numbers, followed by the
3852 	 * start address and length. The length field covers the complete
3853 	 * image: length = end_address_of_bss - start_address_of_text.
3854 	 * The remainder is the blob to be loaded contiguously from the
3855 	 * start address. */
3856
3857 cpu_scratch_size = tp->fw_len;
3858
3859 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3860 cpu_base = RX_CPU_BASE;
3861 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3862 } else {
3863 cpu_base = TX_CPU_BASE;
3864 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3865 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3866 }
3867
3868 err = tg3_load_firmware_cpu(tp, cpu_base,
3869 cpu_scratch_base, cpu_scratch_size,
3870 fw_hdr);
3871 if (err)
3872 return err;
3873
3874 /* Now startup the cpu. */
3875 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3876 be32_to_cpu(fw_hdr->base_addr));
3877 if (err) {
3878 netdev_err(tp->dev,
3879 "%s fails to set CPU PC, is %08x should be %08x\n",
3880 __func__, tr32(cpu_base + CPU_PC),
3881 be32_to_cpu(fw_hdr->base_addr));
3882 return -ENODEV;
3883 }
3884
3885 tg3_resume_cpu(tp, cpu_base);
3886 return 0;
3887 }
3888
3889
3890 /* tp->lock is held. */
3891 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3892 {
3893 u32 addr_high, addr_low;
3894 int i;
3895
3896 addr_high = ((tp->dev->dev_addr[0] << 8) |
3897 tp->dev->dev_addr[1]);
3898 addr_low = ((tp->dev->dev_addr[2] << 24) |
3899 (tp->dev->dev_addr[3] << 16) |
3900 (tp->dev->dev_addr[4] << 8) |
3901 (tp->dev->dev_addr[5] << 0));
3902 for (i = 0; i < 4; i++) {
3903 if (i == 1 && skip_mac_1)
3904 continue;
3905 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3906 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3907 }
3908
3909 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3910 tg3_asic_rev(tp) == ASIC_REV_5704) {
3911 for (i = 0; i < 12; i++) {
3912 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3913 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3914 }
3915 }
3916
3917 addr_high = (tp->dev->dev_addr[0] +
3918 tp->dev->dev_addr[1] +
3919 tp->dev->dev_addr[2] +
3920 tp->dev->dev_addr[3] +
3921 tp->dev->dev_addr[4] +
3922 tp->dev->dev_addr[5]) &
3923 TX_BACKOFF_SEED_MASK;
3924 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3925 }
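/* Example of the packing above: for station address 00:10:18:aa:bb:cc,
 * addr_high ends up as 0x00000010 (the two high-order octets) and
 * addr_low as 0x18aabbcc (the remaining four), matching how the MAC
 * splits the 48-bit address across each register pair. The backoff
 * seed is just the byte sum masked with TX_BACKOFF_SEED_MASK.
 */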
3926
3927 static void tg3_enable_register_access(struct tg3 *tp)
3928 {
3929 /*
3930 * Make sure register accesses (indirect or otherwise) will function
3931 * correctly.
3932 */
3933 pci_write_config_dword(tp->pdev,
3934 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3935 }
3936
3937 static int tg3_power_up(struct tg3 *tp)
3938 {
3939 int err;
3940
3941 tg3_enable_register_access(tp);
3942
3943 err = pci_set_power_state(tp->pdev, PCI_D0);
3944 if (!err) {
3945 /* Switch out of Vaux if it is a NIC */
3946 tg3_pwrsrc_switch_to_vmain(tp);
3947 } else {
3948 netdev_err(tp->dev, "Transition to D0 failed\n");
3949 }
3950
3951 return err;
3952 }
3953
3954 static int tg3_setup_phy(struct tg3 *, bool);
3955
3956 static int tg3_power_down_prepare(struct tg3 *tp)
3957 {
3958 u32 misc_host_ctrl;
3959 bool device_should_wake, do_low_power;
3960
3961 tg3_enable_register_access(tp);
3962
3963 /* Restore the CLKREQ setting. */
3964 if (tg3_flag(tp, CLKREQ_BUG))
3965 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3966 PCI_EXP_LNKCTL_CLKREQ_EN);
3967
3968 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3969 tw32(TG3PCI_MISC_HOST_CTRL,
3970 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3971
3972 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3973 tg3_flag(tp, WOL_ENABLE);
3974
3975 if (tg3_flag(tp, USE_PHYLIB)) {
3976 do_low_power = false;
3977 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3978 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3979 struct phy_device *phydev;
3980 u32 phyid, advertising;
3981
3982 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3983
3984 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3985
3986 tp->link_config.speed = phydev->speed;
3987 tp->link_config.duplex = phydev->duplex;
3988 tp->link_config.autoneg = phydev->autoneg;
3989 tp->link_config.advertising = phydev->advertising;
3990
3991 advertising = ADVERTISED_TP |
3992 ADVERTISED_Pause |
3993 ADVERTISED_Autoneg |
3994 ADVERTISED_10baseT_Half;
3995
3996 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3997 if (tg3_flag(tp, WOL_SPEED_100MB))
3998 advertising |=
3999 ADVERTISED_100baseT_Half |
4000 ADVERTISED_100baseT_Full |
4001 ADVERTISED_10baseT_Full;
4002 else
4003 advertising |= ADVERTISED_10baseT_Full;
4004 }
4005
4006 phydev->advertising = advertising;
4007
4008 phy_start_aneg(phydev);
4009
4010 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4011 if (phyid != PHY_ID_BCMAC131) {
4012 phyid &= PHY_BCM_OUI_MASK;
4013 if (phyid == PHY_BCM_OUI_1 ||
4014 phyid == PHY_BCM_OUI_2 ||
4015 phyid == PHY_BCM_OUI_3)
4016 do_low_power = true;
4017 }
4018 }
4019 } else {
4020 do_low_power = true;
4021
4022 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4023 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4024
4025 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4026 tg3_setup_phy(tp, false);
4027 }
4028
4029 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4030 u32 val;
4031
4032 val = tr32(GRC_VCPU_EXT_CTRL);
4033 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4034 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4035 int i;
4036 u32 val;
4037
4038 for (i = 0; i < 200; i++) {
4039 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4040 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4041 break;
4042 msleep(1);
4043 }
4044 }
4045 if (tg3_flag(tp, WOL_CAP))
4046 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4047 WOL_DRV_STATE_SHUTDOWN |
4048 WOL_DRV_WOL |
4049 WOL_SET_MAGIC_PKT);
4050
4051 if (device_should_wake) {
4052 u32 mac_mode;
4053
4054 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4055 if (do_low_power &&
4056 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4057 tg3_phy_auxctl_write(tp,
4058 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4059 MII_TG3_AUXCTL_PCTL_WOL_EN |
4060 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4061 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4062 udelay(40);
4063 }
4064
4065 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4066 mac_mode = MAC_MODE_PORT_MODE_GMII;
4067 else if (tp->phy_flags &
4068 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4069 if (tp->link_config.active_speed == SPEED_1000)
4070 mac_mode = MAC_MODE_PORT_MODE_GMII;
4071 else
4072 mac_mode = MAC_MODE_PORT_MODE_MII;
4073 } else
4074 mac_mode = MAC_MODE_PORT_MODE_MII;
4075
4076 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4077 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4078 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4079 SPEED_100 : SPEED_10;
4080 if (tg3_5700_link_polarity(tp, speed))
4081 mac_mode |= MAC_MODE_LINK_POLARITY;
4082 else
4083 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4084 }
4085 } else {
4086 mac_mode = MAC_MODE_PORT_MODE_TBI;
4087 }
4088
4089 if (!tg3_flag(tp, 5750_PLUS))
4090 tw32(MAC_LED_CTRL, tp->led_ctrl);
4091
4092 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4093 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4094 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4095 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4096
4097 if (tg3_flag(tp, ENABLE_APE))
4098 mac_mode |= MAC_MODE_APE_TX_EN |
4099 MAC_MODE_APE_RX_EN |
4100 MAC_MODE_TDE_ENABLE;
4101
4102 tw32_f(MAC_MODE, mac_mode);
4103 udelay(100);
4104
4105 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4106 udelay(10);
4107 }
4108
4109 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4110 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4111 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4112 u32 base_val;
4113
4114 base_val = tp->pci_clock_ctrl;
4115 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4116 CLOCK_CTRL_TXCLK_DISABLE);
4117
4118 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4119 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4120 } else if (tg3_flag(tp, 5780_CLASS) ||
4121 tg3_flag(tp, CPMU_PRESENT) ||
4122 tg3_asic_rev(tp) == ASIC_REV_5906) {
4123 /* do nothing */
4124 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4125 u32 newbits1, newbits2;
4126
4127 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4128 tg3_asic_rev(tp) == ASIC_REV_5701) {
4129 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4130 CLOCK_CTRL_TXCLK_DISABLE |
4131 CLOCK_CTRL_ALTCLK);
4132 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4133 } else if (tg3_flag(tp, 5705_PLUS)) {
4134 newbits1 = CLOCK_CTRL_625_CORE;
4135 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4136 } else {
4137 newbits1 = CLOCK_CTRL_ALTCLK;
4138 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4139 }
4140
4141 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4142 40);
4143
4144 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4145 40);
4146
4147 if (!tg3_flag(tp, 5705_PLUS)) {
4148 u32 newbits3;
4149
4150 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4151 tg3_asic_rev(tp) == ASIC_REV_5701) {
4152 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4153 CLOCK_CTRL_TXCLK_DISABLE |
4154 CLOCK_CTRL_44MHZ_CORE);
4155 } else {
4156 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4157 }
4158
4159 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4160 tp->pci_clock_ctrl | newbits3, 40);
4161 }
4162 }
4163
4164 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4165 tg3_power_down_phy(tp, do_low_power);
4166
4167 tg3_frob_aux_power(tp, true);
4168
4169 /* Workaround for unstable PLL clock */
4170 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4171 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4172 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4173 u32 val = tr32(0x7d00);
4174
4175 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4176 tw32(0x7d00, val);
4177 if (!tg3_flag(tp, ENABLE_ASF)) {
4178 int err;
4179
4180 err = tg3_nvram_lock(tp);
4181 tg3_halt_cpu(tp, RX_CPU_BASE);
4182 if (!err)
4183 tg3_nvram_unlock(tp);
4184 }
4185 }
4186
4187 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4188
4189 return 0;
4190 }
4191
4192 static void tg3_power_down(struct tg3 *tp)
4193 {
4194 tg3_power_down_prepare(tp);
4195
4196 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4197 pci_set_power_state(tp->pdev, PCI_D3hot);
4198 }
4199
4200 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4201 {
4202 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4203 case MII_TG3_AUX_STAT_10HALF:
4204 *speed = SPEED_10;
4205 *duplex = DUPLEX_HALF;
4206 break;
4207
4208 case MII_TG3_AUX_STAT_10FULL:
4209 *speed = SPEED_10;
4210 *duplex = DUPLEX_FULL;
4211 break;
4212
4213 case MII_TG3_AUX_STAT_100HALF:
4214 *speed = SPEED_100;
4215 *duplex = DUPLEX_HALF;
4216 break;
4217
4218 case MII_TG3_AUX_STAT_100FULL:
4219 *speed = SPEED_100;
4220 *duplex = DUPLEX_FULL;
4221 break;
4222
4223 case MII_TG3_AUX_STAT_1000HALF:
4224 *speed = SPEED_1000;
4225 *duplex = DUPLEX_HALF;
4226 break;
4227
4228 case MII_TG3_AUX_STAT_1000FULL:
4229 *speed = SPEED_1000;
4230 *duplex = DUPLEX_FULL;
4231 break;
4232
4233 default:
4234 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4235 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4236 SPEED_10;
4237 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4238 DUPLEX_HALF;
4239 break;
4240 }
4241 *speed = SPEED_UNKNOWN;
4242 *duplex = DUPLEX_UNKNOWN;
4243 break;
4244 }
4245 }
4246
4247 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4248 {
4249 int err = 0;
4250 u32 val, new_adv;
4251
4252 new_adv = ADVERTISE_CSMA;
4253 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4254 new_adv |= mii_advertise_flowctrl(flowctrl);
4255
4256 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4257 if (err)
4258 goto done;
4259
4260 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4261 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4262
4263 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4264 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4265 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4266
4267 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4268 if (err)
4269 goto done;
4270 }
4271
4272 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4273 goto done;
4274
4275 tw32(TG3_CPMU_EEE_MODE,
4276 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4277
4278 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4279 if (!err) {
4280 		int err2;
4281
4282 val = 0;
4283 		/* Advertise 100BASE-TX EEE ability */
4284 if (advertise & ADVERTISED_100baseT_Full)
4285 val |= MDIO_AN_EEE_ADV_100TX;
4286 		/* Advertise 1000BASE-T EEE ability */
4287 if (advertise & ADVERTISED_1000baseT_Full)
4288 val |= MDIO_AN_EEE_ADV_1000T;
4289
4290 if (!tp->eee.eee_enabled) {
4291 val = 0;
4292 tp->eee.advertised = 0;
4293 } else {
4294 tp->eee.advertised = advertise &
4295 (ADVERTISED_100baseT_Full |
4296 ADVERTISED_1000baseT_Full);
4297 }
4298
4299 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4300 if (err)
4301 val = 0;
4302
4303 switch (tg3_asic_rev(tp)) {
4304 case ASIC_REV_5717:
4305 case ASIC_REV_57765:
4306 case ASIC_REV_57766:
4307 case ASIC_REV_5719:
4308 			/* If we advertised any EEE abilities above... */
4309 if (val)
4310 val = MII_TG3_DSP_TAP26_ALNOKO |
4311 MII_TG3_DSP_TAP26_RMRXSTO |
4312 MII_TG3_DSP_TAP26_OPCSINPT;
4313 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4314 /* Fall through */
4315 case ASIC_REV_5720:
4316 case ASIC_REV_5762:
4317 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4318 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4319 MII_TG3_DSP_CH34TP2_HIBW01);
4320 }
4321
4322 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4323 if (!err)
4324 err = err2;
4325 }
4326
4327 done:
4328 return err;
4329 }
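
/* A minimal worked example of the MII_ADVERTISE value built in
 * tg3_phy_autoneg_cfg() (hypothetical inputs, standard <linux/mii.h>
 * constants): with advertise covering 10/100 half+full duplex and
 * flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX,
 *
 *	new_adv = ADVERTISE_CSMA	(0x0001)
 *		| ADVERTISE_ALL		(0x01e0)
 *		| ADVERTISE_PAUSE_CAP	(0x0400, symmetric pause)
 *		= 0x05e1
 */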
4330
4331 static void tg3_phy_copper_begin(struct tg3 *tp)
4332 {
4333 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4334 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4335 u32 adv, fc;
4336
4337 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4338 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4339 adv = ADVERTISED_10baseT_Half |
4340 ADVERTISED_10baseT_Full;
4341 if (tg3_flag(tp, WOL_SPEED_100MB))
4342 adv |= ADVERTISED_100baseT_Half |
4343 ADVERTISED_100baseT_Full;
4344 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4345 adv |= ADVERTISED_1000baseT_Half |
4346 ADVERTISED_1000baseT_Full;
4347
4348 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4349 } else {
4350 adv = tp->link_config.advertising;
4351 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4352 adv &= ~(ADVERTISED_1000baseT_Half |
4353 ADVERTISED_1000baseT_Full);
4354
4355 fc = tp->link_config.flowctrl;
4356 }
4357
4358 tg3_phy_autoneg_cfg(tp, adv, fc);
4359
4360 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4361 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4362 /* Normally during power down we want to autonegotiate
4363 * the lowest possible speed for WOL. However, to avoid
4364 * link flap, we leave it untouched.
4365 */
4366 return;
4367 }
4368
4369 tg3_writephy(tp, MII_BMCR,
4370 BMCR_ANENABLE | BMCR_ANRESTART);
4371 } else {
4372 int i;
4373 u32 bmcr, orig_bmcr;
4374
4375 tp->link_config.active_speed = tp->link_config.speed;
4376 tp->link_config.active_duplex = tp->link_config.duplex;
4377
4378 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4379 /* With autoneg disabled, 5715 only links up when the
4380 * advertisement register has the configured speed
4381 * enabled.
4382 */
4383 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4384 }
4385
4386 bmcr = 0;
4387 switch (tp->link_config.speed) {
4388 default:
4389 case SPEED_10:
4390 break;
4391
4392 case SPEED_100:
4393 bmcr |= BMCR_SPEED100;
4394 break;
4395
4396 case SPEED_1000:
4397 bmcr |= BMCR_SPEED1000;
4398 break;
4399 }
4400
4401 if (tp->link_config.duplex == DUPLEX_FULL)
4402 bmcr |= BMCR_FULLDPLX;
4403
4404 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4405 (bmcr != orig_bmcr)) {
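			/* Drop the link first by putting the PHY in loopback
			 * and polling BMSR (up to 1500 * 10us = 15ms) until
			 * BMSR_LSTATUS clears, then apply the new mode.
			 */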
4406 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4407 for (i = 0; i < 1500; i++) {
4408 u32 tmp;
4409
4410 udelay(10);
4411 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4412 tg3_readphy(tp, MII_BMSR, &tmp))
4413 continue;
4414 if (!(tmp & BMSR_LSTATUS)) {
4415 udelay(40);
4416 break;
4417 }
4418 }
4419 tg3_writephy(tp, MII_BMCR, bmcr);
4420 udelay(40);
4421 }
4422 }
4423 }
4424
4425 static int tg3_phy_pull_config(struct tg3 *tp)
4426 {
4427 int err;
4428 u32 val;
4429
4430 err = tg3_readphy(tp, MII_BMCR, &val);
4431 if (err)
4432 goto done;
4433
4434 if (!(val & BMCR_ANENABLE)) {
4435 tp->link_config.autoneg = AUTONEG_DISABLE;
4436 tp->link_config.advertising = 0;
4437 tg3_flag_clear(tp, PAUSE_AUTONEG);
4438
4439 err = -EIO;
4440
4441 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4442 case 0:
4443 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4444 goto done;
4445
4446 tp->link_config.speed = SPEED_10;
4447 break;
4448 case BMCR_SPEED100:
4449 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4450 goto done;
4451
4452 tp->link_config.speed = SPEED_100;
4453 break;
4454 case BMCR_SPEED1000:
4455 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4456 tp->link_config.speed = SPEED_1000;
4457 break;
4458 }
4459 /* Fall through */
4460 default:
4461 goto done;
4462 }
4463
4464 if (val & BMCR_FULLDPLX)
4465 tp->link_config.duplex = DUPLEX_FULL;
4466 else
4467 tp->link_config.duplex = DUPLEX_HALF;
4468
4469 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4470
4471 err = 0;
4472 goto done;
4473 }
4474
4475 tp->link_config.autoneg = AUTONEG_ENABLE;
4476 tp->link_config.advertising = ADVERTISED_Autoneg;
4477 tg3_flag_set(tp, PAUSE_AUTONEG);
4478
4479 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4480 u32 adv;
4481
4482 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4483 if (err)
4484 goto done;
4485
4486 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4487 tp->link_config.advertising |= adv | ADVERTISED_TP;
4488
4489 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4490 } else {
4491 tp->link_config.advertising |= ADVERTISED_FIBRE;
4492 }
4493
4494 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4495 u32 adv;
4496
4497 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4498 err = tg3_readphy(tp, MII_CTRL1000, &val);
4499 if (err)
4500 goto done;
4501
4502 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4503 } else {
4504 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4505 if (err)
4506 goto done;
4507
4508 adv = tg3_decode_flowctrl_1000X(val);
4509 tp->link_config.flowctrl = adv;
4510
4511 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4512 adv = mii_adv_to_ethtool_adv_x(val);
4513 }
4514
4515 tp->link_config.advertising |= adv;
4516 }
4517
4518 done:
4519 return err;
4520 }
4521
4522 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4523 {
4524 int err;
4525
4526 	/* Turn off tap power management and set the
4527 	 * extended packet length bit. */
4528 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4529
4530 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4531 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4532 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4533 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4534 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4535
4536 udelay(40);
4537
4538 return err;
4539 }
4540
4541 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4542 {
4543 u32 val;
4544 u32 tgtadv = 0;
4545 u32 advertising = tp->link_config.advertising;
4546
4547 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4548 return true;
4549
4550 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4551 return false;
4552
4553 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4554 
4556 if (advertising & ADVERTISED_100baseT_Full)
4557 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4558 if (advertising & ADVERTISED_1000baseT_Full)
4559 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4560
4561 if (val != tgtadv)
4562 return false;
4563
4564 return true;
4565 }
4566
4567 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4568 {
4569 u32 advmsk, tgtadv, advertising;
4570
4571 advertising = tp->link_config.advertising;
4572 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4573
4574 advmsk = ADVERTISE_ALL;
4575 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4576 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4577 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4578 }
4579
4580 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4581 return false;
4582
4583 if ((*lcladv & advmsk) != tgtadv)
4584 return false;
4585
4586 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4587 u32 tg3_ctrl;
4588
4589 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4590
4591 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4592 return false;
4593
4594 if (tgtadv &&
4595 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4596 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4597 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4598 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4599 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4600 } else {
4601 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4602 }
4603
4604 if (tg3_ctrl != tgtadv)
4605 return false;
4606 }
4607
4608 return true;
4609 }
4610
4611 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4612 {
4613 u32 lpeth = 0;
4614
4615 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4616 u32 val;
4617
4618 if (tg3_readphy(tp, MII_STAT1000, &val))
4619 return false;
4620
4621 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4622 }
4623
4624 if (tg3_readphy(tp, MII_LPA, rmtadv))
4625 return false;
4626
4627 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4628 tp->link_config.rmt_adv = lpeth;
4629
4630 return true;
4631 }
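
/* Example of the combined bitmap (illustrative): a partner advertising
 * 100baseT-Full in MII_LPA and 1000baseT-Full in MII_STAT1000 leaves
 * tp->link_config.rmt_adv = ADVERTISED_100baseT_Full |
 * ADVERTISED_1000baseT_Full (plus ADVERTISED_Autoneg when LPA_LPACK
 * is set), all in ethtool ADVERTISED_* form.
 */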
4632
4633 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4634 {
4635 if (curr_link_up != tp->link_up) {
4636 if (curr_link_up) {
4637 netif_carrier_on(tp->dev);
4638 } else {
4639 netif_carrier_off(tp->dev);
4640 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4641 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4642 }
4643
4644 tg3_link_report(tp);
4645 return true;
4646 }
4647
4648 return false;
4649 }
4650
4651 static void tg3_clear_mac_status(struct tg3 *tp)
4652 {
4653 tw32(MAC_EVENT, 0);
4654
4655 tw32_f(MAC_STATUS,
4656 MAC_STATUS_SYNC_CHANGED |
4657 MAC_STATUS_CFG_CHANGED |
4658 MAC_STATUS_MI_COMPLETION |
4659 MAC_STATUS_LNKSTATE_CHANGED);
4660 udelay(40);
4661 }
4662
4663 static void tg3_setup_eee(struct tg3 *tp)
4664 {
4665 u32 val;
4666
4667 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4668 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4669 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4670 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4671
4672 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4673
4674 tw32_f(TG3_CPMU_EEE_CTRL,
4675 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4676
4677 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4678 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4679 TG3_CPMU_EEEMD_LPI_IN_RX |
4680 TG3_CPMU_EEEMD_EEE_ENABLE;
4681
4682 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4683 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4684
4685 if (tg3_flag(tp, ENABLE_APE))
4686 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4687
4688 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4689
4690 tw32_f(TG3_CPMU_EEE_DBTMR1,
4691 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4692 (tp->eee.tx_lpi_timer & 0xffff));
4693
4694 tw32_f(TG3_CPMU_EEE_DBTMR2,
4695 TG3_CPMU_DBTMR2_APE_TX_2047US |
4696 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4697 }
4698
4699 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4700 {
4701 bool current_link_up;
4702 u32 bmsr, val;
4703 u32 lcl_adv, rmt_adv;
4704 u16 current_speed;
4705 u8 current_duplex;
4706 int i, err;
4707
4708 tg3_clear_mac_status(tp);
4709
4710 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4711 tw32_f(MAC_MI_MODE,
4712 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4713 udelay(80);
4714 }
4715
4716 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4717
4718 /* Some third-party PHYs need to be reset on link going
4719 * down.
4720 */
4721 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4722 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4723 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4724 tp->link_up) {
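		/* BMSR_LSTATUS is latched low, so the register is read
		 * twice to obtain the current link state rather than a
		 * stale, latched one.
		 */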
4725 tg3_readphy(tp, MII_BMSR, &bmsr);
4726 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4727 !(bmsr & BMSR_LSTATUS))
4728 force_reset = true;
4729 }
4730 if (force_reset)
4731 tg3_phy_reset(tp);
4732
4733 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4734 tg3_readphy(tp, MII_BMSR, &bmsr);
4735 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4736 !tg3_flag(tp, INIT_COMPLETE))
4737 bmsr = 0;
4738
4739 if (!(bmsr & BMSR_LSTATUS)) {
4740 err = tg3_init_5401phy_dsp(tp);
4741 if (err)
4742 return err;
4743
4744 tg3_readphy(tp, MII_BMSR, &bmsr);
4745 for (i = 0; i < 1000; i++) {
4746 udelay(10);
4747 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4748 (bmsr & BMSR_LSTATUS)) {
4749 udelay(40);
4750 break;
4751 }
4752 }
4753
4754 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4755 TG3_PHY_REV_BCM5401_B0 &&
4756 !(bmsr & BMSR_LSTATUS) &&
4757 tp->link_config.active_speed == SPEED_1000) {
4758 err = tg3_phy_reset(tp);
4759 if (!err)
4760 err = tg3_init_5401phy_dsp(tp);
4761 if (err)
4762 return err;
4763 }
4764 }
4765 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4766 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4767 /* 5701 {A0,B0} CRC bug workaround */
4768 tg3_writephy(tp, 0x15, 0x0a75);
4769 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4770 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4771 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4772 }
4773
4774 /* Clear pending interrupts... */
4775 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4776 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4777
4778 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4779 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4780 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4781 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4782
4783 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4784 tg3_asic_rev(tp) == ASIC_REV_5701) {
4785 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4786 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4787 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4788 else
4789 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4790 }
4791
4792 current_link_up = false;
4793 current_speed = SPEED_UNKNOWN;
4794 current_duplex = DUPLEX_UNKNOWN;
4795 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4796 tp->link_config.rmt_adv = 0;
4797
4798 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4799 err = tg3_phy_auxctl_read(tp,
4800 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4801 &val);
4802 if (!err && !(val & (1 << 10))) {
4803 tg3_phy_auxctl_write(tp,
4804 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4805 val | (1 << 10));
4806 goto relink;
4807 }
4808 }
4809
4810 bmsr = 0;
4811 for (i = 0; i < 100; i++) {
4812 tg3_readphy(tp, MII_BMSR, &bmsr);
4813 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4814 (bmsr & BMSR_LSTATUS))
4815 break;
4816 udelay(40);
4817 }
4818
4819 if (bmsr & BMSR_LSTATUS) {
4820 u32 aux_stat, bmcr;
4821
4822 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4823 for (i = 0; i < 2000; i++) {
4824 udelay(10);
4825 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4826 aux_stat)
4827 break;
4828 }
4829
4830 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4831 &current_speed,
4832 &current_duplex);
4833
4834 bmcr = 0;
4835 for (i = 0; i < 200; i++) {
4836 tg3_readphy(tp, MII_BMCR, &bmcr);
4837 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4838 continue;
4839 if (bmcr && bmcr != 0x7fff)
4840 break;
4841 udelay(10);
4842 }
4843
4844 lcl_adv = 0;
4845 rmt_adv = 0;
4846
4847 tp->link_config.active_speed = current_speed;
4848 tp->link_config.active_duplex = current_duplex;
4849
4850 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4851 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4852
4853 if ((bmcr & BMCR_ANENABLE) &&
4854 eee_config_ok &&
4855 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4856 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4857 current_link_up = true;
4858
4859 			/* Changes to the EEE settings take effect only after
4860 			 * a PHY reset. If we have skipped a reset due to Link
4861 			 * Flap Avoidance being enabled, do it now.
4862 			 */
4863 if (!eee_config_ok &&
4864 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4865 !force_reset)
4866 tg3_phy_reset(tp);
4867 } else {
4868 if (!(bmcr & BMCR_ANENABLE) &&
4869 tp->link_config.speed == current_speed &&
4870 tp->link_config.duplex == current_duplex) {
4871 current_link_up = true;
4872 }
4873 }
4874
4875 if (current_link_up &&
4876 tp->link_config.active_duplex == DUPLEX_FULL) {
4877 u32 reg, bit;
4878
4879 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4880 reg = MII_TG3_FET_GEN_STAT;
4881 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4882 } else {
4883 reg = MII_TG3_EXT_STAT;
4884 bit = MII_TG3_EXT_STAT_MDIX;
4885 }
4886
4887 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4888 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4889
4890 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4891 }
4892 }
4893
4894 relink:
4895 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4896 tg3_phy_copper_begin(tp);
4897
4898 if (tg3_flag(tp, ROBOSWITCH)) {
4899 current_link_up = true;
4900 			/* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4901 current_speed = SPEED_1000;
4902 current_duplex = DUPLEX_FULL;
4903 tp->link_config.active_speed = current_speed;
4904 tp->link_config.active_duplex = current_duplex;
4905 }
4906
4907 tg3_readphy(tp, MII_BMSR, &bmsr);
4908 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4909 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4910 current_link_up = true;
4911 }
4912
4913 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4914 if (current_link_up) {
4915 if (tp->link_config.active_speed == SPEED_100 ||
4916 tp->link_config.active_speed == SPEED_10)
4917 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4918 else
4919 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4920 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4921 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4922 else
4923 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4924
4925 	/* In order for the 5750 core in the BCM4785 chip to work properly
4926 	 * in RGMII mode, the LED Control Register must be set up.
4927 	 */
4928 if (tg3_flag(tp, RGMII_MODE)) {
4929 u32 led_ctrl = tr32(MAC_LED_CTRL);
4930 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4931
4932 if (tp->link_config.active_speed == SPEED_10)
4933 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4934 else if (tp->link_config.active_speed == SPEED_100)
4935 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4936 LED_CTRL_100MBPS_ON);
4937 else if (tp->link_config.active_speed == SPEED_1000)
4938 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4939 LED_CTRL_1000MBPS_ON);
4940
4941 tw32(MAC_LED_CTRL, led_ctrl);
4942 udelay(40);
4943 }
4944
4945 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4946 if (tp->link_config.active_duplex == DUPLEX_HALF)
4947 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4948
4949 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4950 if (current_link_up &&
4951 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4952 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4953 else
4954 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4955 }
4956
4957 	/* ??? Without this setting the Netgear GA302T PHY does not
4958 	 * ??? send/receive packets...
4959 	 */
4960 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4961 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4962 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4963 tw32_f(MAC_MI_MODE, tp->mi_mode);
4964 udelay(80);
4965 }
4966
4967 tw32_f(MAC_MODE, tp->mac_mode);
4968 udelay(40);
4969
4970 tg3_phy_eee_adjust(tp, current_link_up);
4971
4972 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4973 /* Polled via timer. */
4974 tw32_f(MAC_EVENT, 0);
4975 } else {
4976 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4977 }
4978 udelay(40);
4979
4980 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4981 current_link_up &&
4982 tp->link_config.active_speed == SPEED_1000 &&
4983 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4984 udelay(120);
4985 tw32_f(MAC_STATUS,
4986 (MAC_STATUS_SYNC_CHANGED |
4987 MAC_STATUS_CFG_CHANGED));
4988 udelay(40);
4989 tg3_write_mem(tp,
4990 NIC_SRAM_FIRMWARE_MBOX,
4991 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4992 }
4993
4994 /* Prevent send BD corruption. */
4995 if (tg3_flag(tp, CLKREQ_BUG)) {
4996 if (tp->link_config.active_speed == SPEED_100 ||
4997 tp->link_config.active_speed == SPEED_10)
4998 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4999 PCI_EXP_LNKCTL_CLKREQ_EN);
5000 else
5001 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5002 PCI_EXP_LNKCTL_CLKREQ_EN);
5003 }
5004
5005 tg3_test_and_report_link_chg(tp, current_link_up);
5006
5007 return 0;
5008 }
5009
5010 struct tg3_fiber_aneginfo {
5011 int state;
5012 #define ANEG_STATE_UNKNOWN 0
5013 #define ANEG_STATE_AN_ENABLE 1
5014 #define ANEG_STATE_RESTART_INIT 2
5015 #define ANEG_STATE_RESTART 3
5016 #define ANEG_STATE_DISABLE_LINK_OK 4
5017 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5018 #define ANEG_STATE_ABILITY_DETECT 6
5019 #define ANEG_STATE_ACK_DETECT_INIT 7
5020 #define ANEG_STATE_ACK_DETECT 8
5021 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5022 #define ANEG_STATE_COMPLETE_ACK 10
5023 #define ANEG_STATE_IDLE_DETECT_INIT 11
5024 #define ANEG_STATE_IDLE_DETECT 12
5025 #define ANEG_STATE_LINK_OK 13
5026 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5027 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5028
5029 u32 flags;
5030 #define MR_AN_ENABLE 0x00000001
5031 #define MR_RESTART_AN 0x00000002
5032 #define MR_AN_COMPLETE 0x00000004
5033 #define MR_PAGE_RX 0x00000008
5034 #define MR_NP_LOADED 0x00000010
5035 #define MR_TOGGLE_TX 0x00000020
5036 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5037 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5038 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5039 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5040 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5041 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5042 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5043 #define MR_TOGGLE_RX 0x00002000
5044 #define MR_NP_RX 0x00004000
5045
5046 #define MR_LINK_OK 0x80000000
5047
5048 unsigned long link_time, cur_time;
5049
5050 u32 ability_match_cfg;
5051 int ability_match_count;
5052
5053 char ability_match, idle_match, ack_match;
5054
5055 u32 txconfig, rxconfig;
5056 #define ANEG_CFG_NP 0x00000080
5057 #define ANEG_CFG_ACK 0x00000040
5058 #define ANEG_CFG_RF2 0x00000020
5059 #define ANEG_CFG_RF1 0x00000010
5060 #define ANEG_CFG_PS2 0x00000001
5061 #define ANEG_CFG_PS1 0x00008000
5062 #define ANEG_CFG_HD 0x00004000
5063 #define ANEG_CFG_FD 0x00002000
5064 #define ANEG_CFG_INVAL 0x00001f06
5065
5066 };
5067 #define ANEG_OK 0
5068 #define ANEG_DONE 1
5069 #define ANEG_TIMER_ENAB 2
5070 #define ANEG_FAILED -1
5071
5072 #define ANEG_STATE_SETTLE_TIME 10000
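
/* Unit note, derived from fiber_autoneg() below: ap->cur_time advances
 * once per ~1us polling pass, so 10000 ticks is roughly a 10ms settle
 * window inside the overall ~195ms autoneg budget.
 */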
5073
5074 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5075 struct tg3_fiber_aneginfo *ap)
5076 {
5077 u16 flowctrl;
5078 unsigned long delta;
5079 u32 rx_cfg_reg;
5080 int ret;
5081
5082 if (ap->state == ANEG_STATE_UNKNOWN) {
5083 ap->rxconfig = 0;
5084 ap->link_time = 0;
5085 ap->cur_time = 0;
5086 ap->ability_match_cfg = 0;
5087 ap->ability_match_count = 0;
5088 ap->ability_match = 0;
5089 ap->idle_match = 0;
5090 ap->ack_match = 0;
5091 }
5092 ap->cur_time++;
5093
5094 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5095 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5096
5097 if (rx_cfg_reg != ap->ability_match_cfg) {
5098 ap->ability_match_cfg = rx_cfg_reg;
5099 ap->ability_match = 0;
5100 ap->ability_match_count = 0;
5101 } else {
5102 if (++ap->ability_match_count > 1) {
5103 ap->ability_match = 1;
5104 ap->ability_match_cfg = rx_cfg_reg;
5105 }
5106 }
5107 if (rx_cfg_reg & ANEG_CFG_ACK)
5108 ap->ack_match = 1;
5109 else
5110 ap->ack_match = 0;
5111
5112 ap->idle_match = 0;
5113 } else {
5114 ap->idle_match = 1;
5115 ap->ability_match_cfg = 0;
5116 ap->ability_match_count = 0;
5117 ap->ability_match = 0;
5118 ap->ack_match = 0;
5119
5120 rx_cfg_reg = 0;
5121 }
5122
5123 ap->rxconfig = rx_cfg_reg;
5124 ret = ANEG_OK;
5125
5126 switch (ap->state) {
5127 case ANEG_STATE_UNKNOWN:
5128 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5129 ap->state = ANEG_STATE_AN_ENABLE;
5130
5131 		/* Fall through */
5132 case ANEG_STATE_AN_ENABLE:
5133 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5134 if (ap->flags & MR_AN_ENABLE) {
5135 ap->link_time = 0;
5136 ap->cur_time = 0;
5137 ap->ability_match_cfg = 0;
5138 ap->ability_match_count = 0;
5139 ap->ability_match = 0;
5140 ap->idle_match = 0;
5141 ap->ack_match = 0;
5142
5143 ap->state = ANEG_STATE_RESTART_INIT;
5144 } else {
5145 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5146 }
5147 break;
5148
5149 case ANEG_STATE_RESTART_INIT:
5150 ap->link_time = ap->cur_time;
5151 ap->flags &= ~(MR_NP_LOADED);
5152 ap->txconfig = 0;
5153 tw32(MAC_TX_AUTO_NEG, 0);
5154 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5155 tw32_f(MAC_MODE, tp->mac_mode);
5156 udelay(40);
5157
5158 ret = ANEG_TIMER_ENAB;
5159 ap->state = ANEG_STATE_RESTART;
5160
5161 		/* Fall through */
5162 case ANEG_STATE_RESTART:
5163 delta = ap->cur_time - ap->link_time;
5164 if (delta > ANEG_STATE_SETTLE_TIME)
5165 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5166 else
5167 ret = ANEG_TIMER_ENAB;
5168 break;
5169
5170 case ANEG_STATE_DISABLE_LINK_OK:
5171 ret = ANEG_DONE;
5172 break;
5173
5174 case ANEG_STATE_ABILITY_DETECT_INIT:
5175 ap->flags &= ~(MR_TOGGLE_TX);
5176 ap->txconfig = ANEG_CFG_FD;
5177 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5178 if (flowctrl & ADVERTISE_1000XPAUSE)
5179 ap->txconfig |= ANEG_CFG_PS1;
5180 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5181 ap->txconfig |= ANEG_CFG_PS2;
5182 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5183 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5184 tw32_f(MAC_MODE, tp->mac_mode);
5185 udelay(40);
5186
5187 ap->state = ANEG_STATE_ABILITY_DETECT;
5188 break;
5189
5190 case ANEG_STATE_ABILITY_DETECT:
5191 if (ap->ability_match != 0 && ap->rxconfig != 0)
5192 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5193 break;
5194
5195 case ANEG_STATE_ACK_DETECT_INIT:
5196 ap->txconfig |= ANEG_CFG_ACK;
5197 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5198 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5199 tw32_f(MAC_MODE, tp->mac_mode);
5200 udelay(40);
5201
5202 ap->state = ANEG_STATE_ACK_DETECT;
5203
5204 		/* Fall through */
5205 case ANEG_STATE_ACK_DETECT:
5206 if (ap->ack_match != 0) {
5207 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5208 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5209 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5210 } else {
5211 ap->state = ANEG_STATE_AN_ENABLE;
5212 }
5213 } else if (ap->ability_match != 0 &&
5214 ap->rxconfig == 0) {
5215 ap->state = ANEG_STATE_AN_ENABLE;
5216 }
5217 break;
5218
5219 case ANEG_STATE_COMPLETE_ACK_INIT:
5220 if (ap->rxconfig & ANEG_CFG_INVAL) {
5221 ret = ANEG_FAILED;
5222 break;
5223 }
5224 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5225 MR_LP_ADV_HALF_DUPLEX |
5226 MR_LP_ADV_SYM_PAUSE |
5227 MR_LP_ADV_ASYM_PAUSE |
5228 MR_LP_ADV_REMOTE_FAULT1 |
5229 MR_LP_ADV_REMOTE_FAULT2 |
5230 MR_LP_ADV_NEXT_PAGE |
5231 MR_TOGGLE_RX |
5232 MR_NP_RX);
5233 if (ap->rxconfig & ANEG_CFG_FD)
5234 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5235 if (ap->rxconfig & ANEG_CFG_HD)
5236 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5237 if (ap->rxconfig & ANEG_CFG_PS1)
5238 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5239 if (ap->rxconfig & ANEG_CFG_PS2)
5240 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5241 if (ap->rxconfig & ANEG_CFG_RF1)
5242 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5243 if (ap->rxconfig & ANEG_CFG_RF2)
5244 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5245 if (ap->rxconfig & ANEG_CFG_NP)
5246 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5247
5248 ap->link_time = ap->cur_time;
5249
5250 ap->flags ^= (MR_TOGGLE_TX);
5251 if (ap->rxconfig & 0x0008)
5252 ap->flags |= MR_TOGGLE_RX;
5253 if (ap->rxconfig & ANEG_CFG_NP)
5254 ap->flags |= MR_NP_RX;
5255 ap->flags |= MR_PAGE_RX;
5256
5257 ap->state = ANEG_STATE_COMPLETE_ACK;
5258 ret = ANEG_TIMER_ENAB;
5259 break;
5260
5261 case ANEG_STATE_COMPLETE_ACK:
5262 if (ap->ability_match != 0 &&
5263 ap->rxconfig == 0) {
5264 ap->state = ANEG_STATE_AN_ENABLE;
5265 break;
5266 }
5267 delta = ap->cur_time - ap->link_time;
5268 if (delta > ANEG_STATE_SETTLE_TIME) {
5269 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5270 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5271 } else {
5272 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5273 !(ap->flags & MR_NP_RX)) {
5274 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5275 } else {
5276 ret = ANEG_FAILED;
5277 }
5278 }
5279 }
5280 break;
5281
5282 case ANEG_STATE_IDLE_DETECT_INIT:
5283 ap->link_time = ap->cur_time;
5284 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5285 tw32_f(MAC_MODE, tp->mac_mode);
5286 udelay(40);
5287
5288 ap->state = ANEG_STATE_IDLE_DETECT;
5289 ret = ANEG_TIMER_ENAB;
5290 break;
5291
5292 case ANEG_STATE_IDLE_DETECT:
5293 if (ap->ability_match != 0 &&
5294 ap->rxconfig == 0) {
5295 ap->state = ANEG_STATE_AN_ENABLE;
5296 break;
5297 }
5298 delta = ap->cur_time - ap->link_time;
5299 if (delta > ANEG_STATE_SETTLE_TIME) {
5300 /* XXX another gem from the Broadcom driver :( */
5301 ap->state = ANEG_STATE_LINK_OK;
5302 }
5303 break;
5304
5305 case ANEG_STATE_LINK_OK:
5306 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5307 ret = ANEG_DONE;
5308 break;
5309
5310 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5311 /* ??? unimplemented */
5312 break;
5313
5314 case ANEG_STATE_NEXT_PAGE_WAIT:
5315 /* ??? unimplemented */
5316 break;
5317
5318 default:
5319 ret = ANEG_FAILED;
5320 break;
5321 }
5322
5323 return ret;
5324 }
5325
5326 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5327 {
5328 int res = 0;
5329 struct tg3_fiber_aneginfo aninfo;
5330 int status = ANEG_FAILED;
5331 unsigned int tick;
5332 u32 tmp;
5333
5334 tw32_f(MAC_TX_AUTO_NEG, 0);
5335
5336 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5337 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5338 udelay(40);
5339
5340 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5341 udelay(40);
5342
5343 memset(&aninfo, 0, sizeof(aninfo));
5344 aninfo.flags |= MR_AN_ENABLE;
5345 aninfo.state = ANEG_STATE_UNKNOWN;
5346 aninfo.cur_time = 0;
5347 tick = 0;
5348 while (++tick < 195000) {
5349 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5350 if (status == ANEG_DONE || status == ANEG_FAILED)
5351 break;
5352
5353 udelay(1);
5354 }
5355
5356 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5357 tw32_f(MAC_MODE, tp->mac_mode);
5358 udelay(40);
5359
5360 *txflags = aninfo.txconfig;
5361 *rxflags = aninfo.flags;
5362
5363 if (status == ANEG_DONE &&
5364 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5365 MR_LP_ADV_FULL_DUPLEX)))
5366 res = 1;
5367
5368 return res;
5369 }
5370
5371 static void tg3_init_bcm8002(struct tg3 *tp)
5372 {
5373 u32 mac_status = tr32(MAC_STATUS);
5374 int i;
5375
5376 	/* Reset when initializing for the first time or when we have a link. */
5377 if (tg3_flag(tp, INIT_COMPLETE) &&
5378 !(mac_status & MAC_STATUS_PCS_SYNCED))
5379 return;
5380
5381 /* Set PLL lock range. */
5382 tg3_writephy(tp, 0x16, 0x8007);
5383
5384 /* SW reset */
5385 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5386
5387 /* Wait for reset to complete. */
5388 /* XXX schedule_timeout() ... */
5389 for (i = 0; i < 500; i++)
5390 udelay(10);
5391
5392 /* Config mode; select PMA/Ch 1 regs. */
5393 tg3_writephy(tp, 0x10, 0x8411);
5394
5395 /* Enable auto-lock and comdet, select txclk for tx. */
5396 tg3_writephy(tp, 0x11, 0x0a10);
5397
5398 tg3_writephy(tp, 0x18, 0x00a0);
5399 tg3_writephy(tp, 0x16, 0x41ff);
5400
5401 /* Assert and deassert POR. */
5402 tg3_writephy(tp, 0x13, 0x0400);
5403 udelay(40);
5404 tg3_writephy(tp, 0x13, 0x0000);
5405
5406 tg3_writephy(tp, 0x11, 0x0a50);
5407 udelay(40);
5408 tg3_writephy(tp, 0x11, 0x0a10);
5409
5410 /* Wait for signal to stabilize */
5411 /* XXX schedule_timeout() ... */
5412 for (i = 0; i < 15000; i++)
5413 udelay(10);
5414
5415 /* Deselect the channel register so we can read the PHYID
5416 * later.
5417 */
5418 tg3_writephy(tp, 0x10, 0x8011);
5419 }
5420
5421 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5422 {
5423 u16 flowctrl;
5424 bool current_link_up;
5425 u32 sg_dig_ctrl, sg_dig_status;
5426 u32 serdes_cfg, expected_sg_dig_ctrl;
5427 int workaround, port_a;
5428
5429 serdes_cfg = 0;
5430 expected_sg_dig_ctrl = 0;
5431 workaround = 0;
5432 port_a = 1;
5433 current_link_up = false;
5434
5435 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5436 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5437 workaround = 1;
5438 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5439 port_a = 0;
5440
5441 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5442 /* preserve bits 20-23 for voltage regulator */
5443 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5444 }
5445
5446 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5447
5448 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5449 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5450 if (workaround) {
5451 u32 val = serdes_cfg;
5452
5453 if (port_a)
5454 val |= 0xc010000;
5455 else
5456 val |= 0x4010000;
5457 tw32_f(MAC_SERDES_CFG, val);
5458 }
5459
5460 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5461 }
5462 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5463 tg3_setup_flow_control(tp, 0, 0);
5464 current_link_up = true;
5465 }
5466 goto out;
5467 }
5468
5469 /* Want auto-negotiation. */
5470 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5471
5472 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5473 if (flowctrl & ADVERTISE_1000XPAUSE)
5474 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5475 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5476 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5477
5478 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5479 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5480 tp->serdes_counter &&
5481 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5482 MAC_STATUS_RCVD_CFG)) ==
5483 MAC_STATUS_PCS_SYNCED)) {
5484 tp->serdes_counter--;
5485 current_link_up = true;
5486 goto out;
5487 }
5488 restart_autoneg:
5489 if (workaround)
5490 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5491 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5492 udelay(5);
5493 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5494
5495 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5496 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5497 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5498 MAC_STATUS_SIGNAL_DET)) {
5499 sg_dig_status = tr32(SG_DIG_STATUS);
5500 mac_status = tr32(MAC_STATUS);
5501
5502 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5503 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5504 u32 local_adv = 0, remote_adv = 0;
5505
5506 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5507 local_adv |= ADVERTISE_1000XPAUSE;
5508 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5509 local_adv |= ADVERTISE_1000XPSE_ASYM;
5510
5511 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5512 remote_adv |= LPA_1000XPAUSE;
5513 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5514 remote_adv |= LPA_1000XPAUSE_ASYM;
5515
5516 tp->link_config.rmt_adv =
5517 mii_adv_to_ethtool_adv_x(remote_adv);
5518
5519 tg3_setup_flow_control(tp, local_adv, remote_adv);
5520 current_link_up = true;
5521 tp->serdes_counter = 0;
5522 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5523 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5524 if (tp->serdes_counter)
5525 tp->serdes_counter--;
5526 else {
5527 if (workaround) {
5528 u32 val = serdes_cfg;
5529
5530 if (port_a)
5531 val |= 0xc010000;
5532 else
5533 val |= 0x4010000;
5534
5535 tw32_f(MAC_SERDES_CFG, val);
5536 }
5537
5538 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5539 udelay(40);
5540
5541 				/* Link parallel detection: the link is up only
5542 				 * if we have PCS_SYNC and are not receiving
5543 				 * config code words. */
5544 mac_status = tr32(MAC_STATUS);
5545 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5546 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5547 tg3_setup_flow_control(tp, 0, 0);
5548 current_link_up = true;
5549 tp->phy_flags |=
5550 TG3_PHYFLG_PARALLEL_DETECT;
5551 tp->serdes_counter =
5552 SERDES_PARALLEL_DET_TIMEOUT;
5553 } else
5554 goto restart_autoneg;
5555 }
5556 }
5557 } else {
5558 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5559 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5560 }
5561
5562 out:
5563 return current_link_up;
5564 }
5565
5566 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5567 {
5568 bool current_link_up = false;
5569
5570 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5571 goto out;
5572
5573 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5574 u32 txflags, rxflags;
5575 int i;
5576
5577 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5578 u32 local_adv = 0, remote_adv = 0;
5579
5580 if (txflags & ANEG_CFG_PS1)
5581 local_adv |= ADVERTISE_1000XPAUSE;
5582 if (txflags & ANEG_CFG_PS2)
5583 local_adv |= ADVERTISE_1000XPSE_ASYM;
5584
5585 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5586 remote_adv |= LPA_1000XPAUSE;
5587 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5588 remote_adv |= LPA_1000XPAUSE_ASYM;
5589
5590 tp->link_config.rmt_adv =
5591 mii_adv_to_ethtool_adv_x(remote_adv);
5592
5593 tg3_setup_flow_control(tp, local_adv, remote_adv);
5594
5595 current_link_up = true;
5596 }
5597 for (i = 0; i < 30; i++) {
5598 udelay(20);
5599 tw32_f(MAC_STATUS,
5600 (MAC_STATUS_SYNC_CHANGED |
5601 MAC_STATUS_CFG_CHANGED));
5602 udelay(40);
5603 if ((tr32(MAC_STATUS) &
5604 (MAC_STATUS_SYNC_CHANGED |
5605 MAC_STATUS_CFG_CHANGED)) == 0)
5606 break;
5607 }
5608
5609 mac_status = tr32(MAC_STATUS);
5610 if (!current_link_up &&
5611 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5612 !(mac_status & MAC_STATUS_RCVD_CFG))
5613 current_link_up = true;
5614 } else {
5615 tg3_setup_flow_control(tp, 0, 0);
5616
5617 /* Forcing 1000FD link up. */
5618 current_link_up = true;
5619
5620 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5621 udelay(40);
5622
5623 tw32_f(MAC_MODE, tp->mac_mode);
5624 udelay(40);
5625 }
5626
5627 out:
5628 return current_link_up;
5629 }
5630
5631 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5632 {
5633 u32 orig_pause_cfg;
5634 u16 orig_active_speed;
5635 u8 orig_active_duplex;
5636 u32 mac_status;
5637 bool current_link_up;
5638 int i;
5639
5640 orig_pause_cfg = tp->link_config.active_flowctrl;
5641 orig_active_speed = tp->link_config.active_speed;
5642 orig_active_duplex = tp->link_config.active_duplex;
5643
5644 if (!tg3_flag(tp, HW_AUTONEG) &&
5645 tp->link_up &&
5646 tg3_flag(tp, INIT_COMPLETE)) {
5647 mac_status = tr32(MAC_STATUS);
5648 mac_status &= (MAC_STATUS_PCS_SYNCED |
5649 MAC_STATUS_SIGNAL_DET |
5650 MAC_STATUS_CFG_CHANGED |
5651 MAC_STATUS_RCVD_CFG);
5652 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5653 MAC_STATUS_SIGNAL_DET)) {
5654 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5655 MAC_STATUS_CFG_CHANGED));
5656 return 0;
5657 }
5658 }
5659
5660 tw32_f(MAC_TX_AUTO_NEG, 0);
5661
5662 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5663 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5664 tw32_f(MAC_MODE, tp->mac_mode);
5665 udelay(40);
5666
5667 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5668 tg3_init_bcm8002(tp);
5669
5670 /* Enable link change event even when serdes polling. */
5671 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5672 udelay(40);
5673
5674 current_link_up = false;
5675 tp->link_config.rmt_adv = 0;
5676 mac_status = tr32(MAC_STATUS);
5677
5678 if (tg3_flag(tp, HW_AUTONEG))
5679 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5680 else
5681 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5682
5683 tp->napi[0].hw_status->status =
5684 (SD_STATUS_UPDATED |
5685 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5686
5687 for (i = 0; i < 100; i++) {
5688 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5689 MAC_STATUS_CFG_CHANGED));
5690 udelay(5);
5691 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5692 MAC_STATUS_CFG_CHANGED |
5693 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5694 break;
5695 }
5696
5697 mac_status = tr32(MAC_STATUS);
5698 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5699 current_link_up = false;
5700 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5701 tp->serdes_counter == 0) {
5702 tw32_f(MAC_MODE, (tp->mac_mode |
5703 MAC_MODE_SEND_CONFIGS));
5704 udelay(1);
5705 tw32_f(MAC_MODE, tp->mac_mode);
5706 }
5707 }
5708
5709 if (current_link_up) {
5710 tp->link_config.active_speed = SPEED_1000;
5711 tp->link_config.active_duplex = DUPLEX_FULL;
5712 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5713 LED_CTRL_LNKLED_OVERRIDE |
5714 LED_CTRL_1000MBPS_ON));
5715 } else {
5716 tp->link_config.active_speed = SPEED_UNKNOWN;
5717 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5718 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5719 LED_CTRL_LNKLED_OVERRIDE |
5720 LED_CTRL_TRAFFIC_OVERRIDE));
5721 }
5722
5723 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5724 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5725 if (orig_pause_cfg != now_pause_cfg ||
5726 orig_active_speed != tp->link_config.active_speed ||
5727 orig_active_duplex != tp->link_config.active_duplex)
5728 tg3_link_report(tp);
5729 }
5730
5731 return 0;
5732 }
5733
5734 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5735 {
5736 int err = 0;
5737 u32 bmsr, bmcr;
5738 u16 current_speed = SPEED_UNKNOWN;
5739 u8 current_duplex = DUPLEX_UNKNOWN;
5740 bool current_link_up = false;
5741 u32 local_adv, remote_adv, sgsr;
5742
5743 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5744 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5745 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5746 (sgsr & SERDES_TG3_SGMII_MODE)) {
5747
5748 if (force_reset)
5749 tg3_phy_reset(tp);
5750
5751 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5752
5753 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5754 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5755 } else {
5756 current_link_up = true;
5757 if (sgsr & SERDES_TG3_SPEED_1000) {
5758 current_speed = SPEED_1000;
5759 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5760 } else if (sgsr & SERDES_TG3_SPEED_100) {
5761 current_speed = SPEED_100;
5762 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5763 } else {
5764 current_speed = SPEED_10;
5765 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5766 }
5767
5768 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5769 current_duplex = DUPLEX_FULL;
5770 else
5771 current_duplex = DUPLEX_HALF;
5772 }
5773
5774 tw32_f(MAC_MODE, tp->mac_mode);
5775 udelay(40);
5776
5777 tg3_clear_mac_status(tp);
5778
5779 goto fiber_setup_done;
5780 }
5781
5782 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5783 tw32_f(MAC_MODE, tp->mac_mode);
5784 udelay(40);
5785
5786 tg3_clear_mac_status(tp);
5787
5788 if (force_reset)
5789 tg3_phy_reset(tp);
5790
5791 tp->link_config.rmt_adv = 0;
5792
5793 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5794 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5795 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5796 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5797 bmsr |= BMSR_LSTATUS;
5798 else
5799 bmsr &= ~BMSR_LSTATUS;
5800 }
5801
5802 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5803
5804 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5805 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5806 /* do nothing, just check for link up at the end */
5807 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5808 u32 adv, newadv;
5809
5810 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5811 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5812 ADVERTISE_1000XPAUSE |
5813 ADVERTISE_1000XPSE_ASYM |
5814 ADVERTISE_SLCT);
5815
5816 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5817 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5818
5819 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5820 tg3_writephy(tp, MII_ADVERTISE, newadv);
5821 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5822 tg3_writephy(tp, MII_BMCR, bmcr);
5823
5824 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5825 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5826 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5827
5828 return err;
5829 }
5830 } else {
5831 u32 new_bmcr;
5832
5833 bmcr &= ~BMCR_SPEED1000;
5834 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5835
5836 if (tp->link_config.duplex == DUPLEX_FULL)
5837 new_bmcr |= BMCR_FULLDPLX;
5838
5839 if (new_bmcr != bmcr) {
5840 /* BMCR_SPEED1000 is a reserved bit that needs
5841 * to be set on write.
5842 */
5843 new_bmcr |= BMCR_SPEED1000;
5844
5845 /* Force a linkdown */
5846 if (tp->link_up) {
5847 u32 adv;
5848
5849 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5850 adv &= ~(ADVERTISE_1000XFULL |
5851 ADVERTISE_1000XHALF |
5852 ADVERTISE_SLCT);
5853 tg3_writephy(tp, MII_ADVERTISE, adv);
5854 tg3_writephy(tp, MII_BMCR, bmcr |
5855 BMCR_ANRESTART |
5856 BMCR_ANENABLE);
5857 udelay(10);
5858 tg3_carrier_off(tp);
5859 }
5860 tg3_writephy(tp, MII_BMCR, new_bmcr);
5861 bmcr = new_bmcr;
5862 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5863 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5864 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5865 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5866 bmsr |= BMSR_LSTATUS;
5867 else
5868 bmsr &= ~BMSR_LSTATUS;
5869 }
5870 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5871 }
5872 }
5873
5874 if (bmsr & BMSR_LSTATUS) {
5875 current_speed = SPEED_1000;
5876 current_link_up = true;
5877 if (bmcr & BMCR_FULLDPLX)
5878 current_duplex = DUPLEX_FULL;
5879 else
5880 current_duplex = DUPLEX_HALF;
5881
5882 local_adv = 0;
5883 remote_adv = 0;
5884
5885 if (bmcr & BMCR_ANENABLE) {
5886 u32 common;
5887
5888 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5889 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5890 common = local_adv & remote_adv;
5891 if (common & (ADVERTISE_1000XHALF |
5892 ADVERTISE_1000XFULL)) {
5893 if (common & ADVERTISE_1000XFULL)
5894 current_duplex = DUPLEX_FULL;
5895 else
5896 current_duplex = DUPLEX_HALF;
5897
5898 tp->link_config.rmt_adv =
5899 mii_adv_to_ethtool_adv_x(remote_adv);
5900 } else if (!tg3_flag(tp, 5780_CLASS)) {
5901 /* Link is up via parallel detect */
5902 } else {
5903 current_link_up = false;
5904 }
5905 }
5906 }
5907
5908 fiber_setup_done:
5909 if (current_link_up && current_duplex == DUPLEX_FULL)
5910 tg3_setup_flow_control(tp, local_adv, remote_adv);
5911
5912 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5913 if (tp->link_config.active_duplex == DUPLEX_HALF)
5914 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5915
5916 tw32_f(MAC_MODE, tp->mac_mode);
5917 udelay(40);
5918
5919 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5920
5921 tp->link_config.active_speed = current_speed;
5922 tp->link_config.active_duplex = current_duplex;
5923
5924 tg3_test_and_report_link_chg(tp, current_link_up);
5925 return err;
5926 }
5927
5928 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5929 {
5930 if (tp->serdes_counter) {
5931 /* Give autoneg time to complete. */
5932 tp->serdes_counter--;
5933 return;
5934 }
5935
5936 if (!tp->link_up &&
5937 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5938 u32 bmcr;
5939
5940 tg3_readphy(tp, MII_BMCR, &bmcr);
5941 if (bmcr & BMCR_ANENABLE) {
5942 u32 phy1, phy2;
5943
5944 /* Select shadow register 0x1f */
5945 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5946 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5947
5948 /* Select expansion interrupt status register */
5949 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5950 MII_TG3_DSP_EXP1_INT_STAT);
5951 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5952 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5953
5954 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5955 					/* We have signal detect and are not
5956 					 * receiving config code words; the link
5957 					 * is up by parallel detection.
5958 					 */
5959
5960 bmcr &= ~BMCR_ANENABLE;
5961 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5962 tg3_writephy(tp, MII_BMCR, bmcr);
5963 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5964 }
5965 }
5966 } else if (tp->link_up &&
5967 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5968 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5969 u32 phy2;
5970
5971 /* Select expansion interrupt status register */
5972 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5973 MII_TG3_DSP_EXP1_INT_STAT);
5974 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5975 if (phy2 & 0x20) {
5976 u32 bmcr;
5977
5978 /* Config code words received, turn on autoneg. */
5979 tg3_readphy(tp, MII_BMCR, &bmcr);
5980 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5981
5982 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5983
5984 }
5985 }
5986 }
5987
5988 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5989 {
5990 u32 val;
5991 int err;
5992
5993 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5994 err = tg3_setup_fiber_phy(tp, force_reset);
5995 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5996 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5997 else
5998 err = tg3_setup_copper_phy(tp, force_reset);
5999
6000 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6001 u32 scale;
6002
6003 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6004 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6005 scale = 65;
6006 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6007 scale = 6;
6008 else
6009 scale = 12;
6010
6011 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6012 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6013 tw32(GRC_MISC_CFG, val);
6014 }
6015
6016 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6017 (6 << TX_LENGTHS_IPG_SHIFT);
6018 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6019 tg3_asic_rev(tp) == ASIC_REV_5762)
6020 val |= tr32(MAC_TX_LENGTHS) &
6021 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6022 TX_LENGTHS_CNT_DWN_VAL_MSK);
6023
6024 if (tp->link_config.active_speed == SPEED_1000 &&
6025 tp->link_config.active_duplex == DUPLEX_HALF)
6026 tw32(MAC_TX_LENGTHS, val |
6027 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6028 else
6029 tw32(MAC_TX_LENGTHS, val |
6030 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6031
6032 if (!tg3_flag(tp, 5705_PLUS)) {
6033 if (tp->link_up) {
6034 tw32(HOSTCC_STAT_COAL_TICKS,
6035 tp->coal.stats_block_coalesce_usecs);
6036 } else {
6037 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6038 }
6039 }
6040
6041 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6042 val = tr32(PCIE_PWR_MGMT_THRESH);
6043 if (!tp->link_up)
6044 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6045 tp->pwrmgmt_thresh;
6046 else
6047 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6048 tw32(PCIE_PWR_MGMT_THRESH, val);
6049 }
6050
6051 return err;
6052 }
6053
6054 /* tp->lock must be held */
6055 static u64 tg3_refclk_read(struct tg3 *tp)
6056 {
6057 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6058 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6059 }
6060
6061 /* tp->lock must be held */
6062 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6063 {
6064 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6065 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6066 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6067 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6068 }
6069
6070 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6071 static inline void tg3_full_unlock(struct tg3 *tp);
6072 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6073 {
6074 struct tg3 *tp = netdev_priv(dev);
6075
6076 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6077 SOF_TIMESTAMPING_RX_SOFTWARE |
6078 SOF_TIMESTAMPING_SOFTWARE;
6079
6080 if (tg3_flag(tp, PTP_CAPABLE)) {
6081 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6082 SOF_TIMESTAMPING_RX_HARDWARE |
6083 SOF_TIMESTAMPING_RAW_HARDWARE;
6084 }
6085
6086 if (tp->ptp_clock)
6087 info->phc_index = ptp_clock_index(tp->ptp_clock);
6088 else
6089 info->phc_index = -1;
6090
6091 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6092
6093 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6094 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6095 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6096 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6097 return 0;
6098 }
6099
6100 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6101 {
6102 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6103 bool neg_adj = false;
6104 u32 correction = 0;
6105
6106 if (ppb < 0) {
6107 neg_adj = true;
6108 ppb = -ppb;
6109 }
6110
6111 /* Frequency adjustment is performed using hardware with a 24 bit
6112 * accumulator and a programmable correction value. On each clock, the
6113 * correction value gets added to the accumulator and when it
6114 * overflows, the time counter is incremented/decremented.
6115 *
6116 * So conversion from ppb to correction value is
6117 * ppb * (1 << 24) / 1000000000
6118 */
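/* Illustrative arithmetic (not from the hardware docs): ppb = 100000
 * (100 ppm) gives correction = (100000 * (1 << 24)) / 1000000000 = 1677,
 * and 1677 / (1 << 24) is ~99.96 ppm, so the integer truncation here
 * costs well under 1 ppm.
 */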
6119 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6120 TG3_EAV_REF_CLK_CORRECT_MASK;
6121
6122 tg3_full_lock(tp, 0);
6123
6124 if (correction)
6125 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6126 TG3_EAV_REF_CLK_CORRECT_EN |
6127 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6128 else
6129 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6130
6131 tg3_full_unlock(tp);
6132
6133 return 0;
6134 }
6135
6136 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6137 {
6138 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6139
6140 tg3_full_lock(tp, 0);
6141 tp->ptp_adjust += delta;
6142 tg3_full_unlock(tp);
6143
6144 return 0;
6145 }
6146
6147 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6148 {
6149 u64 ns;
6150 u32 remainder;
6151 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6152
6153 tg3_full_lock(tp, 0);
6154 ns = tg3_refclk_read(tp);
6155 ns += tp->ptp_adjust;
6156 tg3_full_unlock(tp);
6157
6158 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6159 ts->tv_nsec = remainder;
6160
6161 return 0;
6162 }
6163
6164 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6165 const struct timespec *ts)
6166 {
6167 u64 ns;
6168 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6169
6170 ns = timespec_to_ns(ts);
6171
6172 tg3_full_lock(tp, 0);
6173 tg3_refclk_write(tp, ns);
6174 tp->ptp_adjust = 0;
6175 tg3_full_unlock(tp);
6176
6177 return 0;
6178 }
6179
6180 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6181 struct ptp_clock_request *rq, int on)
6182 {
6183 return -EOPNOTSUPP;
6184 }
6185
6186 static const struct ptp_clock_info tg3_ptp_caps = {
6187 .owner = THIS_MODULE,
6188 .name = "tg3 clock",
6189 .max_adj = 250000000,
6190 .n_alarm = 0,
6191 .n_ext_ts = 0,
6192 .n_per_out = 0,
6193 .pps = 0,
6194 .adjfreq = tg3_ptp_adjfreq,
6195 .adjtime = tg3_ptp_adjtime,
6196 .gettime = tg3_ptp_gettime,
6197 .settime = tg3_ptp_settime,
6198 .enable = tg3_ptp_enable,
6199 };
6200
6201 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6202 struct skb_shared_hwtstamps *timestamp)
6203 {
6204 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6205 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6206 tp->ptp_adjust);
6207 }
6208
6209 /* tp->lock must be held */
6210 static void tg3_ptp_init(struct tg3 *tp)
6211 {
6212 if (!tg3_flag(tp, PTP_CAPABLE))
6213 return;
6214
6215 /* Initialize the hardware clock to the system time. */
6216 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6217 tp->ptp_adjust = 0;
6218 tp->ptp_info = tg3_ptp_caps;
6219 }
6220
6221 /* tp->lock must be held */
6222 static void tg3_ptp_resume(struct tg3 *tp)
6223 {
6224 if (!tg3_flag(tp, PTP_CAPABLE))
6225 return;
6226
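/* Folding the accumulated software delta into the hardware counter
 * here lets ptp_adjust restart from zero, so later reads need no
 * history from before the suspend.
 */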
6227 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6228 tp->ptp_adjust = 0;
6229 }
6230
6231 static void tg3_ptp_fini(struct tg3 *tp)
6232 {
6233 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6234 return;
6235
6236 ptp_clock_unregister(tp->ptp_clock);
6237 tp->ptp_clock = NULL;
6238 tp->ptp_adjust = 0;
6239 }
6240
6241 static inline int tg3_irq_sync(struct tg3 *tp)
6242 {
6243 return tp->irq_sync;
6244 }
6245
6246 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6247 {
6248 int i;
6249
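/* Offset dst so each value lands at its own register offset within
 * the caller's dump buffer; tg3_dump_state() then prints the buffer
 * indexed by register address.
 */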
6250 dst = (u32 *)((u8 *)dst + off);
6251 for (i = 0; i < len; i += sizeof(u32))
6252 *dst++ = tr32(off + i);
6253 }
6254
6255 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6256 {
6257 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6258 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6259 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6260 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6261 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6262 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6263 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6264 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6265 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6266 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6267 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6268 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6269 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6270 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6271 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6272 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6273 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6274 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6275 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6276
6277 if (tg3_flag(tp, SUPPORT_MSIX))
6278 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6279
6280 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6281 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6282 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6283 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6284 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6285 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6286 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6287 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6288
6289 if (!tg3_flag(tp, 5705_PLUS)) {
6290 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6291 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6292 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6293 }
6294
6295 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6296 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6297 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6298 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6299 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6300
6301 if (tg3_flag(tp, NVRAM))
6302 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6303 }
6304
6305 static void tg3_dump_state(struct tg3 *tp)
6306 {
6307 int i;
6308 u32 *regs;
6309
6310 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6311 if (!regs)
6312 return;
6313
6314 if (tg3_flag(tp, PCI_EXPRESS)) {
6315 /* Read up to but not including private PCI registers */
6316 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6317 regs[i / sizeof(u32)] = tr32(i);
6318 } else
6319 tg3_dump_legacy_regs(tp, regs);
6320
6321 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6322 if (!regs[i + 0] && !regs[i + 1] &&
6323 !regs[i + 2] && !regs[i + 3])
6324 continue;
6325
6326 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6327 i * 4,
6328 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6329 }
6330
6331 kfree(regs);
6332
6333 for (i = 0; i < tp->irq_cnt; i++) {
6334 struct tg3_napi *tnapi = &tp->napi[i];
6335
6336 /* SW status block */
6337 netdev_err(tp->dev,
6338 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6339 i,
6340 tnapi->hw_status->status,
6341 tnapi->hw_status->status_tag,
6342 tnapi->hw_status->rx_jumbo_consumer,
6343 tnapi->hw_status->rx_consumer,
6344 tnapi->hw_status->rx_mini_consumer,
6345 tnapi->hw_status->idx[0].rx_producer,
6346 tnapi->hw_status->idx[0].tx_consumer);
6347
6348 netdev_err(tp->dev,
6349 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6350 i,
6351 tnapi->last_tag, tnapi->last_irq_tag,
6352 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6353 tnapi->rx_rcb_ptr,
6354 tnapi->prodring.rx_std_prod_idx,
6355 tnapi->prodring.rx_std_cons_idx,
6356 tnapi->prodring.rx_jmb_prod_idx,
6357 tnapi->prodring.rx_jmb_cons_idx);
6358 }
6359 }
6360
6361 /* This is called whenever we suspect that the system chipset is re-
6362 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6363 * is bogus tx completions. We try to recover by setting the
6364 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6365 * in the workqueue.
6366 */
6367 static void tg3_tx_recover(struct tg3 *tp)
6368 {
6369 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6370 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6371
6372 netdev_warn(tp->dev,
6373 "The system may be re-ordering memory-mapped I/O "
6374 "cycles to the network device, attempting to recover. "
6375 "Please report the problem to the driver maintainer "
6376 "and include system chipset information.\n");
6377
6378 spin_lock(&tp->lock);
6379 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6380 spin_unlock(&tp->lock);
6381 }
6382
6383 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6384 {
6385 /* Tell compiler to fetch tx indices from memory. */
6386 barrier();
6387 return tnapi->tx_pending -
6388 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6389 }
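/* Illustrative arithmetic for the wraparound above, assuming
 * TG3_TX_RING_SIZE is 512: tx_prod = 5 and tx_cons = 510 give
 * (5 - 510) & 511 = 7 descriptors in flight, so the function returns
 * tx_pending - 7.
 */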
6390
6391 /* Tigon3 never reports partial packet sends. So we do not
6392 * need special logic to handle SKBs that have not had all
6393 * of their frags sent yet, like SunGEM does.
6394 */
6395 static void tg3_tx(struct tg3_napi *tnapi)
6396 {
6397 struct tg3 *tp = tnapi->tp;
6398 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6399 u32 sw_idx = tnapi->tx_cons;
6400 struct netdev_queue *txq;
6401 int index = tnapi - tp->napi;
6402 unsigned int pkts_compl = 0, bytes_compl = 0;
6403
6404 if (tg3_flag(tp, ENABLE_TSS))
6405 index--;
6406
6407 txq = netdev_get_tx_queue(tp->dev, index);
6408
6409 while (sw_idx != hw_idx) {
6410 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6411 struct sk_buff *skb = ri->skb;
6412 int i, tx_bug = 0;
6413
6414 if (unlikely(skb == NULL)) {
6415 tg3_tx_recover(tp);
6416 return;
6417 }
6418
6419 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6420 struct skb_shared_hwtstamps timestamp;
6421 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6422 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6423
6424 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6425
6426 skb_tstamp_tx(skb, &timestamp);
6427 }
6428
6429 pci_unmap_single(tp->pdev,
6430 dma_unmap_addr(ri, mapping),
6431 skb_headlen(skb),
6432 PCI_DMA_TODEVICE);
6433
6434 ri->skb = NULL;
6435
6436 while (ri->fragmented) {
6437 ri->fragmented = false;
6438 sw_idx = NEXT_TX(sw_idx);
6439 ri = &tnapi->tx_buffers[sw_idx];
6440 }
6441
6442 sw_idx = NEXT_TX(sw_idx);
6443
6444 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6445 ri = &tnapi->tx_buffers[sw_idx];
6446 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6447 tx_bug = 1;
6448
6449 pci_unmap_page(tp->pdev,
6450 dma_unmap_addr(ri, mapping),
6451 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6452 PCI_DMA_TODEVICE);
6453
6454 while (ri->fragmented) {
6455 ri->fragmented = false;
6456 sw_idx = NEXT_TX(sw_idx);
6457 ri = &tnapi->tx_buffers[sw_idx];
6458 }
6459
6460 sw_idx = NEXT_TX(sw_idx);
6461 }
6462
6463 pkts_compl++;
6464 bytes_compl += skb->len;
6465
6466 dev_kfree_skb(skb);
6467
6468 if (unlikely(tx_bug)) {
6469 tg3_tx_recover(tp);
6470 return;
6471 }
6472 }
6473
6474 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6475
6476 tnapi->tx_cons = sw_idx;
6477
6478 /* Need to make the tx_cons update visible to tg3_start_xmit()
6479 * before checking for netif_queue_stopped(). Without the
6480 * memory barrier, there is a small possibility that tg3_start_xmit()
6481 * will miss it and cause the queue to be stopped forever.
6482 */
6483 smp_mb();
6484
6485 if (unlikely(netif_tx_queue_stopped(txq) &&
6486 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6487 __netif_tx_lock(txq, smp_processor_id());
6488 if (netif_tx_queue_stopped(txq) &&
6489 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6490 netif_tx_wake_queue(txq);
6491 __netif_tx_unlock(txq);
6492 }
6493 }
6494
6495 static void tg3_frag_free(bool is_frag, void *data)
6496 {
6497 if (is_frag)
6498 put_page(virt_to_head_page(data));
6499 else
6500 kfree(data);
6501 }
6502
6503 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6504 {
6505 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6506 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6507
6508 if (!ri->data)
6509 return;
6510
6511 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6512 map_sz, PCI_DMA_FROMDEVICE);
6513 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6514 ri->data = NULL;
6515 }
6516
6517
6518 /* Returns size of skb allocated or < 0 on error.
6519 *
6520 * We only need to fill in the address because the other members
6521 * of the RX descriptor are invariant, see tg3_init_rings.
6522 *
6523 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6524 * posting buffers we only dirty the first cache line of the RX
6525 * descriptor (containing the address). Whereas for the RX status
6526 * buffers the cpu only reads the last cacheline of the RX descriptor
6527 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6528 */
6529 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6530 u32 opaque_key, u32 dest_idx_unmasked,
6531 unsigned int *frag_size)
6532 {
6533 struct tg3_rx_buffer_desc *desc;
6534 struct ring_info *map;
6535 u8 *data;
6536 dma_addr_t mapping;
6537 int skb_size, data_size, dest_idx;
6538
6539 switch (opaque_key) {
6540 case RXD_OPAQUE_RING_STD:
6541 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6542 desc = &tpr->rx_std[dest_idx];
6543 map = &tpr->rx_std_buffers[dest_idx];
6544 data_size = tp->rx_pkt_map_sz;
6545 break;
6546
6547 case RXD_OPAQUE_RING_JUMBO:
6548 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6549 desc = &tpr->rx_jmb[dest_idx].std;
6550 map = &tpr->rx_jmb_buffers[dest_idx];
6551 data_size = TG3_RX_JMB_MAP_SZ;
6552 break;
6553
6554 default:
6555 return -EINVAL;
6556 }
6557
6558 /* Do not overwrite any of the map or rp information
6559 * until we are sure we can commit to a new buffer.
6560 *
6561 * Callers depend upon this behavior and assume that
6562 * we leave everything unchanged if we fail.
6563 */
6564 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6565 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
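/* Sub-page buffers come from the page-frag allocator, larger ones
 * from kmalloc(); tg3_frag_free() repeats the same size-based test
 * when releasing them.
 */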
6566 if (skb_size <= PAGE_SIZE) {
6567 data = netdev_alloc_frag(skb_size);
6568 *frag_size = skb_size;
6569 } else {
6570 data = kmalloc(skb_size, GFP_ATOMIC);
6571 *frag_size = 0;
6572 }
6573 if (!data)
6574 return -ENOMEM;
6575
6576 mapping = pci_map_single(tp->pdev,
6577 data + TG3_RX_OFFSET(tp),
6578 data_size,
6579 PCI_DMA_FROMDEVICE);
6580 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6581 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6582 return -EIO;
6583 }
6584
6585 map->data = data;
6586 dma_unmap_addr_set(map, mapping, mapping);
6587
6588 desc->addr_hi = ((u64)mapping >> 32);
6589 desc->addr_lo = ((u64)mapping & 0xffffffff);
6590
6591 return data_size;
6592 }
6593
6594 /* We only need to move over in the address because the other
6595 * members of the RX descriptor are invariant. See notes above
6596 * tg3_alloc_rx_data for full details.
6597 */
6598 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6599 struct tg3_rx_prodring_set *dpr,
6600 u32 opaque_key, int src_idx,
6601 u32 dest_idx_unmasked)
6602 {
6603 struct tg3 *tp = tnapi->tp;
6604 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6605 struct ring_info *src_map, *dest_map;
6606 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6607 int dest_idx;
6608
6609 switch (opaque_key) {
6610 case RXD_OPAQUE_RING_STD:
6611 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6612 dest_desc = &dpr->rx_std[dest_idx];
6613 dest_map = &dpr->rx_std_buffers[dest_idx];
6614 src_desc = &spr->rx_std[src_idx];
6615 src_map = &spr->rx_std_buffers[src_idx];
6616 break;
6617
6618 case RXD_OPAQUE_RING_JUMBO:
6619 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6620 dest_desc = &dpr->rx_jmb[dest_idx].std;
6621 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6622 src_desc = &spr->rx_jmb[src_idx].std;
6623 src_map = &spr->rx_jmb_buffers[src_idx];
6624 break;
6625
6626 default:
6627 return;
6628 }
6629
6630 dest_map->data = src_map->data;
6631 dma_unmap_addr_set(dest_map, mapping,
6632 dma_unmap_addr(src_map, mapping));
6633 dest_desc->addr_hi = src_desc->addr_hi;
6634 dest_desc->addr_lo = src_desc->addr_lo;
6635
6636 /* Ensure that the update to the skb happens after the physical
6637 * addresses have been transferred to the new BD location.
6638 */
6639 smp_wmb();
6640
6641 src_map->data = NULL;
6642 }
6643
6644 /* The RX ring scheme is composed of multiple rings which post fresh
6645 * buffers to the chip, and one special ring the chip uses to report
6646 * status back to the host.
6647 *
6648 * The special ring reports the status of received packets to the
6649 * host. The chip does not write into the original descriptor the
6650 * RX buffer was obtained from. The chip simply takes the original
6651 * descriptor as provided by the host, updates the status and length
6652 * field, then writes this into the next status ring entry.
6653 *
6654 * Each ring the host uses to post buffers to the chip is described
6655 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6656 * it is first placed into the on-chip ram. When the packet's length
6657 * is known, it walks down the TG3_BDINFO entries to select the ring.
6658 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6659 * whose MAXLEN covers the new packet's length is chosen.
6660 *
6661 * The "separate ring for rx status" scheme may sound queer, but it makes
6662 * sense from a cache coherency perspective. If only the host writes
6663 * to the buffer post rings, and only the chip writes to the rx status
6664 * rings, then cache lines never move beyond shared-modified state.
6665 * If both the host and chip were to write into the same ring, cache line
6666 * eviction could occur since both entities want it in an exclusive state.
6667 */
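/* For example (hypothetical MAXLEN values): with a standard ring
 * MAXLEN of, say, 1536 and a jumbo ring MAXLEN of 9022, a 4000-byte
 * frame would skip the standard TG3_BDINFO and take its buffer from
 * the jumbo ring.
 */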
6668 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6669 {
6670 struct tg3 *tp = tnapi->tp;
6671 u32 work_mask, rx_std_posted = 0;
6672 u32 std_prod_idx, jmb_prod_idx;
6673 u32 sw_idx = tnapi->rx_rcb_ptr;
6674 u16 hw_idx;
6675 int received;
6676 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6677
6678 hw_idx = *(tnapi->rx_rcb_prod_idx);
6679 /*
6680 * We need to order the read of hw_idx and the read of
6681 * the opaque cookie.
6682 */
6683 rmb();
6684 work_mask = 0;
6685 received = 0;
6686 std_prod_idx = tpr->rx_std_prod_idx;
6687 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6688 while (sw_idx != hw_idx && budget > 0) {
6689 struct ring_info *ri;
6690 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6691 unsigned int len;
6692 struct sk_buff *skb;
6693 dma_addr_t dma_addr;
6694 u32 opaque_key, desc_idx, *post_ptr;
6695 u8 *data;
6696 u64 tstamp = 0;
6697
6698 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6699 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6700 if (opaque_key == RXD_OPAQUE_RING_STD) {
6701 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6702 dma_addr = dma_unmap_addr(ri, mapping);
6703 data = ri->data;
6704 post_ptr = &std_prod_idx;
6705 rx_std_posted++;
6706 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6707 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6708 dma_addr = dma_unmap_addr(ri, mapping);
6709 data = ri->data;
6710 post_ptr = &jmb_prod_idx;
6711 } else
6712 goto next_pkt_nopost;
6713
6714 work_mask |= opaque_key;
6715
6716 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6717 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6718 drop_it:
6719 tg3_recycle_rx(tnapi, tpr, opaque_key,
6720 desc_idx, *post_ptr);
6721 drop_it_no_recycle:
6722 /* Other statistics are tracked by the card. */
6723 tp->rx_dropped++;
6724 goto next_pkt;
6725 }
6726
6727 prefetch(data + TG3_RX_OFFSET(tp));
6728 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6729 ETH_FCS_LEN;
6730
6731 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6732 RXD_FLAG_PTPSTAT_PTPV1 ||
6733 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6734 RXD_FLAG_PTPSTAT_PTPV2) {
6735 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6736 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6737 }
6738
6739 if (len > TG3_RX_COPY_THRESH(tp)) {
6740 int skb_size;
6741 unsigned int frag_size;
6742
6743 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6744 *post_ptr, &frag_size);
6745 if (skb_size < 0)
6746 goto drop_it;
6747
6748 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6749 PCI_DMA_FROMDEVICE);
6750
6751 skb = build_skb(data, frag_size);
6752 if (!skb) {
6753 tg3_frag_free(frag_size != 0, data);
6754 goto drop_it_no_recycle;
6755 }
6756 skb_reserve(skb, TG3_RX_OFFSET(tp));
6757 /* Ensure that the update to the data happens
6758 * after the usage of the old DMA mapping.
6759 */
6760 smp_wmb();
6761
6762 ri->data = NULL;
6763
6764 } else {
6765 tg3_recycle_rx(tnapi, tpr, opaque_key,
6766 desc_idx, *post_ptr);
6767
6768 skb = netdev_alloc_skb(tp->dev,
6769 len + TG3_RAW_IP_ALIGN);
6770 if (skb == NULL)
6771 goto drop_it_no_recycle;
6772
6773 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6774 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6775 memcpy(skb->data,
6776 data + TG3_RX_OFFSET(tp),
6777 len);
6778 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6779 }
6780
6781 skb_put(skb, len);
6782 if (tstamp)
6783 tg3_hwclock_to_timestamp(tp, tstamp,
6784 skb_hwtstamps(skb));
6785
6786 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6787 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6788 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6789 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6790 skb->ip_summed = CHECKSUM_UNNECESSARY;
6791 else
6792 skb_checksum_none_assert(skb);
6793
6794 skb->protocol = eth_type_trans(skb, tp->dev);
6795
6796 if (len > (tp->dev->mtu + ETH_HLEN) &&
6797 skb->protocol != htons(ETH_P_8021Q)) {
6798 dev_kfree_skb(skb);
6799 goto drop_it_no_recycle;
6800 }
6801
6802 if (desc->type_flags & RXD_FLAG_VLAN &&
6803 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6804 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6805 desc->err_vlan & RXD_VLAN_MASK);
6806
6807 napi_gro_receive(&tnapi->napi, skb);
6808
6809 received++;
6810 budget--;
6811
6812 next_pkt:
6813 (*post_ptr)++;
6814
6815 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6816 tpr->rx_std_prod_idx = std_prod_idx &
6817 tp->rx_std_ring_mask;
6818 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6819 tpr->rx_std_prod_idx);
6820 work_mask &= ~RXD_OPAQUE_RING_STD;
6821 rx_std_posted = 0;
6822 }
6823 next_pkt_nopost:
6824 sw_idx++;
6825 sw_idx &= tp->rx_ret_ring_mask;
6826
6827 /* Refresh hw_idx to see if there is new work */
6828 if (sw_idx == hw_idx) {
6829 hw_idx = *(tnapi->rx_rcb_prod_idx);
6830 rmb();
6831 }
6832 }
6833
6834 /* ACK the status ring. */
6835 tnapi->rx_rcb_ptr = sw_idx;
6836 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6837
6838 /* Refill RX ring(s). */
6839 if (!tg3_flag(tp, ENABLE_RSS)) {
6840 /* Sync BD data before updating mailbox */
6841 wmb();
6842
6843 if (work_mask & RXD_OPAQUE_RING_STD) {
6844 tpr->rx_std_prod_idx = std_prod_idx &
6845 tp->rx_std_ring_mask;
6846 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6847 tpr->rx_std_prod_idx);
6848 }
6849 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6850 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6851 tp->rx_jmb_ring_mask;
6852 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6853 tpr->rx_jmb_prod_idx);
6854 }
6855 mmiowb();
6856 } else if (work_mask) {
6857 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6858 * updated before the producer indices can be updated.
6859 */
6860 smp_wmb();
6861
6862 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6863 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6864
6865 if (tnapi != &tp->napi[1]) {
6866 tp->rx_refill = true;
6867 napi_schedule(&tp->napi[1].napi);
6868 }
6869 }
6870
6871 return received;
6872 }
6873
6874 static void tg3_poll_link(struct tg3 *tp)
6875 {
6876 /* handle link change and other phy events */
6877 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6878 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6879
6880 if (sblk->status & SD_STATUS_LINK_CHG) {
6881 sblk->status = SD_STATUS_UPDATED |
6882 (sblk->status & ~SD_STATUS_LINK_CHG);
6883 spin_lock(&tp->lock);
6884 if (tg3_flag(tp, USE_PHYLIB)) {
6885 tw32_f(MAC_STATUS,
6886 (MAC_STATUS_SYNC_CHANGED |
6887 MAC_STATUS_CFG_CHANGED |
6888 MAC_STATUS_MI_COMPLETION |
6889 MAC_STATUS_LNKSTATE_CHANGED));
6890 udelay(40);
6891 } else
6892 tg3_setup_phy(tp, false);
6893 spin_unlock(&tp->lock);
6894 }
6895 }
6896 }
6897
6898 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6899 struct tg3_rx_prodring_set *dpr,
6900 struct tg3_rx_prodring_set *spr)
6901 {
6902 u32 si, di, cpycnt, src_prod_idx;
6903 int i, err = 0;
6904
6905 while (1) {
6906 src_prod_idx = spr->rx_std_prod_idx;
6907
6908 /* Make sure updates to the rx_std_buffers[] entries and the
6909 * standard producer index are seen in the correct order.
6910 */
6911 smp_rmb();
6912
6913 if (spr->rx_std_cons_idx == src_prod_idx)
6914 break;
6915
6916 if (spr->rx_std_cons_idx < src_prod_idx)
6917 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6918 else
6919 cpycnt = tp->rx_std_ring_mask + 1 -
6920 spr->rx_std_cons_idx;
6921
6922 cpycnt = min(cpycnt,
6923 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6924
6925 si = spr->rx_std_cons_idx;
6926 di = dpr->rx_std_prod_idx;
6927
6928 for (i = di; i < di + cpycnt; i++) {
6929 if (dpr->rx_std_buffers[i].data) {
6930 cpycnt = i - di;
6931 err = -ENOSPC;
6932 break;
6933 }
6934 }
6935
6936 if (!cpycnt)
6937 break;
6938
6939 /* Ensure that updates to the rx_std_buffers ring and the
6940 * shadowed hardware producer ring from tg3_recycle_skb() are
6941 * ordered correctly WRT the skb check above.
6942 */
6943 smp_rmb();
6944
6945 memcpy(&dpr->rx_std_buffers[di],
6946 &spr->rx_std_buffers[si],
6947 cpycnt * sizeof(struct ring_info));
6948
6949 for (i = 0; i < cpycnt; i++, di++, si++) {
6950 struct tg3_rx_buffer_desc *sbd, *dbd;
6951 sbd = &spr->rx_std[si];
6952 dbd = &dpr->rx_std[di];
6953 dbd->addr_hi = sbd->addr_hi;
6954 dbd->addr_lo = sbd->addr_lo;
6955 }
6956
6957 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6958 tp->rx_std_ring_mask;
6959 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6960 tp->rx_std_ring_mask;
6961 }
6962
6963 while (1) {
6964 src_prod_idx = spr->rx_jmb_prod_idx;
6965
6966 /* Make sure updates to the rx_jmb_buffers[] entries and
6967 * the jumbo producer index are seen in the correct order.
6968 */
6969 smp_rmb();
6970
6971 if (spr->rx_jmb_cons_idx == src_prod_idx)
6972 break;
6973
6974 if (spr->rx_jmb_cons_idx < src_prod_idx)
6975 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6976 else
6977 cpycnt = tp->rx_jmb_ring_mask + 1 -
6978 spr->rx_jmb_cons_idx;
6979
6980 cpycnt = min(cpycnt,
6981 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6982
6983 si = spr->rx_jmb_cons_idx;
6984 di = dpr->rx_jmb_prod_idx;
6985
6986 for (i = di; i < di + cpycnt; i++) {
6987 if (dpr->rx_jmb_buffers[i].data) {
6988 cpycnt = i - di;
6989 err = -ENOSPC;
6990 break;
6991 }
6992 }
6993
6994 if (!cpycnt)
6995 break;
6996
6997 /* Ensure that updates to the rx_jmb_buffers ring and the
6998 * shadowed hardware producer ring from tg3_recycle_skb() are
6999 * ordered correctly WRT the skb check above.
7000 */
7001 smp_rmb();
7002
7003 memcpy(&dpr->rx_jmb_buffers[di],
7004 &spr->rx_jmb_buffers[si],
7005 cpycnt * sizeof(struct ring_info));
7006
7007 for (i = 0; i < cpycnt; i++, di++, si++) {
7008 struct tg3_rx_buffer_desc *sbd, *dbd;
7009 sbd = &spr->rx_jmb[si].std;
7010 dbd = &dpr->rx_jmb[di].std;
7011 dbd->addr_hi = sbd->addr_hi;
7012 dbd->addr_lo = sbd->addr_lo;
7013 }
7014
7015 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7016 tp->rx_jmb_ring_mask;
7017 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7018 tp->rx_jmb_ring_mask;
7019 }
7020
7021 return err;
7022 }
7023
7024 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7025 {
7026 struct tg3 *tp = tnapi->tp;
7027
7028 /* run TX completion thread */
7029 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7030 tg3_tx(tnapi);
7031 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7032 return work_done;
7033 }
7034
7035 if (!tnapi->rx_rcb_prod_idx)
7036 return work_done;
7037
7038 /* run RX thread, within the bounds set by NAPI.
7039 * All RX "locking" is done by ensuring outside
7040 * code synchronizes with tg3->napi.poll()
7041 */
7042 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7043 work_done += tg3_rx(tnapi, budget - work_done);
7044
7045 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7046 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7047 int i, err = 0;
7048 u32 std_prod_idx = dpr->rx_std_prod_idx;
7049 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7050
7051 tp->rx_refill = false;
7052 for (i = 1; i <= tp->rxq_cnt; i++)
7053 err |= tg3_rx_prodring_xfer(tp, dpr,
7054 &tp->napi[i].prodring);
7055
7056 wmb();
7057
7058 if (std_prod_idx != dpr->rx_std_prod_idx)
7059 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7060 dpr->rx_std_prod_idx);
7061
7062 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7063 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7064 dpr->rx_jmb_prod_idx);
7065
7066 mmiowb();
7067
7068 if (err)
7069 tw32_f(HOSTCC_MODE, tp->coal_now);
7070 }
7071
7072 return work_done;
7073 }
7074
7075 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7076 {
7077 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7078 schedule_work(&tp->reset_task);
7079 }
7080
7081 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7082 {
7083 cancel_work_sync(&tp->reset_task);
7084 tg3_flag_clear(tp, RESET_TASK_PENDING);
7085 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7086 }
7087
7088 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7089 {
7090 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7091 struct tg3 *tp = tnapi->tp;
7092 int work_done = 0;
7093 struct tg3_hw_status *sblk = tnapi->hw_status;
7094
7095 while (1) {
7096 work_done = tg3_poll_work(tnapi, work_done, budget);
7097
7098 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7099 goto tx_recovery;
7100
7101 if (unlikely(work_done >= budget))
7102 break;
7103
7104 /* tp->last_tag is used in tg3_int_reenable() below
7105 * to tell the hw how much work has been processed,
7106 * so we must read it before checking for more work.
7107 */
7108 tnapi->last_tag = sblk->status_tag;
7109 tnapi->last_irq_tag = tnapi->last_tag;
7110 rmb();
7111
7112 /* check for RX/TX work to do */
7113 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7114 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7115
7116 /* This test here is not race-free, but will reduce
7117 * the number of interrupts by looping again.
7118 */
7119 if (tnapi == &tp->napi[1] && tp->rx_refill)
7120 continue;
7121
7122 napi_complete(napi);
7123 /* Reenable interrupts. */
7124 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7125
7126 /* This test here is synchronized by napi_schedule()
7127 * and napi_complete() to close the race condition.
7128 */
7129 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7130 tw32(HOSTCC_MODE, tp->coalesce_mode |
7131 HOSTCC_MODE_ENABLE |
7132 tnapi->coal_now);
7133 }
7134 mmiowb();
7135 break;
7136 }
7137 }
7138
7139 return work_done;
7140
7141 tx_recovery:
7142 /* work_done is guaranteed to be less than budget. */
7143 napi_complete(napi);
7144 tg3_reset_task_schedule(tp);
7145 return work_done;
7146 }
7147
7148 static void tg3_process_error(struct tg3 *tp)
7149 {
7150 u32 val;
7151 bool real_error = false;
7152
7153 if (tg3_flag(tp, ERROR_PROCESSED))
7154 return;
7155
7156 /* Check Flow Attention register */
7157 val = tr32(HOSTCC_FLOW_ATTN);
7158 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7159 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7160 real_error = true;
7161 }
7162
7163 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7164 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7165 real_error = true;
7166 }
7167
7168 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7169 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7170 real_error = true;
7171 }
7172
7173 if (!real_error)
7174 return;
7175
7176 tg3_dump_state(tp);
7177
7178 tg3_flag_set(tp, ERROR_PROCESSED);
7179 tg3_reset_task_schedule(tp);
7180 }
7181
7182 static int tg3_poll(struct napi_struct *napi, int budget)
7183 {
7184 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7185 struct tg3 *tp = tnapi->tp;
7186 int work_done = 0;
7187 struct tg3_hw_status *sblk = tnapi->hw_status;
7188
7189 while (1) {
7190 if (sblk->status & SD_STATUS_ERROR)
7191 tg3_process_error(tp);
7192
7193 tg3_poll_link(tp);
7194
7195 work_done = tg3_poll_work(tnapi, work_done, budget);
7196
7197 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7198 goto tx_recovery;
7199
7200 if (unlikely(work_done >= budget))
7201 break;
7202
7203 if (tg3_flag(tp, TAGGED_STATUS)) {
7204 /* tp->last_tag is used in tg3_int_reenable() below
7205 * to tell the hw how much work has been processed,
7206 * so we must read it before checking for more work.
7207 */
7208 tnapi->last_tag = sblk->status_tag;
7209 tnapi->last_irq_tag = tnapi->last_tag;
7210 rmb();
7211 } else
7212 sblk->status &= ~SD_STATUS_UPDATED;
7213
7214 if (likely(!tg3_has_work(tnapi))) {
7215 napi_complete(napi);
7216 tg3_int_reenable(tnapi);
7217 break;
7218 }
7219 }
7220
7221 return work_done;
7222
7223 tx_recovery:
7224 /* work_done is guaranteed to be less than budget. */
7225 napi_complete(napi);
7226 tg3_reset_task_schedule(tp);
7227 return work_done;
7228 }
7229
7230 static void tg3_napi_disable(struct tg3 *tp)
7231 {
7232 int i;
7233
7234 for (i = tp->irq_cnt - 1; i >= 0; i--)
7235 napi_disable(&tp->napi[i].napi);
7236 }
7237
7238 static void tg3_napi_enable(struct tg3 *tp)
7239 {
7240 int i;
7241
7242 for (i = 0; i < tp->irq_cnt; i++)
7243 napi_enable(&tp->napi[i].napi);
7244 }
7245
7246 static void tg3_napi_init(struct tg3 *tp)
7247 {
7248 int i;
7249
7250 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7251 for (i = 1; i < tp->irq_cnt; i++)
7252 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7253 }
7254
7255 static void tg3_napi_fini(struct tg3 *tp)
7256 {
7257 int i;
7258
7259 for (i = 0; i < tp->irq_cnt; i++)
7260 netif_napi_del(&tp->napi[i].napi);
7261 }
7262
7263 static inline void tg3_netif_stop(struct tg3 *tp)
7264 {
7265 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7266 tg3_napi_disable(tp);
7267 netif_carrier_off(tp->dev);
7268 netif_tx_disable(tp->dev);
7269 }
7270
7271 /* tp->lock must be held */
7272 static inline void tg3_netif_start(struct tg3 *tp)
7273 {
7274 tg3_ptp_resume(tp);
7275
7276 /* NOTE: unconditional netif_tx_wake_all_queues is only
7277 * appropriate so long as all callers are assured to
7278 * have free tx slots (such as after tg3_init_hw)
7279 */
7280 netif_tx_wake_all_queues(tp->dev);
7281
7282 if (tp->link_up)
7283 netif_carrier_on(tp->dev);
7284
7285 tg3_napi_enable(tp);
7286 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7287 tg3_enable_ints(tp);
7288 }
7289
7290 static void tg3_irq_quiesce(struct tg3 *tp)
7291 {
7292 int i;
7293
7294 BUG_ON(tp->irq_sync);
7295
7296 tp->irq_sync = 1;
7297 smp_mb();
7298
7299 for (i = 0; i < tp->irq_cnt; i++)
7300 synchronize_irq(tp->napi[i].irq_vec);
7301 }
7302
7303 /* Fully shut down all tg3 driver activity elsewhere in the system.
7304 * If irq_sync is non-zero, we also synchronize with the IRQ
7305 * handlers. Most of the time this is only necessary when
7306 * shutting down the device.
7307 */
7308 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7309 {
7310 spin_lock_bh(&tp->lock);
7311 if (irq_sync)
7312 tg3_irq_quiesce(tp);
7313 }
7314
7315 static inline void tg3_full_unlock(struct tg3 *tp)
7316 {
7317 spin_unlock_bh(&tp->lock);
7318 }
7319
7320 /* One-shot MSI handler - the chip automatically disables the
7321 * interrupt after sending the MSI, so the driver doesn't have to.
7322 */
7323 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7324 {
7325 struct tg3_napi *tnapi = dev_id;
7326 struct tg3 *tp = tnapi->tp;
7327
7328 prefetch(tnapi->hw_status);
7329 if (tnapi->rx_rcb)
7330 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7331
7332 if (likely(!tg3_irq_sync(tp)))
7333 napi_schedule(&tnapi->napi);
7334
7335 return IRQ_HANDLED;
7336 }
7337
7338 /* MSI ISR - No need to check for interrupt sharing and no need to
7339 * flush status block and interrupt mailbox. PCI ordering rules
7340 * guarantee that MSI will arrive after the status block.
7341 */
7342 static irqreturn_t tg3_msi(int irq, void *dev_id)
7343 {
7344 struct tg3_napi *tnapi = dev_id;
7345 struct tg3 *tp = tnapi->tp;
7346
7347 prefetch(tnapi->hw_status);
7348 if (tnapi->rx_rcb)
7349 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7350 /*
7351 * Writing any value to intr-mbox-0 clears PCI INTA# and
7352 * chip-internal interrupt pending events.
7353 * Writing non-zero to intr-mbox-0 additionally tells the
7354 * NIC to stop sending us irqs, engaging "in-intr-handler"
7355 * event coalescing.
7356 */
7357 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7358 if (likely(!tg3_irq_sync(tp)))
7359 napi_schedule(&tnapi->napi);
7360
7361 return IRQ_RETVAL(1);
7362 }
7363
7364 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7365 {
7366 struct tg3_napi *tnapi = dev_id;
7367 struct tg3 *tp = tnapi->tp;
7368 struct tg3_hw_status *sblk = tnapi->hw_status;
7369 unsigned int handled = 1;
7370
7371 /* In INTx mode, it is possible for the interrupt to arrive at
7372 * the CPU before the status block write posted prior to the interrupt.
7373 * Reading the PCI State register will confirm whether the
7374 * interrupt is ours and will flush the status block.
7375 */
7376 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7377 if (tg3_flag(tp, CHIP_RESETTING) ||
7378 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7379 handled = 0;
7380 goto out;
7381 }
7382 }
7383
7384 /*
7385 * Writing any value to intr-mbox-0 clears PCI INTA# and
7386 * chip-internal interrupt pending events.
7387 * Writing non-zero to intr-mbox-0 additionally tells the
7388 * NIC to stop sending us irqs, engaging "in-intr-handler"
7389 * event coalescing.
7390 *
7391 * Flush the mailbox to de-assert the IRQ immediately to prevent
7392 * spurious interrupts. The flush impacts performance but
7393 * excessive spurious interrupts can be worse in some cases.
7394 */
7395 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7396 if (tg3_irq_sync(tp))
7397 goto out;
7398 sblk->status &= ~SD_STATUS_UPDATED;
7399 if (likely(tg3_has_work(tnapi))) {
7400 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7401 napi_schedule(&tnapi->napi);
7402 } else {
7403 /* No work, shared interrupt perhaps? Re-enable
7404 * interrupts, and flush that PCI write
7405 */
7406 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7407 0x00000000);
7408 }
7409 out:
7410 return IRQ_RETVAL(handled);
7411 }
7412
7413 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7414 {
7415 struct tg3_napi *tnapi = dev_id;
7416 struct tg3 *tp = tnapi->tp;
7417 struct tg3_hw_status *sblk = tnapi->hw_status;
7418 unsigned int handled = 1;
7419
7420 /* In INTx mode, it is possible for the interrupt to arrive at
7421 * the CPU before the status block write posted prior to the interrupt.
7422 * Reading the PCI State register will confirm whether the
7423 * interrupt is ours and will flush the status block.
7424 */
7425 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7426 if (tg3_flag(tp, CHIP_RESETTING) ||
7427 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7428 handled = 0;
7429 goto out;
7430 }
7431 }
7432
7433 /*
7434 * Writing any value to intr-mbox-0 clears PCI INTA# and
7435 * chip-internal interrupt pending events.
7436 * Writing non-zero to intr-mbox-0 additionally tells the
7437 * NIC to stop sending us irqs, engaging "in-intr-handler"
7438 * event coalescing.
7439 *
7440 * Flush the mailbox to de-assert the IRQ immediately to prevent
7441 * spurious interrupts. The flush impacts performance but
7442 * excessive spurious interrupts can be worse in some cases.
7443 */
7444 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7445
7446 /*
7447 * In a shared interrupt configuration, sometimes other devices'
7448 * interrupts will scream. We record the current status tag here
7449 * so that the above check can report that the screaming interrupts
7450 * are unhandled. Eventually they will be silenced.
7451 */
7452 tnapi->last_irq_tag = sblk->status_tag;
7453
7454 if (tg3_irq_sync(tp))
7455 goto out;
7456
7457 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7458
7459 napi_schedule(&tnapi->napi);
7460
7461 out:
7462 return IRQ_RETVAL(handled);
7463 }
7464
7465 /* ISR for interrupt test */
7466 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7467 {
7468 struct tg3_napi *tnapi = dev_id;
7469 struct tg3 *tp = tnapi->tp;
7470 struct tg3_hw_status *sblk = tnapi->hw_status;
7471
7472 if ((sblk->status & SD_STATUS_UPDATED) ||
7473 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7474 tg3_disable_ints(tp);
7475 return IRQ_RETVAL(1);
7476 }
7477 return IRQ_RETVAL(0);
7478 }
7479
7480 #ifdef CONFIG_NET_POLL_CONTROLLER
7481 static void tg3_poll_controller(struct net_device *dev)
7482 {
7483 int i;
7484 struct tg3 *tp = netdev_priv(dev);
7485
7486 if (tg3_irq_sync(tp))
7487 return;
7488
7489 for (i = 0; i < tp->irq_cnt; i++)
7490 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7491 }
7492 #endif
7493
7494 static void tg3_tx_timeout(struct net_device *dev)
7495 {
7496 struct tg3 *tp = netdev_priv(dev);
7497
7498 if (netif_msg_tx_err(tp)) {
7499 netdev_err(dev, "transmit timed out, resetting\n");
7500 tg3_dump_state(tp);
7501 }
7502
7503 tg3_reset_task_schedule(tp);
7504 }
7505
7506 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7507 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7508 {
7509 u32 base = (u32) mapping & 0xffffffff;
7510
7511 return (base > 0xffffdcc0) && (base + len + 8 < base);
7512 }
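/* Illustrative: base = 0xffffff00 and len = 0x200 give
 * base + len + 8 = 0x108 after the 32-bit wrap, which is below base,
 * so the buffer straddles a 4GB boundary.  The base > 0xffffdcc0
 * pre-check simply skips the math for bases more than ~9KB below a
 * boundary, which can never wrap with sane frame lengths.
 */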
7513
7514 /* Test for DMA addresses > 40-bit */
7515 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7516 int len)
7517 {
7518 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7519 if (tg3_flag(tp, 40BIT_DMA_BUG))
7520 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7521 return 0;
7522 #else
7523 return 0;
7524 #endif
7525 }
7526
7527 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7528 dma_addr_t mapping, u32 len, u32 flags,
7529 u32 mss, u32 vlan)
7530 {
7531 txbd->addr_hi = ((u64) mapping >> 32);
7532 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7533 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7534 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7535 }
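/* Illustrative packing: a 1514-byte frame carrying TXD_FLAG_END and
 * no mss/vlan yields len_flags = (1514 << TXD_LEN_SHIFT) |
 * TXD_FLAG_END and a vlan_tag word of zero.
 */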
7536
7537 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7538 dma_addr_t map, u32 len, u32 flags,
7539 u32 mss, u32 vlan)
7540 {
7541 struct tg3 *tp = tnapi->tp;
7542 bool hwbug = false;
7543
7544 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7545 hwbug = true;
7546
7547 if (tg3_4g_overflow_test(map, len))
7548 hwbug = true;
7549
7550 if (tg3_40bit_overflow_test(tp, map, len))
7551 hwbug = true;
7552
7553 if (tp->dma_limit) {
7554 u32 prvidx = *entry;
7555 u32 tmp_flag = flags & ~TXD_FLAG_END;
7556 while (len > tp->dma_limit && *budget) {
7557 u32 frag_len = tp->dma_limit;
7558 len -= tp->dma_limit;
7559
7560 /* Avoid the 8-byte DMA problem */
7561 if (len <= 8) {
7562 len += tp->dma_limit / 2;
7563 frag_len = tp->dma_limit / 2;
7564 }
7565
7566 tnapi->tx_buffers[*entry].fragmented = true;
7567
7568 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7569 frag_len, tmp_flag, mss, vlan);
7570 *budget -= 1;
7571 prvidx = *entry;
7572 *entry = NEXT_TX(*entry);
7573
7574 map += frag_len;
7575 }
7576
7577 if (len) {
7578 if (*budget) {
7579 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7580 len, flags, mss, vlan);
7581 *budget -= 1;
7582 *entry = NEXT_TX(*entry);
7583 } else {
7584 hwbug = true;
7585 tnapi->tx_buffers[prvidx].fragmented = false;
7586 }
7587 }
7588 } else {
7589 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7590 len, flags, mss, vlan);
7591 *entry = NEXT_TX(*entry);
7592 }
7593
7594 return hwbug;
7595 }
7596
7597 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7598 {
7599 int i;
7600 struct sk_buff *skb;
7601 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7602
7603 skb = txb->skb;
7604 txb->skb = NULL;
7605
7606 pci_unmap_single(tnapi->tp->pdev,
7607 dma_unmap_addr(txb, mapping),
7608 skb_headlen(skb),
7609 PCI_DMA_TODEVICE);
7610
7611 while (txb->fragmented) {
7612 txb->fragmented = false;
7613 entry = NEXT_TX(entry);
7614 txb = &tnapi->tx_buffers[entry];
7615 }
7616
7617 for (i = 0; i <= last; i++) {
7618 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7619
7620 entry = NEXT_TX(entry);
7621 txb = &tnapi->tx_buffers[entry];
7622
7623 pci_unmap_page(tnapi->tp->pdev,
7624 dma_unmap_addr(txb, mapping),
7625 skb_frag_size(frag), PCI_DMA_TODEVICE);
7626
7627 while (txb->fragmented) {
7628 txb->fragmented = false;
7629 entry = NEXT_TX(entry);
7630 txb = &tnapi->tx_buffers[entry];
7631 }
7632 }
7633 }
7634
7635 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7636 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7637 struct sk_buff **pskb,
7638 u32 *entry, u32 *budget,
7639 u32 base_flags, u32 mss, u32 vlan)
7640 {
7641 struct tg3 *tp = tnapi->tp;
7642 struct sk_buff *new_skb, *skb = *pskb;
7643 dma_addr_t new_addr = 0;
7644 int ret = 0;
7645
7646 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7647 new_skb = skb_copy(skb, GFP_ATOMIC);
7648 else {
7649 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7650
7651 new_skb = skb_copy_expand(skb,
7652 skb_headroom(skb) + more_headroom,
7653 skb_tailroom(skb), GFP_ATOMIC);
7654 }
7655
7656 if (!new_skb) {
7657 ret = -1;
7658 } else {
7659 /* New SKB is guaranteed to be linear. */
7660 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7661 PCI_DMA_TODEVICE);
7662 /* Make sure the mapping succeeded */
7663 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7664 dev_kfree_skb(new_skb);
7665 ret = -1;
7666 } else {
7667 u32 save_entry = *entry;
7668
7669 base_flags |= TXD_FLAG_END;
7670
7671 tnapi->tx_buffers[*entry].skb = new_skb;
7672 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7673 mapping, new_addr);
7674
7675 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7676 new_skb->len, base_flags,
7677 mss, vlan)) {
7678 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7679 dev_kfree_skb(new_skb);
7680 ret = -1;
7681 }
7682 }
7683 }
7684
7685 dev_kfree_skb(skb);
7686 *pskb = new_skb;
7687 return ret;
7688 }
7689
7690 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7691
7692 /* Use GSO to work around a rare TSO bug that may be triggered when the
7693 * TSO header is greater than 80 bytes.
7694 */
7695 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7696 {
7697 struct sk_buff *segs, *nskb;
7698 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
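/* (The factor of 3 looks like a conservative per-segment allowance:
 * presumably header data plus a couple of page fragments for each
 * resulting GSO segment.)
 */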
7699
7700 /* Estimate the number of fragments in the worst case */
7701 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7702 netif_stop_queue(tp->dev);
7703
7704 /* netif_tx_stop_queue() must be done before checking
7705 * tx index in tg3_tx_avail() below, because in
7706 * tg3_tx(), we update tx index before checking for
7707 * netif_tx_queue_stopped().
7708 */
7709 smp_mb();
7710 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7711 return NETDEV_TX_BUSY;
7712
7713 netif_wake_queue(tp->dev);
7714 }
7715
7716 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7717 if (IS_ERR(segs))
7718 goto tg3_tso_bug_end;
7719
7720 do {
7721 nskb = segs;
7722 segs = segs->next;
7723 nskb->next = NULL;
7724 tg3_start_xmit(nskb, tp->dev);
7725 } while (segs);
7726
7727 tg3_tso_bug_end:
7728 dev_kfree_skb(skb);
7729
7730 return NETDEV_TX_OK;
7731 }
7732
7733 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7734 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7735 */
7736 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7737 {
7738 struct tg3 *tp = netdev_priv(dev);
7739 u32 len, entry, base_flags, mss, vlan = 0;
7740 u32 budget;
7741 int i = -1, would_hit_hwbug;
7742 dma_addr_t mapping;
7743 struct tg3_napi *tnapi;
7744 struct netdev_queue *txq;
7745 unsigned int last;
7746
7747 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7748 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7749 if (tg3_flag(tp, ENABLE_TSS))
7750 tnapi++;
7751
7752 budget = tg3_tx_avail(tnapi);
7753
7754 /* We are running in BH-disabled context with netif_tx_lock
7755 * and TX reclaim runs via tp->napi.poll inside of a software
7756 * interrupt. Furthermore, IRQ processing runs lockless so we have
7757 * no IRQ context deadlocks to worry about either. Rejoice!
7758 */
7759 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7760 if (!netif_tx_queue_stopped(txq)) {
7761 netif_tx_stop_queue(txq);
7762
7763 /* This is a hard error, log it. */
7764 netdev_err(dev,
7765 "BUG! Tx Ring full when queue awake!\n");
7766 }
7767 return NETDEV_TX_BUSY;
7768 }
7769
7770 entry = tnapi->tx_prod;
7771 base_flags = 0;
7772 if (skb->ip_summed == CHECKSUM_PARTIAL)
7773 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7774
7775 mss = skb_shinfo(skb)->gso_size;
7776 if (mss) {
7777 struct iphdr *iph;
7778 u32 tcp_opt_len, hdr_len;
7779
7780 if (skb_header_cloned(skb) &&
7781 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7782 goto drop;
7783
7784 iph = ip_hdr(skb);
7785 tcp_opt_len = tcp_optlen(skb);
7786
7787 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7788
7789 if (!skb_is_gso_v6(skb)) {
7790 iph->check = 0;
7791 iph->tot_len = htons(mss + hdr_len);
7792 }
7793
7794 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7795 tg3_flag(tp, TSO_BUG))
7796 return tg3_tso_bug(tp, skb);
7797
7798 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7799 TXD_FLAG_CPU_POST_DMA);
7800
7801 if (tg3_flag(tp, HW_TSO_1) ||
7802 tg3_flag(tp, HW_TSO_2) ||
7803 tg3_flag(tp, HW_TSO_3)) {
7804 tcp_hdr(skb)->check = 0;
7805 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7806 } else
7807 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7808 iph->daddr, 0,
7809 IPPROTO_TCP,
7810 0);
7811
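/* On the HW_TSO_2/3 style chips the TSO header length no longer fits
 * in a single field, so its bits are scattered across the mss and
 * base_flags words below.  (This is a reading of the masks; the exact
 * field layout is chip-defined.)
 */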
7812 if (tg3_flag(tp, HW_TSO_3)) {
7813 mss |= (hdr_len & 0xc) << 12;
7814 if (hdr_len & 0x10)
7815 base_flags |= 0x00000010;
7816 base_flags |= (hdr_len & 0x3e0) << 5;
7817 } else if (tg3_flag(tp, HW_TSO_2))
7818 mss |= hdr_len << 9;
7819 else if (tg3_flag(tp, HW_TSO_1) ||
7820 tg3_asic_rev(tp) == ASIC_REV_5705) {
7821 if (tcp_opt_len || iph->ihl > 5) {
7822 int tsflags;
7823
7824 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7825 mss |= (tsflags << 11);
7826 }
7827 } else {
7828 if (tcp_opt_len || iph->ihl > 5) {
7829 int tsflags;
7830
7831 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7832 base_flags |= tsflags << 12;
7833 }
7834 }
7835 }
7836
7837 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7838 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7839 base_flags |= TXD_FLAG_JMB_PKT;
7840
7841 if (vlan_tx_tag_present(skb)) {
7842 base_flags |= TXD_FLAG_VLAN;
7843 vlan = vlan_tx_tag_get(skb);
7844 }
7845
7846 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7847 tg3_flag(tp, TX_TSTAMP_EN)) {
7848 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7849 base_flags |= TXD_FLAG_HWTSTAMP;
7850 }
7851
7852 len = skb_headlen(skb);
7853
7854 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7855 if (pci_dma_mapping_error(tp->pdev, mapping))
7856 goto drop;
7857
7858
7859 tnapi->tx_buffers[entry].skb = skb;
7860 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7861
7862 would_hit_hwbug = 0;
7863
7864 if (tg3_flag(tp, 5701_DMA_BUG))
7865 would_hit_hwbug = 1;
7866
7867 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7868 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7869 mss, vlan)) {
7870 would_hit_hwbug = 1;
7871 } else if (skb_shinfo(skb)->nr_frags > 0) {
7872 u32 tmp_mss = mss;
7873
7874 if (!tg3_flag(tp, HW_TSO_1) &&
7875 !tg3_flag(tp, HW_TSO_2) &&
7876 !tg3_flag(tp, HW_TSO_3))
7877 tmp_mss = 0;
7878
7879 /* Now loop through additional data
7880 * fragments, and queue them.
7881 */
7882 last = skb_shinfo(skb)->nr_frags - 1;
7883 for (i = 0; i <= last; i++) {
7884 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7885
7886 len = skb_frag_size(frag);
7887 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7888 len, DMA_TO_DEVICE);
7889
7890 tnapi->tx_buffers[entry].skb = NULL;
7891 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7892 mapping);
7893 if (dma_mapping_error(&tp->pdev->dev, mapping))
7894 goto dma_error;
7895
7896 if (!budget ||
7897 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7898 len, base_flags |
7899 ((i == last) ? TXD_FLAG_END : 0),
7900 tmp_mss, vlan)) {
7901 would_hit_hwbug = 1;
7902 break;
7903 }
7904 }
7905 }
7906
7907 if (would_hit_hwbug) {
7908 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7909
7910 /* If the workaround fails due to memory/mapping
7911 * failure, silently drop this packet.
7912 */
7913 entry = tnapi->tx_prod;
7914 budget = tg3_tx_avail(tnapi);
7915 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7916 base_flags, mss, vlan))
7917 goto drop_nofree;
7918 }
7919
7920 skb_tx_timestamp(skb);
7921 netdev_tx_sent_queue(txq, skb->len);
7922
7923 /* Sync BD data before updating mailbox */
7924 wmb();
7925
7926 /* Packets are ready, update Tx producer idx local and on card. */
7927 tw32_tx_mbox(tnapi->prodmbox, entry);
7928
7929 tnapi->tx_prod = entry;
7930 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7931 netif_tx_stop_queue(txq);
7932
7933 		/* netif_tx_stop_queue() must be done before checking
7934 		 * tx index in tg3_tx_avail() below, because in
7935 * tg3_tx(), we update tx index before checking for
7936 * netif_tx_queue_stopped().
7937 */
7938 smp_mb();
7939 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7940 netif_tx_wake_queue(txq);
7941 }
7942
7943 mmiowb();
7944 return NETDEV_TX_OK;
7945
7946 dma_error:
7947 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7948 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7949 drop:
7950 dev_kfree_skb(skb);
7951 drop_nofree:
7952 tp->tx_dropped++;
7953 return NETDEV_TX_OK;
7954 }
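
#if 0
/* Illustrative sketch, not driver code: the stop/wake logic at the end
 * of the xmit path above hinges on a ring-occupancy test.  With a
 * power-of-two ring and free-running producer/consumer indices, the
 * usual formulation looks like the helper below (example_tx_avail is a
 * hypothetical name; see tg3_tx_avail() in this file for the driver's
 * own accounting).
 */
static inline u32 example_tx_avail(u32 prod, u32 cons, u32 ring_size)
{
	/* One slot is kept unused so that prod == cons means "empty". */
	return (cons - prod - 1) & (ring_size - 1);
}
#endif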
7955
7956 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7957 {
7958 if (enable) {
7959 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7960 MAC_MODE_PORT_MODE_MASK);
7961
7962 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7963
7964 if (!tg3_flag(tp, 5705_PLUS))
7965 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7966
7967 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7968 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7969 else
7970 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7971 } else {
7972 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7973
7974 if (tg3_flag(tp, 5705_PLUS) ||
7975 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7976 tg3_asic_rev(tp) == ASIC_REV_5700)
7977 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7978 }
7979
7980 tw32(MAC_MODE, tp->mac_mode);
7981 udelay(40);
7982 }
7983
7984 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7985 {
7986 u32 val, bmcr, mac_mode, ptest = 0;
7987
7988 tg3_phy_toggle_apd(tp, false);
7989 tg3_phy_toggle_automdix(tp, false);
7990
7991 if (extlpbk && tg3_phy_set_extloopbk(tp))
7992 return -EIO;
7993
7994 bmcr = BMCR_FULLDPLX;
7995 switch (speed) {
7996 case SPEED_10:
7997 break;
7998 case SPEED_100:
7999 bmcr |= BMCR_SPEED100;
8000 break;
8001 case SPEED_1000:
8002 default:
8003 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8004 speed = SPEED_100;
8005 bmcr |= BMCR_SPEED100;
8006 } else {
8007 speed = SPEED_1000;
8008 bmcr |= BMCR_SPEED1000;
8009 }
8010 }
8011
8012 if (extlpbk) {
8013 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8014 tg3_readphy(tp, MII_CTRL1000, &val);
8015 val |= CTL1000_AS_MASTER |
8016 CTL1000_ENABLE_MASTER;
8017 tg3_writephy(tp, MII_CTRL1000, val);
8018 } else {
8019 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8020 MII_TG3_FET_PTEST_TRIM_2;
8021 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8022 }
8023 } else
8024 bmcr |= BMCR_LOOPBACK;
8025
8026 tg3_writephy(tp, MII_BMCR, bmcr);
8027
8028 /* The write needs to be flushed for the FETs */
8029 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8030 tg3_readphy(tp, MII_BMCR, &bmcr);
8031
8032 udelay(40);
8033
8034 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8035 tg3_asic_rev(tp) == ASIC_REV_5785) {
8036 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8037 MII_TG3_FET_PTEST_FRC_TX_LINK |
8038 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8039
8040 /* The write needs to be flushed for the AC131 */
8041 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8042 }
8043
8044 /* Reset to prevent losing 1st rx packet intermittently */
8045 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8046 tg3_flag(tp, 5780_CLASS)) {
8047 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8048 udelay(10);
8049 tw32_f(MAC_RX_MODE, tp->rx_mode);
8050 }
8051
8052 mac_mode = tp->mac_mode &
8053 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8054 if (speed == SPEED_1000)
8055 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8056 else
8057 mac_mode |= MAC_MODE_PORT_MODE_MII;
8058
8059 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8060 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8061
8062 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8063 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8064 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8065 mac_mode |= MAC_MODE_LINK_POLARITY;
8066
8067 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8068 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8069 }
8070
8071 tw32(MAC_MODE, mac_mode);
8072 udelay(40);
8073
8074 return 0;
8075 }
8076
8077 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8078 {
8079 struct tg3 *tp = netdev_priv(dev);
8080
8081 if (features & NETIF_F_LOOPBACK) {
8082 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8083 return;
8084
8085 spin_lock_bh(&tp->lock);
8086 tg3_mac_loopback(tp, true);
8087 netif_carrier_on(tp->dev);
8088 spin_unlock_bh(&tp->lock);
8089 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8090 } else {
8091 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8092 return;
8093
8094 spin_lock_bh(&tp->lock);
8095 tg3_mac_loopback(tp, false);
8096 /* Force link status check */
8097 tg3_setup_phy(tp, true);
8098 spin_unlock_bh(&tp->lock);
8099 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8100 }
8101 }
8102
8103 static netdev_features_t tg3_fix_features(struct net_device *dev,
8104 netdev_features_t features)
8105 {
8106 struct tg3 *tp = netdev_priv(dev);
8107
8108 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8109 features &= ~NETIF_F_ALL_TSO;
8110
8111 return features;
8112 }
8113
8114 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8115 {
8116 netdev_features_t changed = dev->features ^ features;
8117
8118 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8119 tg3_set_loopback(dev, features);
8120
8121 return 0;
8122 }
8123
8124 static void tg3_rx_prodring_free(struct tg3 *tp,
8125 struct tg3_rx_prodring_set *tpr)
8126 {
8127 int i;
8128
8129 if (tpr != &tp->napi[0].prodring) {
8130 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8131 i = (i + 1) & tp->rx_std_ring_mask)
8132 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8133 tp->rx_pkt_map_sz);
8134
8135 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8136 for (i = tpr->rx_jmb_cons_idx;
8137 i != tpr->rx_jmb_prod_idx;
8138 i = (i + 1) & tp->rx_jmb_ring_mask) {
8139 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8140 TG3_RX_JMB_MAP_SZ);
8141 }
8142 }
8143
8144 return;
8145 }
8146
8147 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8148 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8149 tp->rx_pkt_map_sz);
8150
8151 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8152 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8153 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8154 TG3_RX_JMB_MAP_SZ);
8155 }
8156 }
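
/* Note: the index walks in tg3_rx_prodring_free() wrap with
 * "(i + 1) & mask" rather than a modulo; this is valid because the
 * standard and jumbo rings are power-of-two sized and the masks are
 * (ring size - 1).
 */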
8157
8158 /* Initialize rx rings for packet processing.
8159 *
8160 * The chip has been shut down and the driver detached from
8161  * the networking stack, so no interrupts or new tx packets will
8162 * end up in the driver. tp->{tx,}lock are held and thus
8163 * we may not sleep.
8164 */
8165 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8166 struct tg3_rx_prodring_set *tpr)
8167 {
8168 u32 i, rx_pkt_dma_sz;
8169
8170 tpr->rx_std_cons_idx = 0;
8171 tpr->rx_std_prod_idx = 0;
8172 tpr->rx_jmb_cons_idx = 0;
8173 tpr->rx_jmb_prod_idx = 0;
8174
8175 if (tpr != &tp->napi[0].prodring) {
8176 memset(&tpr->rx_std_buffers[0], 0,
8177 TG3_RX_STD_BUFF_RING_SIZE(tp));
8178 if (tpr->rx_jmb_buffers)
8179 memset(&tpr->rx_jmb_buffers[0], 0,
8180 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8181 goto done;
8182 }
8183
8184 /* Zero out all descriptors. */
8185 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8186
8187 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8188 if (tg3_flag(tp, 5780_CLASS) &&
8189 tp->dev->mtu > ETH_DATA_LEN)
8190 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8191 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8192
8193 /* Initialize invariants of the rings, we only set this
8194 * stuff once. This works because the card does not
8195 * write into the rx buffer posting rings.
8196 */
8197 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8198 struct tg3_rx_buffer_desc *rxd;
8199
8200 rxd = &tpr->rx_std[i];
8201 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8202 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8203 rxd->opaque = (RXD_OPAQUE_RING_STD |
8204 (i << RXD_OPAQUE_INDEX_SHIFT));
8205 }
8206
8207 /* Now allocate fresh SKBs for each rx ring. */
8208 for (i = 0; i < tp->rx_pending; i++) {
8209 unsigned int frag_size;
8210
8211 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8212 &frag_size) < 0) {
8213 netdev_warn(tp->dev,
8214 "Using a smaller RX standard ring. Only "
8215 "%d out of %d buffers were allocated "
8216 "successfully\n", i, tp->rx_pending);
8217 if (i == 0)
8218 goto initfail;
8219 tp->rx_pending = i;
8220 break;
8221 }
8222 }
8223
8224 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8225 goto done;
8226
8227 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8228
8229 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8230 goto done;
8231
8232 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8233 struct tg3_rx_buffer_desc *rxd;
8234
8235 rxd = &tpr->rx_jmb[i].std;
8236 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8237 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8238 RXD_FLAG_JUMBO;
8239 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8240 (i << RXD_OPAQUE_INDEX_SHIFT));
8241 }
8242
8243 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8244 unsigned int frag_size;
8245
8246 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8247 &frag_size) < 0) {
8248 netdev_warn(tp->dev,
8249 "Using a smaller RX jumbo ring. Only %d "
8250 "out of %d buffers were allocated "
8251 "successfully\n", i, tp->rx_jumbo_pending);
8252 if (i == 0)
8253 goto initfail;
8254 tp->rx_jumbo_pending = i;
8255 break;
8256 }
8257 }
8258
8259 done:
8260 return 0;
8261
8262 initfail:
8263 tg3_rx_prodring_free(tp, tpr);
8264 return -ENOMEM;
8265 }
8266
8267 static void tg3_rx_prodring_fini(struct tg3 *tp,
8268 struct tg3_rx_prodring_set *tpr)
8269 {
8270 kfree(tpr->rx_std_buffers);
8271 tpr->rx_std_buffers = NULL;
8272 kfree(tpr->rx_jmb_buffers);
8273 tpr->rx_jmb_buffers = NULL;
8274 if (tpr->rx_std) {
8275 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8276 tpr->rx_std, tpr->rx_std_mapping);
8277 tpr->rx_std = NULL;
8278 }
8279 if (tpr->rx_jmb) {
8280 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8281 tpr->rx_jmb, tpr->rx_jmb_mapping);
8282 tpr->rx_jmb = NULL;
8283 }
8284 }
8285
8286 static int tg3_rx_prodring_init(struct tg3 *tp,
8287 struct tg3_rx_prodring_set *tpr)
8288 {
8289 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8290 GFP_KERNEL);
8291 if (!tpr->rx_std_buffers)
8292 return -ENOMEM;
8293
8294 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8295 TG3_RX_STD_RING_BYTES(tp),
8296 &tpr->rx_std_mapping,
8297 GFP_KERNEL);
8298 if (!tpr->rx_std)
8299 goto err_out;
8300
8301 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8302 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8303 GFP_KERNEL);
8304 if (!tpr->rx_jmb_buffers)
8305 goto err_out;
8306
8307 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8308 TG3_RX_JMB_RING_BYTES(tp),
8309 &tpr->rx_jmb_mapping,
8310 GFP_KERNEL);
8311 if (!tpr->rx_jmb)
8312 goto err_out;
8313 }
8314
8315 return 0;
8316
8317 err_out:
8318 tg3_rx_prodring_fini(tp, tpr);
8319 return -ENOMEM;
8320 }
8321
8322 /* Free up pending packets in all rx/tx rings.
8323 *
8324 * The chip has been shut down and the driver detached from
8325  * the networking stack, so no interrupts or new tx packets will
8326 * end up in the driver. tp->{tx,}lock is not held and we are not
8327 * in an interrupt context and thus may sleep.
8328 */
8329 static void tg3_free_rings(struct tg3 *tp)
8330 {
8331 int i, j;
8332
8333 for (j = 0; j < tp->irq_cnt; j++) {
8334 struct tg3_napi *tnapi = &tp->napi[j];
8335
8336 tg3_rx_prodring_free(tp, &tnapi->prodring);
8337
8338 if (!tnapi->tx_buffers)
8339 continue;
8340
8341 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8342 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8343
8344 if (!skb)
8345 continue;
8346
8347 tg3_tx_skb_unmap(tnapi, i,
8348 skb_shinfo(skb)->nr_frags - 1);
8349
8350 dev_kfree_skb_any(skb);
8351 }
8352 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8353 }
8354 }
8355
8356 /* Initialize tx/rx rings for packet processing.
8357 *
8358 * The chip has been shut down and the driver detached from
8359  * the networking stack, so no interrupts or new tx packets will
8360 * end up in the driver. tp->{tx,}lock are held and thus
8361 * we may not sleep.
8362 */
8363 static int tg3_init_rings(struct tg3 *tp)
8364 {
8365 int i;
8366
8367 /* Free up all the SKBs. */
8368 tg3_free_rings(tp);
8369
8370 for (i = 0; i < tp->irq_cnt; i++) {
8371 struct tg3_napi *tnapi = &tp->napi[i];
8372
8373 tnapi->last_tag = 0;
8374 tnapi->last_irq_tag = 0;
8375 tnapi->hw_status->status = 0;
8376 tnapi->hw_status->status_tag = 0;
8377 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8378
8379 tnapi->tx_prod = 0;
8380 tnapi->tx_cons = 0;
8381 if (tnapi->tx_ring)
8382 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8383
8384 tnapi->rx_rcb_ptr = 0;
8385 if (tnapi->rx_rcb)
8386 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8387
8388 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8389 tg3_free_rings(tp);
8390 return -ENOMEM;
8391 }
8392 }
8393
8394 return 0;
8395 }
8396
8397 static void tg3_mem_tx_release(struct tg3 *tp)
8398 {
8399 int i;
8400
8401 for (i = 0; i < tp->irq_max; i++) {
8402 struct tg3_napi *tnapi = &tp->napi[i];
8403
8404 if (tnapi->tx_ring) {
8405 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8406 tnapi->tx_ring, tnapi->tx_desc_mapping);
8407 tnapi->tx_ring = NULL;
8408 }
8409
8410 kfree(tnapi->tx_buffers);
8411 tnapi->tx_buffers = NULL;
8412 }
8413 }
8414
8415 static int tg3_mem_tx_acquire(struct tg3 *tp)
8416 {
8417 int i;
8418 struct tg3_napi *tnapi = &tp->napi[0];
8419
8420 /* If multivector TSS is enabled, vector 0 does not handle
8421 * tx interrupts. Don't allocate any resources for it.
8422 */
8423 if (tg3_flag(tp, ENABLE_TSS))
8424 tnapi++;
8425
8426 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8427 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8428 TG3_TX_RING_SIZE, GFP_KERNEL);
8429 if (!tnapi->tx_buffers)
8430 goto err_out;
8431
8432 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8433 TG3_TX_RING_BYTES,
8434 &tnapi->tx_desc_mapping,
8435 GFP_KERNEL);
8436 if (!tnapi->tx_ring)
8437 goto err_out;
8438 }
8439
8440 return 0;
8441
8442 err_out:
8443 tg3_mem_tx_release(tp);
8444 return -ENOMEM;
8445 }
8446
8447 static void tg3_mem_rx_release(struct tg3 *tp)
8448 {
8449 int i;
8450
8451 for (i = 0; i < tp->irq_max; i++) {
8452 struct tg3_napi *tnapi = &tp->napi[i];
8453
8454 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8455
8456 if (!tnapi->rx_rcb)
8457 continue;
8458
8459 dma_free_coherent(&tp->pdev->dev,
8460 TG3_RX_RCB_RING_BYTES(tp),
8461 tnapi->rx_rcb,
8462 tnapi->rx_rcb_mapping);
8463 tnapi->rx_rcb = NULL;
8464 }
8465 }
8466
8467 static int tg3_mem_rx_acquire(struct tg3 *tp)
8468 {
8469 unsigned int i, limit;
8470
8471 limit = tp->rxq_cnt;
8472
8473 /* If RSS is enabled, we need a (dummy) producer ring
8474 * set on vector zero. This is the true hw prodring.
8475 */
8476 if (tg3_flag(tp, ENABLE_RSS))
8477 limit++;
8478
8479 for (i = 0; i < limit; i++) {
8480 struct tg3_napi *tnapi = &tp->napi[i];
8481
8482 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8483 goto err_out;
8484
8485 /* If multivector RSS is enabled, vector 0
8486 * does not handle rx or tx interrupts.
8487 * Don't allocate any resources for it.
8488 */
8489 if (!i && tg3_flag(tp, ENABLE_RSS))
8490 continue;
8491
8492 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8493 TG3_RX_RCB_RING_BYTES(tp),
8494 &tnapi->rx_rcb_mapping,
8495 GFP_KERNEL | __GFP_ZERO);
8496 if (!tnapi->rx_rcb)
8497 goto err_out;
8498 }
8499
8500 return 0;
8501
8502 err_out:
8503 tg3_mem_rx_release(tp);
8504 return -ENOMEM;
8505 }
8506
8507 /*
8508 * Must not be invoked with interrupt sources disabled and
8509  * the hardware shut down.
8510 */
8511 static void tg3_free_consistent(struct tg3 *tp)
8512 {
8513 int i;
8514
8515 for (i = 0; i < tp->irq_cnt; i++) {
8516 struct tg3_napi *tnapi = &tp->napi[i];
8517
8518 if (tnapi->hw_status) {
8519 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8520 tnapi->hw_status,
8521 tnapi->status_mapping);
8522 tnapi->hw_status = NULL;
8523 }
8524 }
8525
8526 tg3_mem_rx_release(tp);
8527 tg3_mem_tx_release(tp);
8528
8529 if (tp->hw_stats) {
8530 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8531 tp->hw_stats, tp->stats_mapping);
8532 tp->hw_stats = NULL;
8533 }
8534 }
8535
8536 /*
8537 * Must not be invoked with interrupt sources disabled and
8538  * the hardware shut down. Can sleep.
8539 */
8540 static int tg3_alloc_consistent(struct tg3 *tp)
8541 {
8542 int i;
8543
8544 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8545 sizeof(struct tg3_hw_stats),
8546 &tp->stats_mapping,
8547 GFP_KERNEL | __GFP_ZERO);
8548 if (!tp->hw_stats)
8549 goto err_out;
8550
8551 for (i = 0; i < tp->irq_cnt; i++) {
8552 struct tg3_napi *tnapi = &tp->napi[i];
8553 struct tg3_hw_status *sblk;
8554
8555 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8556 TG3_HW_STATUS_SIZE,
8557 &tnapi->status_mapping,
8558 GFP_KERNEL | __GFP_ZERO);
8559 if (!tnapi->hw_status)
8560 goto err_out;
8561
8562 sblk = tnapi->hw_status;
8563
8564 if (tg3_flag(tp, ENABLE_RSS)) {
8565 u16 *prodptr = NULL;
8566
8567 /*
8568 * When RSS is enabled, the status block format changes
8569 * slightly. The "rx_jumbo_consumer", "reserved",
8570 * and "rx_mini_consumer" members get mapped to the
8571 * other three rx return ring producer indexes.
8572 */
8573 switch (i) {
8574 case 1:
8575 prodptr = &sblk->idx[0].rx_producer;
8576 break;
8577 case 2:
8578 prodptr = &sblk->rx_jumbo_consumer;
8579 break;
8580 case 3:
8581 prodptr = &sblk->reserved;
8582 break;
8583 case 4:
8584 prodptr = &sblk->rx_mini_consumer;
8585 break;
8586 }
8587 tnapi->rx_rcb_prod_idx = prodptr;
8588 } else {
8589 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8590 }
8591 }
8592
8593 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8594 goto err_out;
8595
8596 return 0;
8597
8598 err_out:
8599 tg3_free_consistent(tp);
8600 return -ENOMEM;
8601 }
8602
8603 #define MAX_WAIT_CNT 1000
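/* With udelay(100) per iteration, this bounds each poll loop below to
 * roughly 100 ms.
 */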
8604
8605 /* To stop a block, clear the enable bit and poll till it
8606 * clears. tp->lock is held.
8607 */
8608 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8609 {
8610 unsigned int i;
8611 u32 val;
8612
8613 if (tg3_flag(tp, 5705_PLUS)) {
8614 switch (ofs) {
8615 case RCVLSC_MODE:
8616 case DMAC_MODE:
8617 case MBFREE_MODE:
8618 case BUFMGR_MODE:
8619 case MEMARB_MODE:
8620 /* We can't enable/disable these bits of the
8621 * 5705/5750, just say success.
8622 */
8623 return 0;
8624
8625 default:
8626 break;
8627 }
8628 }
8629
8630 val = tr32(ofs);
8631 val &= ~enable_bit;
8632 tw32_f(ofs, val);
8633
8634 for (i = 0; i < MAX_WAIT_CNT; i++) {
8635 udelay(100);
8636 val = tr32(ofs);
8637 if ((val & enable_bit) == 0)
8638 break;
8639 }
8640
8641 if (i == MAX_WAIT_CNT && !silent) {
8642 dev_err(&tp->pdev->dev,
8643 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8644 ofs, enable_bit);
8645 return -ENODEV;
8646 }
8647
8648 return 0;
8649 }
8650
8651 /* tp->lock is held. */
8652 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8653 {
8654 int i, err;
8655
8656 tg3_disable_ints(tp);
8657
8658 tp->rx_mode &= ~RX_MODE_ENABLE;
8659 tw32_f(MAC_RX_MODE, tp->rx_mode);
8660 udelay(10);
8661
8662 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8663 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8664 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8665 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8666 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8667 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8668
8669 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8670 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8671 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8672 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8673 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8674 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8675 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8676
8677 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8678 tw32_f(MAC_MODE, tp->mac_mode);
8679 udelay(40);
8680
8681 tp->tx_mode &= ~TX_MODE_ENABLE;
8682 tw32_f(MAC_TX_MODE, tp->tx_mode);
8683
8684 for (i = 0; i < MAX_WAIT_CNT; i++) {
8685 udelay(100);
8686 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8687 break;
8688 }
8689 if (i >= MAX_WAIT_CNT) {
8690 dev_err(&tp->pdev->dev,
8691 "%s timed out, TX_MODE_ENABLE will not clear "
8692 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8693 err |= -ENODEV;
8694 }
8695
8696 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8697 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8698 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8699
8700 tw32(FTQ_RESET, 0xffffffff);
8701 tw32(FTQ_RESET, 0x00000000);
8702
8703 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8704 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8705
8706 for (i = 0; i < tp->irq_cnt; i++) {
8707 struct tg3_napi *tnapi = &tp->napi[i];
8708 if (tnapi->hw_status)
8709 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8710 }
8711
8712 return err;
8713 }
8714
8715 /* Save PCI command register before chip reset */
8716 static void tg3_save_pci_state(struct tg3 *tp)
8717 {
8718 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8719 }
8720
8721 /* Restore PCI state after chip reset */
8722 static void tg3_restore_pci_state(struct tg3 *tp)
8723 {
8724 u32 val;
8725
8726 /* Re-enable indirect register accesses. */
8727 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8728 tp->misc_host_ctrl);
8729
8730 /* Set MAX PCI retry to zero. */
8731 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8732 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8733 tg3_flag(tp, PCIX_MODE))
8734 val |= PCISTATE_RETRY_SAME_DMA;
8735 /* Allow reads and writes to the APE register and memory space. */
8736 if (tg3_flag(tp, ENABLE_APE))
8737 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8738 PCISTATE_ALLOW_APE_SHMEM_WR |
8739 PCISTATE_ALLOW_APE_PSPACE_WR;
8740 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8741
8742 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8743
8744 if (!tg3_flag(tp, PCI_EXPRESS)) {
8745 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8746 tp->pci_cacheline_sz);
8747 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8748 tp->pci_lat_timer);
8749 }
8750
8751 /* Make sure PCI-X relaxed ordering bit is clear. */
8752 if (tg3_flag(tp, PCIX_MODE)) {
8753 u16 pcix_cmd;
8754
8755 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8756 &pcix_cmd);
8757 pcix_cmd &= ~PCI_X_CMD_ERO;
8758 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8759 pcix_cmd);
8760 }
8761
8762 if (tg3_flag(tp, 5780_CLASS)) {
8763
8764 /* Chip reset on 5780 will reset MSI enable bit,
8765 		 * so we need to restore it.
8766 */
8767 if (tg3_flag(tp, USING_MSI)) {
8768 u16 ctrl;
8769
8770 pci_read_config_word(tp->pdev,
8771 tp->msi_cap + PCI_MSI_FLAGS,
8772 &ctrl);
8773 pci_write_config_word(tp->pdev,
8774 tp->msi_cap + PCI_MSI_FLAGS,
8775 ctrl | PCI_MSI_FLAGS_ENABLE);
8776 val = tr32(MSGINT_MODE);
8777 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8778 }
8779 }
8780 }
8781
8782 /* tp->lock is held. */
8783 static int tg3_chip_reset(struct tg3 *tp)
8784 {
8785 u32 val;
8786 void (*write_op)(struct tg3 *, u32, u32);
8787 int i, err;
8788
8789 tg3_nvram_lock(tp);
8790
8791 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8792
8793 /* No matching tg3_nvram_unlock() after this because
8794 * chip reset below will undo the nvram lock.
8795 */
8796 tp->nvram_lock_cnt = 0;
8797
8798 /* GRC_MISC_CFG core clock reset will clear the memory
8799 * enable bit in PCI register 4 and the MSI enable bit
8800 * on some chips, so we save relevant registers here.
8801 */
8802 tg3_save_pci_state(tp);
8803
8804 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8805 tg3_flag(tp, 5755_PLUS))
8806 tw32(GRC_FASTBOOT_PC, 0);
8807
8808 /*
8809 * We must avoid the readl() that normally takes place.
8810 * It locks machines, causes machine checks, and other
8811 * fun things. So, temporarily disable the 5701
8812 * hardware workaround, while we do the reset.
8813 */
8814 write_op = tp->write32;
8815 if (write_op == tg3_write_flush_reg32)
8816 tp->write32 = tg3_write32;
8817
8818 /* Prevent the irq handler from reading or writing PCI registers
8819 * during chip reset when the memory enable bit in the PCI command
8820 * register may be cleared. The chip does not generate interrupt
8821 * at this time, but the irq handler may still be called due to irq
8822 * sharing or irqpoll.
8823 */
8824 tg3_flag_set(tp, CHIP_RESETTING);
8825 for (i = 0; i < tp->irq_cnt; i++) {
8826 struct tg3_napi *tnapi = &tp->napi[i];
8827 if (tnapi->hw_status) {
8828 tnapi->hw_status->status = 0;
8829 tnapi->hw_status->status_tag = 0;
8830 }
8831 tnapi->last_tag = 0;
8832 tnapi->last_irq_tag = 0;
8833 }
8834 smp_mb();
8835
8836 for (i = 0; i < tp->irq_cnt; i++)
8837 synchronize_irq(tp->napi[i].irq_vec);
8838
8839 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8840 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8841 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8842 }
8843
8844 /* do the reset */
8845 val = GRC_MISC_CFG_CORECLK_RESET;
8846
8847 if (tg3_flag(tp, PCI_EXPRESS)) {
8848 /* Force PCIe 1.0a mode */
8849 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8850 !tg3_flag(tp, 57765_PLUS) &&
8851 tr32(TG3_PCIE_PHY_TSTCTL) ==
8852 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8853 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8854
8855 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8856 tw32(GRC_MISC_CFG, (1 << 29));
8857 val |= (1 << 29);
8858 }
8859 }
8860
8861 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8862 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8863 tw32(GRC_VCPU_EXT_CTRL,
8864 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8865 }
8866
8867 /* Manage gphy power for all CPMU absent PCIe devices. */
8868 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8869 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8870
8871 tw32(GRC_MISC_CFG, val);
8872
8873 /* restore 5701 hardware bug workaround write method */
8874 tp->write32 = write_op;
8875
8876 /* Unfortunately, we have to delay before the PCI read back.
8877 	 * Some 575X chips will not even respond to a PCI cfg access
8878 * when the reset command is given to the chip.
8879 *
8880 * How do these hardware designers expect things to work
8881 * properly if the PCI write is posted for a long period
8882 * of time? It is always necessary to have some method by
8883 * which a register read back can occur to push the write
8884 * out which does the reset.
8885 *
8886 * For most tg3 variants the trick below was working.
8887 * Ho hum...
8888 */
8889 udelay(120);
8890
8891 /* Flush PCI posted writes. The normal MMIO registers
8892 * are inaccessible at this time so this is the only
8893 	 * way to do this reliably (actually, this is no longer
8894 * the case, see above). I tried to use indirect
8895 * register read/write but this upset some 5701 variants.
8896 */
8897 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8898
8899 udelay(120);
8900
8901 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8902 u16 val16;
8903
8904 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8905 int j;
8906 u32 cfg_val;
8907
8908 /* Wait for link training to complete. */
8909 for (j = 0; j < 5000; j++)
8910 udelay(100);
8911
8912 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8913 pci_write_config_dword(tp->pdev, 0xc4,
8914 cfg_val | (1 << 15));
8915 }
8916
8917 /* Clear the "no snoop" and "relaxed ordering" bits. */
8918 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8919 /*
8920 * Older PCIe devices only support the 128 byte
8921 * MPS setting. Enforce the restriction.
8922 */
8923 if (!tg3_flag(tp, CPMU_PRESENT))
8924 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8925 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8926
8927 /* Clear error status */
8928 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8929 PCI_EXP_DEVSTA_CED |
8930 PCI_EXP_DEVSTA_NFED |
8931 PCI_EXP_DEVSTA_FED |
8932 PCI_EXP_DEVSTA_URD);
8933 }
8934
8935 tg3_restore_pci_state(tp);
8936
8937 tg3_flag_clear(tp, CHIP_RESETTING);
8938 tg3_flag_clear(tp, ERROR_PROCESSED);
8939
8940 val = 0;
8941 if (tg3_flag(tp, 5780_CLASS))
8942 val = tr32(MEMARB_MODE);
8943 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8944
8945 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8946 tg3_stop_fw(tp);
8947 tw32(0x5000, 0x400);
8948 }
8949
8950 if (tg3_flag(tp, IS_SSB_CORE)) {
8951 /*
8952 * BCM4785: In order to avoid repercussions from using
8953 		 * potentially defective internal ROM, stop the Rx RISC CPU
8954 		 * (it is not required for operation).
8955 */
8956 tg3_stop_fw(tp);
8957 tg3_halt_cpu(tp, RX_CPU_BASE);
8958 }
8959
8960 tw32(GRC_MODE, tp->grc_mode);
8961
8962 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8963 val = tr32(0xc4);
8964
8965 tw32(0xc4, val | (1 << 15));
8966 }
8967
8968 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8969 tg3_asic_rev(tp) == ASIC_REV_5705) {
8970 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8971 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8972 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8973 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8974 }
8975
8976 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8977 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8978 val = tp->mac_mode;
8979 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8980 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8981 val = tp->mac_mode;
8982 } else
8983 val = 0;
8984
8985 tw32_f(MAC_MODE, val);
8986 udelay(40);
8987
8988 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8989
8990 err = tg3_poll_fw(tp);
8991 if (err)
8992 return err;
8993
8994 tg3_mdio_start(tp);
8995
8996 if (tg3_flag(tp, PCI_EXPRESS) &&
8997 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8998 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8999 !tg3_flag(tp, 57765_PLUS)) {
9000 val = tr32(0x7c00);
9001
9002 tw32(0x7c00, val | (1 << 25));
9003 }
9004
9005 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9006 val = tr32(TG3_CPMU_CLCK_ORIDE);
9007 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9008 }
9009
9010 /* Reprobe ASF enable state. */
9011 tg3_flag_clear(tp, ENABLE_ASF);
9012 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9013 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9014
9015 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9016 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9017 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9018 u32 nic_cfg;
9019
9020 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9021 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9022 tg3_flag_set(tp, ENABLE_ASF);
9023 tp->last_event_jiffies = jiffies;
9024 if (tg3_flag(tp, 5750_PLUS))
9025 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9026
9027 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9028 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9029 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9030 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9031 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9032 }
9033 }
9034
9035 return 0;
9036 }
9037
9038 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9039 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9040
9041 /* tp->lock is held. */
9042 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9043 {
9044 int err;
9045
9046 tg3_stop_fw(tp);
9047
9048 tg3_write_sig_pre_reset(tp, kind);
9049
9050 tg3_abort_hw(tp, silent);
9051 err = tg3_chip_reset(tp);
9052
9053 __tg3_set_mac_addr(tp, false);
9054
9055 tg3_write_sig_legacy(tp, kind);
9056 tg3_write_sig_post_reset(tp, kind);
9057
9058 if (tp->hw_stats) {
9059 /* Save the stats across chip resets... */
9060 tg3_get_nstats(tp, &tp->net_stats_prev);
9061 tg3_get_estats(tp, &tp->estats_prev);
9062
9063 /* And make sure the next sample is new data */
9064 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9065 }
9066
9067 if (err)
9068 return err;
9069
9070 return 0;
9071 }
9072
9073 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9074 {
9075 struct tg3 *tp = netdev_priv(dev);
9076 struct sockaddr *addr = p;
9077 int err = 0;
9078 bool skip_mac_1 = false;
9079
9080 if (!is_valid_ether_addr(addr->sa_data))
9081 return -EADDRNOTAVAIL;
9082
9083 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9084
9085 if (!netif_running(dev))
9086 return 0;
9087
9088 if (tg3_flag(tp, ENABLE_ASF)) {
9089 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9090
9091 addr0_high = tr32(MAC_ADDR_0_HIGH);
9092 addr0_low = tr32(MAC_ADDR_0_LOW);
9093 addr1_high = tr32(MAC_ADDR_1_HIGH);
9094 addr1_low = tr32(MAC_ADDR_1_LOW);
9095
9096 /* Skip MAC addr 1 if ASF is using it. */
9097 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9098 !(addr1_high == 0 && addr1_low == 0))
9099 skip_mac_1 = true;
9100 }
9101 spin_lock_bh(&tp->lock);
9102 __tg3_set_mac_addr(tp, skip_mac_1);
9103 spin_unlock_bh(&tp->lock);
9104
9105 return err;
9106 }
9107
9108 /* tp->lock is held. */
9109 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9110 dma_addr_t mapping, u32 maxlen_flags,
9111 u32 nic_addr)
9112 {
9113 tg3_write_mem(tp,
9114 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9115 ((u64) mapping >> 32));
9116 tg3_write_mem(tp,
9117 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9118 ((u64) mapping & 0xffffffff));
9119 tg3_write_mem(tp,
9120 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9121 maxlen_flags);
9122
9123 if (!tg3_flag(tp, 5705_PLUS))
9124 tg3_write_mem(tp,
9125 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9126 nic_addr);
9127 }
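
/* BD info blocks live in NIC SRAM and are written one 32-bit word at a
 * time, which is why the 64-bit host DMA address above is split into
 * TG3_64BIT_REG_HIGH and TG3_64BIT_REG_LOW halves.
 */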
9128
9129
9130 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9131 {
9132 int i = 0;
9133
9134 if (!tg3_flag(tp, ENABLE_TSS)) {
9135 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9136 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9137 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9138 } else {
9139 tw32(HOSTCC_TXCOL_TICKS, 0);
9140 tw32(HOSTCC_TXMAX_FRAMES, 0);
9141 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9142
9143 for (; i < tp->txq_cnt; i++) {
9144 u32 reg;
9145
9146 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9147 tw32(reg, ec->tx_coalesce_usecs);
9148 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9149 tw32(reg, ec->tx_max_coalesced_frames);
9150 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9151 tw32(reg, ec->tx_max_coalesced_frames_irq);
9152 }
9153 }
9154
9155 for (; i < tp->irq_max - 1; i++) {
9156 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9157 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9158 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9159 }
9160 }
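
/* Each extra host-coalescing vector owns a register block starting at
 * the *_VEC1 offsets, 0x18 bytes apart; vectors beyond the configured
 * queue count are written with zeros to disable coalescing on them.
 */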
9161
9162 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9163 {
9164 int i = 0;
9165 u32 limit = tp->rxq_cnt;
9166
9167 if (!tg3_flag(tp, ENABLE_RSS)) {
9168 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9169 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9170 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9171 limit--;
9172 } else {
9173 tw32(HOSTCC_RXCOL_TICKS, 0);
9174 tw32(HOSTCC_RXMAX_FRAMES, 0);
9175 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9176 }
9177
9178 for (; i < limit; i++) {
9179 u32 reg;
9180
9181 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9182 tw32(reg, ec->rx_coalesce_usecs);
9183 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9184 tw32(reg, ec->rx_max_coalesced_frames);
9185 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9186 tw32(reg, ec->rx_max_coalesced_frames_irq);
9187 }
9188
9189 for (; i < tp->irq_max - 1; i++) {
9190 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9191 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9192 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9193 }
9194 }
9195
9196 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9197 {
9198 tg3_coal_tx_init(tp, ec);
9199 tg3_coal_rx_init(tp, ec);
9200
9201 if (!tg3_flag(tp, 5705_PLUS)) {
9202 u32 val = ec->stats_block_coalesce_usecs;
9203
9204 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9205 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9206
9207 if (!tp->link_up)
9208 val = 0;
9209
9210 tw32(HOSTCC_STAT_COAL_TICKS, val);
9211 }
9212 }
9213
9214 /* tp->lock is held. */
9215 static void tg3_rings_reset(struct tg3 *tp)
9216 {
9217 int i;
9218 u32 stblk, txrcb, rxrcb, limit;
9219 struct tg3_napi *tnapi = &tp->napi[0];
9220
9221 /* Disable all transmit rings but the first. */
9222 if (!tg3_flag(tp, 5705_PLUS))
9223 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9224 else if (tg3_flag(tp, 5717_PLUS))
9225 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9226 else if (tg3_flag(tp, 57765_CLASS) ||
9227 tg3_asic_rev(tp) == ASIC_REV_5762)
9228 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9229 else
9230 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9231
9232 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9233 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9234 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9235 BDINFO_FLAGS_DISABLED);
9236
9237
9238 /* Disable all receive return rings but the first. */
9239 if (tg3_flag(tp, 5717_PLUS))
9240 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9241 else if (!tg3_flag(tp, 5705_PLUS))
9242 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9243 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9244 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9245 tg3_flag(tp, 57765_CLASS))
9246 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9247 else
9248 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9249
9250 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9251 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9252 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9253 BDINFO_FLAGS_DISABLED);
9254
9255 /* Disable interrupts */
9256 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9257 tp->napi[0].chk_msi_cnt = 0;
9258 tp->napi[0].last_rx_cons = 0;
9259 tp->napi[0].last_tx_cons = 0;
9260
9261 /* Zero mailbox registers. */
9262 if (tg3_flag(tp, SUPPORT_MSIX)) {
9263 for (i = 1; i < tp->irq_max; i++) {
9264 tp->napi[i].tx_prod = 0;
9265 tp->napi[i].tx_cons = 0;
9266 if (tg3_flag(tp, ENABLE_TSS))
9267 tw32_mailbox(tp->napi[i].prodmbox, 0);
9268 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9269 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9270 tp->napi[i].chk_msi_cnt = 0;
9271 tp->napi[i].last_rx_cons = 0;
9272 tp->napi[i].last_tx_cons = 0;
9273 }
9274 if (!tg3_flag(tp, ENABLE_TSS))
9275 tw32_mailbox(tp->napi[0].prodmbox, 0);
9276 } else {
9277 tp->napi[0].tx_prod = 0;
9278 tp->napi[0].tx_cons = 0;
9279 tw32_mailbox(tp->napi[0].prodmbox, 0);
9280 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9281 }
9282
9283 /* Make sure the NIC-based send BD rings are disabled. */
9284 if (!tg3_flag(tp, 5705_PLUS)) {
9285 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9286 for (i = 0; i < 16; i++)
9287 tw32_tx_mbox(mbox + i * 8, 0);
9288 }
9289
9290 txrcb = NIC_SRAM_SEND_RCB;
9291 rxrcb = NIC_SRAM_RCV_RET_RCB;
9292
9293 /* Clear status block in ram. */
9294 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9295
9296 /* Set status block DMA address */
9297 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9298 ((u64) tnapi->status_mapping >> 32));
9299 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9300 ((u64) tnapi->status_mapping & 0xffffffff));
9301
9302 if (tnapi->tx_ring) {
9303 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9304 (TG3_TX_RING_SIZE <<
9305 BDINFO_FLAGS_MAXLEN_SHIFT),
9306 NIC_SRAM_TX_BUFFER_DESC);
9307 txrcb += TG3_BDINFO_SIZE;
9308 }
9309
9310 if (tnapi->rx_rcb) {
9311 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9312 (tp->rx_ret_ring_mask + 1) <<
9313 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9314 rxrcb += TG3_BDINFO_SIZE;
9315 }
9316
9317 stblk = HOSTCC_STATBLCK_RING1;
9318
9319 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9320 u64 mapping = (u64)tnapi->status_mapping;
9321 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9322 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9323
9324 /* Clear status block in ram. */
9325 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9326
9327 if (tnapi->tx_ring) {
9328 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9329 (TG3_TX_RING_SIZE <<
9330 BDINFO_FLAGS_MAXLEN_SHIFT),
9331 NIC_SRAM_TX_BUFFER_DESC);
9332 txrcb += TG3_BDINFO_SIZE;
9333 }
9334
9335 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9336 ((tp->rx_ret_ring_mask + 1) <<
9337 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9338
9339 stblk += 8;
9340 rxrcb += TG3_BDINFO_SIZE;
9341 }
9342 }
9343
9344 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9345 {
9346 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9347
9348 if (!tg3_flag(tp, 5750_PLUS) ||
9349 tg3_flag(tp, 5780_CLASS) ||
9350 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9351 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9352 tg3_flag(tp, 57765_PLUS))
9353 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9354 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9355 tg3_asic_rev(tp) == ASIC_REV_5787)
9356 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9357 else
9358 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9359
9360 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9361 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9362
9363 val = min(nic_rep_thresh, host_rep_thresh);
9364 tw32(RCVBDI_STD_THRESH, val);
9365
9366 if (tg3_flag(tp, 57765_PLUS))
9367 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9368
9369 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9370 return;
9371
9372 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9373
9374 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9375
9376 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9377 tw32(RCVBDI_JUMBO_THRESH, val);
9378
9379 if (tg3_flag(tp, 57765_PLUS))
9380 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9381 }
9382
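/* Bit-serial CRC-32 in the reflected form used by IEEE 802.3
 * (polynomial 0xedb88320); the multicast hash filter below indexes
 * off this value.
 */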
9383 static inline u32 calc_crc(unsigned char *buf, int len)
9384 {
9385 u32 reg;
9386 u32 tmp;
9387 int j, k;
9388
9389 reg = 0xffffffff;
9390
9391 for (j = 0; j < len; j++) {
9392 reg ^= buf[j];
9393
9394 for (k = 0; k < 8; k++) {
9395 tmp = reg & 0x01;
9396
9397 reg >>= 1;
9398
9399 if (tmp)
9400 reg ^= 0xedb88320;
9401 }
9402 }
9403
9404 return ~reg;
9405 }
9406
9407 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9408 {
9409 /* accept or reject all multicast frames */
9410 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9411 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9412 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9413 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9414 }
9415
9416 static void __tg3_set_rx_mode(struct net_device *dev)
9417 {
9418 struct tg3 *tp = netdev_priv(dev);
9419 u32 rx_mode;
9420
9421 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9422 RX_MODE_KEEP_VLAN_TAG);
9423
9424 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9425 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9426 * flag clear.
9427 */
9428 if (!tg3_flag(tp, ENABLE_ASF))
9429 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9430 #endif
9431
9432 if (dev->flags & IFF_PROMISC) {
9433 /* Promiscuous mode. */
9434 rx_mode |= RX_MODE_PROMISC;
9435 } else if (dev->flags & IFF_ALLMULTI) {
9436 /* Accept all multicast. */
9437 tg3_set_multi(tp, 1);
9438 } else if (netdev_mc_empty(dev)) {
9439 /* Reject all multicast. */
9440 tg3_set_multi(tp, 0);
9441 } else {
9442 /* Accept one or more multicast(s). */
9443 struct netdev_hw_addr *ha;
9444 u32 mc_filter[4] = { 0, };
9445 u32 regidx;
9446 u32 bit;
9447 u32 crc;
9448
9449 netdev_for_each_mc_addr(ha, dev) {
9450 crc = calc_crc(ha->addr, ETH_ALEN);
9451 bit = ~crc & 0x7f;
9452 regidx = (bit & 0x60) >> 5;
9453 bit &= 0x1f;
9454 mc_filter[regidx] |= (1 << bit);
9455 }
9456
9457 tw32(MAC_HASH_REG_0, mc_filter[0]);
9458 tw32(MAC_HASH_REG_1, mc_filter[1]);
9459 tw32(MAC_HASH_REG_2, mc_filter[2]);
9460 tw32(MAC_HASH_REG_3, mc_filter[3]);
9461 }
9462
9463 if (rx_mode != tp->rx_mode) {
9464 tp->rx_mode = rx_mode;
9465 tw32_f(MAC_RX_MODE, rx_mode);
9466 udelay(10);
9467 }
9468 }
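
#if 0
/* Illustrative sketch, not driver code: how a multicast address maps
 * onto the 128-bit hash filter programmed above.  The low 7 bits of
 * the complemented CRC select one of 128 filter bits spread across the
 * four 32-bit MAC_HASH_REG_x registers (example_hash_position is a
 * hypothetical helper mirroring the arithmetic in __tg3_set_rx_mode).
 */
static void example_hash_position(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bit = ~crc & 0x7f;		/* filter bit 0..127 */

	*regidx = (bit & 0x60) >> 5;	/* which of the 4 hash registers */
	*bitpos = bit & 0x1f;		/* which bit within that register */
}
#endif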
9469
9470 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9471 {
9472 int i;
9473
9474 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9475 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9476 }
9477
9478 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9479 {
9480 int i;
9481
9482 if (!tg3_flag(tp, SUPPORT_MSIX))
9483 return;
9484
9485 if (tp->rxq_cnt == 1) {
9486 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9487 return;
9488 }
9489
9490 /* Validate table against current IRQ count */
9491 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9492 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9493 break;
9494 }
9495
9496 if (i != TG3_RSS_INDIR_TBL_SIZE)
9497 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9498 }
9499
9500 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9501 {
9502 int i = 0;
9503 u32 reg = MAC_RSS_INDIR_TBL_0;
9504
9505 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9506 u32 val = tp->rss_ind_tbl[i];
9507 i++;
9508 for (; i % 8; i++) {
9509 val <<= 4;
9510 val |= tp->rss_ind_tbl[i];
9511 }
9512 tw32(reg, val);
9513 reg += 4;
9514 }
9515 }
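
#if 0
/* Illustrative sketch, not driver code: tg3_rss_write_indir_tbl()
 * packs eight 4-bit ring indices per 32-bit register, first entry in
 * the most significant nibble.  Unpacking one register value would
 * look like this (example_rss_unpack is a hypothetical helper).
 */
static void example_rss_unpack(u32 val, u8 out[8])
{
	int k;

	for (k = 0; k < 8; k++)
		out[k] = (val >> (28 - 4 * k)) & 0xf;
}
#endif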
9516
9517 /* tp->lock is held. */
9518 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9519 {
9520 u32 val, rdmac_mode;
9521 int i, err, limit;
9522 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9523
9524 tg3_disable_ints(tp);
9525
9526 tg3_stop_fw(tp);
9527
9528 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9529
9530 if (tg3_flag(tp, INIT_COMPLETE))
9531 		tg3_abort_hw(tp, true);
9532
9533 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9534 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9535 tg3_phy_pull_config(tp);
9536 tg3_eee_pull_config(tp, NULL);
9537 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9538 }
9539
9540 /* Enable MAC control of LPI */
9541 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9542 tg3_setup_eee(tp);
9543
9544 if (reset_phy)
9545 tg3_phy_reset(tp);
9546
9547 err = tg3_chip_reset(tp);
9548 if (err)
9549 return err;
9550
9551 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9552
9553 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9554 val = tr32(TG3_CPMU_CTRL);
9555 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9556 tw32(TG3_CPMU_CTRL, val);
9557
9558 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9559 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9560 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9561 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9562
9563 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9564 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9565 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9566 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9567
9568 val = tr32(TG3_CPMU_HST_ACC);
9569 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9570 val |= CPMU_HST_ACC_MACCLK_6_25;
9571 tw32(TG3_CPMU_HST_ACC, val);
9572 }
9573
9574 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9575 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9576 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9577 PCIE_PWR_MGMT_L1_THRESH_4MS;
9578 tw32(PCIE_PWR_MGMT_THRESH, val);
9579
9580 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9581 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9582
9583 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9584
9585 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9586 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9587 }
9588
9589 if (tg3_flag(tp, L1PLLPD_EN)) {
9590 u32 grc_mode = tr32(GRC_MODE);
9591
9592 /* Access the lower 1K of PL PCIE block registers. */
9593 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9594 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9595
9596 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9597 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9598 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9599
9600 tw32(GRC_MODE, grc_mode);
9601 }
9602
9603 if (tg3_flag(tp, 57765_CLASS)) {
9604 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9605 u32 grc_mode = tr32(GRC_MODE);
9606
9607 /* Access the lower 1K of PL PCIE block registers. */
9608 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9609 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9610
9611 val = tr32(TG3_PCIE_TLDLPL_PORT +
9612 TG3_PCIE_PL_LO_PHYCTL5);
9613 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9614 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9615
9616 tw32(GRC_MODE, grc_mode);
9617 }
9618
9619 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9620 u32 grc_mode;
9621
9622 /* Fix transmit hangs */
9623 val = tr32(TG3_CPMU_PADRNG_CTL);
9624 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9625 tw32(TG3_CPMU_PADRNG_CTL, val);
9626
9627 grc_mode = tr32(GRC_MODE);
9628
9629 /* Access the lower 1K of DL PCIE block registers. */
9630 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9631 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9632
9633 val = tr32(TG3_PCIE_TLDLPL_PORT +
9634 TG3_PCIE_DL_LO_FTSMAX);
9635 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9636 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9637 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9638
9639 tw32(GRC_MODE, grc_mode);
9640 }
9641
9642 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9643 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9644 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9645 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9646 }
9647
9648 /* This works around an issue with Athlon chipsets on
9649 * B3 tigon3 silicon. This bit has no effect on any
9650 * other revision. But do not set this on PCI Express
9651 * chips and don't even touch the clocks if the CPMU is present.
9652 */
9653 if (!tg3_flag(tp, CPMU_PRESENT)) {
9654 if (!tg3_flag(tp, PCI_EXPRESS))
9655 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9656 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9657 }
9658
9659 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9660 tg3_flag(tp, PCIX_MODE)) {
9661 val = tr32(TG3PCI_PCISTATE);
9662 val |= PCISTATE_RETRY_SAME_DMA;
9663 tw32(TG3PCI_PCISTATE, val);
9664 }
9665
9666 if (tg3_flag(tp, ENABLE_APE)) {
9667 /* Allow reads and writes to the
9668 * APE register and memory space.
9669 */
9670 val = tr32(TG3PCI_PCISTATE);
9671 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9672 PCISTATE_ALLOW_APE_SHMEM_WR |
9673 PCISTATE_ALLOW_APE_PSPACE_WR;
9674 tw32(TG3PCI_PCISTATE, val);
9675 }
9676
9677 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9678 /* Enable some hw fixes. */
9679 val = tr32(TG3PCI_MSI_DATA);
9680 val |= (1 << 26) | (1 << 28) | (1 << 29);
9681 tw32(TG3PCI_MSI_DATA, val);
9682 }
9683
9684 /* Descriptor ring init may make accesses to the
9685 	 * NIC SRAM area to set up the TX descriptors, so we
9686 * can only do this after the hardware has been
9687 * successfully reset.
9688 */
9689 err = tg3_init_rings(tp);
9690 if (err)
9691 return err;
9692
9693 if (tg3_flag(tp, 57765_PLUS)) {
9694 val = tr32(TG3PCI_DMA_RW_CTRL) &
9695 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9696 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9697 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9698 if (!tg3_flag(tp, 57765_CLASS) &&
9699 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9700 tg3_asic_rev(tp) != ASIC_REV_5762)
9701 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9702 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9703 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9704 tg3_asic_rev(tp) != ASIC_REV_5761) {
9705 /* This value is determined during the probe time DMA
9706 * engine test, tg3_test_dma.
9707 */
9708 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9709 }
9710
9711 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9712 GRC_MODE_4X_NIC_SEND_RINGS |
9713 GRC_MODE_NO_TX_PHDR_CSUM |
9714 GRC_MODE_NO_RX_PHDR_CSUM);
9715 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9716
9717 /* Pseudo-header checksum is done by hardware logic and not
9718 	 * the offload processors, so make the chip do the pseudo-
9719 * header checksums on receive. For transmit it is more
9720 * convenient to do the pseudo-header checksum in software
9721 * as Linux does that on transmit for us in all cases.
9722 */
9723 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9724
9725 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9726 if (tp->rxptpctl)
9727 tw32(TG3_RX_PTP_CTL,
9728 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9729
9730 if (tg3_flag(tp, PTP_CAPABLE))
9731 val |= GRC_MODE_TIME_SYNC_ENABLE;
9732
9733 tw32(GRC_MODE, tp->grc_mode | val);
9734
9735 	/* Set up the timer prescaler register. Clock is always 66 MHz. */
9736 val = tr32(GRC_MISC_CFG);
9737 val &= ~0xff;
9738 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9739 tw32(GRC_MISC_CFG, val);
9740
9741 /* Initialize MBUF/DESC pool. */
9742 if (tg3_flag(tp, 5750_PLUS)) {
9743 /* Do nothing. */
9744 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9745 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9746 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9747 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9748 else
9749 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9750 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9751 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9752 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9753 int fw_len;
9754
9755 fw_len = tp->fw_len;
9756 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9757 tw32(BUFMGR_MB_POOL_ADDR,
9758 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9759 tw32(BUFMGR_MB_POOL_SIZE,
9760 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9761 }
9762
9763 if (tp->dev->mtu <= ETH_DATA_LEN) {
9764 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9765 tp->bufmgr_config.mbuf_read_dma_low_water);
9766 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9767 tp->bufmgr_config.mbuf_mac_rx_low_water);
9768 tw32(BUFMGR_MB_HIGH_WATER,
9769 tp->bufmgr_config.mbuf_high_water);
9770 } else {
9771 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9772 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9773 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9774 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9775 tw32(BUFMGR_MB_HIGH_WATER,
9776 tp->bufmgr_config.mbuf_high_water_jumbo);
9777 }
9778 tw32(BUFMGR_DMA_LOW_WATER,
9779 tp->bufmgr_config.dma_low_water);
9780 tw32(BUFMGR_DMA_HIGH_WATER,
9781 tp->bufmgr_config.dma_high_water);
9782
9783 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9784 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9785 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9786 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9787 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9788 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9789 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9790 tw32(BUFMGR_MODE, val);
9791 for (i = 0; i < 2000; i++) {
9792 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9793 break;
9794 udelay(10);
9795 }
9796 if (i >= 2000) {
9797 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9798 return -ENODEV;
9799 }
9800
9801 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9802 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9803
9804 tg3_setup_rxbd_thresholds(tp);
9805
9806 /* Initialize TG3_BDINFO's at:
9807 * RCVDBDI_STD_BD: standard eth size rx ring
9808 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9809 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9810 *
9811 * like so:
9812 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9813 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9814 * ring attribute flags
9815 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9816 *
9817 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9818 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9819 *
9820 * The size of each ring is fixed in the firmware, but the location is
9821 * configurable.
9822 */
9823 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9824 ((u64) tpr->rx_std_mapping >> 32));
9825 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9826 ((u64) tpr->rx_std_mapping & 0xffffffff));
9827 if (!tg3_flag(tp, 5717_PLUS))
9828 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9829 NIC_SRAM_RX_BUFFER_DESC);
9830
9831 /* Disable the mini ring */
9832 if (!tg3_flag(tp, 5705_PLUS))
9833 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9834 BDINFO_FLAGS_DISABLED);
9835
9836 /* Program the jumbo buffer descriptor ring control
9837 * blocks on those devices that have them.
9838 */
9839 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9840 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9841
9842 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9843 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9844 ((u64) tpr->rx_jmb_mapping >> 32));
9845 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9846 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9847 val = TG3_RX_JMB_RING_SIZE(tp) <<
9848 BDINFO_FLAGS_MAXLEN_SHIFT;
9849 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9850 val | BDINFO_FLAGS_USE_EXT_RECV);
9851 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9852 tg3_flag(tp, 57765_CLASS) ||
9853 tg3_asic_rev(tp) == ASIC_REV_5762)
9854 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9855 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9856 } else {
9857 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9858 BDINFO_FLAGS_DISABLED);
9859 }
9860
9861 if (tg3_flag(tp, 57765_PLUS)) {
9862 val = TG3_RX_STD_RING_SIZE(tp);
9863 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9864 val |= (TG3_RX_STD_DMA_SZ << 2);
9865 } else
9866 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9867 } else
9868 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9869
9870 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9871
9872 tpr->rx_std_prod_idx = tp->rx_pending;
9873 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9874
9875 tpr->rx_jmb_prod_idx =
9876 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9877 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9878
9879 tg3_rings_reset(tp);
9880
9881 /* Initialize MAC address and backoff seed. */
9882 __tg3_set_mac_addr(tp, false);
9883
9884 /* MTU + ethernet header + FCS + optional VLAN tag */
9885 tw32(MAC_RX_MTU_SIZE,
9886 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9887
9888 /* The slot time is changed by tg3_setup_phy if we
9889 * run at gigabit with half duplex.
9890 */
9891 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9892 (6 << TX_LENGTHS_IPG_SHIFT) |
9893 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9894
9895 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9896 tg3_asic_rev(tp) == ASIC_REV_5762)
9897 val |= tr32(MAC_TX_LENGTHS) &
9898 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9899 TX_LENGTHS_CNT_DWN_VAL_MSK);
9900
9901 tw32(MAC_TX_LENGTHS, val);
9902
9903 /* Receive rules. */
9904 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9905 tw32(RCVLPC_CONFIG, 0x0181);
9906
9907 /* Calculate RDMAC_MODE setting early, we need it to determine
9908 * the RCVLPC_STATE_ENABLE mask.
9909 */
9910 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9911 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9912 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9913 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9914 RDMAC_MODE_LNGREAD_ENAB);
9915
9916 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9917 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9918
9919 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9920 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9921 tg3_asic_rev(tp) == ASIC_REV_57780)
9922 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9923 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9924 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9925
9926 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9927 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9928 if (tg3_flag(tp, TSO_CAPABLE) &&
9929 tg3_asic_rev(tp) == ASIC_REV_5705) {
9930 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9931 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9932 !tg3_flag(tp, IS_5788)) {
9933 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9934 }
9935 }
9936
9937 if (tg3_flag(tp, PCI_EXPRESS))
9938 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9939
9940 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9941 tp->dma_limit = 0;
9942 if (tp->dev->mtu <= ETH_DATA_LEN) {
9943 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9944 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9945 }
9946 }
9947
9948 if (tg3_flag(tp, HW_TSO_1) ||
9949 tg3_flag(tp, HW_TSO_2) ||
9950 tg3_flag(tp, HW_TSO_3))
9951 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9952
9953 if (tg3_flag(tp, 57765_PLUS) ||
9954 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9955 tg3_asic_rev(tp) == ASIC_REV_57780)
9956 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9957
9958 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9959 tg3_asic_rev(tp) == ASIC_REV_5762)
9960 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9961
9962 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9963 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9964 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9965 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9966 tg3_flag(tp, 57765_PLUS)) {
9967 u32 tgtreg;
9968
9969 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9970 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9971 else
9972 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9973
9974 val = tr32(tgtreg);
9975 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9976 tg3_asic_rev(tp) == ASIC_REV_5762) {
9977 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9978 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9979 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9980 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9981 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9982 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9983 }
9984 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9985 }
9986
9987 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9988 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9989 tg3_asic_rev(tp) == ASIC_REV_5762) {
9990 u32 tgtreg;
9991
9992 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9993 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9994 else
9995 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9996
9997 val = tr32(tgtreg);
9998 tw32(tgtreg, val |
9999 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10000 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10001 }
10002
10003 /* Receive/send statistics. */
10004 if (tg3_flag(tp, 5750_PLUS)) {
10005 val = tr32(RCVLPC_STATS_ENABLE);
10006 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10007 tw32(RCVLPC_STATS_ENABLE, val);
10008 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10009 tg3_flag(tp, TSO_CAPABLE)) {
10010 val = tr32(RCVLPC_STATS_ENABLE);
10011 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10012 tw32(RCVLPC_STATS_ENABLE, val);
10013 } else {
10014 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10015 }
10016 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10017 tw32(SNDDATAI_STATSENAB, 0xffffff);
10018 tw32(SNDDATAI_STATSCTRL,
10019 (SNDDATAI_SCTRL_ENABLE |
10020 SNDDATAI_SCTRL_FASTUPD));
10021
10022 /* Setup host coalescing engine. */
10023 tw32(HOSTCC_MODE, 0);
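	/* Writing 0 asks the coalescing engine to stop; poll for up
	 * to 20 ms until the enable bit clears before reprogramming.
	 */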
10024 for (i = 0; i < 2000; i++) {
10025 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10026 break;
10027 udelay(10);
10028 }
10029
10030 __tg3_set_coalesce(tp, &tp->coal);
10031
10032 if (!tg3_flag(tp, 5705_PLUS)) {
10033 /* Status/statistics block address. See tg3_timer,
10034 * the tg3_periodic_fetch_stats call there, and
10035 * tg3_get_stats to see how this works for 5705/5750 chips.
10036 */
10037 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10038 ((u64) tp->stats_mapping >> 32));
10039 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10040 ((u64) tp->stats_mapping & 0xffffffff));
10041 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10042
10043 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10044
10045 /* Clear statistics and status block memory areas */
10046 for (i = NIC_SRAM_STATS_BLK;
10047 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10048 i += sizeof(u32)) {
10049 tg3_write_mem(tp, i, 0);
10050 udelay(40);
10051 }
10052 }
10053
10054 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10055
10056 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10057 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10058 if (!tg3_flag(tp, 5705_PLUS))
10059 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10060
10061 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10062 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10063 /* reset to prevent losing 1st rx packet intermittently */
10064 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10065 udelay(10);
10066 }
10067
10068 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10069 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10070 MAC_MODE_FHDE_ENABLE;
10071 if (tg3_flag(tp, ENABLE_APE))
10072 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10073 if (!tg3_flag(tp, 5705_PLUS) &&
10074 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10075 tg3_asic_rev(tp) != ASIC_REV_5700)
10076 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10077 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10078 udelay(40);
10079
10080 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10081 * If TG3_FLAG_IS_NIC is zero, we should read the
10082 * register to preserve the GPIO settings for LOMs. The GPIOs,
10083 * whether used as inputs or outputs, are set by boot code after
10084 * reset.
10085 */
10086 if (!tg3_flag(tp, IS_NIC)) {
10087 u32 gpio_mask;
10088
10089 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10090 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10091 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10092
10093 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10094 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10095 GRC_LCLCTRL_GPIO_OUTPUT3;
10096
10097 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10098 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10099
10100 tp->grc_local_ctrl &= ~gpio_mask;
10101 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10102
10103 /* GPIO1 must be driven high for eeprom write protect */
10104 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10105 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10106 GRC_LCLCTRL_GPIO_OUTPUT1);
10107 }
10108 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10109 udelay(100);
10110
10111 if (tg3_flag(tp, USING_MSIX)) {
10112 val = tr32(MSGINT_MODE);
10113 val |= MSGINT_MODE_ENABLE;
10114 if (tp->irq_cnt > 1)
10115 val |= MSGINT_MODE_MULTIVEC_EN;
10116 if (!tg3_flag(tp, 1SHOT_MSI))
10117 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10118 tw32(MSGINT_MODE, val);
10119 }
10120
10121 if (!tg3_flag(tp, 5705_PLUS)) {
10122 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10123 udelay(40);
10124 }
10125
10126 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10127 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10128 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10129 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10130 WDMAC_MODE_LNGREAD_ENAB);
10131
10132 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10133 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10134 if (tg3_flag(tp, TSO_CAPABLE) &&
10135 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10136 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10137 /* nothing */
10138 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10139 !tg3_flag(tp, IS_5788)) {
10140 val |= WDMAC_MODE_RX_ACCEL;
10141 }
10142 }
10143
10144 /* Enable host coalescing bug fix */
10145 if (tg3_flag(tp, 5755_PLUS))
10146 val |= WDMAC_MODE_STATUS_TAG_FIX;
10147
10148 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10149 val |= WDMAC_MODE_BURST_ALL_DATA;
10150
10151 tw32_f(WDMAC_MODE, val);
10152 udelay(40);
10153
10154 if (tg3_flag(tp, PCIX_MODE)) {
10155 u16 pcix_cmd;
10156
10157 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10158 &pcix_cmd);
10159 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10160 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10161 pcix_cmd |= PCI_X_CMD_READ_2K;
10162 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10163 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10164 pcix_cmd |= PCI_X_CMD_READ_2K;
10165 }
10166 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10167 pcix_cmd);
10168 }
10169
10170 tw32_f(RDMAC_MODE, rdmac_mode);
10171 udelay(40);
10172
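	/* 5719 workaround: if any read DMA channel is holding a request
	 * length larger than the current MTU, enable the TX length
	 * workaround bit; tg3_periodic_fetch_stats() undoes it after
	 * the first few TX packets have gone out.
	 */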
10173 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
10174 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10175 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10176 break;
10177 }
10178 if (i < TG3_NUM_RDMA_CHANNELS) {
10179 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10180 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
10181 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10182 tg3_flag_set(tp, 5719_RDMA_BUG);
10183 }
10184 }
10185
10186 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10187 if (!tg3_flag(tp, 5705_PLUS))
10188 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10189
10190 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10191 tw32(SNDDATAC_MODE,
10192 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10193 else
10194 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10195
10196 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10197 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10198 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10199 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10200 val |= RCVDBDI_MODE_LRG_RING_SZ;
10201 tw32(RCVDBDI_MODE, val);
10202 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10203 if (tg3_flag(tp, HW_TSO_1) ||
10204 tg3_flag(tp, HW_TSO_2) ||
10205 tg3_flag(tp, HW_TSO_3))
10206 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10207 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10208 if (tg3_flag(tp, ENABLE_TSS))
10209 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10210 tw32(SNDBDI_MODE, val);
10211 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10212
10213 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10214 err = tg3_load_5701_a0_firmware_fix(tp);
10215 if (err)
10216 return err;
10217 }
10218
10219 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10220 /* Ignore any errors for the firmware download. If download
10221 * fails, the device will operate with EEE disabled.
10222 */
10223 tg3_load_57766_firmware(tp);
10224 }
10225
10226 if (tg3_flag(tp, TSO_CAPABLE)) {
10227 err = tg3_load_tso_firmware(tp);
10228 if (err)
10229 return err;
10230 }
10231
10232 tp->tx_mode = TX_MODE_ENABLE;
10233
10234 if (tg3_flag(tp, 5755_PLUS) ||
10235 tg3_asic_rev(tp) == ASIC_REV_5906)
10236 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10237
10238 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10239 tg3_asic_rev(tp) == ASIC_REV_5762) {
10240 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10241 tp->tx_mode &= ~val;
10242 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10243 }
10244
10245 tw32_f(MAC_TX_MODE, tp->tx_mode);
10246 udelay(100);
10247
10248 if (tg3_flag(tp, ENABLE_RSS)) {
10249 tg3_rss_write_indir_tbl(tp);
10250
10251 /* Setup the "secret" hash key. */
10252 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10253 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10254 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10255 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10256 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10257 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10258 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10259 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10260 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10261 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10262 }
10263
10264 tp->rx_mode = RX_MODE_ENABLE;
10265 if (tg3_flag(tp, 5755_PLUS))
10266 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10267
10268 if (tg3_flag(tp, ENABLE_RSS))
10269 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10270 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10271 RX_MODE_RSS_IPV6_HASH_EN |
10272 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10273 RX_MODE_RSS_IPV4_HASH_EN |
10274 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10275
10276 tw32_f(MAC_RX_MODE, tp->rx_mode);
10277 udelay(10);
10278
10279 tw32(MAC_LED_CTRL, tp->led_ctrl);
10280
10281 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10282 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10283 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10284 udelay(10);
10285 }
10286 tw32_f(MAC_RX_MODE, tp->rx_mode);
10287 udelay(10);
10288
10289 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10290 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10291 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10292 /* Set drive transmission level to 1.2V */
10293 /* only if the signal pre-emphasis bit is not set */
10294 val = tr32(MAC_SERDES_CFG);
10295 val &= 0xfffff000;
10296 val |= 0x880;
10297 tw32(MAC_SERDES_CFG, val);
10298 }
10299 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10300 tw32(MAC_SERDES_CFG, 0x616000);
10301 }
10302
10303 /* Prevent chip from dropping frames when flow control
10304 * is enabled.
10305 */
10306 if (tg3_flag(tp, 57765_CLASS))
10307 val = 1;
10308 else
10309 val = 2;
10310 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10311
10312 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10313 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10314 /* Use hardware link auto-negotiation */
10315 tg3_flag_set(tp, HW_AUTONEG);
10316 }
10317
10318 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10319 tg3_asic_rev(tp) == ASIC_REV_5714) {
10320 u32 tmp;
10321
10322 tmp = tr32(SERDES_RX_CTRL);
10323 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10324 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10325 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10326 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10327 }
10328
10329 if (!tg3_flag(tp, USE_PHYLIB)) {
10330 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10331 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10332
10333 err = tg3_setup_phy(tp, false);
10334 if (err)
10335 return err;
10336
10337 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10338 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10339 u32 tmp;
10340
10341 /* Clear CRC stats. */
10342 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10343 tg3_writephy(tp, MII_TG3_TEST1,
10344 tmp | MII_TG3_TEST1_CRC_EN);
10345 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10346 }
10347 }
10348 }
10349
10350 __tg3_set_rx_mode(tp->dev);
10351
10352 /* Initialize receive rules. */
10353 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10354 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10355 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10356 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10357
10358 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10359 limit = 8;
10360 else
10361 limit = 16;
10362 if (tg3_flag(tp, ENABLE_ASF))
10363 limit -= 4;
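	/* The switch below deliberately falls through, zeroing receive
	 * rules 'limit - 1' down to 4.  Rules at and above 'limit' are
	 * left for firmware (ASF claims the top four), rules 0 and 1
	 * were programmed above, and rules 2 and 3 are intentionally
	 * left untouched.
	 */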
10364 switch (limit) {
10365 case 16:
10366 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10367 case 15:
10368 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10369 case 14:
10370 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10371 case 13:
10372 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10373 case 12:
10374 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10375 case 11:
10376 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10377 case 10:
10378 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10379 case 9:
10380 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10381 case 8:
10382 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10383 case 7:
10384 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10385 case 6:
10386 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10387 case 5:
10388 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10389 case 4:
10390 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10391 case 3:
10392 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10393 case 2:
10394 case 1:
10395
10396 default:
10397 break;
10398 }
10399
10400 if (tg3_flag(tp, ENABLE_APE))
10401 /* Write our heartbeat update interval to the APE (disabled here). */
10402 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10403 APE_HOST_HEARTBEAT_INT_DISABLE);
10404
10405 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10406
10407 return 0;
10408 }
10409
10410 /* Called at device open time to get the chip ready for
10411 * packet processing. Invoked with tp->lock held.
10412 */
10413 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10414 {
10415 tg3_switch_clocks(tp);
10416
10417 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10418
10419 return tg3_reset_hw(tp, reset_phy);
10420 }
10421
10422 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10423 {
10424 int i;
10425
10426 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10427 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10428
10429 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10430 off += len;
10431
10432 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10433 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10434 memset(ocir, 0, TG3_OCIR_LEN);
10435 }
10436 }
10437
10438 /* sysfs attributes for hwmon */
10439 static ssize_t tg3_show_temp(struct device *dev,
10440 struct device_attribute *devattr, char *buf)
10441 {
10442 struct pci_dev *pdev = to_pci_dev(dev);
10443 struct net_device *netdev = pci_get_drvdata(pdev);
10444 struct tg3 *tp = netdev_priv(netdev);
10445 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10446 u32 temperature;
10447
10448 spin_lock_bh(&tp->lock);
10449 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10450 sizeof(temperature));
10451 spin_unlock_bh(&tp->lock);
10452 return sprintf(buf, "%u\n", temperature);
10453 }
10454
10455
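/* Each attribute's index is the APE scratchpad offset at which the
 * corresponding temperature value is stored.
 */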
10456 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10457 TG3_TEMP_SENSOR_OFFSET);
10458 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10459 TG3_TEMP_CAUTION_OFFSET);
10460 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10461 TG3_TEMP_MAX_OFFSET);
10462
10463 static struct attribute *tg3_attributes[] = {
10464 &sensor_dev_attr_temp1_input.dev_attr.attr,
10465 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10466 &sensor_dev_attr_temp1_max.dev_attr.attr,
10467 NULL
10468 };
10469
10470 static const struct attribute_group tg3_group = {
10471 .attrs = tg3_attributes,
10472 };
10473
10474 static void tg3_hwmon_close(struct tg3 *tp)
10475 {
10476 if (tp->hwmon_dev) {
10477 hwmon_device_unregister(tp->hwmon_dev);
10478 tp->hwmon_dev = NULL;
10479 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10480 }
10481 }
10482
10483 static void tg3_hwmon_open(struct tg3 *tp)
10484 {
10485 int i, err;
10486 u32 size = 0;
10487 struct pci_dev *pdev = tp->pdev;
10488 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10489
10490 tg3_sd_scan_scratchpad(tp, ocirs);
10491
10492 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10493 if (!ocirs[i].src_data_length)
10494 continue;
10495
10496 size += ocirs[i].src_hdr_length;
10497 size += ocirs[i].src_data_length;
10498 }
10499
10500 if (!size)
10501 return;
10502
10503 /* Register hwmon sysfs hooks */
10504 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10505 if (err) {
10506 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10507 return;
10508 }
10509
10510 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10511 if (IS_ERR(tp->hwmon_dev)) {
10512 tp->hwmon_dev = NULL;
10513 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10514 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10515 }
10516 }
10517
10518
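/* Fold a 32-bit hardware statistics register into a 64-bit (high/low)
 * software counter.  If the low word wraps below the value just added,
 * an overflow occurred and a carry is propagated into the high word.
 */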
10519 #define TG3_STAT_ADD32(PSTAT, REG) \
10520 do { u32 __val = tr32(REG); \
10521 (PSTAT)->low += __val; \
10522 if ((PSTAT)->low < __val) \
10523 (PSTAT)->high += 1; \
10524 } while (0)
10525
10526 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10527 {
10528 struct tg3_hw_stats *sp = tp->hw_stats;
10529
10530 if (!tp->link_up)
10531 return;
10532
10533 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10534 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10535 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10536 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10537 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10538 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10539 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10540 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10541 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10542 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10543 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10544 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10545 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10546 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10547 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10548 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10549 u32 val;
10550
10551 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10552 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10553 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10554 tg3_flag_clear(tp, 5719_RDMA_BUG);
10555 }
10556
10557 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10558 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10559 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10560 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10561 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10562 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10563 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10564 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10565 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10566 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10567 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10568 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10569 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10570 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10571
10572 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10573 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10574 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10575 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10576 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10577 } else {
10578 u32 val = tr32(HOSTCC_FLOW_ATTN);
10579 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10580 if (val) {
10581 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10582 sp->rx_discards.low += val;
10583 if (sp->rx_discards.low < val)
10584 sp->rx_discards.high += 1;
10585 }
10586 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10587 }
10588 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10589 }
10590
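/* Work around occasionally dropped MSIs: if a vector reports pending
 * work but its rx/tx consumer indices have not moved since the last
 * tick, allow one grace tick (chk_msi_cnt) and then invoke the MSI
 * handler by hand.
 */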
10591 static void tg3_chk_missed_msi(struct tg3 *tp)
10592 {
10593 u32 i;
10594
10595 for (i = 0; i < tp->irq_cnt; i++) {
10596 struct tg3_napi *tnapi = &tp->napi[i];
10597
10598 if (tg3_has_work(tnapi)) {
10599 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10600 tnapi->last_tx_cons == tnapi->tx_cons) {
10601 if (tnapi->chk_msi_cnt < 1) {
10602 tnapi->chk_msi_cnt++;
10603 return;
10604 }
10605 tg3_msi(0, tnapi);
10606 }
10607 }
10608 tnapi->chk_msi_cnt = 0;
10609 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10610 tnapi->last_tx_cons = tnapi->tx_cons;
10611 }
10612 }
10613
10614 static void tg3_timer(unsigned long __opaque)
10615 {
10616 struct tg3 *tp = (struct tg3 *) __opaque;
10617
10618 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10619 goto restart_timer;
10620
10621 spin_lock(&tp->lock);
10622
10623 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10624 tg3_flag(tp, 57765_CLASS))
10625 tg3_chk_missed_msi(tp);
10626
10627 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10628 /* BCM4785: Flush posted writes from GbE to host memory. */
10629 tr32(HOSTCC_MODE);
10630 }
10631
10632 if (!tg3_flag(tp, TAGGED_STATUS)) {
10633 /* All of this garbage is because, when using non-tagged
10634 * IRQ status, the mailbox/status_block protocol the chip
10635 * uses with the CPU is race prone.
10636 */
10637 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10638 tw32(GRC_LOCAL_CTRL,
10639 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10640 } else {
10641 tw32(HOSTCC_MODE, tp->coalesce_mode |
10642 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10643 }
10644
10645 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10646 spin_unlock(&tp->lock);
10647 tg3_reset_task_schedule(tp);
10648 goto restart_timer;
10649 }
10650 }
10651
10652 /* This part only runs once per second. */
10653 if (!--tp->timer_counter) {
10654 if (tg3_flag(tp, 5705_PLUS))
10655 tg3_periodic_fetch_stats(tp);
10656
10657 if (tp->setlpicnt && !--tp->setlpicnt)
10658 tg3_phy_eee_enable(tp);
10659
10660 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10661 u32 mac_stat;
10662 int phy_event;
10663
10664 mac_stat = tr32(MAC_STATUS);
10665
10666 phy_event = 0;
10667 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10668 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10669 phy_event = 1;
10670 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10671 phy_event = 1;
10672
10673 if (phy_event)
10674 tg3_setup_phy(tp, false);
10675 } else if (tg3_flag(tp, POLL_SERDES)) {
10676 u32 mac_stat = tr32(MAC_STATUS);
10677 int need_setup = 0;
10678
10679 if (tp->link_up &&
10680 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10681 need_setup = 1;
10682 }
10683 if (!tp->link_up &&
10684 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10685 MAC_STATUS_SIGNAL_DET))) {
10686 need_setup = 1;
10687 }
10688 if (need_setup) {
10689 if (!tp->serdes_counter) {
10690 tw32_f(MAC_MODE,
10691 (tp->mac_mode &
10692 ~MAC_MODE_PORT_MODE_MASK));
10693 udelay(40);
10694 tw32_f(MAC_MODE, tp->mac_mode);
10695 udelay(40);
10696 }
10697 tg3_setup_phy(tp, false);
10698 }
10699 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10700 tg3_flag(tp, 5780_CLASS)) {
10701 tg3_serdes_parallel_detect(tp);
10702 }
10703
10704 tp->timer_counter = tp->timer_multiplier;
10705 }
10706
10707 /* Heartbeat is only sent once every 2 seconds.
10708 *
10709 * The heartbeat is to tell the ASF firmware that the host
10710 * driver is still alive. In the event that the OS crashes,
10711 * ASF needs to reset the hardware to free up the FIFO space
10712 * that may be filled with rx packets destined for the host.
10713 * If the FIFO is full, ASF will no longer function properly.
10714 *
10715 * Unintended resets have been reported on real time kernels
10716 * where the timer doesn't run on time. Netpoll will also have
10717 * the same problem.
10718 *
10719 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10720 * to check the ring condition when the heartbeat is expiring
10721 * before doing the reset. This will prevent most unintended
10722 * resets.
10723 */
10724 if (!--tp->asf_counter) {
10725 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10726 tg3_wait_for_event_ack(tp);
10727
10728 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10729 FWCMD_NICDRV_ALIVE3);
10730 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10731 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10732 TG3_FW_UPDATE_TIMEOUT_SEC);
10733
10734 tg3_generate_fw_event(tp);
10735 }
10736 tp->asf_counter = tp->asf_multiplier;
10737 }
10738
10739 spin_unlock(&tp->lock);
10740
10741 restart_timer:
10742 tp->timer.expires = jiffies + tp->timer_offset;
10743 add_timer(&tp->timer);
10744 }
10745
10746 static void tg3_timer_init(struct tg3 *tp)
10747 {
10748 if (tg3_flag(tp, TAGGED_STATUS) &&
10749 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10750 !tg3_flag(tp, 57765_CLASS))
10751 tp->timer_offset = HZ;
10752 else
10753 tp->timer_offset = HZ / 10;
10754
10755 BUG_ON(tp->timer_offset > HZ);
10756
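	/* timer_multiplier converts the tick rate back into once-per-
	 * second work in tg3_timer(); e.g. a tick every HZ / 10 jiffies
	 * yields a multiplier of 10.
	 */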
10757 tp->timer_multiplier = (HZ / tp->timer_offset);
10758 tp->asf_multiplier = (HZ / tp->timer_offset) *
10759 TG3_FW_UPDATE_FREQ_SEC;
10760
10761 init_timer(&tp->timer);
10762 tp->timer.data = (unsigned long) tp;
10763 tp->timer.function = tg3_timer;
10764 }
10765
10766 static void tg3_timer_start(struct tg3 *tp)
10767 {
10768 tp->asf_counter = tp->asf_multiplier;
10769 tp->timer_counter = tp->timer_multiplier;
10770
10771 tp->timer.expires = jiffies + tp->timer_offset;
10772 add_timer(&tp->timer);
10773 }
10774
10775 static void tg3_timer_stop(struct tg3 *tp)
10776 {
10777 del_timer_sync(&tp->timer);
10778 }
10779
10780 /* Restart hardware after configuration changes, self-test, etc.
10781 * Invoked with tp->lock held.
10782 */
10783 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10784 __releases(tp->lock)
10785 __acquires(tp->lock)
10786 {
10787 int err;
10788
10789 err = tg3_init_hw(tp, reset_phy);
10790 if (err) {
10791 netdev_err(tp->dev,
10792 "Failed to re-initialize device, aborting\n");
10793 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10794 tg3_full_unlock(tp);
10795 tg3_timer_stop(tp);
10796 tp->irq_sync = 0;
10797 tg3_napi_enable(tp);
10798 dev_close(tp->dev);
10799 tg3_full_lock(tp, 0);
10800 }
10801 return err;
10802 }
10803
10804 static void tg3_reset_task(struct work_struct *work)
10805 {
10806 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10807 int err;
10808
10809 tg3_full_lock(tp, 0);
10810
10811 if (!netif_running(tp->dev)) {
10812 tg3_flag_clear(tp, RESET_TASK_PENDING);
10813 tg3_full_unlock(tp);
10814 return;
10815 }
10816
10817 tg3_full_unlock(tp);
10818
10819 tg3_phy_stop(tp);
10820
10821 tg3_netif_stop(tp);
10822
10823 tg3_full_lock(tp, 1);
10824
10825 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10826 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10827 tp->write32_rx_mbox = tg3_write_flush_reg32;
10828 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10829 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10830 }
10831
10832 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10833 err = tg3_init_hw(tp, true);
10834 if (err)
10835 goto out;
10836
10837 tg3_netif_start(tp);
10838
10839 out:
10840 tg3_full_unlock(tp);
10841
10842 if (!err)
10843 tg3_phy_start(tp);
10844
10845 tg3_flag_clear(tp, RESET_TASK_PENDING);
10846 }
10847
10848 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10849 {
10850 irq_handler_t fn;
10851 unsigned long flags;
10852 char *name;
10853 struct tg3_napi *tnapi = &tp->napi[irq_num];
10854
10855 if (tp->irq_cnt == 1)
10856 name = tp->dev->name;
10857 else {
10858 name = &tnapi->irq_lbl[0];
10859 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10860 name[IFNAMSIZ-1] = 0;
10861 }
10862
10863 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10864 fn = tg3_msi;
10865 if (tg3_flag(tp, 1SHOT_MSI))
10866 fn = tg3_msi_1shot;
10867 flags = 0;
10868 } else {
10869 fn = tg3_interrupt;
10870 if (tg3_flag(tp, TAGGED_STATUS))
10871 fn = tg3_interrupt_tagged;
10872 flags = IRQF_SHARED;
10873 }
10874
10875 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10876 }
10877
10878 static int tg3_test_interrupt(struct tg3 *tp)
10879 {
10880 struct tg3_napi *tnapi = &tp->napi[0];
10881 struct net_device *dev = tp->dev;
10882 int err, i, intr_ok = 0;
10883 u32 val;
10884
10885 if (!netif_running(dev))
10886 return -ENODEV;
10887
10888 tg3_disable_ints(tp);
10889
10890 free_irq(tnapi->irq_vec, tnapi);
10891
10892 /*
10893 * Turn off MSI one shot mode. Otherwise this test has no
10894 * observable way to know whether the interrupt was delivered.
10895 */
10896 if (tg3_flag(tp, 57765_PLUS)) {
10897 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10898 tw32(MSGINT_MODE, val);
10899 }
10900
10901 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10902 IRQF_SHARED, dev->name, tnapi);
10903 if (err)
10904 return err;
10905
10906 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10907 tg3_enable_ints(tp);
10908
10909 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10910 tnapi->coal_now);
10911
10912 for (i = 0; i < 5; i++) {
10913 u32 int_mbox, misc_host_ctrl;
10914
10915 int_mbox = tr32_mailbox(tnapi->int_mbox);
10916 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10917
10918 if ((int_mbox != 0) ||
10919 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10920 intr_ok = 1;
10921 break;
10922 }
10923
10924 if (tg3_flag(tp, 57765_PLUS) &&
10925 tnapi->hw_status->status_tag != tnapi->last_tag)
10926 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10927
10928 msleep(10);
10929 }
10930
10931 tg3_disable_ints(tp);
10932
10933 free_irq(tnapi->irq_vec, tnapi);
10934
10935 err = tg3_request_irq(tp, 0);
10936
10937 if (err)
10938 return err;
10939
10940 if (intr_ok) {
10941 /* Reenable MSI one shot mode. */
10942 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10943 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10944 tw32(MSGINT_MODE, val);
10945 }
10946 return 0;
10947 }
10948
10949 return -EIO;
10950 }
10951
10952 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
10953 * but INTx mode is successfully restored.
10954 */
10955 static int tg3_test_msi(struct tg3 *tp)
10956 {
10957 int err;
10958 u16 pci_cmd;
10959
10960 if (!tg3_flag(tp, USING_MSI))
10961 return 0;
10962
10963 /* Turn off SERR reporting in case MSI terminates with Master
10964 * Abort.
10965 */
10966 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10967 pci_write_config_word(tp->pdev, PCI_COMMAND,
10968 pci_cmd & ~PCI_COMMAND_SERR);
10969
10970 err = tg3_test_interrupt(tp);
10971
10972 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10973
10974 if (!err)
10975 return 0;
10976
10977 /* other failures */
10978 if (err != -EIO)
10979 return err;
10980
10981 /* MSI test failed, go back to INTx mode */
10982 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10983 "to INTx mode. Please report this failure to the PCI "
10984 "maintainer and include system chipset information\n");
10985
10986 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10987
10988 pci_disable_msi(tp->pdev);
10989
10990 tg3_flag_clear(tp, USING_MSI);
10991 tp->napi[0].irq_vec = tp->pdev->irq;
10992
10993 err = tg3_request_irq(tp, 0);
10994 if (err)
10995 return err;
10996
10997 /* Need to reset the chip because the MSI cycle may have terminated
10998 * with Master Abort.
10999 */
11000 tg3_full_lock(tp, 1);
11001
11002 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11003 err = tg3_init_hw(tp, true);
11004
11005 tg3_full_unlock(tp);
11006
11007 if (err)
11008 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11009
11010 return err;
11011 }
11012
11013 static int tg3_request_firmware(struct tg3 *tp)
11014 {
11015 const struct tg3_firmware_hdr *fw_hdr;
11016
11017 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11018 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11019 tp->fw_needed);
11020 return -ENOENT;
11021 }
11022
11023 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11024
11025 /* Firmware blob starts with version numbers, followed by
11026 * start address and _full_ length including BSS sections
11027 * (which must be longer than the actual data, of course).
11028 */
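/* For reference, the header parsed below (struct tg3_firmware_hdr in
 * tg3.h) is laid out roughly as:
 *
 *	__be32 version;    - packed major/minor version
 *	__be32 base_addr;  - load address in NIC memory
 *	__be32 len;        - full image length, including BSS
 */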
11029
11030 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11031 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11032 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11033 tp->fw_len, tp->fw_needed);
11034 release_firmware(tp->fw);
11035 tp->fw = NULL;
11036 return -EINVAL;
11037 }
11038
11039 /* We no longer need firmware; we have it. */
11040 tp->fw_needed = NULL;
11041 return 0;
11042 }
11043
11044 static u32 tg3_irq_count(struct tg3 *tp)
11045 {
11046 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11047
11048 if (irq_cnt > 1) {
11049 /* We want as many rx rings enabled as there are cpus.
11050 * In multiqueue MSI-X mode, the first MSI-X vector
11051 * only deals with link interrupts, etc, so we add
11052 * one to the number of vectors we are requesting.
11053 */
11054 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11055 }
11056
11057 return irq_cnt;
11058 }
11059
11060 static bool tg3_enable_msix(struct tg3 *tp)
11061 {
11062 int i, rc;
11063 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11064
11065 tp->txq_cnt = tp->txq_req;
11066 tp->rxq_cnt = tp->rxq_req;
11067 if (!tp->rxq_cnt)
11068 tp->rxq_cnt = netif_get_num_default_rss_queues();
11069 if (tp->rxq_cnt > tp->rxq_max)
11070 tp->rxq_cnt = tp->rxq_max;
11071
11072 /* Disable multiple TX rings by default. Simple round-robin hardware
11073 * scheduling of the TX rings can cause starvation of rings with
11074 * small packets when other rings have TSO or jumbo packets.
11075 */
11076 if (!tp->txq_req)
11077 tp->txq_cnt = 1;
11078
11079 tp->irq_cnt = tg3_irq_count(tp);
11080
11081 for (i = 0; i < tp->irq_max; i++) {
11082 msix_ent[i].entry = i;
11083 msix_ent[i].vector = 0;
11084 }
11085
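	/* pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or, if fewer vectors are available than requested,
	 * the number it can provide; in that last case, retry with the
	 * smaller count.
	 */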
11086 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11087 if (rc < 0) {
11088 return false;
11089 } else if (rc != 0) {
11090 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11091 return false;
11092 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11093 tp->irq_cnt, rc);
11094 tp->irq_cnt = rc;
11095 tp->rxq_cnt = max(rc - 1, 1);
11096 if (tp->txq_cnt)
11097 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11098 }
11099
11100 for (i = 0; i < tp->irq_max; i++)
11101 tp->napi[i].irq_vec = msix_ent[i].vector;
11102
11103 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11104 pci_disable_msix(tp->pdev);
11105 return false;
11106 }
11107
11108 if (tp->irq_cnt == 1)
11109 return true;
11110
11111 tg3_flag_set(tp, ENABLE_RSS);
11112
11113 if (tp->txq_cnt > 1)
11114 tg3_flag_set(tp, ENABLE_TSS);
11115
11116 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11117
11118 return true;
11119 }
11120
11121 static void tg3_ints_init(struct tg3 *tp)
11122 {
11123 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11124 !tg3_flag(tp, TAGGED_STATUS)) {
11125 /* All MSI supporting chips should support tagged
11126 * status. Assert that this is the case.
11127 */
11128 netdev_warn(tp->dev,
11129 "MSI without TAGGED_STATUS? Not using MSI\n");
11130 goto defcfg;
11131 }
11132
11133 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11134 tg3_flag_set(tp, USING_MSIX);
11135 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11136 tg3_flag_set(tp, USING_MSI);
11137
11138 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11139 u32 msi_mode = tr32(MSGINT_MODE);
11140 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11141 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11142 if (!tg3_flag(tp, 1SHOT_MSI))
11143 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11144 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11145 }
11146 defcfg:
11147 if (!tg3_flag(tp, USING_MSIX)) {
11148 tp->irq_cnt = 1;
11149 tp->napi[0].irq_vec = tp->pdev->irq;
11150 }
11151
11152 if (tp->irq_cnt == 1) {
11153 tp->txq_cnt = 1;
11154 tp->rxq_cnt = 1;
11155 netif_set_real_num_tx_queues(tp->dev, 1);
11156 netif_set_real_num_rx_queues(tp->dev, 1);
11157 }
11158 }
11159
11160 static void tg3_ints_fini(struct tg3 *tp)
11161 {
11162 if (tg3_flag(tp, USING_MSIX))
11163 pci_disable_msix(tp->pdev);
11164 else if (tg3_flag(tp, USING_MSI))
11165 pci_disable_msi(tp->pdev);
11166 tg3_flag_clear(tp, USING_MSI);
11167 tg3_flag_clear(tp, USING_MSIX);
11168 tg3_flag_clear(tp, ENABLE_RSS);
11169 tg3_flag_clear(tp, ENABLE_TSS);
11170 }
11171
11172 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11173 bool init)
11174 {
11175 struct net_device *dev = tp->dev;
11176 int i, err;
11177
11178 /*
11179 * Set up interrupts first so we know how
11180 * many NAPI resources to allocate
11181 */
11182 tg3_ints_init(tp);
11183
11184 tg3_rss_check_indir_tbl(tp);
11185
11186 /* The placement of this call is tied
11187 * to the setup and use of Host TX descriptors.
11188 */
11189 err = tg3_alloc_consistent(tp);
11190 if (err)
11191 goto err_out1;
11192
11193 tg3_napi_init(tp);
11194
11195 tg3_napi_enable(tp);
11196
11197 for (i = 0; i < tp->irq_cnt; i++) {
11198 struct tg3_napi *tnapi = &tp->napi[i];
11199 err = tg3_request_irq(tp, i);
11200 if (err) {
11201 for (i--; i >= 0; i--) {
11202 tnapi = &tp->napi[i];
11203 free_irq(tnapi->irq_vec, tnapi);
11204 }
11205 goto err_out2;
11206 }
11207 }
11208
11209 tg3_full_lock(tp, 0);
11210
11211 err = tg3_init_hw(tp, reset_phy);
11212 if (err) {
11213 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11214 tg3_free_rings(tp);
11215 }
11216
11217 tg3_full_unlock(tp);
11218
11219 if (err)
11220 goto err_out3;
11221
11222 if (test_irq && tg3_flag(tp, USING_MSI)) {
11223 err = tg3_test_msi(tp);
11224
11225 if (err) {
11226 tg3_full_lock(tp, 0);
11227 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11228 tg3_free_rings(tp);
11229 tg3_full_unlock(tp);
11230
11231 goto err_out2;
11232 }
11233
11234 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11235 u32 val = tr32(PCIE_TRANSACTION_CFG);
11236
11237 tw32(PCIE_TRANSACTION_CFG,
11238 val | PCIE_TRANS_CFG_1SHOT_MSI);
11239 }
11240 }
11241
11242 tg3_phy_start(tp);
11243
11244 tg3_hwmon_open(tp);
11245
11246 tg3_full_lock(tp, 0);
11247
11248 tg3_timer_start(tp);
11249 tg3_flag_set(tp, INIT_COMPLETE);
11250 tg3_enable_ints(tp);
11251
11252 if (init)
11253 tg3_ptp_init(tp);
11254 else
11255 tg3_ptp_resume(tp);
11256
11257
11258 tg3_full_unlock(tp);
11259
11260 netif_tx_start_all_queues(dev);
11261
11262 /*
11263 * Reset the loopback feature if it was turned on while the device
11264 * was down, to make sure that it's installed properly now.
11265 */
11266 if (dev->features & NETIF_F_LOOPBACK)
11267 tg3_set_loopback(dev, dev->features);
11268
11269 return 0;
11270
11271 err_out3:
11272 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11273 struct tg3_napi *tnapi = &tp->napi[i];
11274 free_irq(tnapi->irq_vec, tnapi);
11275 }
11276
11277 err_out2:
11278 tg3_napi_disable(tp);
11279 tg3_napi_fini(tp);
11280 tg3_free_consistent(tp);
11281
11282 err_out1:
11283 tg3_ints_fini(tp);
11284
11285 return err;
11286 }
11287
11288 static void tg3_stop(struct tg3 *tp)
11289 {
11290 int i;
11291
11292 tg3_reset_task_cancel(tp);
11293 tg3_netif_stop(tp);
11294
11295 tg3_timer_stop(tp);
11296
11297 tg3_hwmon_close(tp);
11298
11299 tg3_phy_stop(tp);
11300
11301 tg3_full_lock(tp, 1);
11302
11303 tg3_disable_ints(tp);
11304
11305 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11306 tg3_free_rings(tp);
11307 tg3_flag_clear(tp, INIT_COMPLETE);
11308
11309 tg3_full_unlock(tp);
11310
11311 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11312 struct tg3_napi *tnapi = &tp->napi[i];
11313 free_irq(tnapi->irq_vec, tnapi);
11314 }
11315
11316 tg3_ints_fini(tp);
11317
11318 tg3_napi_fini(tp);
11319
11320 tg3_free_consistent(tp);
11321 }
11322
11323 static int tg3_open(struct net_device *dev)
11324 {
11325 struct tg3 *tp = netdev_priv(dev);
11326 int err;
11327
11328 if (tp->fw_needed) {
11329 err = tg3_request_firmware(tp);
11330 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11331 if (err) {
11332 netdev_warn(tp->dev, "EEE capability disabled\n");
11333 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11334 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11335 netdev_warn(tp->dev, "EEE capability restored\n");
11336 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11337 }
11338 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11339 if (err)
11340 return err;
11341 } else if (err) {
11342 netdev_warn(tp->dev, "TSO capability disabled\n");
11343 tg3_flag_clear(tp, TSO_CAPABLE);
11344 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11345 netdev_notice(tp->dev, "TSO capability restored\n");
11346 tg3_flag_set(tp, TSO_CAPABLE);
11347 }
11348 }
11349
11350 tg3_carrier_off(tp);
11351
11352 err = tg3_power_up(tp);
11353 if (err)
11354 return err;
11355
11356 tg3_full_lock(tp, 0);
11357
11358 tg3_disable_ints(tp);
11359 tg3_flag_clear(tp, INIT_COMPLETE);
11360
11361 tg3_full_unlock(tp);
11362
11363 err = tg3_start(tp,
11364 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11365 true, true);
11366 if (err) {
11367 tg3_frob_aux_power(tp, false);
11368 pci_set_power_state(tp->pdev, PCI_D3hot);
11369 }
11370
11371 if (tg3_flag(tp, PTP_CAPABLE)) {
11372 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11373 &tp->pdev->dev);
11374 if (IS_ERR(tp->ptp_clock))
11375 tp->ptp_clock = NULL;
11376 }
11377
11378 return err;
11379 }
11380
11381 static int tg3_close(struct net_device *dev)
11382 {
11383 struct tg3 *tp = netdev_priv(dev);
11384
11385 tg3_ptp_fini(tp);
11386
11387 tg3_stop(tp);
11388
11389 /* Clear stats across close / open calls */
11390 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11391 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11392
11393 tg3_power_down(tp);
11394
11395 tg3_carrier_off(tp);
11396
11397 return 0;
11398 }
11399
11400 static inline u64 get_stat64(tg3_stat64_t *val)
11401 {
11402 return ((u64)val->high << 32) | ((u64)val->low);
11403 }
11404
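/* On 5700/5701 copper devices, CRC errors are read from the PHY rather
 * than from the MAC statistics block: with MII_TG3_TEST1_CRC_EN set,
 * reading MII_TG3_RXR_COUNTERS returns the PHY's CRC error count, which
 * appears to clear on read and so is accumulated in tp->phy_crc_errors.
 */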
11405 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11406 {
11407 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11408
11409 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11410 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11411 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11412 u32 val;
11413
11414 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11415 tg3_writephy(tp, MII_TG3_TEST1,
11416 val | MII_TG3_TEST1_CRC_EN);
11417 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11418 } else
11419 val = 0;
11420
11421 tp->phy_crc_errors += val;
11422
11423 return tp->phy_crc_errors;
11424 }
11425
11426 return get_stat64(&hw_stats->rx_fcs_errors);
11427 }
11428
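/* Add the snapshot taken before the last chip reset (old_estats) to the
 * live hardware counter so that ethtool statistics stay monotonic
 * across resets.
 */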
11429 #define ESTAT_ADD(member) \
11430 estats->member = old_estats->member + \
11431 get_stat64(&hw_stats->member)
11432
11433 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11434 {
11435 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11436 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11437
11438 ESTAT_ADD(rx_octets);
11439 ESTAT_ADD(rx_fragments);
11440 ESTAT_ADD(rx_ucast_packets);
11441 ESTAT_ADD(rx_mcast_packets);
11442 ESTAT_ADD(rx_bcast_packets);
11443 ESTAT_ADD(rx_fcs_errors);
11444 ESTAT_ADD(rx_align_errors);
11445 ESTAT_ADD(rx_xon_pause_rcvd);
11446 ESTAT_ADD(rx_xoff_pause_rcvd);
11447 ESTAT_ADD(rx_mac_ctrl_rcvd);
11448 ESTAT_ADD(rx_xoff_entered);
11449 ESTAT_ADD(rx_frame_too_long_errors);
11450 ESTAT_ADD(rx_jabbers);
11451 ESTAT_ADD(rx_undersize_packets);
11452 ESTAT_ADD(rx_in_length_errors);
11453 ESTAT_ADD(rx_out_length_errors);
11454 ESTAT_ADD(rx_64_or_less_octet_packets);
11455 ESTAT_ADD(rx_65_to_127_octet_packets);
11456 ESTAT_ADD(rx_128_to_255_octet_packets);
11457 ESTAT_ADD(rx_256_to_511_octet_packets);
11458 ESTAT_ADD(rx_512_to_1023_octet_packets);
11459 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11460 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11461 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11462 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11463 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11464
11465 ESTAT_ADD(tx_octets);
11466 ESTAT_ADD(tx_collisions);
11467 ESTAT_ADD(tx_xon_sent);
11468 ESTAT_ADD(tx_xoff_sent);
11469 ESTAT_ADD(tx_flow_control);
11470 ESTAT_ADD(tx_mac_errors);
11471 ESTAT_ADD(tx_single_collisions);
11472 ESTAT_ADD(tx_mult_collisions);
11473 ESTAT_ADD(tx_deferred);
11474 ESTAT_ADD(tx_excessive_collisions);
11475 ESTAT_ADD(tx_late_collisions);
11476 ESTAT_ADD(tx_collide_2times);
11477 ESTAT_ADD(tx_collide_3times);
11478 ESTAT_ADD(tx_collide_4times);
11479 ESTAT_ADD(tx_collide_5times);
11480 ESTAT_ADD(tx_collide_6times);
11481 ESTAT_ADD(tx_collide_7times);
11482 ESTAT_ADD(tx_collide_8times);
11483 ESTAT_ADD(tx_collide_9times);
11484 ESTAT_ADD(tx_collide_10times);
11485 ESTAT_ADD(tx_collide_11times);
11486 ESTAT_ADD(tx_collide_12times);
11487 ESTAT_ADD(tx_collide_13times);
11488 ESTAT_ADD(tx_collide_14times);
11489 ESTAT_ADD(tx_collide_15times);
11490 ESTAT_ADD(tx_ucast_packets);
11491 ESTAT_ADD(tx_mcast_packets);
11492 ESTAT_ADD(tx_bcast_packets);
11493 ESTAT_ADD(tx_carrier_sense_errors);
11494 ESTAT_ADD(tx_discards);
11495 ESTAT_ADD(tx_errors);
11496
11497 ESTAT_ADD(dma_writeq_full);
11498 ESTAT_ADD(dma_write_prioq_full);
11499 ESTAT_ADD(rxbds_empty);
11500 ESTAT_ADD(rx_discards);
11501 ESTAT_ADD(rx_errors);
11502 ESTAT_ADD(rx_threshold_hit);
11503
11504 ESTAT_ADD(dma_readq_full);
11505 ESTAT_ADD(dma_read_prioq_full);
11506 ESTAT_ADD(tx_comp_queue_full);
11507
11508 ESTAT_ADD(ring_set_send_prod_index);
11509 ESTAT_ADD(ring_status_update);
11510 ESTAT_ADD(nic_irqs);
11511 ESTAT_ADD(nic_avoided_irqs);
11512 ESTAT_ADD(nic_tx_threshold_hit);
11513
11514 ESTAT_ADD(mbuf_lwm_thresh_hit);
11515 }
11516
11517 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11518 {
11519 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11520 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11521
11522 stats->rx_packets = old_stats->rx_packets +
11523 get_stat64(&hw_stats->rx_ucast_packets) +
11524 get_stat64(&hw_stats->rx_mcast_packets) +
11525 get_stat64(&hw_stats->rx_bcast_packets);
11526
11527 stats->tx_packets = old_stats->tx_packets +
11528 get_stat64(&hw_stats->tx_ucast_packets) +
11529 get_stat64(&hw_stats->tx_mcast_packets) +
11530 get_stat64(&hw_stats->tx_bcast_packets);
11531
11532 stats->rx_bytes = old_stats->rx_bytes +
11533 get_stat64(&hw_stats->rx_octets);
11534 stats->tx_bytes = old_stats->tx_bytes +
11535 get_stat64(&hw_stats->tx_octets);
11536
11537 stats->rx_errors = old_stats->rx_errors +
11538 get_stat64(&hw_stats->rx_errors);
11539 stats->tx_errors = old_stats->tx_errors +
11540 get_stat64(&hw_stats->tx_errors) +
11541 get_stat64(&hw_stats->tx_mac_errors) +
11542 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11543 get_stat64(&hw_stats->tx_discards);
11544
11545 stats->multicast = old_stats->multicast +
11546 get_stat64(&hw_stats->rx_mcast_packets);
11547 stats->collisions = old_stats->collisions +
11548 get_stat64(&hw_stats->tx_collisions);
11549
11550 stats->rx_length_errors = old_stats->rx_length_errors +
11551 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11552 get_stat64(&hw_stats->rx_undersize_packets);
11553
11554 stats->rx_over_errors = old_stats->rx_over_errors +
11555 get_stat64(&hw_stats->rxbds_empty);
11556 stats->rx_frame_errors = old_stats->rx_frame_errors +
11557 get_stat64(&hw_stats->rx_align_errors);
11558 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11559 get_stat64(&hw_stats->tx_discards);
11560 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11561 get_stat64(&hw_stats->tx_carrier_sense_errors);
11562
11563 stats->rx_crc_errors = old_stats->rx_crc_errors +
11564 tg3_calc_crc_errors(tp);
11565
11566 stats->rx_missed_errors = old_stats->rx_missed_errors +
11567 get_stat64(&hw_stats->rx_discards);
11568
11569 stats->rx_dropped = tp->rx_dropped;
11570 stats->tx_dropped = tp->tx_dropped;
11571 }
11572
11573 static int tg3_get_regs_len(struct net_device *dev)
11574 {
11575 return TG3_REG_BLK_SIZE;
11576 }
11577
11578 static void tg3_get_regs(struct net_device *dev,
11579 struct ethtool_regs *regs, void *_p)
11580 {
11581 struct tg3 *tp = netdev_priv(dev);
11582
11583 regs->version = 0;
11584
11585 memset(_p, 0, TG3_REG_BLK_SIZE);
11586
11587 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11588 return;
11589
11590 tg3_full_lock(tp, 0);
11591
11592 tg3_dump_legacy_regs(tp, (u32 *)_p);
11593
11594 tg3_full_unlock(tp);
11595 }
11596
11597 static int tg3_get_eeprom_len(struct net_device *dev)
11598 {
11599 struct tg3 *tp = netdev_priv(dev);
11600
11601 return tp->nvram_size;
11602 }
11603
11604 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11605 {
11606 struct tg3 *tp = netdev_priv(dev);
11607 int ret;
11608 u8 *pd;
11609 u32 i, offset, len, b_offset, b_count;
11610 __be32 val;
11611
11612 if (tg3_flag(tp, NO_NVRAM))
11613 return -EINVAL;
11614
11615 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11616 return -EAGAIN;
11617
11618 offset = eeprom->offset;
11619 len = eeprom->len;
11620 eeprom->len = 0;
11621
11622 eeprom->magic = TG3_EEPROM_MAGIC;
11623
11624 if (offset & 3) {
11625 /* adjustments to start on required 4 byte boundary */
11626 b_offset = offset & 3;
11627 b_count = 4 - b_offset;
11628 if (b_count > len) {
11629 /* i.e. offset=1 len=2 */
11630 b_count = len;
11631 }
11632 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11633 if (ret)
11634 return ret;
11635 memcpy(data, ((char *)&val) + b_offset, b_count);
11636 len -= b_count;
11637 offset += b_count;
11638 eeprom->len += b_count;
11639 }
11640
11641 /* read bytes up to the last 4 byte boundary */
11642 pd = &data[eeprom->len];
11643 for (i = 0; i < (len - (len & 3)); i += 4) {
11644 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11645 if (ret) {
11646 eeprom->len += i;
11647 return ret;
11648 }
11649 memcpy(pd + i, &val, 4);
11650 }
11651 eeprom->len += i;
11652
11653 if (len & 3) {
11654 /* read last bytes not ending on 4 byte boundary */
11655 pd = &data[eeprom->len];
11656 b_count = len & 3;
11657 b_offset = offset + len - b_count;
11658 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11659 if (ret)
11660 return ret;
11661 memcpy(pd, &val, b_count);
11662 eeprom->len += b_count;
11663 }
11664 return 0;
11665 }
11666
11667 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11668 {
11669 struct tg3 *tp = netdev_priv(dev);
11670 int ret;
11671 u32 offset, len, b_offset, odd_len;
11672 u8 *buf;
11673 __be32 start, end;
11674
11675 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11676 return -EAGAIN;
11677
11678 if (tg3_flag(tp, NO_NVRAM) ||
11679 eeprom->magic != TG3_EEPROM_MAGIC)
11680 return -EINVAL;
11681
11682 offset = eeprom->offset;
11683 len = eeprom->len;
11684
11685 if ((b_offset = (offset & 3))) {
11686 /* adjustments to start on required 4 byte boundary */
11687 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11688 if (ret)
11689 return ret;
11690 len += b_offset;
11691 offset &= ~3;
11692 if (len < 4)
11693 len = 4;
11694 }
11695
11696 odd_len = 0;
11697 if (len & 3) {
11698 /* adjustments to end on required 4 byte boundary */
11699 odd_len = 1;
11700 len = (len + 3) & ~3;
11701 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11702 if (ret)
11703 return ret;
11704 }
11705
11706 buf = data;
11707 if (b_offset || odd_len) {
11708 buf = kmalloc(len, GFP_KERNEL);
11709 if (!buf)
11710 return -ENOMEM;
11711 if (b_offset)
11712 memcpy(buf, &start, 4);
11713 if (odd_len)
11714 memcpy(buf+len-4, &end, 4);
11715 memcpy(buf + b_offset, data, eeprom->len);
11716 }
11717
11718 ret = tg3_nvram_write_block(tp, offset, len, buf);
11719
11720 if (buf != data)
11721 kfree(buf);
11722
11723 return ret;
11724 }
11725
11726 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11727 {
11728 struct tg3 *tp = netdev_priv(dev);
11729
11730 if (tg3_flag(tp, USE_PHYLIB)) {
11731 struct phy_device *phydev;
11732 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11733 return -EAGAIN;
11734 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11735 return phy_ethtool_gset(phydev, cmd);
11736 }
11737
11738 cmd->supported = (SUPPORTED_Autoneg);
11739
11740 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11741 cmd->supported |= (SUPPORTED_1000baseT_Half |
11742 SUPPORTED_1000baseT_Full);
11743
11744 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11745 cmd->supported |= (SUPPORTED_100baseT_Half |
11746 SUPPORTED_100baseT_Full |
11747 SUPPORTED_10baseT_Half |
11748 SUPPORTED_10baseT_Full |
11749 SUPPORTED_TP);
11750 cmd->port = PORT_TP;
11751 } else {
11752 cmd->supported |= SUPPORTED_FIBRE;
11753 cmd->port = PORT_FIBRE;
11754 }
11755
11756 cmd->advertising = tp->link_config.advertising;
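	/* Fold the flow control config into the 802.3 pause bits:
	 * rx+tx -> Pause, rx only -> Pause | Asym_Pause,
	 * tx only -> Asym_Pause.
	 */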
11757 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11758 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11759 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11760 cmd->advertising |= ADVERTISED_Pause;
11761 } else {
11762 cmd->advertising |= ADVERTISED_Pause |
11763 ADVERTISED_Asym_Pause;
11764 }
11765 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11766 cmd->advertising |= ADVERTISED_Asym_Pause;
11767 }
11768 }
11769 if (netif_running(dev) && tp->link_up) {
11770 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11771 cmd->duplex = tp->link_config.active_duplex;
11772 cmd->lp_advertising = tp->link_config.rmt_adv;
11773 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11774 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11775 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11776 else
11777 cmd->eth_tp_mdix = ETH_TP_MDI;
11778 }
11779 } else {
11780 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11781 cmd->duplex = DUPLEX_UNKNOWN;
11782 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11783 }
11784 cmd->phy_address = tp->phy_addr;
11785 cmd->transceiver = XCVR_INTERNAL;
11786 cmd->autoneg = tp->link_config.autoneg;
11787 cmd->maxtxpkt = 0;
11788 cmd->maxrxpkt = 0;
11789 return 0;
11790 }
11791
11792 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11793 {
11794 struct tg3 *tp = netdev_priv(dev);
11795 u32 speed = ethtool_cmd_speed(cmd);
11796
11797 if (tg3_flag(tp, USE_PHYLIB)) {
11798 struct phy_device *phydev;
11799 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11800 return -EAGAIN;
11801 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11802 return phy_ethtool_sset(phydev, cmd);
11803 }
11804
11805 if (cmd->autoneg != AUTONEG_ENABLE &&
11806 cmd->autoneg != AUTONEG_DISABLE)
11807 return -EINVAL;
11808
11809 if (cmd->autoneg == AUTONEG_DISABLE &&
11810 cmd->duplex != DUPLEX_FULL &&
11811 cmd->duplex != DUPLEX_HALF)
11812 return -EINVAL;
11813
11814 if (cmd->autoneg == AUTONEG_ENABLE) {
11815 u32 mask = ADVERTISED_Autoneg |
11816 ADVERTISED_Pause |
11817 ADVERTISED_Asym_Pause;
11818
11819 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11820 mask |= ADVERTISED_1000baseT_Half |
11821 ADVERTISED_1000baseT_Full;
11822
11823 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11824 mask |= ADVERTISED_100baseT_Half |
11825 ADVERTISED_100baseT_Full |
11826 ADVERTISED_10baseT_Half |
11827 ADVERTISED_10baseT_Full |
11828 ADVERTISED_TP;
11829 else
11830 mask |= ADVERTISED_FIBRE;
11831
11832 if (cmd->advertising & ~mask)
11833 return -EINVAL;
11834
11835 mask &= (ADVERTISED_1000baseT_Half |
11836 ADVERTISED_1000baseT_Full |
11837 ADVERTISED_100baseT_Half |
11838 ADVERTISED_100baseT_Full |
11839 ADVERTISED_10baseT_Half |
11840 ADVERTISED_10baseT_Full);
11841
11842 cmd->advertising &= mask;
11843 } else {
11844 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11845 if (speed != SPEED_1000)
11846 return -EINVAL;
11847
11848 if (cmd->duplex != DUPLEX_FULL)
11849 return -EINVAL;
11850 } else {
11851 if (speed != SPEED_100 &&
11852 speed != SPEED_10)
11853 return -EINVAL;
11854 }
11855 }
11856
11857 tg3_full_lock(tp, 0);
11858
11859 tp->link_config.autoneg = cmd->autoneg;
11860 if (cmd->autoneg == AUTONEG_ENABLE) {
11861 tp->link_config.advertising = (cmd->advertising |
11862 ADVERTISED_Autoneg);
11863 tp->link_config.speed = SPEED_UNKNOWN;
11864 tp->link_config.duplex = DUPLEX_UNKNOWN;
11865 } else {
11866 tp->link_config.advertising = 0;
11867 tp->link_config.speed = speed;
11868 tp->link_config.duplex = cmd->duplex;
11869 }
11870
11871 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11872
11873 tg3_warn_mgmt_link_flap(tp);
11874
11875 if (netif_running(dev))
11876 tg3_setup_phy(tp, true);
11877
11878 tg3_full_unlock(tp);
11879
11880 return 0;
11881 }
11882
11883 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11884 {
11885 struct tg3 *tp = netdev_priv(dev);
11886
11887 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11888 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11889 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11890 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11891 }
11892
11893 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11894 {
11895 struct tg3 *tp = netdev_priv(dev);
11896
11897 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11898 wol->supported = WAKE_MAGIC;
11899 else
11900 wol->supported = 0;
11901 wol->wolopts = 0;
11902 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11903 wol->wolopts = WAKE_MAGIC;
11904 memset(&wol->sopass, 0, sizeof(wol->sopass));
11905 }
11906
11907 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11908 {
11909 struct tg3 *tp = netdev_priv(dev);
11910 struct device *dp = &tp->pdev->dev;
11911
11912 if (wol->wolopts & ~WAKE_MAGIC)
11913 return -EINVAL;
11914 if ((wol->wolopts & WAKE_MAGIC) &&
11915 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11916 return -EINVAL;
11917
11918 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11919
11920 spin_lock_bh(&tp->lock);
11921 if (device_may_wakeup(dp))
11922 tg3_flag_set(tp, WOL_ENABLE);
11923 else
11924 tg3_flag_clear(tp, WOL_ENABLE);
11925 spin_unlock_bh(&tp->lock);
11926
11927 return 0;
11928 }
11929
11930 static u32 tg3_get_msglevel(struct net_device *dev)
11931 {
11932 struct tg3 *tp = netdev_priv(dev);
11933 return tp->msg_enable;
11934 }
11935
11936 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11937 {
11938 struct tg3 *tp = netdev_priv(dev);
11939 tp->msg_enable = value;
11940 }
11941
11942 static int tg3_nway_reset(struct net_device *dev)
11943 {
11944 struct tg3 *tp = netdev_priv(dev);
11945 int r;
11946
11947 if (!netif_running(dev))
11948 return -EAGAIN;
11949
11950 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11951 return -EINVAL;
11952
11953 tg3_warn_mgmt_link_flap(tp);
11954
11955 if (tg3_flag(tp, USE_PHYLIB)) {
11956 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11957 return -EAGAIN;
11958 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11959 } else {
11960 u32 bmcr;
11961
11962 spin_lock_bh(&tp->lock);
11963 r = -EINVAL;
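		/* The first read below is discarded; renegotiation is
		 * attempted only when the second read succeeds.
		 */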
11964 tg3_readphy(tp, MII_BMCR, &bmcr);
11965 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11966 ((bmcr & BMCR_ANENABLE) ||
11967 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11968 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11969 BMCR_ANENABLE);
11970 r = 0;
11971 }
11972 spin_unlock_bh(&tp->lock);
11973 }
11974
11975 return r;
11976 }
11977
11978 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11979 {
11980 struct tg3 *tp = netdev_priv(dev);
11981
11982 ering->rx_max_pending = tp->rx_std_ring_mask;
11983 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11984 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11985 else
11986 ering->rx_jumbo_max_pending = 0;
11987
11988 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11989
11990 ering->rx_pending = tp->rx_pending;
11991 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11992 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11993 else
11994 ering->rx_jumbo_pending = 0;
11995
11996 ering->tx_pending = tp->napi[0].tx_pending;
11997 }
11998
11999 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12000 {
12001 struct tg3 *tp = netdev_priv(dev);
12002 int i, irq_sync = 0, err = 0;
12003
12004 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12005 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12006 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12007 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12008 (tg3_flag(tp, TSO_BUG) &&
12009 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12010 return -EINVAL;
12011
12012 if (netif_running(dev)) {
12013 tg3_phy_stop(tp);
12014 tg3_netif_stop(tp);
12015 irq_sync = 1;
12016 }
12017
12018 tg3_full_lock(tp, irq_sync);
12019
12020 tp->rx_pending = ering->rx_pending;
12021
12022 if (tg3_flag(tp, MAX_RXPEND_64) &&
12023 tp->rx_pending > 63)
12024 tp->rx_pending = 63;
12025 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12026
12027 for (i = 0; i < tp->irq_max; i++)
12028 tp->napi[i].tx_pending = ering->tx_pending;
12029
12030 if (netif_running(dev)) {
12031 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12032 err = tg3_restart_hw(tp, false);
12033 if (!err)
12034 tg3_netif_start(tp);
12035 }
12036
12037 tg3_full_unlock(tp);
12038
12039 if (irq_sync && !err)
12040 tg3_phy_start(tp);
12041
12042 return err;
12043 }
12044
12045 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12046 {
12047 struct tg3 *tp = netdev_priv(dev);
12048
12049 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12050
12051 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12052 epause->rx_pause = 1;
12053 else
12054 epause->rx_pause = 0;
12055
12056 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12057 epause->tx_pause = 1;
12058 else
12059 epause->tx_pause = 0;
12060 }
12061
12062 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12063 {
12064 struct tg3 *tp = netdev_priv(dev);
12065 int err = 0;
12066
12067 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12068 tg3_warn_mgmt_link_flap(tp);
12069
12070 if (tg3_flag(tp, USE_PHYLIB)) {
12071 u32 newadv;
12072 struct phy_device *phydev;
12073
12074 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12075
12076 if (!(phydev->supported & SUPPORTED_Pause) ||
12077 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12078 (epause->rx_pause != epause->tx_pause)))
12079 return -EINVAL;
12080
12081 tp->link_config.flowctrl = 0;
12082 if (epause->rx_pause) {
12083 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12084
12085 if (epause->tx_pause) {
12086 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12087 newadv = ADVERTISED_Pause;
12088 } else
12089 newadv = ADVERTISED_Pause |
12090 ADVERTISED_Asym_Pause;
12091 } else if (epause->tx_pause) {
12092 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12093 newadv = ADVERTISED_Asym_Pause;
12094 } else
12095 newadv = 0;
12096
12097 if (epause->autoneg)
12098 tg3_flag_set(tp, PAUSE_AUTONEG);
12099 else
12100 tg3_flag_clear(tp, PAUSE_AUTONEG);
12101
12102 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12103 u32 oldadv = phydev->advertising &
12104 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12105 if (oldadv != newadv) {
12106 phydev->advertising &=
12107 ~(ADVERTISED_Pause |
12108 ADVERTISED_Asym_Pause);
12109 phydev->advertising |= newadv;
12110 if (phydev->autoneg) {
12111 /*
12112 * Always renegotiate the link to
12113 * inform our link partner of our
12114 * flow control settings, even if the
12115 * flow control is forced. Let
12116 * tg3_adjust_link() do the final
12117 * flow control setup.
12118 */
12119 return phy_start_aneg(phydev);
12120 }
12121 }
12122
12123 if (!epause->autoneg)
12124 tg3_setup_flow_control(tp, 0, 0);
12125 } else {
12126 tp->link_config.advertising &=
12127 ~(ADVERTISED_Pause |
12128 ADVERTISED_Asym_Pause);
12129 tp->link_config.advertising |= newadv;
12130 }
12131 } else {
12132 int irq_sync = 0;
12133
12134 if (netif_running(dev)) {
12135 tg3_netif_stop(tp);
12136 irq_sync = 1;
12137 }
12138
12139 tg3_full_lock(tp, irq_sync);
12140
12141 if (epause->autoneg)
12142 tg3_flag_set(tp, PAUSE_AUTONEG);
12143 else
12144 tg3_flag_clear(tp, PAUSE_AUTONEG);
12145 if (epause->rx_pause)
12146 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12147 else
12148 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12149 if (epause->tx_pause)
12150 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12151 else
12152 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12153
12154 if (netif_running(dev)) {
12155 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12156 err = tg3_restart_hw(tp, false);
12157 if (!err)
12158 tg3_netif_start(tp);
12159 }
12160
12161 tg3_full_unlock(tp);
12162 }
12163
12164 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12165
12166 return err;
12167 }
12168
12169 static int tg3_get_sset_count(struct net_device *dev, int sset)
12170 {
12171 switch (sset) {
12172 case ETH_SS_TEST:
12173 return TG3_NUM_TEST;
12174 case ETH_SS_STATS:
12175 return TG3_NUM_STATS;
12176 default:
12177 return -EOPNOTSUPP;
12178 }
12179 }
12180
12181 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12182 u32 *rules __always_unused)
12183 {
12184 struct tg3 *tp = netdev_priv(dev);
12185
12186 if (!tg3_flag(tp, SUPPORT_MSIX))
12187 return -EOPNOTSUPP;
12188
12189 switch (info->cmd) {
12190 case ETHTOOL_GRXRINGS:
12191 if (netif_running(tp->dev))
12192 info->data = tp->rxq_cnt;
12193 else {
12194 info->data = num_online_cpus();
12195 if (info->data > TG3_RSS_MAX_NUM_QS)
12196 info->data = TG3_RSS_MAX_NUM_QS;
12197 }
12198
12199 /* The first interrupt vector only
12200 * handles link interrupts.
12201 */
12202 info->data -= 1;
12203 return 0;
12204
12205 default:
12206 return -EOPNOTSUPP;
12207 }
12208 }
12209
12210 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12211 {
12212 u32 size = 0;
12213 struct tg3 *tp = netdev_priv(dev);
12214
12215 if (tg3_flag(tp, SUPPORT_MSIX))
12216 size = TG3_RSS_INDIR_TBL_SIZE;
12217
12218 return size;
12219 }
12220
12221 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12222 {
12223 struct tg3 *tp = netdev_priv(dev);
12224 int i;
12225
12226 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12227 indir[i] = tp->rss_ind_tbl[i];
12228
12229 return 0;
12230 }
12231
12232 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12233 {
12234 struct tg3 *tp = netdev_priv(dev);
12235 size_t i;
12236
12237 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12238 tp->rss_ind_tbl[i] = indir[i];
12239
12240 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12241 return 0;
12242
12243 /* It is legal to write the indirection
12244 * table while the device is running.
12245 */
12246 tg3_full_lock(tp, 0);
12247 tg3_rss_write_indir_tbl(tp);
12248 tg3_full_unlock(tp);
12249
12250 return 0;
12251 }
12252
12253 static void tg3_get_channels(struct net_device *dev,
12254 struct ethtool_channels *channel)
12255 {
12256 struct tg3 *tp = netdev_priv(dev);
12257 u32 deflt_qs = netif_get_num_default_rss_queues();
12258
12259 channel->max_rx = tp->rxq_max;
12260 channel->max_tx = tp->txq_max;
12261
12262 if (netif_running(dev)) {
12263 channel->rx_count = tp->rxq_cnt;
12264 channel->tx_count = tp->txq_cnt;
12265 } else {
12266 if (tp->rxq_req)
12267 channel->rx_count = tp->rxq_req;
12268 else
12269 channel->rx_count = min(deflt_qs, tp->rxq_max);
12270
12271 if (tp->txq_req)
12272 channel->tx_count = tp->txq_req;
12273 else
12274 channel->tx_count = min(deflt_qs, tp->txq_max);
12275 }
12276 }
12277
12278 static int tg3_set_channels(struct net_device *dev,
12279 struct ethtool_channels *channel)
12280 {
12281 struct tg3 *tp = netdev_priv(dev);
12282
12283 if (!tg3_flag(tp, SUPPORT_MSIX))
12284 return -EOPNOTSUPP;
12285
12286 if (channel->rx_count > tp->rxq_max ||
12287 channel->tx_count > tp->txq_max)
12288 return -EINVAL;
12289
12290 tp->rxq_req = channel->rx_count;
12291 tp->txq_req = channel->tx_count;
12292
12293 if (!netif_running(dev))
12294 return 0;
12295
12296 tg3_stop(tp);
12297
12298 tg3_carrier_off(tp);
12299
12300 tg3_start(tp, true, false, false);
12301
12302 return 0;
12303 }
12304
12305 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12306 {
12307 switch (stringset) {
12308 case ETH_SS_STATS:
12309 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12310 break;
12311 case ETH_SS_TEST:
12312 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12313 break;
12314 default:
12315 		WARN_ON(1); /* unknown stringset; should be a WARN() with a message */
12316 break;
12317 }
12318 }
12319
12320 static int tg3_set_phys_id(struct net_device *dev,
12321 enum ethtool_phys_id_state state)
12322 {
12323 struct tg3 *tp = netdev_priv(dev);
12324
12325 if (!netif_running(tp->dev))
12326 return -EAGAIN;
12327
12328 switch (state) {
12329 case ETHTOOL_ID_ACTIVE:
12330 return 1; /* cycle on/off once per second */
12331
12332 case ETHTOOL_ID_ON:
12333 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12334 LED_CTRL_1000MBPS_ON |
12335 LED_CTRL_100MBPS_ON |
12336 LED_CTRL_10MBPS_ON |
12337 LED_CTRL_TRAFFIC_OVERRIDE |
12338 LED_CTRL_TRAFFIC_BLINK |
12339 LED_CTRL_TRAFFIC_LED);
12340 break;
12341
12342 case ETHTOOL_ID_OFF:
12343 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12344 LED_CTRL_TRAFFIC_OVERRIDE);
12345 break;
12346
12347 case ETHTOOL_ID_INACTIVE:
12348 tw32(MAC_LED_CTRL, tp->led_ctrl);
12349 break;
12350 }
12351
12352 return 0;
12353 }
12354
12355 static void tg3_get_ethtool_stats(struct net_device *dev,
12356 struct ethtool_stats *estats, u64 *tmp_stats)
12357 {
12358 struct tg3 *tp = netdev_priv(dev);
12359
12360 if (tp->hw_stats)
12361 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12362 else
12363 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12364 }
12365
12366 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12367 {
12368 int i;
12369 __be32 *buf;
12370 u32 offset = 0, len = 0;
12371 u32 magic, val;
12372
12373 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12374 return NULL;
12375
12376 if (magic == TG3_EEPROM_MAGIC) {
12377 for (offset = TG3_NVM_DIR_START;
12378 offset < TG3_NVM_DIR_END;
12379 offset += TG3_NVM_DIRENT_SIZE) {
12380 if (tg3_nvram_read(tp, offset, &val))
12381 return NULL;
12382
12383 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12384 TG3_NVM_DIRTYPE_EXTVPD)
12385 break;
12386 }
12387
12388 if (offset != TG3_NVM_DIR_END) {
12389 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12390 if (tg3_nvram_read(tp, offset + 4, &offset))
12391 return NULL;
12392
12393 offset = tg3_nvram_logical_addr(tp, offset);
12394 }
12395 }
12396
12397 if (!offset || !len) {
12398 offset = TG3_NVM_VPD_OFF;
12399 len = TG3_NVM_VPD_LEN;
12400 }
12401
12402 buf = kmalloc(len, GFP_KERNEL);
12403 if (buf == NULL)
12404 return NULL;
12405
12406 if (magic == TG3_EEPROM_MAGIC) {
12407 for (i = 0; i < len; i += 4) {
12408 /* The data is in little-endian format in NVRAM.
12409 * Use the big-endian read routines to preserve
12410 * the byte order as it exists in NVRAM.
12411 */
12412 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12413 goto error;
12414 }
12415 } else {
12416 u8 *ptr;
12417 ssize_t cnt;
12418 unsigned int pos = 0;
12419
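		/* Pull the VPD in at most three chunked reads; a timeout
		 * or signal merely burns an attempt, any other error
		 * aborts.
		 */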
12420 ptr = (u8 *)&buf[0];
12421 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12422 cnt = pci_read_vpd(tp->pdev, pos,
12423 len - pos, ptr);
12424 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12425 cnt = 0;
12426 else if (cnt < 0)
12427 goto error;
12428 }
12429 if (pos != len)
12430 goto error;
12431 }
12432
12433 *vpdlen = len;
12434
12435 return buf;
12436
12437 error:
12438 kfree(buf);
12439 return NULL;
12440 }
12441
12442 #define NVRAM_TEST_SIZE 0x100
12443 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12444 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12445 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12446 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12447 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12448 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12449 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12450 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12451
12452 static int tg3_test_nvram(struct tg3 *tp)
12453 {
12454 u32 csum, magic, len;
12455 __be32 *buf;
12456 int i, j, k, err = 0, size;
12457
12458 if (tg3_flag(tp, NO_NVRAM))
12459 return 0;
12460
12461 if (tg3_nvram_read(tp, 0, &magic) != 0)
12462 return -EIO;
12463
12464 if (magic == TG3_EEPROM_MAGIC)
12465 size = NVRAM_TEST_SIZE;
12466 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12467 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12468 TG3_EEPROM_SB_FORMAT_1) {
12469 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12470 case TG3_EEPROM_SB_REVISION_0:
12471 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12472 break;
12473 case TG3_EEPROM_SB_REVISION_2:
12474 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12475 break;
12476 case TG3_EEPROM_SB_REVISION_3:
12477 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12478 break;
12479 case TG3_EEPROM_SB_REVISION_4:
12480 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12481 break;
12482 case TG3_EEPROM_SB_REVISION_5:
12483 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12484 break;
12485 case TG3_EEPROM_SB_REVISION_6:
12486 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12487 break;
12488 default:
12489 return -EIO;
12490 }
12491 } else
12492 return 0;
12493 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12494 size = NVRAM_SELFBOOT_HW_SIZE;
12495 else
12496 return -EIO;
12497
12498 buf = kmalloc(size, GFP_KERNEL);
12499 if (buf == NULL)
12500 return -ENOMEM;
12501
12502 err = -EIO;
12503 for (i = 0, j = 0; i < size; i += 4, j++) {
12504 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12505 if (err)
12506 break;
12507 }
12508 if (i < size)
12509 goto out;
12510
12511 /* Selfboot format */
12512 magic = be32_to_cpu(buf[0]);
12513 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12514 TG3_EEPROM_MAGIC_FW) {
12515 u8 *buf8 = (u8 *) buf, csum8 = 0;
12516
12517 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12518 TG3_EEPROM_SB_REVISION_2) {
12519 /* For rev 2, the csum doesn't include the MBA. */
12520 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12521 csum8 += buf8[i];
12522 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12523 csum8 += buf8[i];
12524 } else {
12525 for (i = 0; i < size; i++)
12526 csum8 += buf8[i];
12527 }
12528
12529 if (csum8 == 0) {
12530 err = 0;
12531 goto out;
12532 }
12533
12534 err = -EIO;
12535 goto out;
12536 }
12537
12538 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12539 TG3_EEPROM_MAGIC_HW) {
12540 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12541 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12542 u8 *buf8 = (u8 *) buf;
12543
12544 /* Separate the parity bits and the data bytes. */
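		/* Bytes 0 and 8 each hold seven parity bits, byte 16 six
		 * and byte 17 eight: one parity bit for each of the 0x1c
		 * data bytes.
		 */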
12545 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12546 if ((i == 0) || (i == 8)) {
12547 int l;
12548 u8 msk;
12549
12550 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12551 parity[k++] = buf8[i] & msk;
12552 i++;
12553 } else if (i == 16) {
12554 int l;
12555 u8 msk;
12556
12557 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12558 parity[k++] = buf8[i] & msk;
12559 i++;
12560
12561 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12562 parity[k++] = buf8[i] & msk;
12563 i++;
12564 }
12565 data[j++] = buf8[i];
12566 }
12567
12568 err = -EIO;
12569 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12570 u8 hw8 = hweight8(data[i]);
12571
12572 if ((hw8 & 0x1) && parity[i])
12573 goto out;
12574 else if (!(hw8 & 0x1) && !parity[i])
12575 goto out;
12576 }
12577 err = 0;
12578 goto out;
12579 }
12580
12581 err = -EIO;
12582
12583 /* Bootstrap checksum at offset 0x10 */
12584 csum = calc_crc((unsigned char *) buf, 0x10);
12585 if (csum != le32_to_cpu(buf[0x10/4]))
12586 goto out;
12587
12588 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12589 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12590 if (csum != le32_to_cpu(buf[0xfc/4]))
12591 goto out;
12592
12593 kfree(buf);
12594
12595 buf = tg3_vpd_readblock(tp, &len);
12596 if (!buf)
12597 return -ENOMEM;
12598
12599 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12600 if (i > 0) {
12601 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12602 if (j < 0)
12603 goto out;
12604
12605 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12606 goto out;
12607
12608 i += PCI_VPD_LRDT_TAG_SIZE;
12609 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12610 PCI_VPD_RO_KEYWORD_CHKSUM);
12611 if (j > 0) {
12612 u8 csum8 = 0;
12613
12614 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12615
12616 for (i = 0; i <= j; i++)
12617 csum8 += ((u8 *)buf)[i];
12618
12619 if (csum8)
12620 goto out;
12621 }
12622 }
12623
12624 err = 0;
12625
12626 out:
12627 kfree(buf);
12628 return err;
12629 }
12630
12631 #define TG3_SERDES_TIMEOUT_SEC 2
12632 #define TG3_COPPER_TIMEOUT_SEC 6
12633
12634 static int tg3_test_link(struct tg3 *tp)
12635 {
12636 int i, max;
12637
12638 if (!netif_running(tp->dev))
12639 return -ENODEV;
12640
12641 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12642 max = TG3_SERDES_TIMEOUT_SEC;
12643 else
12644 max = TG3_COPPER_TIMEOUT_SEC;
12645
12646 for (i = 0; i < max; i++) {
12647 if (tp->link_up)
12648 return 0;
12649
12650 if (msleep_interruptible(1000))
12651 break;
12652 }
12653
12654 return -EIO;
12655 }
12656
12657 /* Only test the commonly used registers */
12658 static int tg3_test_registers(struct tg3 *tp)
12659 {
12660 int i, is_5705, is_5750;
12661 u32 offset, read_mask, write_mask, val, save_val, read_val;
12662 static struct {
12663 u16 offset;
12664 u16 flags;
12665 #define TG3_FL_5705 0x1
12666 #define TG3_FL_NOT_5705 0x2
12667 #define TG3_FL_NOT_5788 0x4
12668 #define TG3_FL_NOT_5750 0x8
12669 u32 read_mask;
12670 u32 write_mask;
12671 } reg_tbl[] = {
12672 /* MAC Control Registers */
12673 { MAC_MODE, TG3_FL_NOT_5705,
12674 0x00000000, 0x00ef6f8c },
12675 { MAC_MODE, TG3_FL_5705,
12676 0x00000000, 0x01ef6b8c },
12677 { MAC_STATUS, TG3_FL_NOT_5705,
12678 0x03800107, 0x00000000 },
12679 { MAC_STATUS, TG3_FL_5705,
12680 0x03800100, 0x00000000 },
12681 { MAC_ADDR_0_HIGH, 0x0000,
12682 0x00000000, 0x0000ffff },
12683 { MAC_ADDR_0_LOW, 0x0000,
12684 0x00000000, 0xffffffff },
12685 { MAC_RX_MTU_SIZE, 0x0000,
12686 0x00000000, 0x0000ffff },
12687 { MAC_TX_MODE, 0x0000,
12688 0x00000000, 0x00000070 },
12689 { MAC_TX_LENGTHS, 0x0000,
12690 0x00000000, 0x00003fff },
12691 { MAC_RX_MODE, TG3_FL_NOT_5705,
12692 0x00000000, 0x000007fc },
12693 { MAC_RX_MODE, TG3_FL_5705,
12694 0x00000000, 0x000007dc },
12695 { MAC_HASH_REG_0, 0x0000,
12696 0x00000000, 0xffffffff },
12697 { MAC_HASH_REG_1, 0x0000,
12698 0x00000000, 0xffffffff },
12699 { MAC_HASH_REG_2, 0x0000,
12700 0x00000000, 0xffffffff },
12701 { MAC_HASH_REG_3, 0x0000,
12702 0x00000000, 0xffffffff },
12703
12704 /* Receive Data and Receive BD Initiator Control Registers. */
12705 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12706 0x00000000, 0xffffffff },
12707 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12708 0x00000000, 0xffffffff },
12709 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12710 0x00000000, 0x00000003 },
12711 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12712 0x00000000, 0xffffffff },
12713 { RCVDBDI_STD_BD+0, 0x0000,
12714 0x00000000, 0xffffffff },
12715 { RCVDBDI_STD_BD+4, 0x0000,
12716 0x00000000, 0xffffffff },
12717 { RCVDBDI_STD_BD+8, 0x0000,
12718 0x00000000, 0xffff0002 },
12719 { RCVDBDI_STD_BD+0xc, 0x0000,
12720 0x00000000, 0xffffffff },
12721
12722 /* Receive BD Initiator Control Registers. */
12723 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12724 0x00000000, 0xffffffff },
12725 { RCVBDI_STD_THRESH, TG3_FL_5705,
12726 0x00000000, 0x000003ff },
12727 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12728 0x00000000, 0xffffffff },
12729
12730 /* Host Coalescing Control Registers. */
12731 { HOSTCC_MODE, TG3_FL_NOT_5705,
12732 0x00000000, 0x00000004 },
12733 { HOSTCC_MODE, TG3_FL_5705,
12734 0x00000000, 0x000000f6 },
12735 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12736 0x00000000, 0xffffffff },
12737 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12738 0x00000000, 0x000003ff },
12739 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12740 0x00000000, 0xffffffff },
12741 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12742 0x00000000, 0x000003ff },
12743 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12744 0x00000000, 0xffffffff },
12745 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12746 0x00000000, 0x000000ff },
12747 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12748 0x00000000, 0xffffffff },
12749 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12750 0x00000000, 0x000000ff },
12751 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12752 0x00000000, 0xffffffff },
12753 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12754 0x00000000, 0xffffffff },
12755 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12756 0x00000000, 0xffffffff },
12757 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12758 0x00000000, 0x000000ff },
12759 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12760 0x00000000, 0xffffffff },
12761 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12762 0x00000000, 0x000000ff },
12763 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12764 0x00000000, 0xffffffff },
12765 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12766 0x00000000, 0xffffffff },
12767 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12768 0x00000000, 0xffffffff },
12769 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12770 0x00000000, 0xffffffff },
12771 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12772 0x00000000, 0xffffffff },
12773 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12774 0xffffffff, 0x00000000 },
12775 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12776 0xffffffff, 0x00000000 },
12777
12778 /* Buffer Manager Control Registers. */
12779 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12780 0x00000000, 0x007fff80 },
12781 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12782 0x00000000, 0x007fffff },
12783 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12784 0x00000000, 0x0000003f },
12785 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12786 0x00000000, 0x000001ff },
12787 { BUFMGR_MB_HIGH_WATER, 0x0000,
12788 0x00000000, 0x000001ff },
12789 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12790 0xffffffff, 0x00000000 },
12791 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12792 0xffffffff, 0x00000000 },
12793
12794 /* Mailbox Registers */
12795 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12796 0x00000000, 0x000001ff },
12797 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12798 0x00000000, 0x000001ff },
12799 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12800 0x00000000, 0x000007ff },
12801 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12802 0x00000000, 0x000001ff },
12803
12804 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12805 };
12806
12807 is_5705 = is_5750 = 0;
12808 if (tg3_flag(tp, 5705_PLUS)) {
12809 is_5705 = 1;
12810 if (tg3_flag(tp, 5750_PLUS))
12811 is_5750 = 1;
12812 }
12813
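	/* Probe each register twice: write all-zeros and then all-ones
	 * through write_mask; after each write the read_mask bits must
	 * keep their original value and the writable bits must echo the
	 * written pattern exactly.
	 */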
12814 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12815 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12816 continue;
12817
12818 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12819 continue;
12820
12821 if (tg3_flag(tp, IS_5788) &&
12822 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12823 continue;
12824
12825 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12826 continue;
12827
12828 offset = (u32) reg_tbl[i].offset;
12829 read_mask = reg_tbl[i].read_mask;
12830 write_mask = reg_tbl[i].write_mask;
12831
12832 /* Save the original register content */
12833 save_val = tr32(offset);
12834
12835 /* Determine the read-only value. */
12836 read_val = save_val & read_mask;
12837
12838 /* Write zero to the register, then make sure the read-only bits
12839 * are not changed and the read/write bits are all zeros.
12840 */
12841 tw32(offset, 0);
12842
12843 val = tr32(offset);
12844
12845 /* Test the read-only and read/write bits. */
12846 if (((val & read_mask) != read_val) || (val & write_mask))
12847 goto out;
12848
12849 /* Write ones to all the bits defined by RdMask and WrMask, then
12850 * make sure the read-only bits are not changed and the
12851 * read/write bits are all ones.
12852 */
12853 tw32(offset, read_mask | write_mask);
12854
12855 val = tr32(offset);
12856
12857 /* Test the read-only bits. */
12858 if ((val & read_mask) != read_val)
12859 goto out;
12860
12861 /* Test the read/write bits. */
12862 if ((val & write_mask) != write_mask)
12863 goto out;
12864
12865 tw32(offset, save_val);
12866 }
12867
12868 return 0;
12869
12870 out:
12871 if (netif_msg_hw(tp))
12872 netdev_err(tp->dev,
12873 "Register test failed at offset %x\n", offset);
12874 tw32(offset, save_val);
12875 return -EIO;
12876 }
12877
12878 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12879 {
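	/* Walk all-zeros, all-ones and a mixed 0xaa55a55a pattern
	 * through each word to catch stuck-at and cross-coupled bits.
	 */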
12880 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12881 int i;
12882 u32 j;
12883
12884 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12885 for (j = 0; j < len; j += 4) {
12886 u32 val;
12887
12888 tg3_write_mem(tp, offset + j, test_pattern[i]);
12889 tg3_read_mem(tp, offset + j, &val);
12890 if (val != test_pattern[i])
12891 return -EIO;
12892 }
12893 }
12894 return 0;
12895 }
12896
12897 static int tg3_test_memory(struct tg3 *tp)
12898 {
12899 static struct mem_entry {
12900 u32 offset;
12901 u32 len;
12902 } mem_tbl_570x[] = {
12903 { 0x00000000, 0x00b50},
12904 { 0x00002000, 0x1c000},
12905 { 0xffffffff, 0x00000}
12906 }, mem_tbl_5705[] = {
12907 { 0x00000100, 0x0000c},
12908 { 0x00000200, 0x00008},
12909 { 0x00004000, 0x00800},
12910 { 0x00006000, 0x01000},
12911 { 0x00008000, 0x02000},
12912 { 0x00010000, 0x0e000},
12913 { 0xffffffff, 0x00000}
12914 }, mem_tbl_5755[] = {
12915 { 0x00000200, 0x00008},
12916 { 0x00004000, 0x00800},
12917 { 0x00006000, 0x00800},
12918 { 0x00008000, 0x02000},
12919 { 0x00010000, 0x0c000},
12920 { 0xffffffff, 0x00000}
12921 }, mem_tbl_5906[] = {
12922 { 0x00000200, 0x00008},
12923 { 0x00004000, 0x00400},
12924 { 0x00006000, 0x00400},
12925 { 0x00008000, 0x01000},
12926 { 0x00010000, 0x01000},
12927 { 0xffffffff, 0x00000}
12928 }, mem_tbl_5717[] = {
12929 { 0x00000200, 0x00008},
12930 { 0x00010000, 0x0a000},
12931 { 0x00020000, 0x13c00},
12932 { 0xffffffff, 0x00000}
12933 }, mem_tbl_57765[] = {
12934 { 0x00000200, 0x00008},
12935 { 0x00004000, 0x00800},
12936 { 0x00006000, 0x09800},
12937 { 0x00010000, 0x0a000},
12938 { 0xffffffff, 0x00000}
12939 };
12940 struct mem_entry *mem_tbl;
12941 int err = 0;
12942 int i;
12943
12944 if (tg3_flag(tp, 5717_PLUS))
12945 mem_tbl = mem_tbl_5717;
12946 else if (tg3_flag(tp, 57765_CLASS) ||
12947 tg3_asic_rev(tp) == ASIC_REV_5762)
12948 mem_tbl = mem_tbl_57765;
12949 else if (tg3_flag(tp, 5755_PLUS))
12950 mem_tbl = mem_tbl_5755;
12951 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12952 mem_tbl = mem_tbl_5906;
12953 else if (tg3_flag(tp, 5705_PLUS))
12954 mem_tbl = mem_tbl_5705;
12955 else
12956 mem_tbl = mem_tbl_570x;
12957
12958 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12959 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12960 if (err)
12961 break;
12962 }
12963
12964 return err;
12965 }
12966
12967 #define TG3_TSO_MSS 500
12968
12969 #define TG3_TSO_IP_HDR_LEN 20
12970 #define TG3_TSO_TCP_HDR_LEN 20
12971 #define TG3_TSO_TCP_OPT_LEN 12
12972
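/* Canned header for the TSO loopback frame, starting at the Ethertype
 * (0x0800): a 20-byte IPv4 header followed by a TCP header with a
 * 12-byte timestamp option (data offset 8 = 32 bytes).
 */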
12973 static const u8 tg3_tso_header[] = {
12974 0x08, 0x00,
12975 0x45, 0x00, 0x00, 0x00,
12976 0x00, 0x00, 0x40, 0x00,
12977 0x40, 0x06, 0x00, 0x00,
12978 0x0a, 0x00, 0x00, 0x01,
12979 0x0a, 0x00, 0x00, 0x02,
12980 0x0d, 0x00, 0xe0, 0x00,
12981 0x00, 0x00, 0x01, 0x00,
12982 0x00, 0x00, 0x02, 0x00,
12983 0x80, 0x10, 0x10, 0x00,
12984 0x14, 0x09, 0x00, 0x00,
12985 0x01, 0x01, 0x08, 0x0a,
12986 0x11, 0x11, 0x11, 0x11,
12987 0x11, 0x11, 0x11, 0x11,
12988 };
12989
12990 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12991 {
12992 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12993 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12994 u32 budget;
12995 struct sk_buff *skb;
12996 u8 *tx_data, *rx_data;
12997 dma_addr_t map;
12998 int num_pkts, tx_len, rx_len, i, err;
12999 struct tg3_rx_buffer_desc *desc;
13000 struct tg3_napi *tnapi, *rnapi;
13001 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13002
13003 tnapi = &tp->napi[0];
13004 rnapi = &tp->napi[0];
13005 if (tp->irq_cnt > 1) {
13006 if (tg3_flag(tp, ENABLE_RSS))
13007 rnapi = &tp->napi[1];
13008 if (tg3_flag(tp, ENABLE_TSS))
13009 tnapi = &tp->napi[1];
13010 }
13011 coal_now = tnapi->coal_now | rnapi->coal_now;
13012
13013 err = -EIO;
13014
13015 tx_len = pktsz;
13016 skb = netdev_alloc_skb(tp->dev, tx_len);
13017 if (!skb)
13018 return -ENOMEM;
13019
13020 tx_data = skb_put(skb, tx_len);
13021 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13022 	memset(tx_data + ETH_ALEN, 0x0, 8);
13023
13024 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13025
13026 if (tso_loopback) {
13027 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13028
13029 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13030 TG3_TSO_TCP_OPT_LEN;
13031
13032 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13033 sizeof(tg3_tso_header));
13034 mss = TG3_TSO_MSS;
13035
13036 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13037 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13038
13039 /* Set the total length field in the IP header */
13040 iph->tot_len = htons((u16)(mss + hdr_len));
13041
13042 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13043 TXD_FLAG_CPU_POST_DMA);
13044
13045 if (tg3_flag(tp, HW_TSO_1) ||
13046 tg3_flag(tp, HW_TSO_2) ||
13047 tg3_flag(tp, HW_TSO_3)) {
13048 struct tcphdr *th;
13049 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13050 th = (struct tcphdr *)&tx_data[val];
13051 th->check = 0;
13052 } else
13053 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13054
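		/* Encode the 52-byte header length into the descriptor
		 * the same way the transmit path does for each hardware
		 * TSO generation.
		 */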
13055 if (tg3_flag(tp, HW_TSO_3)) {
13056 mss |= (hdr_len & 0xc) << 12;
13057 if (hdr_len & 0x10)
13058 base_flags |= 0x00000010;
13059 base_flags |= (hdr_len & 0x3e0) << 5;
13060 } else if (tg3_flag(tp, HW_TSO_2))
13061 mss |= hdr_len << 9;
13062 else if (tg3_flag(tp, HW_TSO_1) ||
13063 tg3_asic_rev(tp) == ASIC_REV_5705) {
13064 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13065 } else {
13066 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13067 }
13068
13069 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13070 } else {
13071 num_pkts = 1;
13072 data_off = ETH_HLEN;
13073
13074 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13075 tx_len > VLAN_ETH_FRAME_LEN)
13076 base_flags |= TXD_FLAG_JMB_PKT;
13077 }
13078
13079 for (i = data_off; i < tx_len; i++)
13080 tx_data[i] = (u8) (i & 0xff);
13081
13082 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13083 if (pci_dma_mapping_error(tp->pdev, map)) {
13084 dev_kfree_skb(skb);
13085 return -EIO;
13086 }
13087
13088 val = tnapi->tx_prod;
13089 tnapi->tx_buffers[val].skb = skb;
13090 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13091
13092 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13093 rnapi->coal_now);
13094
13095 udelay(10);
13096
13097 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13098
13099 budget = tg3_tx_avail(tnapi);
13100 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13101 base_flags | TXD_FLAG_END, mss, 0)) {
13102 tnapi->tx_buffers[val].skb = NULL;
13103 dev_kfree_skb(skb);
13104 return -EIO;
13105 }
13106
13107 tnapi->tx_prod++;
13108
13109 /* Sync BD data before updating mailbox */
13110 wmb();
13111
13112 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13113 tr32_mailbox(tnapi->prodmbox);
13114
13115 udelay(10);
13116
13117 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13118 for (i = 0; i < 35; i++) {
13119 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13120 coal_now);
13121
13122 udelay(10);
13123
13124 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13125 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13126 if ((tx_idx == tnapi->tx_prod) &&
13127 (rx_idx == (rx_start_idx + num_pkts)))
13128 break;
13129 }
13130
13131 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13132 dev_kfree_skb(skb);
13133
13134 if (tx_idx != tnapi->tx_prod)
13135 goto out;
13136
13137 if (rx_idx != rx_start_idx + num_pkts)
13138 goto out;
13139
13140 val = data_off;
13141 while (rx_idx != rx_start_idx) {
13142 desc = &rnapi->rx_rcb[rx_start_idx++];
13143 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13144 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13145
13146 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13147 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13148 goto out;
13149
13150 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13151 - ETH_FCS_LEN;
13152
13153 if (!tso_loopback) {
13154 if (rx_len != tx_len)
13155 goto out;
13156
13157 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13158 if (opaque_key != RXD_OPAQUE_RING_STD)
13159 goto out;
13160 } else {
13161 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13162 goto out;
13163 }
13164 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13165 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13166 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13167 goto out;
13168 }
13169
13170 if (opaque_key == RXD_OPAQUE_RING_STD) {
13171 rx_data = tpr->rx_std_buffers[desc_idx].data;
13172 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13173 mapping);
13174 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13175 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13176 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13177 mapping);
13178 } else
13179 goto out;
13180
13181 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13182 PCI_DMA_FROMDEVICE);
13183
13184 rx_data += TG3_RX_OFFSET(tp);
13185 for (i = data_off; i < rx_len; i++, val++) {
13186 if (*(rx_data + i) != (u8) (val & 0xff))
13187 goto out;
13188 }
13189 }
13190
13191 err = 0;
13192
13193 /* tg3_free_rings will unmap and free the rx_data */
13194 out:
13195 return err;
13196 }
13197
13198 #define TG3_STD_LOOPBACK_FAILED 1
13199 #define TG3_JMB_LOOPBACK_FAILED 2
13200 #define TG3_TSO_LOOPBACK_FAILED 4
13201 #define TG3_LOOPBACK_FAILED \
13202 (TG3_STD_LOOPBACK_FAILED | \
13203 TG3_JMB_LOOPBACK_FAILED | \
13204 TG3_TSO_LOOPBACK_FAILED)
13205
13206 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13207 {
13208 int err = -EIO;
13209 u32 eee_cap;
13210 u32 jmb_pkt_sz = 9000;
13211
13212 if (tp->dma_limit)
13213 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13214
13215 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13216 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13217
13218 if (!netif_running(tp->dev)) {
13219 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13220 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13221 if (do_extlpbk)
13222 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13223 goto done;
13224 }
13225
13226 err = tg3_reset_hw(tp, true);
13227 if (err) {
13228 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13229 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13230 if (do_extlpbk)
13231 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13232 goto done;
13233 }
13234
13235 if (tg3_flag(tp, ENABLE_RSS)) {
13236 int i;
13237
13238 /* Reroute all rx packets to the 1st queue */
13239 for (i = MAC_RSS_INDIR_TBL_0;
13240 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13241 tw32(i, 0x0);
13242 }
13243
13244 	/* HW erratum - MAC loopback fails in some cases on 5780.
13245 	 * Normal traffic and PHY loopback are not affected by the
13246 	 * erratum. Also, the MAC loopback test is deprecated for
13247 * all newer ASIC revisions.
13248 */
13249 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13250 !tg3_flag(tp, CPMU_PRESENT)) {
13251 tg3_mac_loopback(tp, true);
13252
13253 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13254 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13255
13256 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13257 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13258 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13259
13260 tg3_mac_loopback(tp, false);
13261 }
13262
13263 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13264 !tg3_flag(tp, USE_PHYLIB)) {
13265 int i;
13266
13267 tg3_phy_lpbk_set(tp, 0, false);
13268
13269 /* Wait for link */
13270 for (i = 0; i < 100; i++) {
13271 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13272 break;
13273 mdelay(1);
13274 }
13275
13276 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13277 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13278 if (tg3_flag(tp, TSO_CAPABLE) &&
13279 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13280 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13281 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13282 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13283 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13284
13285 if (do_extlpbk) {
13286 tg3_phy_lpbk_set(tp, 0, true);
13287
13288 /* All link indications report up, but the hardware
13289 * isn't really ready for about 20 msec. Double it
13290 * to be sure.
13291 */
13292 mdelay(40);
13293
13294 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13295 data[TG3_EXT_LOOPB_TEST] |=
13296 TG3_STD_LOOPBACK_FAILED;
13297 if (tg3_flag(tp, TSO_CAPABLE) &&
13298 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13299 data[TG3_EXT_LOOPB_TEST] |=
13300 TG3_TSO_LOOPBACK_FAILED;
13301 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13302 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13303 data[TG3_EXT_LOOPB_TEST] |=
13304 TG3_JMB_LOOPBACK_FAILED;
13305 }
13306
13307 /* Re-enable gphy autopowerdown. */
13308 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13309 tg3_phy_toggle_apd(tp, true);
13310 }
13311
13312 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13313 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13314
13315 done:
13316 tp->phy_flags |= eee_cap;
13317
13318 return err;
13319 }
13320
13321 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13322 u64 *data)
13323 {
13324 struct tg3 *tp = netdev_priv(dev);
13325 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13326
13327 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13328 tg3_power_up(tp)) {
13329 etest->flags |= ETH_TEST_FL_FAILED;
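		/* Any nonzero byte pattern marks every test as failed. */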
13330 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13331 return;
13332 }
13333
13334 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13335
13336 if (tg3_test_nvram(tp) != 0) {
13337 etest->flags |= ETH_TEST_FL_FAILED;
13338 data[TG3_NVRAM_TEST] = 1;
13339 }
13340 if (!doextlpbk && tg3_test_link(tp)) {
13341 etest->flags |= ETH_TEST_FL_FAILED;
13342 data[TG3_LINK_TEST] = 1;
13343 }
13344 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13345 int err, err2 = 0, irq_sync = 0;
13346
13347 if (netif_running(dev)) {
13348 tg3_phy_stop(tp);
13349 tg3_netif_stop(tp);
13350 irq_sync = 1;
13351 }
13352
13353 tg3_full_lock(tp, irq_sync);
13354 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13355 err = tg3_nvram_lock(tp);
13356 tg3_halt_cpu(tp, RX_CPU_BASE);
13357 if (!tg3_flag(tp, 5705_PLUS))
13358 tg3_halt_cpu(tp, TX_CPU_BASE);
13359 if (!err)
13360 tg3_nvram_unlock(tp);
13361
13362 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13363 tg3_phy_reset(tp);
13364
13365 if (tg3_test_registers(tp) != 0) {
13366 etest->flags |= ETH_TEST_FL_FAILED;
13367 data[TG3_REGISTER_TEST] = 1;
13368 }
13369
13370 if (tg3_test_memory(tp) != 0) {
13371 etest->flags |= ETH_TEST_FL_FAILED;
13372 data[TG3_MEMORY_TEST] = 1;
13373 }
13374
13375 if (doextlpbk)
13376 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13377
13378 if (tg3_test_loopback(tp, data, doextlpbk))
13379 etest->flags |= ETH_TEST_FL_FAILED;
13380
13381 tg3_full_unlock(tp);
13382
13383 if (tg3_test_interrupt(tp) != 0) {
13384 etest->flags |= ETH_TEST_FL_FAILED;
13385 data[TG3_INTERRUPT_TEST] = 1;
13386 }
13387
13388 tg3_full_lock(tp, 0);
13389
13390 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13391 if (netif_running(dev)) {
13392 tg3_flag_set(tp, INIT_COMPLETE);
13393 err2 = tg3_restart_hw(tp, true);
13394 if (!err2)
13395 tg3_netif_start(tp);
13396 }
13397
13398 tg3_full_unlock(tp);
13399
13400 if (irq_sync && !err2)
13401 tg3_phy_start(tp);
13402 }
13403 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13404 tg3_power_down(tp);
13405
13406 }
13407
13408 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13409 struct ifreq *ifr, int cmd)
13410 {
13411 struct tg3 *tp = netdev_priv(dev);
13412 struct hwtstamp_config stmpconf;
13413
13414 if (!tg3_flag(tp, PTP_CAPABLE))
13415 return -EINVAL;
13416
13417 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13418 return -EFAULT;
13419
13420 if (stmpconf.flags)
13421 return -EINVAL;
13422
13423 switch (stmpconf.tx_type) {
13424 case HWTSTAMP_TX_ON:
13425 tg3_flag_set(tp, TX_TSTAMP_EN);
13426 break;
13427 case HWTSTAMP_TX_OFF:
13428 tg3_flag_clear(tp, TX_TSTAMP_EN);
13429 break;
13430 default:
13431 return -ERANGE;
13432 }
13433
13434 switch (stmpconf.rx_filter) {
13435 case HWTSTAMP_FILTER_NONE:
13436 tp->rxptpctl = 0;
13437 break;
13438 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13439 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13440 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13441 break;
13442 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13443 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13444 TG3_RX_PTP_CTL_SYNC_EVNT;
13445 break;
13446 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13447 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13448 TG3_RX_PTP_CTL_DELAY_REQ;
13449 break;
13450 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13451 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13452 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13453 break;
13454 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13455 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13456 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13457 break;
13458 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13459 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13460 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13461 break;
13462 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13463 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13464 TG3_RX_PTP_CTL_SYNC_EVNT;
13465 break;
13466 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13467 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13468 TG3_RX_PTP_CTL_SYNC_EVNT;
13469 break;
13470 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13471 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13472 TG3_RX_PTP_CTL_SYNC_EVNT;
13473 break;
13474 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13475 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13476 TG3_RX_PTP_CTL_DELAY_REQ;
13477 break;
13478 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13479 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13480 TG3_RX_PTP_CTL_DELAY_REQ;
13481 break;
13482 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13483 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13484 TG3_RX_PTP_CTL_DELAY_REQ;
13485 break;
13486 default:
13487 return -ERANGE;
13488 }
13489
13490 if (netif_running(dev) && tp->rxptpctl)
13491 tw32(TG3_RX_PTP_CTL,
13492 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13493
13494 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13495 -EFAULT : 0;
13496 }
13497
13498 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13499 {
13500 struct mii_ioctl_data *data = if_mii(ifr);
13501 struct tg3 *tp = netdev_priv(dev);
13502 int err;
13503
13504 if (tg3_flag(tp, USE_PHYLIB)) {
13505 struct phy_device *phydev;
13506 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13507 return -EAGAIN;
13508 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13509 return phy_mii_ioctl(phydev, ifr, cmd);
13510 }
13511
13512 switch (cmd) {
13513 case SIOCGMIIPHY:
13514 data->phy_id = tp->phy_addr;
13515
13516 /* fallthru */
13517 case SIOCGMIIREG: {
13518 u32 mii_regval;
13519
13520 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13521 break; /* We have no PHY */
13522
13523 if (!netif_running(dev))
13524 return -EAGAIN;
13525
13526 spin_lock_bh(&tp->lock);
13527 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13528 data->reg_num & 0x1f, &mii_regval);
13529 spin_unlock_bh(&tp->lock);
13530
13531 data->val_out = mii_regval;
13532
13533 return err;
13534 }
13535
13536 case SIOCSMIIREG:
13537 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13538 break; /* We have no PHY */
13539
13540 if (!netif_running(dev))
13541 return -EAGAIN;
13542
13543 spin_lock_bh(&tp->lock);
13544 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13545 data->reg_num & 0x1f, data->val_in);
13546 spin_unlock_bh(&tp->lock);
13547
13548 return err;
13549
13550 case SIOCSHWTSTAMP:
13551 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13552
13553 default:
13554 /* do nothing */
13555 break;
13556 }
13557 return -EOPNOTSUPP;
13558 }
13559
13560 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13561 {
13562 struct tg3 *tp = netdev_priv(dev);
13563
13564 memcpy(ec, &tp->coal, sizeof(*ec));
13565 return 0;
13566 }
13567
13568 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13569 {
13570 struct tg3 *tp = netdev_priv(dev);
13571 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13572 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13573
13574 if (!tg3_flag(tp, 5705_PLUS)) {
13575 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13576 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13577 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13578 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13579 }
13580
13581 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13582 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13583 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13584 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13585 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13586 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13587 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13588 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13589 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13590 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13591 return -EINVAL;
13592
13593 /* No rx interrupts will be generated if both are zero */
13594 if ((ec->rx_coalesce_usecs == 0) &&
13595 (ec->rx_max_coalesced_frames == 0))
13596 return -EINVAL;
13597
13598 /* No tx interrupts will be generated if both are zero */
13599 if ((ec->tx_coalesce_usecs == 0) &&
13600 (ec->tx_max_coalesced_frames == 0))
13601 return -EINVAL;
13602
13603 /* Only copy relevant parameters, ignore all others. */
13604 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13605 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13606 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13607 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13608 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13609 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13610 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13611 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13612 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13613
13614 if (netif_running(dev)) {
13615 tg3_full_lock(tp, 0);
13616 __tg3_set_coalesce(tp, &tp->coal);
13617 tg3_full_unlock(tp);
13618 }
13619 return 0;
13620 }
13621
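/* These ops back the standard ethtool commands, e.g. (hypothetical
 * interface name):
 *   ethtool -t eth0 offline      -> tg3_self_test
 *   ethtool -L eth0 rx 4 tx 4    -> tg3_set_channels
 *   ethtool -C eth0 rx-usecs 30  -> tg3_set_coalesce
 */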
13622 static const struct ethtool_ops tg3_ethtool_ops = {
13623 .get_settings = tg3_get_settings,
13624 .set_settings = tg3_set_settings,
13625 .get_drvinfo = tg3_get_drvinfo,
13626 .get_regs_len = tg3_get_regs_len,
13627 .get_regs = tg3_get_regs,
13628 .get_wol = tg3_get_wol,
13629 .set_wol = tg3_set_wol,
13630 .get_msglevel = tg3_get_msglevel,
13631 .set_msglevel = tg3_set_msglevel,
13632 .nway_reset = tg3_nway_reset,
13633 .get_link = ethtool_op_get_link,
13634 .get_eeprom_len = tg3_get_eeprom_len,
13635 .get_eeprom = tg3_get_eeprom,
13636 .set_eeprom = tg3_set_eeprom,
13637 .get_ringparam = tg3_get_ringparam,
13638 .set_ringparam = tg3_set_ringparam,
13639 .get_pauseparam = tg3_get_pauseparam,
13640 .set_pauseparam = tg3_set_pauseparam,
13641 .self_test = tg3_self_test,
13642 .get_strings = tg3_get_strings,
13643 .set_phys_id = tg3_set_phys_id,
13644 .get_ethtool_stats = tg3_get_ethtool_stats,
13645 .get_coalesce = tg3_get_coalesce,
13646 .set_coalesce = tg3_set_coalesce,
13647 .get_sset_count = tg3_get_sset_count,
13648 .get_rxnfc = tg3_get_rxnfc,
13649 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13650 .get_rxfh_indir = tg3_get_rxfh_indir,
13651 .set_rxfh_indir = tg3_set_rxfh_indir,
13652 .get_channels = tg3_get_channels,
13653 .set_channels = tg3_set_channels,
13654 .get_ts_info = tg3_get_ts_info,
13655 };
13656
13657 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13658 struct rtnl_link_stats64 *stats)
13659 {
13660 struct tg3 *tp = netdev_priv(dev);
13661
13662 spin_lock_bh(&tp->lock);
13663 if (!tp->hw_stats) {
13664 spin_unlock_bh(&tp->lock);
13665 return &tp->net_stats_prev;
13666 }
13667
13668 tg3_get_nstats(tp, stats);
13669 spin_unlock_bh(&tp->lock);
13670
13671 return stats;
13672 }
13673
13674 static void tg3_set_rx_mode(struct net_device *dev)
13675 {
13676 struct tg3 *tp = netdev_priv(dev);
13677
13678 if (!netif_running(dev))
13679 return;
13680
13681 tg3_full_lock(tp, 0);
13682 __tg3_set_rx_mode(dev);
13683 tg3_full_unlock(tp);
13684 }
13685
13686 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13687 int new_mtu)
13688 {
13689 dev->mtu = new_mtu;
13690
13691 if (new_mtu > ETH_DATA_LEN) {
13692 if (tg3_flag(tp, 5780_CLASS)) {
13693 netdev_update_features(dev);
13694 tg3_flag_clear(tp, TSO_CAPABLE);
13695 } else {
13696 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13697 }
13698 } else {
13699 if (tg3_flag(tp, 5780_CLASS)) {
13700 tg3_flag_set(tp, TSO_CAPABLE);
13701 netdev_update_features(dev);
13702 }
13703 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13704 }
13705 }
13706
13707 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13708 {
13709 struct tg3 *tp = netdev_priv(dev);
13710 int err;
13711 bool reset_phy = false;
13712
13713 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13714 return -EINVAL;
13715
13716 if (!netif_running(dev)) {
13717 /* We'll just catch it later when the
13718 		 * device is brought up.
13719 */
13720 tg3_set_mtu(dev, tp, new_mtu);
13721 return 0;
13722 }
13723
13724 tg3_phy_stop(tp);
13725
13726 tg3_netif_stop(tp);
13727
13728 tg3_full_lock(tp, 1);
13729
13730 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13731
13732 tg3_set_mtu(dev, tp, new_mtu);
13733
13734 	/* Reset PHY, otherwise the read DMA engine will be left in a mode
13735 	 * that splits all read requests into 256-byte chunks.
13736 */
13737 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13738 reset_phy = true;
13739
13740 err = tg3_restart_hw(tp, reset_phy);
13741
13742 if (!err)
13743 tg3_netif_start(tp);
13744
13745 tg3_full_unlock(tp);
13746
13747 if (!err)
13748 tg3_phy_start(tp);
13749
13750 return err;
13751 }
13752
13753 static const struct net_device_ops tg3_netdev_ops = {
13754 .ndo_open = tg3_open,
13755 .ndo_stop = tg3_close,
13756 .ndo_start_xmit = tg3_start_xmit,
13757 .ndo_get_stats64 = tg3_get_stats64,
13758 .ndo_validate_addr = eth_validate_addr,
13759 .ndo_set_rx_mode = tg3_set_rx_mode,
13760 .ndo_set_mac_address = tg3_set_mac_addr,
13761 .ndo_do_ioctl = tg3_ioctl,
13762 .ndo_tx_timeout = tg3_tx_timeout,
13763 .ndo_change_mtu = tg3_change_mtu,
13764 .ndo_fix_features = tg3_fix_features,
13765 .ndo_set_features = tg3_set_features,
13766 #ifdef CONFIG_NET_POLL_CONTROLLER
13767 .ndo_poll_controller = tg3_poll_controller,
13768 #endif
13769 };
13770
13771 static void tg3_get_eeprom_size(struct tg3 *tp)
13772 {
13773 u32 cursize, val, magic;
13774
13775 tp->nvram_size = EEPROM_CHIP_SIZE;
13776
13777 if (tg3_nvram_read(tp, 0, &magic) != 0)
13778 return;
13779
13780 if ((magic != TG3_EEPROM_MAGIC) &&
13781 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13782 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13783 return;
13784
13785 /*
13786 * Size the chip by reading offsets at increasing powers of two.
13787 * When we encounter our validation signature, we know the addressing
13788 * has wrapped around, and thus have our chip size.
13789 */
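	/* For example, on a 32 KB part the reads at 0x10 through 0x4000
	 * return ordinary data; the read at 0x8000 wraps back to offset 0
	 * and hits the magic value, so cursize (and thus nvram_size) ends
	 * up as 0x8000.
	 */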
13790 cursize = 0x10;
13791
13792 while (cursize < tp->nvram_size) {
13793 if (tg3_nvram_read(tp, cursize, &val) != 0)
13794 return;
13795
13796 if (val == magic)
13797 break;
13798
13799 cursize <<= 1;
13800 }
13801
13802 tp->nvram_size = cursize;
13803 }
13804
13805 static void tg3_get_nvram_size(struct tg3 *tp)
13806 {
13807 u32 val;
13808
13809 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13810 return;
13811
13812 /* Selfboot format */
13813 if (val != TG3_EEPROM_MAGIC) {
13814 tg3_get_eeprom_size(tp);
13815 return;
13816 }
13817
13818 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13819 if (val != 0) {
13820 /* This is confusing. We want to operate on the
13821 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13822 * call will read from NVRAM and byteswap the data
13823 * according to the byteswapping settings for all
13824 * other register accesses. This ensures the data we
13825 			 * want will always reside in the lower 16 bits.
13826 * However, the data in NVRAM is in LE format, which
13827 * means the data from the NVRAM read will always be
13828 * opposite the endianness of the CPU. The 16-bit
13829 * byteswap then brings the data to CPU endianness.
13830 */
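			/* For example, if the low 16 bits of the read
			 * come back as 0x8000, swab16() yields 0x0080
			 * (128) and nvram_size is set to 128 * 1024
			 * bytes.
			 */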
13831 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13832 return;
13833 }
13834 }
13835 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13836 }
13837
13838 static void tg3_get_nvram_info(struct tg3 *tp)
13839 {
13840 u32 nvcfg1;
13841
13842 nvcfg1 = tr32(NVRAM_CFG1);
13843 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13844 tg3_flag_set(tp, FLASH);
13845 } else {
13846 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13847 tw32(NVRAM_CFG1, nvcfg1);
13848 }
13849
13850 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13851 tg3_flag(tp, 5780_CLASS)) {
13852 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13853 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13854 tp->nvram_jedecnum = JEDEC_ATMEL;
13855 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13856 tg3_flag_set(tp, NVRAM_BUFFERED);
13857 break;
13858 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13859 tp->nvram_jedecnum = JEDEC_ATMEL;
13860 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13861 break;
13862 case FLASH_VENDOR_ATMEL_EEPROM:
13863 tp->nvram_jedecnum = JEDEC_ATMEL;
13864 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13865 tg3_flag_set(tp, NVRAM_BUFFERED);
13866 break;
13867 case FLASH_VENDOR_ST:
13868 tp->nvram_jedecnum = JEDEC_ST;
13869 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13870 tg3_flag_set(tp, NVRAM_BUFFERED);
13871 break;
13872 case FLASH_VENDOR_SAIFUN:
13873 tp->nvram_jedecnum = JEDEC_SAIFUN;
13874 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13875 break;
13876 case FLASH_VENDOR_SST_SMALL:
13877 case FLASH_VENDOR_SST_LARGE:
13878 tp->nvram_jedecnum = JEDEC_SST;
13879 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13880 break;
13881 }
13882 } else {
13883 tp->nvram_jedecnum = JEDEC_ATMEL;
13884 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13885 tg3_flag_set(tp, NVRAM_BUFFERED);
13886 }
13887 }
13888
13889 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13890 {
13891 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13892 case FLASH_5752PAGE_SIZE_256:
13893 tp->nvram_pagesize = 256;
13894 break;
13895 case FLASH_5752PAGE_SIZE_512:
13896 tp->nvram_pagesize = 512;
13897 break;
13898 case FLASH_5752PAGE_SIZE_1K:
13899 tp->nvram_pagesize = 1024;
13900 break;
13901 case FLASH_5752PAGE_SIZE_2K:
13902 tp->nvram_pagesize = 2048;
13903 break;
13904 case FLASH_5752PAGE_SIZE_4K:
13905 tp->nvram_pagesize = 4096;
13906 break;
13907 case FLASH_5752PAGE_SIZE_264:
13908 tp->nvram_pagesize = 264;
13909 break;
13910 case FLASH_5752PAGE_SIZE_528:
13911 tp->nvram_pagesize = 528;
13912 break;
13913 }
13914 }
13915
13916 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13917 {
13918 u32 nvcfg1;
13919
13920 nvcfg1 = tr32(NVRAM_CFG1);
13921
13922 /* NVRAM protection for TPM */
13923 if (nvcfg1 & (1 << 27))
13924 tg3_flag_set(tp, PROTECTED_NVRAM);
13925
13926 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13927 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13928 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13929 tp->nvram_jedecnum = JEDEC_ATMEL;
13930 tg3_flag_set(tp, NVRAM_BUFFERED);
13931 break;
13932 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13933 tp->nvram_jedecnum = JEDEC_ATMEL;
13934 tg3_flag_set(tp, NVRAM_BUFFERED);
13935 tg3_flag_set(tp, FLASH);
13936 break;
13937 case FLASH_5752VENDOR_ST_M45PE10:
13938 case FLASH_5752VENDOR_ST_M45PE20:
13939 case FLASH_5752VENDOR_ST_M45PE40:
13940 tp->nvram_jedecnum = JEDEC_ST;
13941 tg3_flag_set(tp, NVRAM_BUFFERED);
13942 tg3_flag_set(tp, FLASH);
13943 break;
13944 }
13945
13946 if (tg3_flag(tp, FLASH)) {
13947 tg3_nvram_get_pagesize(tp, nvcfg1);
13948 } else {
13949 /* For eeprom, set pagesize to maximum eeprom size */
13950 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13951
13952 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13953 tw32(NVRAM_CFG1, nvcfg1);
13954 }
13955 }
13956
13957 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13958 {
13959 u32 nvcfg1, protect = 0;
13960
13961 nvcfg1 = tr32(NVRAM_CFG1);
13962
13963 /* NVRAM protection for TPM */
13964 if (nvcfg1 & (1 << 27)) {
13965 tg3_flag_set(tp, PROTECTED_NVRAM);
13966 protect = 1;
13967 }
13968
13969 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13970 switch (nvcfg1) {
13971 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13972 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13973 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13974 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13975 tp->nvram_jedecnum = JEDEC_ATMEL;
13976 tg3_flag_set(tp, NVRAM_BUFFERED);
13977 tg3_flag_set(tp, FLASH);
13978 tp->nvram_pagesize = 264;
13979 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13980 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13981 tp->nvram_size = (protect ? 0x3e200 :
13982 TG3_NVRAM_SIZE_512KB);
13983 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13984 tp->nvram_size = (protect ? 0x1f200 :
13985 TG3_NVRAM_SIZE_256KB);
13986 else
13987 tp->nvram_size = (protect ? 0x1f200 :
13988 TG3_NVRAM_SIZE_128KB);
13989 break;
13990 case FLASH_5752VENDOR_ST_M45PE10:
13991 case FLASH_5752VENDOR_ST_M45PE20:
13992 case FLASH_5752VENDOR_ST_M45PE40:
13993 tp->nvram_jedecnum = JEDEC_ST;
13994 tg3_flag_set(tp, NVRAM_BUFFERED);
13995 tg3_flag_set(tp, FLASH);
13996 tp->nvram_pagesize = 256;
13997 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13998 tp->nvram_size = (protect ?
13999 TG3_NVRAM_SIZE_64KB :
14000 TG3_NVRAM_SIZE_128KB);
14001 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14002 tp->nvram_size = (protect ?
14003 TG3_NVRAM_SIZE_64KB :
14004 TG3_NVRAM_SIZE_256KB);
14005 else
14006 tp->nvram_size = (protect ?
14007 TG3_NVRAM_SIZE_128KB :
14008 TG3_NVRAM_SIZE_512KB);
14009 break;
14010 }
14011 }
14012
14013 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14014 {
14015 u32 nvcfg1;
14016
14017 nvcfg1 = tr32(NVRAM_CFG1);
14018
14019 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14020 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14021 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14022 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14023 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14024 tp->nvram_jedecnum = JEDEC_ATMEL;
14025 tg3_flag_set(tp, NVRAM_BUFFERED);
14026 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14027
14028 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14029 tw32(NVRAM_CFG1, nvcfg1);
14030 break;
14031 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14032 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14033 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14034 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14035 tp->nvram_jedecnum = JEDEC_ATMEL;
14036 tg3_flag_set(tp, NVRAM_BUFFERED);
14037 tg3_flag_set(tp, FLASH);
14038 tp->nvram_pagesize = 264;
14039 break;
14040 case FLASH_5752VENDOR_ST_M45PE10:
14041 case FLASH_5752VENDOR_ST_M45PE20:
14042 case FLASH_5752VENDOR_ST_M45PE40:
14043 tp->nvram_jedecnum = JEDEC_ST;
14044 tg3_flag_set(tp, NVRAM_BUFFERED);
14045 tg3_flag_set(tp, FLASH);
14046 tp->nvram_pagesize = 256;
14047 break;
14048 }
14049 }
14050
14051 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14052 {
14053 u32 nvcfg1, protect = 0;
14054
14055 nvcfg1 = tr32(NVRAM_CFG1);
14056
14057 /* NVRAM protection for TPM */
14058 if (nvcfg1 & (1 << 27)) {
14059 tg3_flag_set(tp, PROTECTED_NVRAM);
14060 protect = 1;
14061 }
14062
14063 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14064 switch (nvcfg1) {
14065 case FLASH_5761VENDOR_ATMEL_ADB021D:
14066 case FLASH_5761VENDOR_ATMEL_ADB041D:
14067 case FLASH_5761VENDOR_ATMEL_ADB081D:
14068 case FLASH_5761VENDOR_ATMEL_ADB161D:
14069 case FLASH_5761VENDOR_ATMEL_MDB021D:
14070 case FLASH_5761VENDOR_ATMEL_MDB041D:
14071 case FLASH_5761VENDOR_ATMEL_MDB081D:
14072 case FLASH_5761VENDOR_ATMEL_MDB161D:
14073 tp->nvram_jedecnum = JEDEC_ATMEL;
14074 tg3_flag_set(tp, NVRAM_BUFFERED);
14075 tg3_flag_set(tp, FLASH);
14076 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14077 tp->nvram_pagesize = 256;
14078 break;
14079 case FLASH_5761VENDOR_ST_A_M45PE20:
14080 case FLASH_5761VENDOR_ST_A_M45PE40:
14081 case FLASH_5761VENDOR_ST_A_M45PE80:
14082 case FLASH_5761VENDOR_ST_A_M45PE16:
14083 case FLASH_5761VENDOR_ST_M_M45PE20:
14084 case FLASH_5761VENDOR_ST_M_M45PE40:
14085 case FLASH_5761VENDOR_ST_M_M45PE80:
14086 case FLASH_5761VENDOR_ST_M_M45PE16:
14087 tp->nvram_jedecnum = JEDEC_ST;
14088 tg3_flag_set(tp, NVRAM_BUFFERED);
14089 tg3_flag_set(tp, FLASH);
14090 tp->nvram_pagesize = 256;
14091 break;
14092 }
14093
14094 if (protect) {
14095 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14096 } else {
14097 switch (nvcfg1) {
14098 case FLASH_5761VENDOR_ATMEL_ADB161D:
14099 case FLASH_5761VENDOR_ATMEL_MDB161D:
14100 case FLASH_5761VENDOR_ST_A_M45PE16:
14101 case FLASH_5761VENDOR_ST_M_M45PE16:
14102 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14103 break;
14104 case FLASH_5761VENDOR_ATMEL_ADB081D:
14105 case FLASH_5761VENDOR_ATMEL_MDB081D:
14106 case FLASH_5761VENDOR_ST_A_M45PE80:
14107 case FLASH_5761VENDOR_ST_M_M45PE80:
14108 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14109 break;
14110 case FLASH_5761VENDOR_ATMEL_ADB041D:
14111 case FLASH_5761VENDOR_ATMEL_MDB041D:
14112 case FLASH_5761VENDOR_ST_A_M45PE40:
14113 case FLASH_5761VENDOR_ST_M_M45PE40:
14114 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14115 break;
14116 case FLASH_5761VENDOR_ATMEL_ADB021D:
14117 case FLASH_5761VENDOR_ATMEL_MDB021D:
14118 case FLASH_5761VENDOR_ST_A_M45PE20:
14119 case FLASH_5761VENDOR_ST_M_M45PE20:
14120 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14121 break;
14122 }
14123 }
14124 }
14125
14126 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14127 {
14128 tp->nvram_jedecnum = JEDEC_ATMEL;
14129 tg3_flag_set(tp, NVRAM_BUFFERED);
14130 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14131 }
14132
14133 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14134 {
14135 u32 nvcfg1;
14136
14137 nvcfg1 = tr32(NVRAM_CFG1);
14138
14139 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14140 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14141 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14142 tp->nvram_jedecnum = JEDEC_ATMEL;
14143 tg3_flag_set(tp, NVRAM_BUFFERED);
14144 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14145
14146 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14147 tw32(NVRAM_CFG1, nvcfg1);
14148 return;
14149 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14150 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14151 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14152 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14153 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14154 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14155 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14156 tp->nvram_jedecnum = JEDEC_ATMEL;
14157 tg3_flag_set(tp, NVRAM_BUFFERED);
14158 tg3_flag_set(tp, FLASH);
14159
14160 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14161 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14162 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14163 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14164 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14165 break;
14166 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14167 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14168 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14169 break;
14170 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14171 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14172 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14173 break;
14174 }
14175 break;
14176 case FLASH_5752VENDOR_ST_M45PE10:
14177 case FLASH_5752VENDOR_ST_M45PE20:
14178 case FLASH_5752VENDOR_ST_M45PE40:
14179 tp->nvram_jedecnum = JEDEC_ST;
14180 tg3_flag_set(tp, NVRAM_BUFFERED);
14181 tg3_flag_set(tp, FLASH);
14182
14183 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14184 case FLASH_5752VENDOR_ST_M45PE10:
14185 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14186 break;
14187 case FLASH_5752VENDOR_ST_M45PE20:
14188 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14189 break;
14190 case FLASH_5752VENDOR_ST_M45PE40:
14191 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14192 break;
14193 }
14194 break;
14195 default:
14196 tg3_flag_set(tp, NO_NVRAM);
14197 return;
14198 }
14199
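	/* Only the 264- and 528-byte page sizes (Atmel AT45DB-style
	 * DataFlash) need linear offsets translated into page-number/
	 * byte-offset form; any other page size is addressed linearly.
	 */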
14200 tg3_nvram_get_pagesize(tp, nvcfg1);
14201 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14202 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14203 }
14204 
14206 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14207 {
14208 u32 nvcfg1;
14209
14210 nvcfg1 = tr32(NVRAM_CFG1);
14211
14212 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14213 case FLASH_5717VENDOR_ATMEL_EEPROM:
14214 case FLASH_5717VENDOR_MICRO_EEPROM:
14215 tp->nvram_jedecnum = JEDEC_ATMEL;
14216 tg3_flag_set(tp, NVRAM_BUFFERED);
14217 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14218
14219 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14220 tw32(NVRAM_CFG1, nvcfg1);
14221 return;
14222 case FLASH_5717VENDOR_ATMEL_MDB011D:
14223 case FLASH_5717VENDOR_ATMEL_ADB011B:
14224 case FLASH_5717VENDOR_ATMEL_ADB011D:
14225 case FLASH_5717VENDOR_ATMEL_MDB021D:
14226 case FLASH_5717VENDOR_ATMEL_ADB021B:
14227 case FLASH_5717VENDOR_ATMEL_ADB021D:
14228 case FLASH_5717VENDOR_ATMEL_45USPT:
14229 tp->nvram_jedecnum = JEDEC_ATMEL;
14230 tg3_flag_set(tp, NVRAM_BUFFERED);
14231 tg3_flag_set(tp, FLASH);
14232
14233 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14234 case FLASH_5717VENDOR_ATMEL_MDB021D:
14235 /* Detect size with tg3_nvram_get_size() */
14236 break;
14237 case FLASH_5717VENDOR_ATMEL_ADB021B:
14238 case FLASH_5717VENDOR_ATMEL_ADB021D:
14239 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14240 break;
14241 default:
14242 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14243 break;
14244 }
14245 break;
14246 case FLASH_5717VENDOR_ST_M_M25PE10:
14247 case FLASH_5717VENDOR_ST_A_M25PE10:
14248 case FLASH_5717VENDOR_ST_M_M45PE10:
14249 case FLASH_5717VENDOR_ST_A_M45PE10:
14250 case FLASH_5717VENDOR_ST_M_M25PE20:
14251 case FLASH_5717VENDOR_ST_A_M25PE20:
14252 case FLASH_5717VENDOR_ST_M_M45PE20:
14253 case FLASH_5717VENDOR_ST_A_M45PE20:
14254 case FLASH_5717VENDOR_ST_25USPT:
14255 case FLASH_5717VENDOR_ST_45USPT:
14256 tp->nvram_jedecnum = JEDEC_ST;
14257 tg3_flag_set(tp, NVRAM_BUFFERED);
14258 tg3_flag_set(tp, FLASH);
14259
14260 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14261 case FLASH_5717VENDOR_ST_M_M25PE20:
14262 case FLASH_5717VENDOR_ST_M_M45PE20:
14263 /* Detect size with tg3_nvram_get_size() */
14264 break;
14265 case FLASH_5717VENDOR_ST_A_M25PE20:
14266 case FLASH_5717VENDOR_ST_A_M45PE20:
14267 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14268 break;
14269 default:
14270 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14271 break;
14272 }
14273 break;
14274 default:
14275 tg3_flag_set(tp, NO_NVRAM);
14276 return;
14277 }
14278
14279 tg3_nvram_get_pagesize(tp, nvcfg1);
14280 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14281 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14282 }
14283
14284 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14285 {
14286 u32 nvcfg1, nvmpinstrp;
14287
14288 nvcfg1 = tr32(NVRAM_CFG1);
14289 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14290
14291 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14292 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14293 tg3_flag_set(tp, NO_NVRAM);
14294 return;
14295 }
14296
14297 switch (nvmpinstrp) {
14298 case FLASH_5762_EEPROM_HD:
14299 nvmpinstrp = FLASH_5720_EEPROM_HD;
14300 break;
14301 case FLASH_5762_EEPROM_LD:
14302 nvmpinstrp = FLASH_5720_EEPROM_LD;
14303 break;
14304 case FLASH_5720VENDOR_M_ST_M45PE20:
14305 /* This pinstrap supports multiple sizes, so force it
14306 * to read the actual size from location 0xf0.
14307 */
14308 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14309 break;
14310 }
14311 }
14312
14313 switch (nvmpinstrp) {
14314 case FLASH_5720_EEPROM_HD:
14315 case FLASH_5720_EEPROM_LD:
14316 tp->nvram_jedecnum = JEDEC_ATMEL;
14317 tg3_flag_set(tp, NVRAM_BUFFERED);
14318
14319 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14320 tw32(NVRAM_CFG1, nvcfg1);
14321 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14322 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14323 else
14324 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14325 return;
14326 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14327 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14328 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14329 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14330 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14331 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14332 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14333 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14334 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14335 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14336 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14337 case FLASH_5720VENDOR_ATMEL_45USPT:
14338 tp->nvram_jedecnum = JEDEC_ATMEL;
14339 tg3_flag_set(tp, NVRAM_BUFFERED);
14340 tg3_flag_set(tp, FLASH);
14341
14342 switch (nvmpinstrp) {
14343 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14344 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14345 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14346 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14347 break;
14348 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14349 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14350 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14351 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14352 break;
14353 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14354 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14355 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14356 break;
14357 default:
14358 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14359 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14360 break;
14361 }
14362 break;
14363 case FLASH_5720VENDOR_M_ST_M25PE10:
14364 case FLASH_5720VENDOR_M_ST_M45PE10:
14365 case FLASH_5720VENDOR_A_ST_M25PE10:
14366 case FLASH_5720VENDOR_A_ST_M45PE10:
14367 case FLASH_5720VENDOR_M_ST_M25PE20:
14368 case FLASH_5720VENDOR_M_ST_M45PE20:
14369 case FLASH_5720VENDOR_A_ST_M25PE20:
14370 case FLASH_5720VENDOR_A_ST_M45PE20:
14371 case FLASH_5720VENDOR_M_ST_M25PE40:
14372 case FLASH_5720VENDOR_M_ST_M45PE40:
14373 case FLASH_5720VENDOR_A_ST_M25PE40:
14374 case FLASH_5720VENDOR_A_ST_M45PE40:
14375 case FLASH_5720VENDOR_M_ST_M25PE80:
14376 case FLASH_5720VENDOR_M_ST_M45PE80:
14377 case FLASH_5720VENDOR_A_ST_M25PE80:
14378 case FLASH_5720VENDOR_A_ST_M45PE80:
14379 case FLASH_5720VENDOR_ST_25USPT:
14380 case FLASH_5720VENDOR_ST_45USPT:
14381 tp->nvram_jedecnum = JEDEC_ST;
14382 tg3_flag_set(tp, NVRAM_BUFFERED);
14383 tg3_flag_set(tp, FLASH);
14384
14385 switch (nvmpinstrp) {
14386 case FLASH_5720VENDOR_M_ST_M25PE20:
14387 case FLASH_5720VENDOR_M_ST_M45PE20:
14388 case FLASH_5720VENDOR_A_ST_M25PE20:
14389 case FLASH_5720VENDOR_A_ST_M45PE20:
14390 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14391 break;
14392 case FLASH_5720VENDOR_M_ST_M25PE40:
14393 case FLASH_5720VENDOR_M_ST_M45PE40:
14394 case FLASH_5720VENDOR_A_ST_M25PE40:
14395 case FLASH_5720VENDOR_A_ST_M45PE40:
14396 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14397 break;
14398 case FLASH_5720VENDOR_M_ST_M25PE80:
14399 case FLASH_5720VENDOR_M_ST_M45PE80:
14400 case FLASH_5720VENDOR_A_ST_M25PE80:
14401 case FLASH_5720VENDOR_A_ST_M45PE80:
14402 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14403 break;
14404 default:
14405 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14406 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14407 break;
14408 }
14409 break;
14410 default:
14411 tg3_flag_set(tp, NO_NVRAM);
14412 return;
14413 }
14414
14415 tg3_nvram_get_pagesize(tp, nvcfg1);
14416 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14417 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14418
14419 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14420 u32 val;
14421
14422 if (tg3_nvram_read(tp, 0, &val))
14423 return;
14424
14425 if (val != TG3_EEPROM_MAGIC &&
14426 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14427 tg3_flag_set(tp, NO_NVRAM);
14428 }
14429 }
14430
14431 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14432 static void tg3_nvram_init(struct tg3 *tp)
14433 {
14434 if (tg3_flag(tp, IS_SSB_CORE)) {
14435 		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14436 tg3_flag_clear(tp, NVRAM);
14437 tg3_flag_clear(tp, NVRAM_BUFFERED);
14438 tg3_flag_set(tp, NO_NVRAM);
14439 return;
14440 }
14441
14442 tw32_f(GRC_EEPROM_ADDR,
14443 (EEPROM_ADDR_FSM_RESET |
14444 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14445 EEPROM_ADDR_CLKPERD_SHIFT)));
14446
14447 msleep(1);
14448
14449 /* Enable seeprom accesses. */
14450 tw32_f(GRC_LOCAL_CTRL,
14451 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14452 udelay(100);
14453
14454 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14455 tg3_asic_rev(tp) != ASIC_REV_5701) {
14456 tg3_flag_set(tp, NVRAM);
14457
14458 if (tg3_nvram_lock(tp)) {
14459 netdev_warn(tp->dev,
14460 "Cannot get nvram lock, %s failed\n",
14461 __func__);
14462 return;
14463 }
14464 tg3_enable_nvram_access(tp);
14465
14466 tp->nvram_size = 0;
14467
14468 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14469 tg3_get_5752_nvram_info(tp);
14470 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14471 tg3_get_5755_nvram_info(tp);
14472 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14473 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14474 tg3_asic_rev(tp) == ASIC_REV_5785)
14475 tg3_get_5787_nvram_info(tp);
14476 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14477 tg3_get_5761_nvram_info(tp);
14478 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14479 tg3_get_5906_nvram_info(tp);
14480 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14481 tg3_flag(tp, 57765_CLASS))
14482 tg3_get_57780_nvram_info(tp);
14483 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14484 tg3_asic_rev(tp) == ASIC_REV_5719)
14485 tg3_get_5717_nvram_info(tp);
14486 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14487 tg3_asic_rev(tp) == ASIC_REV_5762)
14488 tg3_get_5720_nvram_info(tp);
14489 else
14490 tg3_get_nvram_info(tp);
14491
14492 if (tp->nvram_size == 0)
14493 tg3_get_nvram_size(tp);
14494
14495 tg3_disable_nvram_access(tp);
14496 tg3_nvram_unlock(tp);
14497
14498 } else {
14499 tg3_flag_clear(tp, NVRAM);
14500 tg3_flag_clear(tp, NVRAM_BUFFERED);
14501
14502 tg3_get_eeprom_size(tp);
14503 }
14504 }
14505
14506 struct subsys_tbl_ent {
14507 u16 subsys_vendor, subsys_devid;
14508 u32 phy_id;
14509 };
14510
14511 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14512 /* Broadcom boards. */
14513 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14514 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14515 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14516 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14517 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14518 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14519 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14520 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14521 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14522 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14523 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14524 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14525 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14526 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14527 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14528 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14529 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14530 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14531 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14532 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14533 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14534 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14535
14536 /* 3com boards. */
14537 { TG3PCI_SUBVENDOR_ID_3COM,
14538 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14539 { TG3PCI_SUBVENDOR_ID_3COM,
14540 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14541 { TG3PCI_SUBVENDOR_ID_3COM,
14542 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14543 { TG3PCI_SUBVENDOR_ID_3COM,
14544 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14545 { TG3PCI_SUBVENDOR_ID_3COM,
14546 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14547
14548 /* DELL boards. */
14549 { TG3PCI_SUBVENDOR_ID_DELL,
14550 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14551 { TG3PCI_SUBVENDOR_ID_DELL,
14552 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14553 { TG3PCI_SUBVENDOR_ID_DELL,
14554 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14555 { TG3PCI_SUBVENDOR_ID_DELL,
14556 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14557
14558 /* Compaq boards. */
14559 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14560 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14561 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14562 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14563 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14564 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14565 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14566 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14567 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14568 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14569
14570 /* IBM boards. */
14571 { TG3PCI_SUBVENDOR_ID_IBM,
14572 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14573 };
14574
14575 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14576 {
14577 int i;
14578
14579 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14580 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14581 tp->pdev->subsystem_vendor) &&
14582 (subsys_id_to_phy_id[i].subsys_devid ==
14583 tp->pdev->subsystem_device))
14584 return &subsys_id_to_phy_id[i];
14585 }
14586 return NULL;
14587 }
14588
14589 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14590 {
14591 u32 val;
14592
14593 tp->phy_id = TG3_PHY_ID_INVALID;
14594 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14595
14596 	/* Assume an onboard, WOL-capable device by default. */
14597 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14598 tg3_flag_set(tp, WOL_CAP);
14599
14600 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14601 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14602 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14603 tg3_flag_set(tp, IS_NIC);
14604 }
14605 val = tr32(VCPU_CFGSHDW);
14606 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14607 tg3_flag_set(tp, ASPM_WORKAROUND);
14608 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14609 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14610 tg3_flag_set(tp, WOL_ENABLE);
14611 device_set_wakeup_enable(&tp->pdev->dev, true);
14612 }
14613 goto done;
14614 }
14615
14616 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14617 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14618 u32 nic_cfg, led_cfg;
14619 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14620 int eeprom_phy_serdes = 0;
14621
14622 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14623 tp->nic_sram_data_cfg = nic_cfg;
14624
14625 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14626 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14627 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14628 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14629 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14630 (ver > 0) && (ver < 0x100))
14631 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14632
14633 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14634 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14635
14636 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14637 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14638 eeprom_phy_serdes = 1;
14639
14640 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14641 if (nic_phy_id != 0) {
14642 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14643 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14644
14645 eeprom_phy_id = (id1 >> 16) << 10;
14646 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14647 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14648 } else
14649 eeprom_phy_id = 0;
14650
14651 tp->phy_id = eeprom_phy_id;
14652 if (eeprom_phy_serdes) {
14653 if (!tg3_flag(tp, 5705_PLUS))
14654 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14655 else
14656 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14657 }
14658
14659 if (tg3_flag(tp, 5750_PLUS))
14660 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14661 SHASTA_EXT_LED_MODE_MASK);
14662 else
14663 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14664
14665 switch (led_cfg) {
14666 default:
14667 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14668 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14669 break;
14670
14671 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14672 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14673 break;
14674
14675 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14676 tp->led_ctrl = LED_CTRL_MODE_MAC;
14677
14678 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
14679 			 * read from some older 5700/5701 bootcode.
14680 			 */
14681 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14682 tg3_asic_rev(tp) == ASIC_REV_5701)
14683 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14684
14685 break;
14686
14687 case SHASTA_EXT_LED_SHARED:
14688 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14689 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14690 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14691 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14692 LED_CTRL_MODE_PHY_2);
14693 break;
14694
14695 case SHASTA_EXT_LED_MAC:
14696 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14697 break;
14698
14699 case SHASTA_EXT_LED_COMBO:
14700 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14701 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14702 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14703 LED_CTRL_MODE_PHY_2);
14704 break;
14705
14706 }
14707
14708 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14709 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14710 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14711 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14712
14713 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14714 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14715
14716 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14717 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14718 if ((tp->pdev->subsystem_vendor ==
14719 PCI_VENDOR_ID_ARIMA) &&
14720 (tp->pdev->subsystem_device == 0x205a ||
14721 tp->pdev->subsystem_device == 0x2063))
14722 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14723 } else {
14724 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14725 tg3_flag_set(tp, IS_NIC);
14726 }
14727
14728 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14729 tg3_flag_set(tp, ENABLE_ASF);
14730 if (tg3_flag(tp, 5750_PLUS))
14731 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14732 }
14733
14734 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14735 tg3_flag(tp, 5750_PLUS))
14736 tg3_flag_set(tp, ENABLE_APE);
14737
14738 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14739 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14740 tg3_flag_clear(tp, WOL_CAP);
14741
14742 if (tg3_flag(tp, WOL_CAP) &&
14743 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14744 tg3_flag_set(tp, WOL_ENABLE);
14745 device_set_wakeup_enable(&tp->pdev->dev, true);
14746 }
14747
14748 if (cfg2 & (1 << 17))
14749 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14750
14751 		/* SerDes signal pre-emphasis in register 0x590 is set by
14752 		 * the bootcode if bit 18 is set. */
14753 if (cfg2 & (1 << 18))
14754 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14755
14756 if ((tg3_flag(tp, 57765_PLUS) ||
14757 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14758 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14759 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14760 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14761
14762 if (tg3_flag(tp, PCI_EXPRESS)) {
14763 u32 cfg3;
14764
14765 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14766 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14767 !tg3_flag(tp, 57765_PLUS) &&
14768 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14769 tg3_flag_set(tp, ASPM_WORKAROUND);
14770 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14771 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14772 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14773 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14774 }
14775
14776 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14777 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14778 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14779 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14780 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14781 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14782 }
14783 done:
14784 if (tg3_flag(tp, WOL_CAP))
14785 device_set_wakeup_enable(&tp->pdev->dev,
14786 tg3_flag(tp, WOL_ENABLE));
14787 else
14788 device_set_wakeup_capable(&tp->pdev->dev, false);
14789 }
14790
14791 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14792 {
14793 int i, err;
14794 u32 val2, off = offset * 8;
14795
14796 err = tg3_nvram_lock(tp);
14797 if (err)
14798 return err;
14799
14800 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14801 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14802 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14803 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14804 udelay(10);
14805
14806 for (i = 0; i < 100; i++) {
14807 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14808 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14809 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14810 break;
14811 }
14812 udelay(10);
14813 }
14814
14815 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14816
14817 tg3_nvram_unlock(tp);
14818 if (val2 & APE_OTP_STATUS_CMD_DONE)
14819 return 0;
14820
14821 return -EBUSY;
14822 }
14823
14824 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14825 {
14826 int i;
14827 u32 val;
14828
14829 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14830 tw32(OTP_CTRL, cmd);
14831
14832 /* Wait for up to 1 ms for command to execute. */
14833 for (i = 0; i < 100; i++) {
14834 val = tr32(OTP_STATUS);
14835 if (val & OTP_STATUS_CMD_DONE)
14836 break;
14837 udelay(10);
14838 }
14839
14840 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14841 }
14842
14843 /* Read the gphy configuration from the OTP region of the chip. The gphy
14844 * configuration is a 32-bit value that straddles the alignment boundary.
14845 * We do two 32-bit reads and then shift and merge the results.
14846 */
14847 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14848 {
14849 u32 bhalf_otp, thalf_otp;
14850
14851 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14852
14853 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14854 return 0;
14855
14856 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14857
14858 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14859 return 0;
14860
14861 thalf_otp = tr32(OTP_READ_DATA);
14862
14863 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14864
14865 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14866 return 0;
14867
14868 bhalf_otp = tr32(OTP_READ_DATA);
14869
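	/* e.g. thalf_otp = 0xAAAABBBB and bhalf_otp = 0xCCCCDDDD merge to
	 * 0xBBBBCCCC: the low half of the first read supplies the top of
	 * the 32-bit gphy config, the high half of the second read the
	 * bottom.
	 */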
14870 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14871 }
14872
14873 static void tg3_phy_init_link_config(struct tg3 *tp)
14874 {
14875 u32 adv = ADVERTISED_Autoneg;
14876
14877 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14878 adv |= ADVERTISED_1000baseT_Half |
14879 ADVERTISED_1000baseT_Full;
14880
14881 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14882 adv |= ADVERTISED_100baseT_Half |
14883 ADVERTISED_100baseT_Full |
14884 ADVERTISED_10baseT_Half |
14885 ADVERTISED_10baseT_Full |
14886 ADVERTISED_TP;
14887 else
14888 adv |= ADVERTISED_FIBRE;
14889
14890 tp->link_config.advertising = adv;
14891 tp->link_config.speed = SPEED_UNKNOWN;
14892 tp->link_config.duplex = DUPLEX_UNKNOWN;
14893 tp->link_config.autoneg = AUTONEG_ENABLE;
14894 tp->link_config.active_speed = SPEED_UNKNOWN;
14895 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14896
14897 tp->old_link = -1;
14898 }
14899
14900 static int tg3_phy_probe(struct tg3 *tp)
14901 {
14902 u32 hw_phy_id_1, hw_phy_id_2;
14903 u32 hw_phy_id, hw_phy_id_masked;
14904 int err;
14905
14906 /* flow control autonegotiation is default behavior */
14907 tg3_flag_set(tp, PAUSE_AUTONEG);
14908 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14909
14910 if (tg3_flag(tp, ENABLE_APE)) {
14911 switch (tp->pci_fn) {
14912 case 0:
14913 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14914 break;
14915 case 1:
14916 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14917 break;
14918 case 2:
14919 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14920 break;
14921 case 3:
14922 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14923 break;
14924 }
14925 }
14926
14927 if (!tg3_flag(tp, ENABLE_ASF) &&
14928 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14929 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14930 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14931 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14932
14933 if (tg3_flag(tp, USE_PHYLIB))
14934 return tg3_phy_init(tp);
14935
14936 /* Reading the PHY ID register can conflict with ASF
14937 * firmware access to the PHY hardware.
14938 */
14939 err = 0;
14940 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14941 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14942 } else {
14943 		/* Now read the physical PHY_ID from the chip and verify
14944 		 * that it is sane. If it doesn't look good, we fall back
14945 		 * to the hard-coded table-based PHY_ID or, failing that,
14946 		 * to the value found in the eeprom area.
14947 		 */
14948 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14949 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14950
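		/* Repack the two 16-bit MII ID registers into tg3's
		 * 32-bit PHY ID format: PHYSID1 fills bits 25:10,
		 * PHYSID2 bits 15:10 land in bits 31:26, and PHYSID2
		 * bits 9:0 stay in bits 9:0.
		 */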
14951 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14952 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14953 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14954
14955 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14956 }
14957
14958 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14959 tp->phy_id = hw_phy_id;
14960 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14961 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14962 else
14963 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14964 } else {
14965 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14966 /* Do nothing, phy ID already set up in
14967 * tg3_get_eeprom_hw_cfg().
14968 */
14969 } else {
14970 struct subsys_tbl_ent *p;
14971
14972 /* No eeprom signature? Try the hardcoded
14973 * subsys device table.
14974 */
14975 p = tg3_lookup_by_subsys(tp);
14976 if (p) {
14977 tp->phy_id = p->phy_id;
14978 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14979 				/* So far we have seen the IDs 0xbc050cd0,
14980 				 * 0xbc050f80 and 0xbc050c30 on devices
14981 				 * connected to a BCM4785, and there are
14982 				 * probably more. For now, just assume
14983 				 * that the PHY is supported when it is
14984 				 * connected to an SSB core.
14985 				 */
14986 return -ENODEV;
14987 }
14988
14989 if (!tp->phy_id ||
14990 tp->phy_id == TG3_PHY_ID_BCM8002)
14991 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14992 }
14993 }
14994
14995 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14996 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14997 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14998 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14999 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15000 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15001 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15002 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15003 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15004 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15005
15006 tp->eee.supported = SUPPORTED_100baseT_Full |
15007 SUPPORTED_1000baseT_Full;
15008 tp->eee.advertised = ADVERTISED_100baseT_Full |
15009 ADVERTISED_1000baseT_Full;
15010 tp->eee.eee_enabled = 1;
15011 tp->eee.tx_lpi_enabled = 1;
15012 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15013 }
15014
15015 tg3_phy_init_link_config(tp);
15016
15017 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15018 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15019 !tg3_flag(tp, ENABLE_APE) &&
15020 !tg3_flag(tp, ENABLE_ASF)) {
15021 u32 bmsr, dummy;
15022
15023 tg3_readphy(tp, MII_BMSR, &bmsr);
15024 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15025 (bmsr & BMSR_LSTATUS))
15026 goto skip_phy_reset;
15027
15028 err = tg3_phy_reset(tp);
15029 if (err)
15030 return err;
15031
15032 tg3_phy_set_wirespeed(tp);
15033
15034 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15035 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15036 tp->link_config.flowctrl);
15037
15038 tg3_writephy(tp, MII_BMCR,
15039 BMCR_ANENABLE | BMCR_ANRESTART);
15040 }
15041 }
15042
15043 skip_phy_reset:
15044 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15045 err = tg3_init_5401phy_dsp(tp);
15046 if (err)
15047 return err;
15048
15049 err = tg3_init_5401phy_dsp(tp);
15050 }
15051
15052 return err;
15053 }
15054
15055 static void tg3_read_vpd(struct tg3 *tp)
15056 {
15057 u8 *vpd_data;
15058 unsigned int block_end, rosize, len;
15059 u32 vpdlen;
15060 int j, i = 0;
15061
15062 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15063 if (!vpd_data)
15064 goto out_no_vpd;
15065
15066 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15067 if (i < 0)
15068 goto out_not_found;
15069
15070 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15071 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15072 i += PCI_VPD_LRDT_TAG_SIZE;
15073
15074 if (block_end > vpdlen)
15075 goto out_not_found;
15076
15077 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15078 PCI_VPD_RO_KEYWORD_MFR_ID);
15079 if (j > 0) {
15080 len = pci_vpd_info_field_size(&vpd_data[j]);
15081
15082 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15083 if (j + len > block_end || len != 4 ||
15084 memcmp(&vpd_data[j], "1028", 4))
15085 goto partno;
15086
15087 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15088 PCI_VPD_RO_KEYWORD_VENDOR0);
15089 if (j < 0)
15090 goto partno;
15091
15092 len = pci_vpd_info_field_size(&vpd_data[j]);
15093
15094 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15095 if (j + len > block_end)
15096 goto partno;
15097
15098 if (len >= sizeof(tp->fw_ver))
15099 len = sizeof(tp->fw_ver) - 1;
15100 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15101 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15102 &vpd_data[j]);
15103 }
15104
15105 partno:
15106 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15107 PCI_VPD_RO_KEYWORD_PARTNO);
15108 if (i < 0)
15109 goto out_not_found;
15110
15111 len = pci_vpd_info_field_size(&vpd_data[i]);
15112
15113 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15114 if (len > TG3_BPN_SIZE ||
15115 (len + i) > vpdlen)
15116 goto out_not_found;
15117
15118 memcpy(tp->board_part_number, &vpd_data[i], len);
15119
15120 out_not_found:
15121 kfree(vpd_data);
15122 if (tp->board_part_number[0])
15123 return;
15124
15125 out_no_vpd:
15126 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15127 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15128 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15129 strcpy(tp->board_part_number, "BCM5717");
15130 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15131 strcpy(tp->board_part_number, "BCM5718");
15132 else
15133 goto nomatch;
15134 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15135 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15136 strcpy(tp->board_part_number, "BCM57780");
15137 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15138 strcpy(tp->board_part_number, "BCM57760");
15139 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15140 strcpy(tp->board_part_number, "BCM57790");
15141 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15142 strcpy(tp->board_part_number, "BCM57788");
15143 else
15144 goto nomatch;
15145 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15146 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15147 strcpy(tp->board_part_number, "BCM57761");
15148 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15149 strcpy(tp->board_part_number, "BCM57765");
15150 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15151 strcpy(tp->board_part_number, "BCM57781");
15152 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15153 strcpy(tp->board_part_number, "BCM57785");
15154 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15155 strcpy(tp->board_part_number, "BCM57791");
15156 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15157 strcpy(tp->board_part_number, "BCM57795");
15158 else
15159 goto nomatch;
15160 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15161 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15162 strcpy(tp->board_part_number, "BCM57762");
15163 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15164 strcpy(tp->board_part_number, "BCM57766");
15165 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15166 strcpy(tp->board_part_number, "BCM57782");
15167 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15168 strcpy(tp->board_part_number, "BCM57786");
15169 else
15170 goto nomatch;
15171 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15172 strcpy(tp->board_part_number, "BCM95906");
15173 } else {
15174 nomatch:
15175 strcpy(tp->board_part_number, "none");
15176 }
15177 }
15178
15179 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15180 {
15181 u32 val;
15182
15183 if (tg3_nvram_read(tp, offset, &val) ||
15184 (val & 0xfc000000) != 0x0c000000 ||
15185 tg3_nvram_read(tp, offset + 4, &val) ||
15186 val != 0)
15187 return 0;
15188
15189 return 1;
15190 }
15191
15192 static void tg3_read_bc_ver(struct tg3 *tp)
15193 {
15194 u32 val, offset, start, ver_offset;
15195 int i, dst_off;
15196 bool newver = false;
15197
15198 if (tg3_nvram_read(tp, 0xc, &offset) ||
15199 tg3_nvram_read(tp, 0x4, &start))
15200 return;
15201
15202 offset = tg3_nvram_logical_addr(tp, offset);
15203
15204 if (tg3_nvram_read(tp, offset, &val))
15205 return;
15206
15207 if ((val & 0xfc000000) == 0x0c000000) {
15208 if (tg3_nvram_read(tp, offset + 4, &val))
15209 return;
15210
15211 if (val == 0)
15212 newver = true;
15213 }
15214
15215 dst_off = strlen(tp->fw_ver);
15216
15217 if (newver) {
15218 if (TG3_VER_SIZE - dst_off < 16 ||
15219 tg3_nvram_read(tp, offset + 8, &ver_offset))
15220 return;
15221
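		/* ver_offset is an address within the loaded boot code
		 * image; subtracting the image load address (start)
		 * converts it back into an offset from the image's
		 * location in NVRAM.
		 */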
15222 offset = offset + ver_offset - start;
15223 for (i = 0; i < 16; i += 4) {
15224 __be32 v;
15225 if (tg3_nvram_read_be32(tp, offset + i, &v))
15226 return;
15227
15228 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15229 }
15230 } else {
15231 u32 major, minor;
15232
15233 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15234 return;
15235
15236 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15237 TG3_NVM_BCVER_MAJSFT;
15238 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15239 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15240 "v%d.%02d", major, minor);
15241 }
15242 }
15243
15244 static void tg3_read_hwsb_ver(struct tg3 *tp)
15245 {
15246 u32 val, major, minor;
15247
15248 /* Use native endian representation */
15249 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15250 return;
15251
15252 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15253 TG3_NVM_HWSB_CFG1_MAJSFT;
15254 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15255 TG3_NVM_HWSB_CFG1_MINSFT;
15256
15257 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15258 }
15259
15260 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15261 {
15262 u32 offset, major, minor, build;
15263
15264 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15265
15266 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15267 return;
15268
15269 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15270 case TG3_EEPROM_SB_REVISION_0:
15271 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15272 break;
15273 case TG3_EEPROM_SB_REVISION_2:
15274 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15275 break;
15276 case TG3_EEPROM_SB_REVISION_3:
15277 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15278 break;
15279 case TG3_EEPROM_SB_REVISION_4:
15280 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15281 break;
15282 case TG3_EEPROM_SB_REVISION_5:
15283 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15284 break;
15285 case TG3_EEPROM_SB_REVISION_6:
15286 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15287 break;
15288 default:
15289 return;
15290 }
15291
15292 if (tg3_nvram_read(tp, offset, &val))
15293 return;
15294
15295 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15296 TG3_EEPROM_SB_EDH_BLD_SHFT;
15297 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15298 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15299 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15300
15301 if (minor > 99 || build > 26)
15302 return;
15303
15304 offset = strlen(tp->fw_ver);
15305 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15306 " v%d.%02d", major, minor);
15307
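	/* A nonzero build number becomes a letter suffix: build 1 is
	 * 'a', build 2 is 'b', and so on up to build 26 ('z'), so e.g.
	 * "v1.02" with build 3 is reported as "v1.02c".
	 */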
15308 if (build > 0) {
15309 offset = strlen(tp->fw_ver);
15310 if (offset < TG3_VER_SIZE - 1)
15311 tp->fw_ver[offset] = 'a' + build - 1;
15312 }
15313 }
15314
15315 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15316 {
15317 u32 val, offset, start;
15318 int i, vlen;
15319
15320 for (offset = TG3_NVM_DIR_START;
15321 offset < TG3_NVM_DIR_END;
15322 offset += TG3_NVM_DIRENT_SIZE) {
15323 if (tg3_nvram_read(tp, offset, &val))
15324 return;
15325
15326 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15327 break;
15328 }
15329
15330 if (offset == TG3_NVM_DIR_END)
15331 return;
15332
15333 if (!tg3_flag(tp, 5705_PLUS))
15334 start = 0x08000000;
15335 else if (tg3_nvram_read(tp, offset - 4, &start))
15336 return;
15337
15338 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15339 !tg3_fw_img_is_valid(tp, offset) ||
15340 tg3_nvram_read(tp, offset + 8, &val))
15341 return;
15342
15343 offset += val - start;
15344
15345 vlen = strlen(tp->fw_ver);
15346
15347 tp->fw_ver[vlen++] = ',';
15348 tp->fw_ver[vlen++] = ' ';
15349
15350 for (i = 0; i < 4; i++) {
15351 __be32 v;
15352 if (tg3_nvram_read_be32(tp, offset, &v))
15353 return;
15354
15355 offset += sizeof(v);
15356
15357 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15358 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15359 break;
15360 }
15361
15362 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15363 vlen += sizeof(v);
15364 }
15365 }
15366
15367 static void tg3_probe_ncsi(struct tg3 *tp)
15368 {
15369 u32 apedata;
15370
15371 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15372 if (apedata != APE_SEG_SIG_MAGIC)
15373 return;
15374
15375 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15376 if (!(apedata & APE_FW_STATUS_READY))
15377 return;
15378
15379 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15380 tg3_flag_set(tp, APE_HAS_NCSI);
15381 }
15382
15383 static void tg3_read_dash_ver(struct tg3 *tp)
15384 {
15385 int vlen;
15386 u32 apedata;
15387 char *fwtype;
15388
15389 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15390
15391 if (tg3_flag(tp, APE_HAS_NCSI))
15392 fwtype = "NCSI";
15393 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15394 fwtype = "SMASH";
15395 else
15396 fwtype = "DASH";
15397
15398 vlen = strlen(tp->fw_ver);
15399
15400 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15401 fwtype,
15402 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15403 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15404 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15405 (apedata & APE_FW_VERSION_BLDMSK));
15406 }
15407
15408 static void tg3_read_otp_ver(struct tg3 *tp)
15409 {
15410 u32 val, val2;
15411
15412 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15413 return;
15414
15415 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15416 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15417 TG3_OTP_MAGIC0_VALID(val)) {
15418 u64 val64 = (u64) val << 32 | val2;
15419 u32 ver = 0;
15420 int i, vlen;
15421
15422 for (i = 0; i < 7; i++) {
15423 if ((val64 & 0xff) == 0)
15424 break;
15425 ver = val64 & 0xff;
15426 val64 >>= 8;
15427 }
15428 vlen = strlen(tp->fw_ver);
15429 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15430 }
15431 }
15432
15433 static void tg3_read_fw_ver(struct tg3 *tp)
15434 {
15435 u32 val;
15436 bool vpd_vers = false;
15437
15438 if (tp->fw_ver[0] != 0)
15439 vpd_vers = true;
15440
15441 if (tg3_flag(tp, NO_NVRAM)) {
15442 strcat(tp->fw_ver, "sb");
15443 tg3_read_otp_ver(tp);
15444 return;
15445 }
15446
15447 if (tg3_nvram_read(tp, 0, &val))
15448 return;
15449
15450 if (val == TG3_EEPROM_MAGIC)
15451 tg3_read_bc_ver(tp);
15452 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15453 tg3_read_sb_ver(tp, val);
15454 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15455 tg3_read_hwsb_ver(tp);
15456
15457 if (tg3_flag(tp, ENABLE_ASF)) {
15458 if (tg3_flag(tp, ENABLE_APE)) {
15459 tg3_probe_ncsi(tp);
15460 if (!vpd_vers)
15461 tg3_read_dash_ver(tp);
15462 } else if (!vpd_vers) {
15463 tg3_read_mgmtfw_ver(tp);
15464 }
15465 }
15466
15467 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15468 }
15469
15470 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15471 {
15472 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15473 return TG3_RX_RET_MAX_SIZE_5717;
15474 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15475 return TG3_RX_RET_MAX_SIZE_5700;
15476 else
15477 return TG3_RX_RET_MAX_SIZE_5705;
15478 }
15479
15480 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15481 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15482 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15483 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15484 { },
15485 };
15486
15487 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15488 {
15489 struct pci_dev *peer;
15490 unsigned int func, devnr = tp->pdev->devfn & ~7;
15491
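	/* devfn encodes slot and function number; masking off the low
	 * three bits keeps the slot so the loop can probe all eight
	 * functions of the same physical device.
	 */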
15492 for (func = 0; func < 8; func++) {
15493 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15494 if (peer && peer != tp->pdev)
15495 break;
15496 pci_dev_put(peer);
15497 }
15498 	/* 5704 can be configured in single-port mode; set peer to
15499 	 * tp->pdev in that case.
15500 	 */
15501 if (!peer) {
15502 peer = tp->pdev;
15503 return peer;
15504 }
15505
15506 /*
15507 * We don't need to keep the refcount elevated; there's no way
15508 	 * to remove one half of this device without removing the other.
15509 */
15510 pci_dev_put(peer);
15511
15512 return peer;
15513 }
15514
15515 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15516 {
15517 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15518 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15519 u32 reg;
15520
15521 /* All devices that use the alternate
15522 * ASIC REV location have a CPMU.
15523 */
15524 tg3_flag_set(tp, CPMU_PRESENT);
15525
15526 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15527 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15528 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15529 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15530 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15531 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15532 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15533 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15534 reg = TG3PCI_GEN2_PRODID_ASICREV;
15535 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15536 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15537 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15538 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15539 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15540 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15541 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15542 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15543 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15544 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15545 reg = TG3PCI_GEN15_PRODID_ASICREV;
15546 else
15547 reg = TG3PCI_PRODID_ASICREV;
15548
15549 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15550 }
15551
15552 /* Wrong chip ID in 5752 A0. This code can be removed later
15553 * as A0 is not in production.
15554 */
15555 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15556 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15557
15558 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15559 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15560
15561 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15562 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15563 tg3_asic_rev(tp) == ASIC_REV_5720)
15564 tg3_flag_set(tp, 5717_PLUS);
15565
15566 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15567 tg3_asic_rev(tp) == ASIC_REV_57766)
15568 tg3_flag_set(tp, 57765_CLASS);
15569
15570 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15571 tg3_asic_rev(tp) == ASIC_REV_5762)
15572 tg3_flag_set(tp, 57765_PLUS);
15573
15574 /* Intentionally exclude ASIC_REV_5906 */
15575 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15576 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15577 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15578 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15579 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15580 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15581 tg3_flag(tp, 57765_PLUS))
15582 tg3_flag_set(tp, 5755_PLUS);
15583
15584 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15585 tg3_asic_rev(tp) == ASIC_REV_5714)
15586 tg3_flag_set(tp, 5780_CLASS);
15587
15588 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15589 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15590 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15591 tg3_flag(tp, 5755_PLUS) ||
15592 tg3_flag(tp, 5780_CLASS))
15593 tg3_flag_set(tp, 5750_PLUS);
15594
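/* Note the inclusion chain built here: 57765_PLUS implies
 * 5755_PLUS, which implies 5750_PLUS, which in turn implies
 * 5705_PLUS just below.
 */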
15595 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15596 tg3_flag(tp, 5750_PLUS))
15597 tg3_flag_set(tp, 5705_PLUS);
15598 }
15599
15600 static bool tg3_10_100_only_device(struct tg3 *tp,
15601 const struct pci_device_id *ent)
15602 {
15603 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15604
15605 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15606 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15607 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15608 return true;
15609
15610 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15611 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15612 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15613 return true;
15614 } else {
15615 return true;
15616 }
15617 }
15618
15619 return false;
15620 }
15621
15622 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15623 {
15624 u32 misc_ctrl_reg;
15625 u32 pci_state_reg, grc_misc_cfg;
15626 u32 val;
15627 u16 pci_cmd;
15628 int err;
15629
15630 /* Force memory write invalidate off. If we leave it on,
15631 * then on 5700_BX chips we have to enable a workaround.
15632 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15633 * to match the cacheline size. The Broadcom driver has this
15634 * workaround but turns MWI off all the time, so it never uses
15635 * it. This seems to suggest that the workaround is insufficient.
15636 */
15637 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15638 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15639 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15640
15641 /* Important! -- Make sure register accesses are byteswapped
15642 * correctly. Also, for those chips that require it, make
15643 * sure that indirect register accesses are enabled before
15644 * the first operation.
15645 */
15646 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15647 &misc_ctrl_reg);
15648 tp->misc_host_ctrl |= (misc_ctrl_reg &
15649 MISC_HOST_CTRL_CHIPREV);
15650 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15651 tp->misc_host_ctrl);
15652
15653 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15654
15655 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15656 * we need to disable memory and use config. cycles
15657 * only to access all registers. The 5702/03 chips
15658 * can mistakenly decode the special cycles from the
15659 * ICH chipsets as memory write cycles, causing corruption
15660 * of register and memory space. Only certain ICH bridges
15661 * will drive special cycles with non-zero data during the
15662 * address phase which can fall within the 5703's address
15663 * range. This is not an ICH bug as the PCI spec allows
15664 * non-zero address during special cycles. However, only
15665 * these ICH bridges are known to drive non-zero addresses
15666 * during special cycles.
15667 *
15668 * Since special cycles do not cross PCI bridges, we only
15669 * enable this workaround if the 5703 is on the secondary
15670 * bus of these ICH bridges.
15671 */
15672 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15673 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15674 static struct tg3_dev_id {
15675 u32 vendor;
15676 u32 device;
15677 u32 rev;
15678 } ich_chipsets[] = {
15679 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15680 PCI_ANY_ID },
15681 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15682 PCI_ANY_ID },
15683 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15684 0xa },
15685 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15686 PCI_ANY_ID },
15687 { },
15688 };
15689 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15690 struct pci_dev *bridge = NULL;
15691
15692 while (pci_id->vendor != 0) {
15693 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15694 bridge);
15695 if (!bridge) {
15696 pci_id++;
15697 continue;
15698 }
15699 if (pci_id->rev != PCI_ANY_ID) {
15700 if (bridge->revision > pci_id->rev)
15701 continue;
15702 }
15703 if (bridge->subordinate &&
15704 (bridge->subordinate->number ==
15705 tp->pdev->bus->number)) {
15706 tg3_flag_set(tp, ICH_WORKAROUND);
15707 pci_dev_put(bridge);
15708 break;
15709 }
15710 }
15711 }
15712
15713 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15714 static struct tg3_dev_id {
15715 u32 vendor;
15716 u32 device;
15717 } bridge_chipsets[] = {
15718 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15719 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15720 { },
15721 };
15722 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15723 struct pci_dev *bridge = NULL;
15724
15725 while (pci_id->vendor != 0) {
15726 bridge = pci_get_device(pci_id->vendor,
15727 pci_id->device,
15728 bridge);
15729 if (!bridge) {
15730 pci_id++;
15731 continue;
15732 }
15733 if (bridge->subordinate &&
15734 (bridge->subordinate->number <=
15735 tp->pdev->bus->number) &&
15736 (bridge->subordinate->busn_res.end >=
15737 tp->pdev->bus->number)) {
15738 tg3_flag_set(tp, 5701_DMA_BUG);
15739 pci_dev_put(bridge);
15740 break;
15741 }
15742 }
15743 }
15744
15745 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15746 * DMA addresses > 40-bit. This bridge may have additional
15747 * 57xx devices behind it in some 4-port NIC designs, for example.
15748 * Any tg3 device found behind the bridge will also need the 40-bit
15749 * DMA workaround.
15750 */
15751 if (tg3_flag(tp, 5780_CLASS)) {
15752 tg3_flag_set(tp, 40BIT_DMA_BUG);
15753 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15754 } else {
15755 struct pci_dev *bridge = NULL;
15756
15757 do {
15758 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15759 PCI_DEVICE_ID_SERVERWORKS_EPB,
15760 bridge);
15761 if (bridge && bridge->subordinate &&
15762 (bridge->subordinate->number <=
15763 tp->pdev->bus->number) &&
15764 (bridge->subordinate->busn_res.end >=
15765 tp->pdev->bus->number)) {
15766 tg3_flag_set(tp, 40BIT_DMA_BUG);
15767 pci_dev_put(bridge);
15768 break;
15769 }
15770 } while (bridge);
15771 }
15772
15773 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15774 tg3_asic_rev(tp) == ASIC_REV_5714)
15775 tp->pdev_peer = tg3_find_peer(tp);
15776
15777 /* Determine TSO capabilities */
15778 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15779 ; /* Do nothing. HW bug. */
15780 else if (tg3_flag(tp, 57765_PLUS))
15781 tg3_flag_set(tp, HW_TSO_3);
15782 else if (tg3_flag(tp, 5755_PLUS) ||
15783 tg3_asic_rev(tp) == ASIC_REV_5906)
15784 tg3_flag_set(tp, HW_TSO_2);
15785 else if (tg3_flag(tp, 5750_PLUS)) {
15786 tg3_flag_set(tp, HW_TSO_1);
15787 tg3_flag_set(tp, TSO_BUG);
15788 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15789 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15790 tg3_flag_clear(tp, TSO_BUG);
15791 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15792 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15793 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15794 tg3_flag_set(tp, FW_TSO);
15795 tg3_flag_set(tp, TSO_BUG);
15796 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15797 tp->fw_needed = FIRMWARE_TG3TSO5;
15798 else
15799 tp->fw_needed = FIRMWARE_TG3TSO;
15800 }
15801
15802 /* Selectively allow TSO based on operating conditions */
15803 if (tg3_flag(tp, HW_TSO_1) ||
15804 tg3_flag(tp, HW_TSO_2) ||
15805 tg3_flag(tp, HW_TSO_3) ||
15806 tg3_flag(tp, FW_TSO)) {
15807 /* For firmware TSO, assume ASF is disabled.
15808 * We'll disable TSO later if we discover ASF
15809 * is enabled in tg3_get_eeprom_hw_cfg().
15810 */
15811 tg3_flag_set(tp, TSO_CAPABLE);
15812 } else {
15813 tg3_flag_clear(tp, TSO_CAPABLE);
15814 tg3_flag_clear(tp, TSO_BUG);
15815 tp->fw_needed = NULL;
15816 }
15817
15818 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15819 tp->fw_needed = FIRMWARE_TG3;
15820
15821 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15822 tp->fw_needed = FIRMWARE_TG357766;
15823
15824 tp->irq_max = 1;
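/* Assume a single vector; raised to TG3_IRQ_MAX_VECS below for
 * parts that support MSI-X.
 */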
15825
15826 if (tg3_flag(tp, 5750_PLUS)) {
15827 tg3_flag_set(tp, SUPPORT_MSI);
15828 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15829 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15830 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15831 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15832 tp->pdev_peer == tp->pdev))
15833 tg3_flag_clear(tp, SUPPORT_MSI);
15834
15835 if (tg3_flag(tp, 5755_PLUS) ||
15836 tg3_asic_rev(tp) == ASIC_REV_5906) {
15837 tg3_flag_set(tp, 1SHOT_MSI);
15838 }
15839
15840 if (tg3_flag(tp, 57765_PLUS)) {
15841 tg3_flag_set(tp, SUPPORT_MSIX);
15842 tp->irq_max = TG3_IRQ_MAX_VECS;
15843 }
15844 }
15845
15846 tp->txq_max = 1;
15847 tp->rxq_max = 1;
15848 if (tp->irq_max > 1) {
15849 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15850 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15851
15852 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15853 tg3_asic_rev(tp) == ASIC_REV_5720)
15854 tp->txq_max = tp->irq_max - 1;
15855 }
15856
15857 if (tg3_flag(tp, 5755_PLUS) ||
15858 tg3_asic_rev(tp) == ASIC_REV_5906)
15859 tg3_flag_set(tp, SHORT_DMA_BUG);
15860
15861 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15862 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15863
15864 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15865 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15866 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15867 tg3_asic_rev(tp) == ASIC_REV_5762)
15868 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15869
15870 if (tg3_flag(tp, 57765_PLUS) &&
15871 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15872 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15873
15874 if (!tg3_flag(tp, 5705_PLUS) ||
15875 tg3_flag(tp, 5780_CLASS) ||
15876 tg3_flag(tp, USE_JUMBO_BDFLAG))
15877 tg3_flag_set(tp, JUMBO_CAPABLE);
15878
15879 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15880 &pci_state_reg);
15881
15882 if (pci_is_pcie(tp->pdev)) {
15883 u16 lnkctl;
15884
15885 tg3_flag_set(tp, PCI_EXPRESS);
15886
15887 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15888 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15889 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15890 tg3_flag_clear(tp, HW_TSO_2);
15891 tg3_flag_clear(tp, TSO_CAPABLE);
15892 }
15893 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15894 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15895 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15896 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15897 tg3_flag_set(tp, CLKREQ_BUG);
15898 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15899 tg3_flag_set(tp, L1PLLPD_EN);
15900 }
15901 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15902 /* BCM5785 devices are effectively PCIe devices, and should
15903 * follow PCIe codepaths, but do not have a PCIe capabilities
15904 * section.
15905 */
15906 tg3_flag_set(tp, PCI_EXPRESS);
15907 } else if (!tg3_flag(tp, 5705_PLUS) ||
15908 tg3_flag(tp, 5780_CLASS)) {
15909 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15910 if (!tp->pcix_cap) {
15911 dev_err(&tp->pdev->dev,
15912 "Cannot find PCI-X capability, aborting\n");
15913 return -EIO;
15914 }
15915
15916 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15917 tg3_flag_set(tp, PCIX_MODE);
15918 }
15919
15920 /* If we have an AMD 762 or VIA K8T800 chipset, write
15921 * reordering to the mailbox registers done by the host
15922 * controller can cause major trouble. We read back from
15923 * every mailbox register write to force the writes to be
15924 * posted to the chip in order.
15925 */
15926 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15927 !tg3_flag(tp, PCI_EXPRESS))
15928 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15929
15930 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15931 &tp->pci_cacheline_sz);
15932 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15933 &tp->pci_lat_timer);
15934 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15935 tp->pci_lat_timer < 64) {
15936 tp->pci_lat_timer = 64;
15937 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15938 tp->pci_lat_timer);
15939 }
15940
15941 /* Important! -- It is critical that the PCI-X hw workaround
15942 * situation is decided before the first MMIO register access.
15943 */
15944 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15945 /* 5700 BX chips need to have their TX producer index
15946 * mailboxes written twice to workaround a bug.
15947 */
15948 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15949
15950 /* If we are in PCI-X mode, enable register write workaround.
15951 *
15952 * The workaround is to use indirect register accesses
15953 * for all chip writes not to mailbox registers.
15954 */
15955 if (tg3_flag(tp, PCIX_MODE)) {
15956 u32 pm_reg;
15957
15958 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15959
15960 /* The chip can have its power management PCI config
15961 * space registers clobbered due to this bug.
15962 * So explicitly force the chip into D0 here.
15963 */
15964 pci_read_config_dword(tp->pdev,
15965 tp->pm_cap + PCI_PM_CTRL,
15966 &pm_reg);
15967 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15968 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15969 pci_write_config_dword(tp->pdev,
15970 tp->pm_cap + PCI_PM_CTRL,
15971 pm_reg);
15972
15973 /* Also, force SERR#/PERR# in PCI command. */
15974 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15975 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15976 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15977 }
15978 }
15979
15980 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15981 tg3_flag_set(tp, PCI_HIGH_SPEED);
15982 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15983 tg3_flag_set(tp, PCI_32BIT);
15984
15985 /* Chip-specific fixup from Broadcom driver */
15986 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15987 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15988 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15989 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15990 }
15991
15992 /* Default fast path register access methods */
15993 tp->read32 = tg3_read32;
15994 tp->write32 = tg3_write32;
15995 tp->read32_mbox = tg3_read32;
15996 tp->write32_mbox = tg3_write32;
15997 tp->write32_tx_mbox = tg3_write32;
15998 tp->write32_rx_mbox = tg3_write32;
15999
16000 /* Various workaround register access methods */
16001 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16002 tp->write32 = tg3_write_indirect_reg32;
16003 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16004 (tg3_flag(tp, PCI_EXPRESS) &&
16005 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16006 /*
16007 * Back to back register writes can cause problems on these
16008 * chips, the workaround is to read back all reg writes
16009 * except those to mailbox regs.
16010 *
16011 * See tg3_write_indirect_reg32().
16012 */
16013 tp->write32 = tg3_write_flush_reg32;
16014 }
16015
16016 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16017 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16018 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16019 tp->write32_rx_mbox = tg3_write_flush_reg32;
16020 }
16021
16022 if (tg3_flag(tp, ICH_WORKAROUND)) {
16023 tp->read32 = tg3_read_indirect_reg32;
16024 tp->write32 = tg3_write_indirect_reg32;
16025 tp->read32_mbox = tg3_read_indirect_mbox;
16026 tp->write32_mbox = tg3_write_indirect_mbox;
16027 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16028 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16029
16030 iounmap(tp->regs);
16031 tp->regs = NULL;
16032
16033 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16034 pci_cmd &= ~PCI_COMMAND_MEMORY;
16035 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16036 }
16037 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16038 tp->read32_mbox = tg3_read32_mbox_5906;
16039 tp->write32_mbox = tg3_write32_mbox_5906;
16040 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16041 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16042 }
16043
16044 if (tp->write32 == tg3_write_indirect_reg32 ||
16045 (tg3_flag(tp, PCIX_MODE) &&
16046 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16047 tg3_asic_rev(tp) == ASIC_REV_5701)))
16048 tg3_flag_set(tp, SRAM_USE_CONFIG);
16049
16050 /* The memory arbiter has to be enabled in order for SRAM accesses
16051 * to succeed. Normally on powerup the tg3 chip firmware will make
16052 * sure it is enabled, but other entities such as system netboot
16053 * code might disable it.
16054 */
16055 val = tr32(MEMARB_MODE);
16056 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16057
16058 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16059 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16060 tg3_flag(tp, 5780_CLASS)) {
16061 if (tg3_flag(tp, PCIX_MODE)) {
16062 pci_read_config_dword(tp->pdev,
16063 tp->pcix_cap + PCI_X_STATUS,
16064 &val);
16065 tp->pci_fn = val & 0x7;
16066 }
16067 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16068 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16069 tg3_asic_rev(tp) == ASIC_REV_5720) {
16070 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16071 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16072 val = tr32(TG3_CPMU_STATUS);
16073
16074 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16075 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16076 else
16077 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16078 TG3_CPMU_STATUS_FSHFT_5719;
16079 }
16080
16081 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16082 tp->write32_tx_mbox = tg3_write_flush_reg32;
16083 tp->write32_rx_mbox = tg3_write_flush_reg32;
16084 }
16085
16086 /* Get eeprom hw config before calling tg3_set_power_state().
16087 * In particular, the TG3_FLAG_IS_NIC flag must be
16088 * determined before calling tg3_set_power_state() so that
16089 * we know whether or not to switch out of Vaux power.
16090 * When the flag is set, it means that GPIO1 is used for eeprom
16091 * write protect and also implies that it is a LOM where GPIOs
16092 * are not used to switch power.
16093 */
16094 tg3_get_eeprom_hw_cfg(tp);
16095
16096 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16097 tg3_flag_clear(tp, TSO_CAPABLE);
16098 tg3_flag_clear(tp, TSO_BUG);
16099 tp->fw_needed = NULL;
16100 }
16101
16102 if (tg3_flag(tp, ENABLE_APE)) {
16103 /* Allow reads and writes to the
16104 * APE register and memory space.
16105 */
16106 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16107 PCISTATE_ALLOW_APE_SHMEM_WR |
16108 PCISTATE_ALLOW_APE_PSPACE_WR;
16109 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16110 pci_state_reg);
16111
16112 tg3_ape_lock_init(tp);
16113 }
16114
16115 /* Set up tp->grc_local_ctrl before calling
16116 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16117 * will bring 5700's external PHY out of reset.
16118 * It is also used as eeprom write protect on LOMs.
16119 */
16120 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16121 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16122 tg3_flag(tp, EEPROM_WRITE_PROT))
16123 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16124 GRC_LCLCTRL_GPIO_OUTPUT1);
16125 /* Unused GPIO3 must be driven as output on 5752 because there
16126 * are no pull-up resistors on unused GPIO pins.
16127 */
16128 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16129 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16130
16131 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16132 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16133 tg3_flag(tp, 57765_CLASS))
16134 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16135
16136 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16137 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16138 /* Turn off the debug UART. */
16139 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16140 if (tg3_flag(tp, IS_NIC))
16141 /* Keep VMain power. */
16142 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16143 GRC_LCLCTRL_GPIO_OUTPUT0;
16144 }
16145
16146 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16147 tp->grc_local_ctrl |=
16148 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16149
16150 /* Switch out of Vaux if it is a NIC */
16151 tg3_pwrsrc_switch_to_vmain(tp);
16152
16153 /* Derive initial jumbo mode from MTU assigned in
16154 * ether_setup() via the alloc_etherdev() call
16155 */
16156 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16157 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16158
16159 /* Determine WakeOnLan speed to use. */
16160 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16161 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16162 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16163 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16164 tg3_flag_clear(tp, WOL_SPEED_100MB);
16165 } else {
16166 tg3_flag_set(tp, WOL_SPEED_100MB);
16167 }
16168
16169 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16170 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16171
16172 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16173 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16174 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16175 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16176 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16177 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16178 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16179 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16180
16181 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16182 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16183 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16184 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16185 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16186
16187 if (tg3_flag(tp, 5705_PLUS) &&
16188 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16189 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16190 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16191 !tg3_flag(tp, 57765_PLUS)) {
16192 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16193 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16194 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16195 tg3_asic_rev(tp) == ASIC_REV_5761) {
16196 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16197 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16198 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16199 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16200 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16201 } else
16202 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16203 }
16204
16205 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16206 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16207 tp->phy_otp = tg3_read_otp_phycfg(tp);
16208 if (tp->phy_otp == 0)
16209 tp->phy_otp = TG3_OTP_DEFAULT;
16210 }
16211
16212 if (tg3_flag(tp, CPMU_PRESENT))
16213 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16214 else
16215 tp->mi_mode = MAC_MI_MODE_BASE;
16216
16217 tp->coalesce_mode = 0;
16218 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16219 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16220 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16221
16222 /* Set these bits to enable statistics workaround. */
16223 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16224 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16225 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16226 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16227 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16228 }
16229
16230 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16231 tg3_asic_rev(tp) == ASIC_REV_57780)
16232 tg3_flag_set(tp, USE_PHYLIB);
16233
16234 err = tg3_mdio_init(tp);
16235 if (err)
16236 return err;
16237
16238 /* Initialize data/descriptor byte/word swapping. */
16239 val = tr32(GRC_MODE);
16240 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16241 tg3_asic_rev(tp) == ASIC_REV_5762)
16242 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16243 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16244 GRC_MODE_B2HRX_ENABLE |
16245 GRC_MODE_HTX2B_ENABLE |
16246 GRC_MODE_HOST_STACKUP);
16247 else
16248 val &= GRC_MODE_HOST_STACKUP;
16249
16250 tw32(GRC_MODE, val | tp->grc_mode);
16251
16252 tg3_switch_clocks(tp);
16253
16254 /* Clear this out for sanity. */
16255 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16256
16257 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16258 &pci_state_reg);
16259 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16260 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16261 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16262 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16263 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16264 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16265 void __iomem *sram_base;
16266
16267 /* Write some dummy words into the SRAM status block
16268 * area, see if it reads back correctly. If the return
16269 * value is bad, force enable the PCIX workaround.
16270 */
16271 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16272
16273 writel(0x00000000, sram_base);
16274 writel(0x00000000, sram_base + 4);
16275 writel(0xffffffff, sram_base + 4);
16276 if (readl(sram_base) != 0x00000000)
16277 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16278 }
16279 }
16280
16281 udelay(50);
16282 tg3_nvram_init(tp);
16283
16284 /* If the device has an NVRAM, no need to load patch firmware */
16285 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16286 !tg3_flag(tp, NO_NVRAM))
16287 tp->fw_needed = NULL;
16288
16289 grc_misc_cfg = tr32(GRC_MISC_CFG);
16290 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16291
16292 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16293 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16294 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16295 tg3_flag_set(tp, IS_5788);
16296
16297 if (!tg3_flag(tp, IS_5788) &&
16298 tg3_asic_rev(tp) != ASIC_REV_5700)
16299 tg3_flag_set(tp, TAGGED_STATUS);
16300 if (tg3_flag(tp, TAGGED_STATUS)) {
16301 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16302 HOSTCC_MODE_CLRTICK_TXBD);
16303
16304 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16305 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16306 tp->misc_host_ctrl);
16307 }
16308
16309 /* Preserve the APE MAC_MODE bits */
16310 if (tg3_flag(tp, ENABLE_APE))
16311 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16312 else
16313 tp->mac_mode = 0;
16314
16315 if (tg3_10_100_only_device(tp, ent))
16316 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16317
16318 err = tg3_phy_probe(tp);
16319 if (err) {
16320 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16321 /* ... but do not return immediately ... */
16322 tg3_mdio_fini(tp);
16323 }
16324
16325 tg3_read_vpd(tp);
16326 tg3_read_fw_ver(tp);
16327
16328 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16329 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16330 } else {
16331 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16332 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16333 else
16334 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16335 }
16336
16337 /* 5700 {AX,BX} chips have a broken status block link
16338 * change bit implementation, so we must use the
16339 * status register in those cases.
16340 */
16341 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16342 tg3_flag_set(tp, USE_LINKCHG_REG);
16343 else
16344 tg3_flag_clear(tp, USE_LINKCHG_REG);
16345
16346 /* The led_ctrl is set during tg3_phy_probe, here we might
16347 * have to force the link status polling mechanism based
16348 * upon subsystem IDs.
16349 */
16350 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16351 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16352 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16353 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16354 tg3_flag_set(tp, USE_LINKCHG_REG);
16355 }
16356
16357 /* For all SERDES we poll the MAC status register. */
16358 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16359 tg3_flag_set(tp, POLL_SERDES);
16360 else
16361 tg3_flag_clear(tp, POLL_SERDES);
16362
16363 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16364 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16365 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16366 tg3_flag(tp, PCIX_MODE)) {
16367 tp->rx_offset = NET_SKB_PAD;
16368 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16369 tp->rx_copy_thresh = ~(u16)0;
16370 #endif
16371 }
16372
16373 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16374 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16375 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16376
16377 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16378
16379 /* Increment the rx prod index on the rx std ring by at most
16380 * 8 for these chips to workaround hw errata.
16381 */
16382 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16383 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16384 tg3_asic_rev(tp) == ASIC_REV_5755)
16385 tp->rx_std_max_post = 8;
16386
16387 if (tg3_flag(tp, ASPM_WORKAROUND))
16388 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16389 PCIE_PWR_MGMT_L1_THRESH_MSK;
16390
16391 return err;
16392 }
16393
16394 #ifdef CONFIG_SPARC
16395 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16396 {
16397 struct net_device *dev = tp->dev;
16398 struct pci_dev *pdev = tp->pdev;
16399 struct device_node *dp = pci_device_to_OF_node(pdev);
16400 const unsigned char *addr;
16401 int len;
16402
16403 addr = of_get_property(dp, "local-mac-address", &len);
16404 if (addr && len == 6) {
16405 memcpy(dev->dev_addr, addr, 6);
16406 return 0;
16407 }
16408 return -ENODEV;
16409 }
16410
16411 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16412 {
16413 struct net_device *dev = tp->dev;
16414
16415 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16416 return 0;
16417 }
16418 #endif
16419
16420 static int tg3_get_device_address(struct tg3 *tp)
16421 {
16422 struct net_device *dev = tp->dev;
16423 u32 hi, lo, mac_offset;
16424 int addr_ok = 0;
16425 int err;
16426
16427 #ifdef CONFIG_SPARC
16428 if (!tg3_get_macaddr_sparc(tp))
16429 return 0;
16430 #endif
16431
16432 if (tg3_flag(tp, IS_SSB_CORE)) {
16433 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16434 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16435 return 0;
16436 }
16437
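/* Default NVRAM offset of the MAC address is 0x7c; dual-MAC
 * configurations and the 5906 override it below.
 */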
16438 mac_offset = 0x7c;
16439 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16440 tg3_flag(tp, 5780_CLASS)) {
16441 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16442 mac_offset = 0xcc;
16443 if (tg3_nvram_lock(tp))
16444 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16445 else
16446 tg3_nvram_unlock(tp);
16447 } else if (tg3_flag(tp, 5717_PLUS)) {
16448 if (tp->pci_fn & 1)
16449 mac_offset = 0xcc;
16450 if (tp->pci_fn > 1)
16451 mac_offset += 0x18c;
16452 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16453 mac_offset = 0x10;
16454
16455 /* First try to get it from MAC address mailbox. */
16456 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
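/* 0x484b is ASCII "HK", presumably the signature bootcode leaves
 * in the upper half of the mailbox once it has deposited a valid
 * MAC address there.
 */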
16457 if ((hi >> 16) == 0x484b) {
16458 dev->dev_addr[0] = (hi >> 8) & 0xff;
16459 dev->dev_addr[1] = (hi >> 0) & 0xff;
16460
16461 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16462 dev->dev_addr[2] = (lo >> 24) & 0xff;
16463 dev->dev_addr[3] = (lo >> 16) & 0xff;
16464 dev->dev_addr[4] = (lo >> 8) & 0xff;
16465 dev->dev_addr[5] = (lo >> 0) & 0xff;
16466
16467 /* Some old bootcode may report a 0 MAC address in SRAM */
16468 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16469 }
16470 if (!addr_ok) {
16471 /* Next, try NVRAM. */
16472 if (!tg3_flag(tp, NO_NVRAM) &&
16473 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16474 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16475 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16476 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16477 }
16478 /* Finally just fetch it out of the MAC control regs. */
16479 else {
16480 hi = tr32(MAC_ADDR_0_HIGH);
16481 lo = tr32(MAC_ADDR_0_LOW);
16482
16483 dev->dev_addr[5] = lo & 0xff;
16484 dev->dev_addr[4] = (lo >> 8) & 0xff;
16485 dev->dev_addr[3] = (lo >> 16) & 0xff;
16486 dev->dev_addr[2] = (lo >> 24) & 0xff;
16487 dev->dev_addr[1] = hi & 0xff;
16488 dev->dev_addr[0] = (hi >> 8) & 0xff;
16489 }
16490 }
16491
16492 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16493 #ifdef CONFIG_SPARC
16494 if (!tg3_get_default_macaddr_sparc(tp))
16495 return 0;
16496 #endif
16497 return -EINVAL;
16498 }
16499 return 0;
16500 }
16501
16502 #define BOUNDARY_SINGLE_CACHELINE 1
16503 #define BOUNDARY_MULTI_CACHELINE 2
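/* Per-architecture goals for tg3_calc_dma_bndry(): confine each
 * DMA burst to a single cacheline, merely keep bursts from
 * spanning many cachelines, or (goal of zero) leave the chip
 * defaults untouched.
 */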
16504
16505 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16506 {
16507 int cacheline_size;
16508 u8 byte;
16509 int goal;
16510
16511 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
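/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of
 * zero means it was never programmed, in which case a 1024-byte
 * line is assumed.
 */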
16512 if (byte == 0)
16513 cacheline_size = 1024;
16514 else
16515 cacheline_size = (int) byte * 4;
16516
16517 /* On 5703 and later chips the boundary bits have no
16518 * effect, except on PCI Express devices, which retain
16519 * limited write-side control (see below). */
16520 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16521 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16522 !tg3_flag(tp, PCI_EXPRESS))
16523 goto out;
16524
16525 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16526 goal = BOUNDARY_MULTI_CACHELINE;
16527 #else
16528 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16529 goal = BOUNDARY_SINGLE_CACHELINE;
16530 #else
16531 goal = 0;
16532 #endif
16533 #endif
16534
16535 if (tg3_flag(tp, 57765_PLUS)) {
16536 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16537 goto out;
16538 }
16539
16540 if (!goal)
16541 goto out;
16542
16543 /* PCI controllers on most RISC systems tend to disconnect
16544 * when a device tries to burst across a cache-line boundary.
16545 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16546 *
16547 * Unfortunately, for PCI-E there are only limited
16548 * write-side controls for this, and thus for reads
16549 * we will still get the disconnects. We'll also waste
16550 * these PCI cycles for both read and write for chips
16551 * other than 5700 and 5701 which do not implement the
16552 * boundary bits.
16553 */
16554 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16555 switch (cacheline_size) {
16556 case 16:
16557 case 32:
16558 case 64:
16559 case 128:
16560 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16561 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16562 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16563 } else {
16564 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16565 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16566 }
16567 break;
16568
16569 case 256:
16570 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16571 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16572 break;
16573
16574 default:
16575 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16576 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16577 break;
16578 }
16579 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16580 switch (cacheline_size) {
16581 case 16:
16582 case 32:
16583 case 64:
16584 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16585 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16586 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16587 break;
16588 }
16589 /* fallthrough */
16590 case 128:
16591 default:
16592 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16593 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16594 break;
16595 }
16596 } else {
16597 switch (cacheline_size) {
16598 case 16:
16599 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16600 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16601 DMA_RWCTRL_WRITE_BNDRY_16);
16602 break;
16603 }
16604 /* fallthrough */
16605 case 32:
16606 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16607 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16608 DMA_RWCTRL_WRITE_BNDRY_32);
16609 break;
16610 }
16611 /* fallthrough */
16612 case 64:
16613 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16614 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16615 DMA_RWCTRL_WRITE_BNDRY_64);
16616 break;
16617 }
16618 /* fallthrough */
16619 case 128:
16620 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16621 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16622 DMA_RWCTRL_WRITE_BNDRY_128);
16623 break;
16624 }
16625 /* fallthrough */
16626 case 256:
16627 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16628 DMA_RWCTRL_WRITE_BNDRY_256);
16629 break;
16630 case 512:
16631 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16632 DMA_RWCTRL_WRITE_BNDRY_512);
16633 break;
16634 case 1024:
16635 default:
16636 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16637 DMA_RWCTRL_WRITE_BNDRY_1024);
16638 break;
16639 }
16640 }
16641
16642 out:
16643 return val;
16644 }
16645
16646 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16647 int size, bool to_device)
16648 {
16649 struct tg3_internal_buffer_desc test_desc;
16650 u32 sram_dma_descs;
16651 int i, ret;
16652
16653 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
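/* The test builds one internal buffer descriptor, writes it into
 * the chip's SRAM descriptor pool through the PCI memory window,
 * then kicks the read or write DMA engine by enqueueing the
 * descriptor address on the corresponding FTQ.
 */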
16654
16655 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16656 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16657 tw32(RDMAC_STATUS, 0);
16658 tw32(WDMAC_STATUS, 0);
16659
16660 tw32(BUFMGR_MODE, 0);
16661 tw32(FTQ_RESET, 0);
16662
16663 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16664 test_desc.addr_lo = buf_dma & 0xffffffff;
16665 test_desc.nic_mbuf = 0x00002100;
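/* 0x2100 appears to be the on-chip staging address for the
 * transfer; the #if 0 verification block below reads the data
 * back from that same offset.
 */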
16666 test_desc.len = size;
16667
16668 /*
16669 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16670 * the *second* time the tg3 driver was getting loaded after an
16671 * initial scan.
16672 *
16673 * Broadcom tells me:
16674 * ...the DMA engine is connected to the GRC block and a DMA
16675 * reset may affect the GRC block in some unpredictable way...
16676 * The behavior of resets to individual blocks has not been tested.
16677 *
16678 * Broadcom noted the GRC reset will also reset all sub-components.
16679 */
16680 if (to_device) {
16681 test_desc.cqid_sqid = (13 << 8) | 2;
16682
16683 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16684 udelay(40);
16685 } else {
16686 test_desc.cqid_sqid = (16 << 8) | 7;
16687
16688 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16689 udelay(40);
16690 }
16691 test_desc.flags = 0x00000005;
16692
16693 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16694 u32 val;
16695
16696 val = *(((u32 *)&test_desc) + i);
16697 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16698 sram_dma_descs + (i * sizeof(u32)));
16699 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16700 }
16701 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16702
16703 if (to_device)
16704 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16705 else
16706 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16707
16708 ret = -ENODEV;
16709 for (i = 0; i < 40; i++) {
16710 u32 val;
16711
16712 if (to_device)
16713 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16714 else
16715 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16716 if ((val & 0xffff) == sram_dma_descs) {
16717 ret = 0;
16718 break;
16719 }
16720
16721 udelay(100);
16722 }
16723
16724 return ret;
16725 }
16726
16727 #define TEST_BUFFER_SIZE 0x2000
16728
16729 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16730 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16731 { },
16732 };
16733
16734 static int tg3_test_dma(struct tg3 *tp)
16735 {
16736 dma_addr_t buf_dma;
16737 u32 *buf, saved_dma_rwctrl;
16738 int ret = 0;
16739
16740 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16741 &buf_dma, GFP_KERNEL);
16742 if (!buf) {
16743 ret = -ENOMEM;
16744 goto out_nofree;
16745 }
16746
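/* Seed the DMA control word with PCI bus command codes:
 * 0x7 is Memory Write, 0x6 is Memory Read.
 */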
16747 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16748 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16749
16750 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16751
16752 if (tg3_flag(tp, 57765_PLUS))
16753 goto out;
16754
16755 if (tg3_flag(tp, PCI_EXPRESS)) {
16756 /* DMA read watermark not used on PCIE */
16757 tp->dma_rwctrl |= 0x00180000;
16758 } else if (!tg3_flag(tp, PCIX_MODE)) {
16759 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16760 tg3_asic_rev(tp) == ASIC_REV_5750)
16761 tp->dma_rwctrl |= 0x003f0000;
16762 else
16763 tp->dma_rwctrl |= 0x003f000f;
16764 } else {
16765 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16766 tg3_asic_rev(tp) == ASIC_REV_5704) {
16767 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16768 u32 read_water = 0x7;
16769
16770 /* If the 5704 is behind the EPB bridge, we can
16771 * do the less restrictive ONE_DMA workaround for
16772 * better performance.
16773 */
16774 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16775 tg3_asic_rev(tp) == ASIC_REV_5704)
16776 tp->dma_rwctrl |= 0x8000;
16777 else if (ccval == 0x6 || ccval == 0x7)
16778 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16779
16780 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16781 read_water = 4;
16782 /* Set bit 23 to enable PCIX hw bug fix */
16783 tp->dma_rwctrl |=
16784 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16785 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16786 (1 << 23);
16787 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16788 /* 5780 always in PCIX mode */
16789 tp->dma_rwctrl |= 0x00144000;
16790 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16791 /* 5714 always in PCIX mode */
16792 tp->dma_rwctrl |= 0x00148000;
16793 } else {
16794 tp->dma_rwctrl |= 0x001b000f;
16795 }
16796 }
16797 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16798 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16799
16800 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16801 tg3_asic_rev(tp) == ASIC_REV_5704)
16802 tp->dma_rwctrl &= 0xfffffff0;
16803
16804 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16805 tg3_asic_rev(tp) == ASIC_REV_5701) {
16806 /* Remove this if it causes problems for some boards. */
16807 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16808
16809 /* On 5700/5701 chips, we need to set this bit.
16810 * Otherwise the chip will issue cacheline transactions
16811 * to streamable DMA memory with not all the byte
16812 * enables turned on. This is an error on several
16813 * RISC PCI controllers, in particular sparc64.
16814 *
16815 * On 5703/5704 chips, this bit has been reassigned
16816 * a different meaning. In particular, it is used
16817 * on those chips to enable a PCI-X workaround.
16818 */
16819 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16820 }
16821
16822 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16823
16824 #if 0
16825 /* Unneeded, already done by tg3_get_invariants. */
16826 tg3_switch_clocks(tp);
16827 #endif
16828
16829 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16830 tg3_asic_rev(tp) != ASIC_REV_5701)
16831 goto out;
16832
16833 /* It is best to perform the DMA test with maximum write burst size
16834 * to expose the 5700/5701 write DMA bug.
16835 */
16836 saved_dma_rwctrl = tp->dma_rwctrl;
16837 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16838 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16839
16840 while (1) {
16841 u32 *p = buf, i;
16842
16843 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16844 p[i] = i;
16845
16846 /* Send the buffer to the chip. */
16847 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16848 if (ret) {
16849 dev_err(&tp->pdev->dev,
16850 "%s: Buffer write failed. err = %d\n",
16851 __func__, ret);
16852 break;
16853 }
16854
16855 #if 0
16856 /* validate data reached card RAM correctly. */
16857 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16858 u32 val;
16859 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16860 if (le32_to_cpu(val) != p[i]) {
16861 dev_err(&tp->pdev->dev,
16862 "%s: Buffer corrupted on device! "
16863 "(%d != %d)\n", __func__, val, i);
16864 /* ret = -ENODEV here? */
16865 }
16866 p[i] = 0;
16867 }
16868 #endif
16869 /* Now read it back. */
16870 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16871 if (ret) {
16872 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16873 "err = %d\n", __func__, ret);
16874 break;
16875 }
16876
16877 /* Verify it. */
16878 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16879 if (p[i] == i)
16880 continue;
16881
16882 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16883 DMA_RWCTRL_WRITE_BNDRY_16) {
16884 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16885 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16886 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16887 break;
16888 } else {
16889 dev_err(&tp->pdev->dev,
16890 "%s: Buffer corrupted on read back! "
16891 "(%d != %d)\n", __func__, p[i], i);
16892 ret = -ENODEV;
16893 goto out;
16894 }
16895 }
16896
16897 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16898 /* Success. */
16899 ret = 0;
16900 break;
16901 }
16902 }
16903 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16904 DMA_RWCTRL_WRITE_BNDRY_16) {
16905 /* DMA test passed without adjusting DMA boundary,
16906 * now look for chipsets that are known to expose the
16907 * DMA bug without failing the test.
16908 */
16909 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16910 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16911 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16912 } else {
16913 /* Safe to use the calculated DMA boundary. */
16914 tp->dma_rwctrl = saved_dma_rwctrl;
16915 }
16916
16917 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16918 }
16919
16920 out:
16921 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16922 out_nofree:
16923 return ret;
16924 }
16925
16926 static void tg3_init_bufmgr_config(struct tg3 *tp)
16927 {
16928 if (tg3_flag(tp, 57765_PLUS)) {
16929 tp->bufmgr_config.mbuf_read_dma_low_water =
16930 DEFAULT_MB_RDMA_LOW_WATER_5705;
16931 tp->bufmgr_config.mbuf_mac_rx_low_water =
16932 DEFAULT_MB_MACRX_LOW_WATER_57765;
16933 tp->bufmgr_config.mbuf_high_water =
16934 DEFAULT_MB_HIGH_WATER_57765;
16935
16936 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16937 DEFAULT_MB_RDMA_LOW_WATER_5705;
16938 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16939 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16940 tp->bufmgr_config.mbuf_high_water_jumbo =
16941 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16942 } else if (tg3_flag(tp, 5705_PLUS)) {
16943 tp->bufmgr_config.mbuf_read_dma_low_water =
16944 DEFAULT_MB_RDMA_LOW_WATER_5705;
16945 tp->bufmgr_config.mbuf_mac_rx_low_water =
16946 DEFAULT_MB_MACRX_LOW_WATER_5705;
16947 tp->bufmgr_config.mbuf_high_water =
16948 DEFAULT_MB_HIGH_WATER_5705;
16949 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16950 tp->bufmgr_config.mbuf_mac_rx_low_water =
16951 DEFAULT_MB_MACRX_LOW_WATER_5906;
16952 tp->bufmgr_config.mbuf_high_water =
16953 DEFAULT_MB_HIGH_WATER_5906;
16954 }
16955
16956 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16957 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16958 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16959 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16960 tp->bufmgr_config.mbuf_high_water_jumbo =
16961 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16962 } else {
16963 tp->bufmgr_config.mbuf_read_dma_low_water =
16964 DEFAULT_MB_RDMA_LOW_WATER;
16965 tp->bufmgr_config.mbuf_mac_rx_low_water =
16966 DEFAULT_MB_MACRX_LOW_WATER;
16967 tp->bufmgr_config.mbuf_high_water =
16968 DEFAULT_MB_HIGH_WATER;
16969
16970 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16971 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16972 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16973 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16974 tp->bufmgr_config.mbuf_high_water_jumbo =
16975 DEFAULT_MB_HIGH_WATER_JUMBO;
16976 }
16977
16978 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16979 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16980 }
16981
16982 static char *tg3_phy_string(struct tg3 *tp)
16983 {
16984 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16985 case TG3_PHY_ID_BCM5400: return "5400";
16986 case TG3_PHY_ID_BCM5401: return "5401";
16987 case TG3_PHY_ID_BCM5411: return "5411";
16988 case TG3_PHY_ID_BCM5701: return "5701";
16989 case TG3_PHY_ID_BCM5703: return "5703";
16990 case TG3_PHY_ID_BCM5704: return "5704";
16991 case TG3_PHY_ID_BCM5705: return "5705";
16992 case TG3_PHY_ID_BCM5750: return "5750";
16993 case TG3_PHY_ID_BCM5752: return "5752";
16994 case TG3_PHY_ID_BCM5714: return "5714";
16995 case TG3_PHY_ID_BCM5780: return "5780";
16996 case TG3_PHY_ID_BCM5755: return "5755";
16997 case TG3_PHY_ID_BCM5787: return "5787";
16998 case TG3_PHY_ID_BCM5784: return "5784";
16999 case TG3_PHY_ID_BCM5756: return "5722/5756";
17000 case TG3_PHY_ID_BCM5906: return "5906";
17001 case TG3_PHY_ID_BCM5761: return "5761";
17002 case TG3_PHY_ID_BCM5718C: return "5718C";
17003 case TG3_PHY_ID_BCM5718S: return "5718S";
17004 case TG3_PHY_ID_BCM57765: return "57765";
17005 case TG3_PHY_ID_BCM5719C: return "5719C";
17006 case TG3_PHY_ID_BCM5720C: return "5720C";
17007 case TG3_PHY_ID_BCM5762: return "5762C";
17008 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17009 case 0: return "serdes";
17010 default: return "unknown";
17011 }
17012 }
17013
17014 static char *tg3_bus_string(struct tg3 *tp, char *str)
17015 {
17016 if (tg3_flag(tp, PCI_EXPRESS)) {
17017 strcpy(str, "PCI Express");
17018 return str;
17019 } else if (tg3_flag(tp, PCIX_MODE)) {
17020 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17021
17022 strcpy(str, "PCIX:");
17023
17024 if ((clock_ctrl == 7) ||
17025 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17026 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17027 strcat(str, "133MHz");
17028 else if (clock_ctrl == 0)
17029 strcat(str, "33MHz");
17030 else if (clock_ctrl == 2)
17031 strcat(str, "50MHz");
17032 else if (clock_ctrl == 4)
17033 strcat(str, "66MHz");
17034 else if (clock_ctrl == 6)
17035 strcat(str, "100MHz");
17036 } else {
17037 strcpy(str, "PCI:");
17038 if (tg3_flag(tp, PCI_HIGH_SPEED))
17039 strcat(str, "66MHz");
17040 else
17041 strcat(str, "33MHz");
17042 }
17043 if (tg3_flag(tp, PCI_32BIT))
17044 strcat(str, ":32-bit");
17045 else
17046 strcat(str, ":64-bit");
17047 return str;
17048 }
17049
17050 static void tg3_init_coal(struct tg3 *tp)
17051 {
17052 struct ethtool_coalesce *ec = &tp->coal;
17053
17054 memset(ec, 0, sizeof(*ec));
17055 ec->cmd = ETHTOOL_GCOALESCE;
17056 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17057 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17058 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17059 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17060 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17061 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17062 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17063 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17064 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17065
17066 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17067 HOSTCC_MODE_CLRTICK_TXBD)) {
17068 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17069 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17070 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17071 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17072 }
17073
17074 if (tg3_flag(tp, 5705_PLUS)) {
17075 ec->rx_coalesce_usecs_irq = 0;
17076 ec->tx_coalesce_usecs_irq = 0;
17077 ec->stats_block_coalesce_usecs = 0;
17078 }
17079 }
17080
17081 static int tg3_init_one(struct pci_dev *pdev,
17082 const struct pci_device_id *ent)
17083 {
17084 struct net_device *dev;
17085 struct tg3 *tp;
17086 int i, err, pm_cap;
17087 u32 sndmbx, rcvmbx, intmbx;
17088 char str[40];
17089 u64 dma_mask, persist_dma_mask;
17090 netdev_features_t features = 0;
17091
17092 printk_once(KERN_INFO "%s\n", version);
17093
17094 err = pci_enable_device(pdev);
17095 if (err) {
17096 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17097 return err;
17098 }
17099
17100 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17101 if (err) {
17102 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17103 goto err_out_disable_pdev;
17104 }
17105
17106 pci_set_master(pdev);
17107
17108 /* Find power-management capability. */
17109 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17110 if (pm_cap == 0) {
17111 dev_err(&pdev->dev,
17112 "Cannot find Power Management capability, aborting\n");
17113 err = -EIO;
17114 goto err_out_free_res;
17115 }
17116
17117 err = pci_set_power_state(pdev, PCI_D0);
17118 if (err) {
17119 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17120 goto err_out_free_res;
17121 }
17122
17123 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17124 if (!dev) {
17125 err = -ENOMEM;
17126 goto err_out_power_down;
17127 }
17128
17129 SET_NETDEV_DEV(dev, &pdev->dev);
17130
17131 tp = netdev_priv(dev);
17132 tp->pdev = pdev;
17133 tp->dev = dev;
17134 tp->pm_cap = pm_cap;
17135 tp->rx_mode = TG3_DEF_RX_MODE;
17136 tp->tx_mode = TG3_DEF_TX_MODE;
17137 tp->irq_sync = 1;
17138
17139 if (tg3_debug > 0)
17140 tp->msg_enable = tg3_debug;
17141 else
17142 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17143
17144 if (pdev_is_ssb_gige_core(pdev)) {
17145 tg3_flag_set(tp, IS_SSB_CORE);
17146 if (ssb_gige_must_flush_posted_writes(pdev))
17147 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17148 if (ssb_gige_one_dma_at_once(pdev))
17149 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17150 if (ssb_gige_have_roboswitch(pdev))
17151 tg3_flag_set(tp, ROBOSWITCH);
17152 if (ssb_gige_is_rgmii(pdev))
17153 tg3_flag_set(tp, RGMII_MODE);
17154 }
17155
17156 /* The word/byte swap controls here control register access byte
17157 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17158 * setting below.
17159 */
17160 tp->misc_host_ctrl =
17161 MISC_HOST_CTRL_MASK_PCI_INT |
17162 MISC_HOST_CTRL_WORD_SWAP |
17163 MISC_HOST_CTRL_INDIR_ACCESS |
17164 MISC_HOST_CTRL_PCISTATE_RW;
17165
17166 /* The NONFRM (non-frame) byte/word swap controls take effect
17167 * on descriptor entries, anything which isn't packet data.
17168 *
17169 * The StrongARM chips on the board (one for tx, one for rx)
17170 * are running in big-endian mode.
17171 */
17172 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17173 GRC_MODE_WSWAP_NONFRM_DATA);
17174 #ifdef __BIG_ENDIAN
17175 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17176 #endif
17177 spin_lock_init(&tp->lock);
17178 spin_lock_init(&tp->indirect_lock);
17179 INIT_WORK(&tp->reset_task, tg3_reset_task);
17180
17181 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17182 if (!tp->regs) {
17183 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17184 err = -ENOMEM;
17185 goto err_out_free_dev;
17186 }
17187
17188 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17189 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17190 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17191 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17192 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17193 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17194 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17195 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17196 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17197 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17198 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17199 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17200 tg3_flag_set(tp, ENABLE_APE);
17201 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17202 if (!tp->aperegs) {
17203 dev_err(&pdev->dev,
17204 "Cannot map APE registers, aborting\n");
17205 err = -ENOMEM;
17206 goto err_out_iounmap;
17207 }
17208 }
17209
17210 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17211 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17212
17213 dev->ethtool_ops = &tg3_ethtool_ops;
17214 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17215 dev->netdev_ops = &tg3_netdev_ops;
17216 dev->irq = pdev->irq;
17217
17218 err = tg3_get_invariants(tp, ent);
17219 if (err) {
17220 dev_err(&pdev->dev,
17221 "Problem fetching invariants of chip, aborting\n");
17222 goto err_out_apeunmap;
17223 }
17224
17225 /* The EPB bridge inside 5714, 5715, and 5780 and any
17226 * device behind the EPB cannot support DMA addresses > 40-bit.
17227 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17228 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17229 * do DMA address check in tg3_start_xmit().
17230 */
17231 if (tg3_flag(tp, IS_5788))
17232 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17233 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17234 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17235 #ifdef CONFIG_HIGHMEM
17236 dma_mask = DMA_BIT_MASK(64);
17237 #endif
17238 } else
17239 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17240
17241 /* Configure DMA attributes. */
17242 if (dma_mask > DMA_BIT_MASK(32)) {
17243 err = pci_set_dma_mask(pdev, dma_mask);
17244 if (!err) {
17245 features |= NETIF_F_HIGHDMA;
17246 err = pci_set_consistent_dma_mask(pdev,
17247 persist_dma_mask);
17248 if (err < 0) {
17249 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17250 "DMA for consistent allocations\n");
17251 goto err_out_apeunmap;
17252 }
17253 }
17254 }
17255 if (err || dma_mask == DMA_BIT_MASK(32)) {
17256 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17257 if (err) {
17258 dev_err(&pdev->dev,
17259 "No usable DMA configuration, aborting\n");
17260 goto err_out_apeunmap;
17261 }
17262 }
17263
17264 tg3_init_bufmgr_config(tp);
17265
17266 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17267
17268 /* 5700 B0 chips do not support checksumming correctly due
17269 * to hardware bugs.
17270 */
17271 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17272 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17273
17274 if (tg3_flag(tp, 5755_PLUS))
17275 features |= NETIF_F_IPV6_CSUM;
17276 }
17277
17278 /* TSO is on by default on chips that support hardware TSO.
17279 * Firmware TSO on older chips gives lower performance, so it
17280 * is off by default, but can be enabled using ethtool.
17281 */
17282 if ((tg3_flag(tp, HW_TSO_1) ||
17283 tg3_flag(tp, HW_TSO_2) ||
17284 tg3_flag(tp, HW_TSO_3)) &&
17285 (features & NETIF_F_IP_CSUM))
17286 features |= NETIF_F_TSO;
17287 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17288 if (features & NETIF_F_IPV6_CSUM)
17289 features |= NETIF_F_TSO6;
17290 if (tg3_flag(tp, HW_TSO_3) ||
17291 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17292 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17293 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17294 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17295 tg3_asic_rev(tp) == ASIC_REV_57780)
17296 features |= NETIF_F_TSO_ECN;
17297 }
17298
17299 dev->features |= features;
17300 dev->vlan_features |= features;
17301
17302 /*
17303 * Add loopback capability only for a subset of devices that support
* MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17305 * loopback for the remaining devices.
17306 */
17307 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17308 !tg3_flag(tp, CPMU_PRESENT))
17309 /* Add the loopback capability */
17310 features |= NETIF_F_LOOPBACK;
17311
17312 dev->hw_features |= features;
17313
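/* 5705 A1 parts that lack TSO capability and sit on a
 * standard-speed PCI bus are limited to a 64-entry RX ring
 * (MAX_RXPEND_64), so cap the default accordingly.
 */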
17314 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17315 !tg3_flag(tp, TSO_CAPABLE) &&
17316 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17317 tg3_flag_set(tp, MAX_RXPEND_64);
17318 tp->rx_pending = 63;
17319 }
17320
17321 err = tg3_get_device_address(tp);
17322 if (err) {
17323 dev_err(&pdev->dev,
17324 "Could not obtain valid ethernet address, aborting\n");
17325 goto err_out_apeunmap;
17326 }
17327
/*
 * Reset the chip in case a UNDI or EFI driver did not shut it
 * down cleanly. The DMA self test will enable the WDMAC, and
 * we would otherwise see (spurious) pending DMA on the PCI bus
 * at that point.
 */
17333 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17334 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17335 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17336 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17337 }
17338
17339 err = tg3_test_dma(tp);
17340 if (err) {
17341 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17342 goto err_out_apeunmap;
17343 }
17344
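/* Set up each NAPI context and derive its interrupt, RX-return
 * consumer, and TX producer mailbox addresses from the vector-0
 * bases below.
 */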
17345 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17346 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17347 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17348 for (i = 0; i < tp->irq_max; i++) {
17349 struct tg3_napi *tnapi = &tp->napi[i];
17350
17351 tnapi->tp = tp;
17352 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17353
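/* Vectors 0-4 advance the interrupt mailbox address by 8 bytes;
 * later vectors advance it by 4.
 */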
17354 tnapi->int_mbox = intmbx;
17355 if (i <= 4)
17356 intmbx += 0x8;
17357 else
17358 intmbx += 0x4;
17359
17360 tnapi->consmbox = rcvmbx;
17361 tnapi->prodmbox = sndmbx;
17362
17363 if (i)
17364 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17365 else
17366 tnapi->coal_now = HOSTCC_MODE_NOW;
17367
17368 if (!tg3_flag(tp, SUPPORT_MSIX))
17369 break;
17370
/*
 * If we support MSI-X, we'll be using RSS. If we're using
 * RSS, the first vector only handles link interrupts and the
 * remaining vectors handle rx and tx interrupts. Reuse the
 * mailbox values for the next iteration. The values we set up
 * above are still useful for the single vectored mode.
 */
17378 if (!i)
17379 continue;
17380
17381 rcvmbx += 0x8;
17382
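/* The send producer mailboxes seem to be packed as two 32-bit
 * halves of each 64-bit register slot: use the +0x4 half first,
 * then the +0x0 half, before stepping to the next slot (hence
 * the -0x4 and +0xc arithmetic below).
 */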
17383 if (sndmbx & 0x4)
17384 sndmbx -= 0x4;
17385 else
17386 sndmbx += 0xc;
17387 }
17388
17389 tg3_init_coal(tp);
17390
17391 pci_set_drvdata(pdev, dev);
17392
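/* Only the 5719, 5720, and 5762 have the hardware timestamping
 * support used for PTP.
 */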
17393 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17394 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17395 tg3_asic_rev(tp) == ASIC_REV_5762)
17396 tg3_flag_set(tp, PTP_CAPABLE);
17397
17398 if (tg3_flag(tp, 5717_PLUS)) {
/* Return to a low-power mode */
17400 tg3_frob_aux_power(tp, false);
17401 }
17402
17403 tg3_timer_init(tp);
17404
17405 tg3_carrier_off(tp);
17406
17407 err = register_netdev(dev);
17408 if (err) {
17409 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17410 goto err_out_apeunmap;
17411 }
17412
17413 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17414 tp->board_part_number,
17415 tg3_chip_rev_id(tp),
17416 tg3_bus_string(tp, str),
17417 dev->dev_addr);
17418
17419 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17420 struct phy_device *phydev;
17421 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17422 netdev_info(dev,
17423 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17424 phydev->drv->name, dev_name(&phydev->dev));
17425 } else {
17426 char *ethtype;
17427
17428 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17429 ethtype = "10/100Base-TX";
17430 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17431 ethtype = "1000Base-SX";
17432 else
17433 ethtype = "10/100/1000Base-T";
17434
17435 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17436 "(WireSpeed[%d], EEE[%d])\n",
17437 tg3_phy_string(tp), ethtype,
17438 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17439 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17440 }
17441
17442 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17443 (dev->features & NETIF_F_RXCSUM) != 0,
17444 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17445 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17446 tg3_flag(tp, ENABLE_ASF) != 0,
17447 tg3_flag(tp, TSO_CAPABLE) != 0);
17448 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17449 tp->dma_rwctrl,
17450 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17451 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17452
17453 pci_save_state(pdev);
17454
17455 return 0;
17456
17457 err_out_apeunmap:
17458 if (tp->aperegs) {
17459 iounmap(tp->aperegs);
17460 tp->aperegs = NULL;
17461 }
17462
17463 err_out_iounmap:
17464 if (tp->regs) {
17465 iounmap(tp->regs);
17466 tp->regs = NULL;
17467 }
17468
17469 err_out_free_dev:
17470 free_netdev(dev);
17471
17472 err_out_power_down:
17473 pci_set_power_state(pdev, PCI_D3hot);
17474
17475 err_out_free_res:
17476 pci_release_regions(pdev);
17477
17478 err_out_disable_pdev:
17479 pci_disable_device(pdev);
17480 pci_set_drvdata(pdev, NULL);
17481 return err;
17482 }
17483
17484 static void tg3_remove_one(struct pci_dev *pdev)
17485 {
17486 struct net_device *dev = pci_get_drvdata(pdev);
17487
17488 if (dev) {
17489 struct tg3 *tp = netdev_priv(dev);
17490
17491 release_firmware(tp->fw);
17492
17493 tg3_reset_task_cancel(tp);
17494
17495 if (tg3_flag(tp, USE_PHYLIB)) {
17496 tg3_phy_fini(tp);
17497 tg3_mdio_fini(tp);
17498 }
17499
17500 unregister_netdev(dev);
17501 if (tp->aperegs) {
17502 iounmap(tp->aperegs);
17503 tp->aperegs = NULL;
17504 }
17505 if (tp->regs) {
17506 iounmap(tp->regs);
17507 tp->regs = NULL;
17508 }
17509 free_netdev(dev);
17510 pci_release_regions(pdev);
17511 pci_disable_device(pdev);
17512 pci_set_drvdata(pdev, NULL);
17513 }
17514 }
17515
17516 #ifdef CONFIG_PM_SLEEP
17517 static int tg3_suspend(struct device *device)
17518 {
17519 struct pci_dev *pdev = to_pci_dev(device);
17520 struct net_device *dev = pci_get_drvdata(pdev);
17521 struct tg3 *tp = netdev_priv(dev);
17522 int err;
17523
17524 if (!netif_running(dev))
17525 return 0;
17526
17527 tg3_reset_task_cancel(tp);
17528 tg3_phy_stop(tp);
17529 tg3_netif_stop(tp);
17530
17531 tg3_timer_stop(tp);
17532
17533 tg3_full_lock(tp, 1);
17534 tg3_disable_ints(tp);
17535 tg3_full_unlock(tp);
17536
17537 netif_device_detach(dev);
17538
17539 tg3_full_lock(tp, 0);
17540 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17541 tg3_flag_clear(tp, INIT_COMPLETE);
17542 tg3_full_unlock(tp);
17543
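/* If preparing for power-down fails, restart the hardware and
 * reattach the device so it is left in a usable state.
 */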
17544 err = tg3_power_down_prepare(tp);
17545 if (err) {
17546 int err2;
17547
17548 tg3_full_lock(tp, 0);
17549
17550 tg3_flag_set(tp, INIT_COMPLETE);
17551 err2 = tg3_restart_hw(tp, true);
17552 if (err2)
17553 goto out;
17554
17555 tg3_timer_start(tp);
17556
17557 netif_device_attach(dev);
17558 tg3_netif_start(tp);
17559
17560 out:
17561 tg3_full_unlock(tp);
17562
17563 if (!err2)
17564 tg3_phy_start(tp);
17565 }
17566
17567 return err;
17568 }
17569
17570 static int tg3_resume(struct device *device)
17571 {
17572 struct pci_dev *pdev = to_pci_dev(device);
17573 struct net_device *dev = pci_get_drvdata(pdev);
17574 struct tg3 *tp = netdev_priv(dev);
17575 int err;
17576
17577 if (!netif_running(dev))
17578 return 0;
17579
17580 netif_device_attach(dev);
17581
17582 tg3_full_lock(tp, 0);
17583
17584 tg3_flag_set(tp, INIT_COMPLETE);
17585 err = tg3_restart_hw(tp,
17586 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17587 if (err)
17588 goto out;
17589
17590 tg3_timer_start(tp);
17591
17592 tg3_netif_start(tp);
17593
17594 out:
17595 tg3_full_unlock(tp);
17596
17597 if (!err)
17598 tg3_phy_start(tp);
17599
17600 return err;
17601 }
17602 #endif /* CONFIG_PM_SLEEP */
17603
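/* tg3_suspend/tg3_resume are only referenced when CONFIG_PM_SLEEP
 * is set; SIMPLE_DEV_PM_OPS provides an empty dev_pm_ops otherwise.
 */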
17604 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17605
17606 /**
17607 * tg3_io_error_detected - called when PCI error is detected
17608 * @pdev: Pointer to PCI device
17609 * @state: The current pci connection state
17610 *
17611 * This function is called after a PCI bus error affecting
17612 * this device has been detected.
17613 */
17614 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17615 pci_channel_state_t state)
17616 {
17617 struct net_device *netdev = pci_get_drvdata(pdev);
17618 struct tg3 *tp = netdev_priv(netdev);
17619 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17620
17621 netdev_info(netdev, "PCI I/O error detected\n");
17622
17623 rtnl_lock();
17624
17625 if (!netif_running(netdev))
17626 goto done;
17627
17628 tg3_phy_stop(tp);
17629
17630 tg3_netif_stop(tp);
17631
17632 tg3_timer_stop(tp);
17633
/* Make sure that the reset task cannot run while we tear things down */
17635 tg3_reset_task_cancel(tp);
17636
17637 netif_device_detach(netdev);
17638
17639 /* Clean up software state, even if MMIO is blocked */
17640 tg3_full_lock(tp, 0);
17641 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17642 tg3_full_unlock(tp);
17643
17644 done:
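/* On a permanent channel failure, tell the core to disconnect the
 * device; otherwise disable it ahead of the coming slot reset.
 */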
17645 if (state == pci_channel_io_perm_failure)
17646 err = PCI_ERS_RESULT_DISCONNECT;
17647 else
17648 pci_disable_device(pdev);
17649
17650 rtnl_unlock();
17651
17652 return err;
17653 }
17654
17655 /**
17656 * tg3_io_slot_reset - called after the pci bus has been reset.
17657 * @pdev: Pointer to PCI device
17658 *
17659 * Restart the card from scratch, as if from a cold-boot.
* At this point, the card has experienced a hard reset,
17661 * followed by fixups by BIOS, and has its config space
17662 * set up identically to what it was at cold boot.
17663 */
17664 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17665 {
17666 struct net_device *netdev = pci_get_drvdata(pdev);
17667 struct tg3 *tp = netdev_priv(netdev);
17668 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17669 int err;
17670
17671 rtnl_lock();
17672
17673 if (pci_enable_device(pdev)) {
17674 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17675 goto done;
17676 }
17677
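/* Restore the config space saved at probe time, then re-save it
 * so a subsequent recovery starts from this restored state.
 */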
17678 pci_set_master(pdev);
17679 pci_restore_state(pdev);
17680 pci_save_state(pdev);
17681
17682 if (!netif_running(netdev)) {
17683 rc = PCI_ERS_RESULT_RECOVERED;
17684 goto done;
17685 }
17686
17687 err = tg3_power_up(tp);
17688 if (err)
17689 goto done;
17690
17691 rc = PCI_ERS_RESULT_RECOVERED;
17692
17693 done:
17694 rtnl_unlock();
17695
17696 return rc;
17697 }
17698
17699 /**
17700 * tg3_io_resume - called when traffic can start flowing again.
17701 * @pdev: Pointer to PCI device
17702 *
17703 * This callback is called when the error recovery driver tells
* us that it's OK to resume normal operation.
17705 */
17706 static void tg3_io_resume(struct pci_dev *pdev)
17707 {
17708 struct net_device *netdev = pci_get_drvdata(pdev);
17709 struct tg3 *tp = netdev_priv(netdev);
17710 int err;
17711
17712 rtnl_lock();
17713
17714 if (!netif_running(netdev))
17715 goto done;
17716
17717 tg3_full_lock(tp, 0);
17718 tg3_flag_set(tp, INIT_COMPLETE);
17719 err = tg3_restart_hw(tp, true);
17720 if (err) {
17721 tg3_full_unlock(tp);
17722 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17723 goto done;
17724 }
17725
17726 netif_device_attach(netdev);
17727
17728 tg3_timer_start(tp);
17729
17730 tg3_netif_start(tp);
17731
17732 tg3_full_unlock(tp);
17733
17734 tg3_phy_start(tp);
17735
17736 done:
17737 rtnl_unlock();
17738 }
17739
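/* PCI error recovery proceeds error_detected -> slot_reset -> resume;
 * see Documentation/PCI/pci-error-recovery.txt.
 */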
17740 static const struct pci_error_handlers tg3_err_handler = {
17741 .error_detected = tg3_io_error_detected,
17742 .slot_reset = tg3_io_slot_reset,
17743 .resume = tg3_io_resume
17744 };
17745
17746 static struct pci_driver tg3_driver = {
17747 .name = DRV_MODULE_NAME,
17748 .id_table = tg3_pci_tbl,
17749 .probe = tg3_init_one,
17750 .remove = tg3_remove_one,
17751 .err_handler = &tg3_err_handler,
17752 .driver.pm = &tg3_pm_ops,
17753 };
17754
17755 static int __init tg3_init(void)
17756 {
17757 return pci_register_driver(&tg3_driver);
17758 }
17759
17760 static void __exit tg3_cleanup(void)
17761 {
17762 pci_unregister_driver(&tg3_driver);
17763 }
17764
17765 module_init(tg3_init);
17766 module_exit(tg3_cleanup);