/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0 0
#define BAR_2 2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag) \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag) \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
#define TG3_MIN_NUM 130
#define DRV_MODULE_VERSION \
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE "February 14, 2013"

#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
#define RESET_KIND_SUSPEND 2

#define TG3_DEF_RX_MODE 0
#define TG3_DEF_TX_MODE 0
#define TG3_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV | \
	 NETIF_MSG_PROBE | \
	 NETIF_MSG_LINK | \
	 NETIF_MSG_TIMER | \
	 NETIF_MSG_IFDOWN | \
	 NETIF_MSG_IFUP | \
	 NETIF_MSG_RX_ERR | \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY 100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU 60
#define TG3_MAX_MTU(tp) \
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING 100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE 512
#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
			   TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB 64

#define TG3_RX_STD_DMA_SZ 1536
#define TG3_RX_JMB_DMA_SZ 9046

#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD 256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K 2048
#define TG3_TX_BD_DMA_MAX_4K 4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC 5
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3 "tigon/tg3.bin"
#define FIRMWARE_TG357766 "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST 0
#define TG3_LINK_TEST 1
#define TG3_REGISTER_TEST 2
#define TG3_MEMORY_TEST 3
#define TG3_MAC_LOOPB_TEST 4
#define TG3_PHY_LOOPB_TEST 5
#define TG3_EXT_LOOPB_TEST 6
#define TG3_INTERRUPT_TEST 7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST] = { "nvram test (online) " },
	[TG3_LINK_TEST] = { "link test (online) " },
	[TG3_REGISTER_TEST] = { "register test (offline)" },
	[TG3_MEMORY_TEST] = { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
};

#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

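/* Indirect register access: the register offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data moves
 * through TG3PCI_REG_DATA; indirect_lock serializes the two config cycles.
 */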
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

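/* Mailbox writes in indirect mode.  The two producer-index mailboxes have
 * dedicated PCI config-space aliases; all other mailboxes go through the
 * register window at offset 0x5600 above the mailbox address.
 */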
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

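/* Flushing variant of the mailbox write: read the register back when
 * needed so the posted write reaches the device before we continue.
 */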
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

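/* TX mailbox write.  Chips with the TXD_MBOX_HWBUG erratum need the value
 * written twice, and a read-back flush is added when mailbox writes may be
 * posted or reordered.
 */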
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)

#define tw32(reg, val) tp->write32(tp, reg, val)
#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg) tp->read32(tp, reg)

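/* Access NIC SRAM through the memory window at TG3PCI_MEM_WIN_BASE_ADDR,
 * using PCI config cycles when SRAM_USE_CONFIG is set and MMIO otherwise.
 * The window base is always returned to zero afterwards.
 */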
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

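/* Acquire an APE hardware semaphore: post our request bit, then poll the
 * grant register for up to 1 millisecond.  If the grant never arrives,
 * the request is revoked and -EBUSY returned.
 */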
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
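		/* fall through */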
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
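		/* fall through */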
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

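/* Grab the MEM semaphore and poll, for at most timeout_us, until the APE
 * reports no event pending.  On success the semaphore is still held by the
 * caller; on timeout it has been released and -EBUSY is returned.
 */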
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS 5000

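/* Read a PHY register by driving an MDIO read frame through MAC_MI_COM.
 * Autopolling is paused while the frame is on the wire, and the MI_COM
 * busy bit is polled (up to PHY_BUSY_LOOPS times) for completion.
 */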
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

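/* Clause-45 register access, done indirectly through the clause-22 MMD
 * address/data registers (MII_TG3_MMD_CTRL selects the device address,
 * MII_TG3_MMD_ADDRESS carries the register offset and then the data).
 */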
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

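/* Translate the requested TX/RX pause flags into the pause bits of a
 * 1000BASE-X autonegotiation advertisement word.
 */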
1875 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1876 {
1877 u16 miireg;
1878
1879 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1880 miireg = ADVERTISE_1000XPAUSE;
1881 else if (flow_ctrl & FLOW_CTRL_TX)
1882 miireg = ADVERTISE_1000XPSE_ASYM;
1883 else if (flow_ctrl & FLOW_CTRL_RX)
1884 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1885 else
1886 miireg = 0;
1887
1888 return miireg;
1889 }
1890
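/* Resolve the 1000BASE-X pause capabilities, per IEEE 802.3 Annex 28B.
* Roughly:
*
*  local PAUSE  local ASYM  remote PAUSE  remote ASYM      result
*       1           -            1            -            TX + RX
*       0           1            1            1            TX only
*       1           1            0            1            RX only
*  (any other combination)                                 none
*/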
1891 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1892 {
1893 u8 cap = 0;
1894
1895 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1896 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1897 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1898 if (lcladv & ADVERTISE_1000XPAUSE)
1899 cap = FLOW_CTRL_RX;
1900 if (rmtadv & ADVERTISE_1000XPAUSE)
1901 cap = FLOW_CTRL_TX;
1902 }
1903
1904 return cap;
1905 }
1906
1907 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1908 {
1909 u8 autoneg;
1910 u8 flowctrl = 0;
1911 u32 old_rx_mode = tp->rx_mode;
1912 u32 old_tx_mode = tp->tx_mode;
1913
1914 if (tg3_flag(tp, USE_PHYLIB))
1915 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1916 else
1917 autoneg = tp->link_config.autoneg;
1918
1919 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1920 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1921 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1922 else
1923 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1924 } else
1925 flowctrl = tp->link_config.flowctrl;
1926
1927 tp->link_config.active_flowctrl = flowctrl;
1928
1929 if (flowctrl & FLOW_CTRL_RX)
1930 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1931 else
1932 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1933
1934 if (old_rx_mode != tp->rx_mode)
1935 tw32_f(MAC_RX_MODE, tp->rx_mode);
1936
1937 if (flowctrl & FLOW_CTRL_TX)
1938 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1939 else
1940 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1941
1942 if (old_tx_mode != tp->tx_mode)
1943 tw32_f(MAC_TX_MODE, tp->tx_mode);
1944 }
1945
1946 static void tg3_adjust_link(struct net_device *dev)
1947 {
1948 u8 oldflowctrl, linkmesg = 0;
1949 u32 mac_mode, lcl_adv, rmt_adv;
1950 struct tg3 *tp = netdev_priv(dev);
1951 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1952
1953 spin_lock_bh(&tp->lock);
1954
1955 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1956 MAC_MODE_HALF_DUPLEX);
1957
1958 oldflowctrl = tp->link_config.active_flowctrl;
1959
1960 if (phydev->link) {
1961 lcl_adv = 0;
1962 rmt_adv = 0;
1963
1964 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1965 mac_mode |= MAC_MODE_PORT_MODE_MII;
1966 else if (phydev->speed == SPEED_1000 ||
1967 tg3_asic_rev(tp) != ASIC_REV_5785)
1968 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1969 else
1970 mac_mode |= MAC_MODE_PORT_MODE_MII;
1971
1972 if (phydev->duplex == DUPLEX_HALF)
1973 mac_mode |= MAC_MODE_HALF_DUPLEX;
1974 else {
1975 lcl_adv = mii_advertise_flowctrl(
1976 tp->link_config.flowctrl);
1977
1978 if (phydev->pause)
1979 rmt_adv = LPA_PAUSE_CAP;
1980 if (phydev->asym_pause)
1981 rmt_adv |= LPA_PAUSE_ASYM;
1982 }
1983
1984 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1985 } else
1986 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1987
1988 if (mac_mode != tp->mac_mode) {
1989 tp->mac_mode = mac_mode;
1990 tw32_f(MAC_MODE, tp->mac_mode);
1991 udelay(40);
1992 }
1993
1994 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1995 if (phydev->speed == SPEED_10)
1996 tw32(MAC_MI_STAT,
1997 MAC_MI_STAT_10MBPS_MODE |
1998 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1999 else
2000 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2001 }
2002
2003 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2004 tw32(MAC_TX_LENGTHS,
2005 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2006 (6 << TX_LENGTHS_IPG_SHIFT) |
2007 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2008 else
2009 tw32(MAC_TX_LENGTHS,
2010 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2011 (6 << TX_LENGTHS_IPG_SHIFT) |
2012 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2013
2014 if (phydev->link != tp->old_link ||
2015 phydev->speed != tp->link_config.active_speed ||
2016 phydev->duplex != tp->link_config.active_duplex ||
2017 oldflowctrl != tp->link_config.active_flowctrl)
2018 linkmesg = 1;
2019
2020 tp->old_link = phydev->link;
2021 tp->link_config.active_speed = phydev->speed;
2022 tp->link_config.active_duplex = phydev->duplex;
2023
2024 spin_unlock_bh(&tp->lock);
2025
2026 if (linkmesg)
2027 tg3_link_report(tp);
2028 }
2029
2030 static int tg3_phy_init(struct tg3 *tp)
2031 {
2032 struct phy_device *phydev;
2033
2034 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2035 return 0;
2036
2037 /* Bring the PHY back to a known state. */
2038 tg3_bmcr_reset(tp);
2039
2040 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2041
2042 /* Attach the MAC to the PHY. */
2043 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2044 tg3_adjust_link, phydev->interface);
2045 if (IS_ERR(phydev)) {
2046 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2047 return PTR_ERR(phydev);
2048 }
2049
2050 /* Mask with MAC supported features. */
2051 switch (phydev->interface) {
2052 case PHY_INTERFACE_MODE_GMII:
2053 case PHY_INTERFACE_MODE_RGMII:
2054 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2055 phydev->supported &= (PHY_GBIT_FEATURES |
2056 SUPPORTED_Pause |
2057 SUPPORTED_Asym_Pause);
2058 break;
2059 }
2060 /* fallthru */
2061 case PHY_INTERFACE_MODE_MII:
2062 phydev->supported &= (PHY_BASIC_FEATURES |
2063 SUPPORTED_Pause |
2064 SUPPORTED_Asym_Pause);
2065 break;
2066 default:
2067 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2068 return -EINVAL;
2069 }
2070
2071 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2072
2073 phydev->advertising = phydev->supported;
2074
2075 return 0;
2076 }
2077
2078 static void tg3_phy_start(struct tg3 *tp)
2079 {
2080 struct phy_device *phydev;
2081
2082 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2083 return;
2084
2085 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2086
2087 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2088 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2089 phydev->speed = tp->link_config.speed;
2090 phydev->duplex = tp->link_config.duplex;
2091 phydev->autoneg = tp->link_config.autoneg;
2092 phydev->advertising = tp->link_config.advertising;
2093 }
2094
2095 phy_start(phydev);
2096
2097 phy_start_aneg(phydev);
2098 }
2099
2100 static void tg3_phy_stop(struct tg3 *tp)
2101 {
2102 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2103 return;
2104
2105 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2106 }
2107
2108 static void tg3_phy_fini(struct tg3 *tp)
2109 {
2110 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2111 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2112 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2113 }
2114 }
2115
2116 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2117 {
2118 int err;
2119 u32 val;
2120
2121 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2122 return 0;
2123
2124 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2125 /* Cannot do read-modify-write on 5401 */
2126 err = tg3_phy_auxctl_write(tp,
2127 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2128 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2129 0x4c20);
2130 goto done;
2131 }
2132
2133 err = tg3_phy_auxctl_read(tp,
2134 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2135 if (err)
2136 return err;
2137
2138 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2139 err = tg3_phy_auxctl_write(tp,
2140 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2141
2142 done:
2143 return err;
2144 }
2145
2146 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2147 {
2148 u32 phytest;
2149
2150 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2151 u32 phy;
2152
2153 tg3_writephy(tp, MII_TG3_FET_TEST,
2154 phytest | MII_TG3_FET_SHADOW_EN);
2155 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2156 if (enable)
2157 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2158 else
2159 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2160 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2161 }
2162 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2163 }
2164 }
2165
2166 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2167 {
2168 u32 reg;
2169
2170 if (!tg3_flag(tp, 5705_PLUS) ||
2171 (tg3_flag(tp, 5717_PLUS) &&
2172 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2173 return;
2174
2175 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2176 tg3_phy_fet_toggle_apd(tp, enable);
2177 return;
2178 }
2179
2180 reg = MII_TG3_MISC_SHDW_WREN |
2181 MII_TG3_MISC_SHDW_SCR5_SEL |
2182 MII_TG3_MISC_SHDW_SCR5_LPED |
2183 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2184 MII_TG3_MISC_SHDW_SCR5_SDTL |
2185 MII_TG3_MISC_SHDW_SCR5_C125OE;
2186 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2187 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2188
2189 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2190
2191
2192 reg = MII_TG3_MISC_SHDW_WREN |
2193 MII_TG3_MISC_SHDW_APD_SEL |
2194 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2195 if (enable)
2196 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2197
2198 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2199 }
2200
2201 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2202 {
2203 u32 phy;
2204
2205 if (!tg3_flag(tp, 5705_PLUS) ||
2206 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2207 return;
2208
2209 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2210 u32 ephy;
2211
2212 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2213 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2214
2215 tg3_writephy(tp, MII_TG3_FET_TEST,
2216 ephy | MII_TG3_FET_SHADOW_EN);
2217 if (!tg3_readphy(tp, reg, &phy)) {
2218 if (enable)
2219 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2220 else
2221 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2222 tg3_writephy(tp, reg, phy);
2223 }
2224 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2225 }
2226 } else {
2227 int ret;
2228
2229 ret = tg3_phy_auxctl_read(tp,
2230 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2231 if (!ret) {
2232 if (enable)
2233 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2234 else
2235 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2236 tg3_phy_auxctl_write(tp,
2237 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2238 }
2239 }
2240 }
2241
2242 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2243 {
2244 int ret;
2245 u32 val;
2246
2247 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2248 return;
2249
2250 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2251 if (!ret)
2252 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2253 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2254 }
2255
2256 static void tg3_phy_apply_otp(struct tg3 *tp)
2257 {
2258 u32 otp, phy;
2259
2260 if (!tp->phy_otp)
2261 return;
2262
2263 otp = tp->phy_otp;
2264
2265 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2266 return;
2267
2268 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2269 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2270 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2271
2272 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2273 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2274 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2275
2276 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2277 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2278 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2279
2280 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2281 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2282
2283 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2284 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2285
2286 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2287 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2288 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2289
2290 tg3_phy_toggle_auxctl_smdsp(tp, false);
2291 }
2292
2293 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2294 {
2295 u32 val;
2296
2297 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2298 return;
2299
2300 tp->setlpicnt = 0;
2301
2302 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2303 current_link_up == 1 &&
2304 tp->link_config.active_duplex == DUPLEX_FULL &&
2305 (tp->link_config.active_speed == SPEED_100 ||
2306 tp->link_config.active_speed == SPEED_1000)) {
2307 u32 eeectl;
2308
2309 if (tp->link_config.active_speed == SPEED_1000)
2310 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2311 else
2312 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2313
2314 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2315
2316 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2317 TG3_CL45_D7_EEERES_STAT, &val);
2318
2319 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2320 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2321 tp->setlpicnt = 2;
2322 }
2323
2324 if (!tp->setlpicnt) {
2325 if (current_link_up == 1 &&
2326 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2327 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2328 tg3_phy_toggle_auxctl_smdsp(tp, false);
2329 }
2330
2331 val = tr32(TG3_CPMU_EEE_MODE);
2332 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2333 }
2334 }
2335
2336 static void tg3_phy_eee_enable(struct tg3 *tp)
2337 {
2338 u32 val;
2339
2340 if (tp->link_config.active_speed == SPEED_1000 &&
2341 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2342 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2343 tg3_flag(tp, 57765_CLASS)) &&
2344 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2345 val = MII_TG3_DSP_TAP26_ALNOKO |
2346 MII_TG3_DSP_TAP26_RMRXSTO;
2347 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2348 tg3_phy_toggle_auxctl_smdsp(tp, false);
2349 }
2350
2351 val = tr32(TG3_CPMU_EEE_MODE);
2352 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2353 }
2354
2355 static int tg3_wait_macro_done(struct tg3 *tp)
2356 {
2357 int limit = 100;
2358
2359 while (limit--) {
2360 u32 tmp32;
2361
2362 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2363 if ((tmp32 & 0x1000) == 0)
2364 break;
2365 }
2366 }
2367 if (limit < 0)
2368 return -EBUSY;
2369
2370 return 0;
2371 }
2372
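/* Each of the four DSP channels is loaded with six test words and
* read back for comparison. Only the low 15 bits of the even words
* and the low 4 bits of the odd words are significant (note the
* test_pat[] values fit those widths), hence the 0x7fff and 0x000f
* masks applied to the readback below.
*/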
2373 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2374 {
2375 static const u32 test_pat[4][6] = {
2376 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2377 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2378 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2379 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2380 };
2381 int chan;
2382
2383 for (chan = 0; chan < 4; chan++) {
2384 int i;
2385
2386 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2387 (chan * 0x2000) | 0x0200);
2388 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2389
2390 for (i = 0; i < 6; i++)
2391 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2392 test_pat[chan][i]);
2393
2394 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2395 if (tg3_wait_macro_done(tp)) {
2396 *resetp = 1;
2397 return -EBUSY;
2398 }
2399
2400 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2401 (chan * 0x2000) | 0x0200);
2402 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2403 if (tg3_wait_macro_done(tp)) {
2404 *resetp = 1;
2405 return -EBUSY;
2406 }
2407
2408 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2409 if (tg3_wait_macro_done(tp)) {
2410 *resetp = 1;
2411 return -EBUSY;
2412 }
2413
2414 for (i = 0; i < 6; i += 2) {
2415 u32 low, high;
2416
2417 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2418 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2419 tg3_wait_macro_done(tp)) {
2420 *resetp = 1;
2421 return -EBUSY;
2422 }
2423 low &= 0x7fff;
2424 high &= 0x000f;
2425 if (low != test_pat[chan][i] ||
2426 high != test_pat[chan][i+1]) {
2427 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2428 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2429 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2430
2431 return -EBUSY;
2432 }
2433 }
2434 }
2435
2436 return 0;
2437 }
2438
2439 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2440 {
2441 int chan;
2442
2443 for (chan = 0; chan < 4; chan++) {
2444 int i;
2445
2446 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2447 (chan * 0x2000) | 0x0200);
2448 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2449 for (i = 0; i < 6; i++)
2450 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2451 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2452 if (tg3_wait_macro_done(tp))
2453 return -EBUSY;
2454 }
2455
2456 return 0;
2457 }
2458
2459 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2460 {
2461 u32 reg32, phy9_orig;
2462 int retries, do_phy_reset, err;
2463
2464 retries = 10;
2465 do_phy_reset = 1;
2466 do {
2467 if (do_phy_reset) {
2468 err = tg3_bmcr_reset(tp);
2469 if (err)
2470 return err;
2471 do_phy_reset = 0;
2472 }
2473
2474 /* Disable transmitter and interrupt. */
2475 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2476 continue;
2477
2478 reg32 |= 0x3000;
2479 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2480
2481 /* Set full-duplex, 1000 Mbps. */
2482 tg3_writephy(tp, MII_BMCR,
2483 BMCR_FULLDPLX | BMCR_SPEED1000);
2484
2485 /* Set to master mode. */
2486 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2487 continue;
2488
2489 tg3_writephy(tp, MII_CTRL1000,
2490 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2491
2492 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2493 if (err)
2494 return err;
2495
2496 /* Block the PHY control access. */
2497 tg3_phydsp_write(tp, 0x8005, 0x0800);
2498
2499 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2500 if (!err)
2501 break;
2502 } while (--retries);
2503
2504 err = tg3_phy_reset_chanpat(tp);
2505 if (err)
2506 return err;
2507
2508 tg3_phydsp_write(tp, 0x8005, 0x0000);
2509
2510 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2511 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2512
2513 tg3_phy_toggle_auxctl_smdsp(tp, false);
2514
2515 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2516
2517 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2518 reg32 &= ~0x3000;
2519 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2520 } else if (!err)
2521 err = -EBUSY;
2522
2523 return err;
2524 }
2525
2526 static void tg3_carrier_on(struct tg3 *tp)
2527 {
2528 netif_carrier_on(tp->dev);
2529 tp->link_up = true;
2530 }
2531
2532 static void tg3_carrier_off(struct tg3 *tp)
2533 {
2534 netif_carrier_off(tp->dev);
2535 tp->link_up = false;
2536 }
2537
2538 /* Reset the tigon3 PHY unconditionally and apply the
2539 * chip-specific post-reset workarounds.
2540 */
2541 static int tg3_phy_reset(struct tg3 *tp)
2542 {
2543 u32 val, cpmuctrl;
2544 int err;
2545
2546 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2547 val = tr32(GRC_MISC_CFG);
2548 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2549 udelay(40);
2550 }
2551 err = tg3_readphy(tp, MII_BMSR, &val);
2552 err |= tg3_readphy(tp, MII_BMSR, &val);
2553 if (err != 0)
2554 return -EBUSY;
2555
2556 if (netif_running(tp->dev) && tp->link_up) {
2557 tg3_carrier_off(tp);
2558 tg3_link_report(tp);
2559 }
2560
2561 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2562 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2563 tg3_asic_rev(tp) == ASIC_REV_5705) {
2564 err = tg3_phy_reset_5703_4_5(tp);
2565 if (err)
2566 return err;
2567 goto out;
2568 }
2569
2570 cpmuctrl = 0;
2571 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2572 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2573 cpmuctrl = tr32(TG3_CPMU_CTRL);
2574 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2575 tw32(TG3_CPMU_CTRL,
2576 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2577 }
2578
2579 err = tg3_bmcr_reset(tp);
2580 if (err)
2581 return err;
2582
2583 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2584 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2585 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2586
2587 tw32(TG3_CPMU_CTRL, cpmuctrl);
2588 }
2589
2590 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2591 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2592 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2593 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2594 CPMU_LSPD_1000MB_MACCLK_12_5) {
2595 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2596 udelay(40);
2597 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2598 }
2599 }
2600
2601 if (tg3_flag(tp, 5717_PLUS) &&
2602 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2603 return 0;
2604
2605 tg3_phy_apply_otp(tp);
2606
2607 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2608 tg3_phy_toggle_apd(tp, true);
2609 else
2610 tg3_phy_toggle_apd(tp, false);
2611
2612 out:
2613 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2614 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2615 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2616 tg3_phydsp_write(tp, 0x000a, 0x0323);
2617 tg3_phy_toggle_auxctl_smdsp(tp, false);
2618 }
2619
2620 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2621 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2622 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2623 }
2624
2625 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2626 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2627 tg3_phydsp_write(tp, 0x000a, 0x310b);
2628 tg3_phydsp_write(tp, 0x201f, 0x9506);
2629 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2630 tg3_phy_toggle_auxctl_smdsp(tp, false);
2631 }
2632 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2633 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2634 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2635 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2636 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2637 tg3_writephy(tp, MII_TG3_TEST1,
2638 MII_TG3_TEST1_TRIM_EN | 0x4);
2639 } else
2640 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2641
2642 tg3_phy_toggle_auxctl_smdsp(tp, false);
2643 }
2644 }
2645
2646 /* Set the Extended packet length bit (bit 14) on all chips
2647 * that support jumbo frames. */
2648 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2649 /* Cannot do read-modify-write on 5401 */
2650 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2651 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2652 /* Set bit 14 with read-modify-write to preserve other bits */
2653 err = tg3_phy_auxctl_read(tp,
2654 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2655 if (!err)
2656 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2657 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2658 }
2659
2660 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2661 * jumbo frame transmission.
2662 */
2663 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2664 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2665 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2666 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2667 }
2668
2669 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2670 /* adjust output voltage */
2671 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2672 }
2673
2674 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2675 tg3_phydsp_write(tp, 0xffb, 0x4000);
2676
2677 tg3_phy_toggle_automdix(tp, 1);
2678 tg3_phy_set_wirespeed(tp);
2679 return 0;
2680 }
2681
2682 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2683 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2684 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2685 TG3_GPIO_MSG_NEED_VAUX)
2686 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2687 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2688 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2689 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2690 (TG3_GPIO_MSG_DRVR_PRES << 12))
2691
2692 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2693 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2694 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2695 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2696 (TG3_GPIO_MSG_NEED_VAUX << 12))
2697
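/* Each PCI function owns one 4-bit nibble of the GPIO message
* register: bits [3:0] for function 0, [7:4] for function 1, and so
* on. tg3_set_function_status() below rewrites only the caller's
* nibble; e.g. function 2 posting TG3_GPIO_MSG_DRVR_PRES sets
* 0x1 << (TG3_APE_GPIO_MSG_SHIFT + 8).
*/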
2698 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2699 {
2700 u32 status, shift;
2701
2702 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2703 tg3_asic_rev(tp) == ASIC_REV_5719)
2704 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2705 else
2706 status = tr32(TG3_CPMU_DRV_STATUS);
2707
2708 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2709 status &= ~(TG3_GPIO_MSG_MASK << shift);
2710 status |= (newstat << shift);
2711
2712 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2713 tg3_asic_rev(tp) == ASIC_REV_5719)
2714 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2715 else
2716 tw32(TG3_CPMU_DRV_STATUS, status);
2717
2718 return status >> TG3_APE_GPIO_MSG_SHIFT;
2719 }
2720
2721 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2722 {
2723 if (!tg3_flag(tp, IS_NIC))
2724 return 0;
2725
2726 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2727 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2728 tg3_asic_rev(tp) == ASIC_REV_5720) {
2729 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2730 return -EIO;
2731
2732 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2733
2734 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2735 TG3_GRC_LCLCTL_PWRSW_DELAY);
2736
2737 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2738 } else {
2739 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2740 TG3_GRC_LCLCTL_PWRSW_DELAY);
2741 }
2742
2743 return 0;
2744 }
2745
2746 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2747 {
2748 u32 grc_local_ctrl;
2749
2750 if (!tg3_flag(tp, IS_NIC) ||
2751 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2752 tg3_asic_rev(tp) == ASIC_REV_5701)
2753 return;
2754
2755 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2756
2757 tw32_wait_f(GRC_LOCAL_CTRL,
2758 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2759 TG3_GRC_LCLCTL_PWRSW_DELAY);
2760
2761 tw32_wait_f(GRC_LOCAL_CTRL,
2762 grc_local_ctrl,
2763 TG3_GRC_LCLCTL_PWRSW_DELAY);
2764
2765 tw32_wait_f(GRC_LOCAL_CTRL,
2766 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2767 TG3_GRC_LCLCTL_PWRSW_DELAY);
2768 }
2769
2770 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2771 {
2772 if (!tg3_flag(tp, IS_NIC))
2773 return;
2774
2775 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2776 tg3_asic_rev(tp) == ASIC_REV_5701) {
2777 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2778 (GRC_LCLCTRL_GPIO_OE0 |
2779 GRC_LCLCTRL_GPIO_OE1 |
2780 GRC_LCLCTRL_GPIO_OE2 |
2781 GRC_LCLCTRL_GPIO_OUTPUT0 |
2782 GRC_LCLCTRL_GPIO_OUTPUT1),
2783 TG3_GRC_LCLCTL_PWRSW_DELAY);
2784 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2785 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2786 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2787 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2788 GRC_LCLCTRL_GPIO_OE1 |
2789 GRC_LCLCTRL_GPIO_OE2 |
2790 GRC_LCLCTRL_GPIO_OUTPUT0 |
2791 GRC_LCLCTRL_GPIO_OUTPUT1 |
2792 tp->grc_local_ctrl;
2793 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2795
2796 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2797 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2799
2800 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2801 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2802 TG3_GRC_LCLCTL_PWRSW_DELAY);
2803 } else {
2804 u32 no_gpio2;
2805 u32 grc_local_ctrl = 0;
2806
2807 /* Workaround to prevent overdrawing Amps. */
2808 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2809 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2810 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2811 grc_local_ctrl,
2812 TG3_GRC_LCLCTL_PWRSW_DELAY);
2813 }
2814
2815 /* On 5753 and variants, GPIO2 cannot be used. */
2816 no_gpio2 = tp->nic_sram_data_cfg &
2817 NIC_SRAM_DATA_CFG_NO_GPIO2;
2818
2819 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2820 GRC_LCLCTRL_GPIO_OE1 |
2821 GRC_LCLCTRL_GPIO_OE2 |
2822 GRC_LCLCTRL_GPIO_OUTPUT1 |
2823 GRC_LCLCTRL_GPIO_OUTPUT2;
2824 if (no_gpio2) {
2825 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2826 GRC_LCLCTRL_GPIO_OUTPUT2);
2827 }
2828 tw32_wait_f(GRC_LOCAL_CTRL,
2829 tp->grc_local_ctrl | grc_local_ctrl,
2830 TG3_GRC_LCLCTL_PWRSW_DELAY);
2831
2832 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2833
2834 tw32_wait_f(GRC_LOCAL_CTRL,
2835 tp->grc_local_ctrl | grc_local_ctrl,
2836 TG3_GRC_LCLCTL_PWRSW_DELAY);
2837
2838 if (!no_gpio2) {
2839 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2840 tw32_wait_f(GRC_LOCAL_CTRL,
2841 tp->grc_local_ctrl | grc_local_ctrl,
2842 TG3_GRC_LCLCTL_PWRSW_DELAY);
2843 }
2844 }
2845 }
2846
2847 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2848 {
2849 u32 msg = 0;
2850
2851 /* Serialize power state transitions */
2852 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2853 return;
2854
2855 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2856 msg = TG3_GPIO_MSG_NEED_VAUX;
2857
2858 msg = tg3_set_function_status(tp, msg);
2859
2860 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2861 goto done;
2862
2863 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2864 tg3_pwrsrc_switch_to_vaux(tp);
2865 else
2866 tg3_pwrsrc_die_with_vmain(tp);
2867
2868 done:
2869 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2870 }
2871
2872 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2873 {
2874 bool need_vaux = false;
2875
2876 /* The GPIOs do something completely different on 57765. */
2877 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2878 return;
2879
2880 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2881 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2882 tg3_asic_rev(tp) == ASIC_REV_5720) {
2883 tg3_frob_aux_power_5717(tp, include_wol ?
2884 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2885 return;
2886 }
2887
2888 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2889 struct net_device *dev_peer;
2890
2891 dev_peer = pci_get_drvdata(tp->pdev_peer);
2892
2893 /* remove_one() may have been run on the peer. */
2894 if (dev_peer) {
2895 struct tg3 *tp_peer = netdev_priv(dev_peer);
2896
2897 if (tg3_flag(tp_peer, INIT_COMPLETE))
2898 return;
2899
2900 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2901 tg3_flag(tp_peer, ENABLE_ASF))
2902 need_vaux = true;
2903 }
2904 }
2905
2906 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2907 tg3_flag(tp, ENABLE_ASF))
2908 need_vaux = true;
2909
2910 if (need_vaux)
2911 tg3_pwrsrc_switch_to_vaux(tp);
2912 else
2913 tg3_pwrsrc_die_with_vmain(tp);
2914 }
2915
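/* Returns nonzero when MAC_MODE_LINK_POLARITY should be set for the
* given link speed. In short: with LED_CTRL_MODE_PHY_2 it is always
* set, on the BCM5411 it is set at 100/1000 Mbps, and on other PHYs
* only at 10 Mbps.
*/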
2916 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2917 {
2918 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2919 return 1;
2920 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2921 if (speed != SPEED_10)
2922 return 1;
2923 } else if (speed == SPEED_10)
2924 return 1;
2925
2926 return 0;
2927 }
2928
2929 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2930 {
2931 u32 val;
2932
2933 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2934 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2935 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2936 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2937
2938 sg_dig_ctrl |=
2939 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2940 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2941 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2942 }
2943 return;
2944 }
2945
2946 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2947 tg3_bmcr_reset(tp);
2948 val = tr32(GRC_MISC_CFG);
2949 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2950 udelay(40);
2951 return;
2952 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2953 u32 phytest;
2954 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2955 u32 phy;
2956
2957 tg3_writephy(tp, MII_ADVERTISE, 0);
2958 tg3_writephy(tp, MII_BMCR,
2959 BMCR_ANENABLE | BMCR_ANRESTART);
2960
2961 tg3_writephy(tp, MII_TG3_FET_TEST,
2962 phytest | MII_TG3_FET_SHADOW_EN);
2963 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2964 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2965 tg3_writephy(tp,
2966 MII_TG3_FET_SHDW_AUXMODE4,
2967 phy);
2968 }
2969 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2970 }
2971 return;
2972 } else if (do_low_power) {
2973 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2974 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2975
2976 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2977 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2978 MII_TG3_AUXCTL_PCTL_VREG_11V;
2979 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2980 }
2981
2982 /* The PHY should not be powered down on some chips because
2983 * of bugs.
2984 */
2985 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2986 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2987 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2988 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2989 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2990 !tp->pci_fn))
2991 return;
2992
2993 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2994 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2995 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2996 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2997 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2998 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2999 }
3000
3001 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3002 }
3003
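/* NVRAM software arbitration: request slot 1 via SWARB_REQ_SET1 and
* poll for SWARB_GNT1 for up to ~160ms (8000 iterations x 20us).
* nvram_lock_cnt makes the lock recursive, so nested
* tg3_nvram_lock()/tg3_nvram_unlock() pairs are safe as long as
* tp->lock is held throughout.
*/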
3004 /* tp->lock is held. */
3005 static int tg3_nvram_lock(struct tg3 *tp)
3006 {
3007 if (tg3_flag(tp, NVRAM)) {
3008 int i;
3009
3010 if (tp->nvram_lock_cnt == 0) {
3011 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3012 for (i = 0; i < 8000; i++) {
3013 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3014 break;
3015 udelay(20);
3016 }
3017 if (i == 8000) {
3018 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3019 return -ENODEV;
3020 }
3021 }
3022 tp->nvram_lock_cnt++;
3023 }
3024 return 0;
3025 }
3026
3027 /* tp->lock is held. */
3028 static void tg3_nvram_unlock(struct tg3 *tp)
3029 {
3030 if (tg3_flag(tp, NVRAM)) {
3031 if (tp->nvram_lock_cnt > 0)
3032 tp->nvram_lock_cnt--;
3033 if (tp->nvram_lock_cnt == 0)
3034 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3035 }
3036 }
3037
3038 /* tp->lock is held. */
3039 static void tg3_enable_nvram_access(struct tg3 *tp)
3040 {
3041 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3042 u32 nvaccess = tr32(NVRAM_ACCESS);
3043
3044 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3045 }
3046 }
3047
3048 /* tp->lock is held. */
3049 static void tg3_disable_nvram_access(struct tg3 *tp)
3050 {
3051 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3052 u32 nvaccess = tr32(NVRAM_ACCESS);
3053
3054 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3055 }
3056 }
3057
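/* Reads one dword through the legacy SEEPROM interface. The
* completion poll allows up to ~1s (1000 polls with msleep(1)); the
* final swab32() is the blind byteswap described inline, e.g. a raw
* register value of 0x12345678 becomes 0x78563412 in native byte
* order.
*/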
3058 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3059 u32 offset, u32 *val)
3060 {
3061 u32 tmp;
3062 int i;
3063
3064 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3065 return -EINVAL;
3066
3067 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3068 EEPROM_ADDR_DEVID_MASK |
3069 EEPROM_ADDR_READ);
3070 tw32(GRC_EEPROM_ADDR,
3071 tmp |
3072 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3073 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3074 EEPROM_ADDR_ADDR_MASK) |
3075 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3076
3077 for (i = 0; i < 1000; i++) {
3078 tmp = tr32(GRC_EEPROM_ADDR);
3079
3080 if (tmp & EEPROM_ADDR_COMPLETE)
3081 break;
3082 msleep(1);
3083 }
3084 if (!(tmp & EEPROM_ADDR_COMPLETE))
3085 return -EBUSY;
3086
3087 tmp = tr32(GRC_EEPROM_DATA);
3088
3089 /*
3090 * The data will always be opposite the native endian
3091 * format. Perform a blind byteswap to compensate.
3092 */
3093 *val = swab32(tmp);
3094
3095 return 0;
3096 }
3097
3098 #define NVRAM_CMD_TIMEOUT 10000
3099
3100 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3101 {
3102 int i;
3103
3104 tw32(NVRAM_CMD, nvram_cmd);
3105 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3106 udelay(10);
3107 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3108 udelay(10);
3109 break;
3110 }
3111 }
3112
3113 if (i == NVRAM_CMD_TIMEOUT)
3114 return -EBUSY;
3115
3116 return 0;
3117 }
3118
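/* Atmel AT45DB parts are addressed as (page << PAGE_POS) | byte
* within page rather than linearly. As an illustration, assuming the
* usual 264-byte page and an ATMEL_AT45DB0X1B_PAGE_POS of 9: linear
* offset 1000 is page 3, byte 208, i.e. (3 << 9) + 208 = 0x6d0.
*/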
3119 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3120 {
3121 if (tg3_flag(tp, NVRAM) &&
3122 tg3_flag(tp, NVRAM_BUFFERED) &&
3123 tg3_flag(tp, FLASH) &&
3124 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3125 (tp->nvram_jedecnum == JEDEC_ATMEL))
3126
3127 addr = ((addr / tp->nvram_pagesize) <<
3128 ATMEL_AT45DB0X1B_PAGE_POS) +
3129 (addr % tp->nvram_pagesize);
3130
3131 return addr;
3132 }
3133
3134 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3135 {
3136 if (tg3_flag(tp, NVRAM) &&
3137 tg3_flag(tp, NVRAM_BUFFERED) &&
3138 tg3_flag(tp, FLASH) &&
3139 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3140 (tp->nvram_jedecnum == JEDEC_ATMEL))
3141
3142 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3143 tp->nvram_pagesize) +
3144 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3145
3146 return addr;
3147 }
3148
3149 /* NOTE: Data read in from NVRAM is byteswapped according to
3150 * the byteswapping settings for all other register accesses.
3151 * tg3 devices are BE devices, so on a BE machine, the data
3152 * returned will be exactly as it is seen in NVRAM. On a LE
3153 * machine, the 32-bit value will be byteswapped.
3154 */
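/* For example, a word stored in NVRAM as the byte stream
* 11 22 33 44 reads back as 0x11223344 on a BE host but as
* 0x44332211 on an LE host; tg3_nvram_read_be32() below hides the
* difference by always returning the bytestream (big-endian) form.
*/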
3155 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3156 {
3157 int ret;
3158
3159 if (!tg3_flag(tp, NVRAM))
3160 return tg3_nvram_read_using_eeprom(tp, offset, val);
3161
3162 offset = tg3_nvram_phys_addr(tp, offset);
3163
3164 if (offset > NVRAM_ADDR_MSK)
3165 return -EINVAL;
3166
3167 ret = tg3_nvram_lock(tp);
3168 if (ret)
3169 return ret;
3170
3171 tg3_enable_nvram_access(tp);
3172
3173 tw32(NVRAM_ADDR, offset);
3174 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3175 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3176
3177 if (ret == 0)
3178 *val = tr32(NVRAM_RDDATA);
3179
3180 tg3_disable_nvram_access(tp);
3181
3182 tg3_nvram_unlock(tp);
3183
3184 return ret;
3185 }
3186
3187 /* Ensures NVRAM data is in bytestream format. */
3188 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3189 {
3190 u32 v;
3191 int res = tg3_nvram_read(tp, offset, &v);
3192 if (!res)
3193 *val = cpu_to_be32(v);
3194 return res;
3195 }
3196
3197 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3198 u32 offset, u32 len, u8 *buf)
3199 {
3200 int i, j, rc = 0;
3201 u32 val;
3202
3203 for (i = 0; i < len; i += 4) {
3204 u32 addr;
3205 __be32 data;
3206
3207 addr = offset + i;
3208
3209 memcpy(&data, buf + i, 4);
3210
3211 /*
3212 * The SEEPROM interface expects the data to always be opposite
3213 * the native endian format. We accomplish this by reversing
3214 * all the operations that would have been performed on the
3215 * data from a call to tg3_nvram_read_be32().
3216 */
3217 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3218
3219 val = tr32(GRC_EEPROM_ADDR);
3220 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3221
3222 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3223 EEPROM_ADDR_READ);
3224 tw32(GRC_EEPROM_ADDR, val |
3225 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3226 (addr & EEPROM_ADDR_ADDR_MASK) |
3227 EEPROM_ADDR_START |
3228 EEPROM_ADDR_WRITE);
3229
3230 for (j = 0; j < 1000; j++) {
3231 val = tr32(GRC_EEPROM_ADDR);
3232
3233 if (val & EEPROM_ADDR_COMPLETE)
3234 break;
3235 msleep(1);
3236 }
3237 if (!(val & EEPROM_ADDR_COMPLETE)) {
3238 rc = -EBUSY;
3239 break;
3240 }
3241 }
3242
3243 return rc;
3244 }
3245
3246 /* offset and length are dword aligned */
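/* Unbuffered (flash) parts must be reprogrammed a full page at a
* time. Roughly: read the page containing offset into a bounce
* buffer, merge in the new bytes, issue a write-enable plus page
* erase, then another write-enable and program the page word by
* word, tagging the first word NVRAM_CMD_FIRST and the final word
* NVRAM_CMD_LAST. Repeat per page until len is exhausted, then
* finish with NVRAM_CMD_WRDI.
*/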
3247 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3248 u8 *buf)
3249 {
3250 int ret = 0;
3251 u32 pagesize = tp->nvram_pagesize;
3252 u32 pagemask = pagesize - 1;
3253 u32 nvram_cmd;
3254 u8 *tmp;
3255
3256 tmp = kmalloc(pagesize, GFP_KERNEL);
3257 if (tmp == NULL)
3258 return -ENOMEM;
3259
3260 while (len) {
3261 int j;
3262 u32 phy_addr, page_off, size;
3263
3264 phy_addr = offset & ~pagemask;
3265
3266 for (j = 0; j < pagesize; j += 4) {
3267 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3268 (__be32 *) (tmp + j));
3269 if (ret)
3270 break;
3271 }
3272 if (ret)
3273 break;
3274
3275 page_off = offset & pagemask;
3276 size = pagesize;
3277 if (len < size)
3278 size = len;
3279
3280 len -= size;
3281
3282 memcpy(tmp + page_off, buf, size);
3283
3284 offset = offset + (pagesize - page_off);
3285
3286 tg3_enable_nvram_access(tp);
3287
3288 /*
3289 * Before we can erase the flash page, we need
3290 * to issue a special "write enable" command.
3291 */
3292 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3293
3294 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3295 break;
3296
3297 /* Erase the target page */
3298 tw32(NVRAM_ADDR, phy_addr);
3299
3300 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3301 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3302
3303 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3304 break;
3305
3306 /* Issue another write enable to start the write. */
3307 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3308
3309 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3310 break;
3311
3312 for (j = 0; j < pagesize; j += 4) {
3313 __be32 data;
3314
3315 data = *((__be32 *) (tmp + j));
3316
3317 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3318
3319 tw32(NVRAM_ADDR, phy_addr + j);
3320
3321 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3322 NVRAM_CMD_WR;
3323
3324 if (j == 0)
3325 nvram_cmd |= NVRAM_CMD_FIRST;
3326 else if (j == (pagesize - 4))
3327 nvram_cmd |= NVRAM_CMD_LAST;
3328
3329 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3330 if (ret)
3331 break;
3332 }
3333 if (ret)
3334 break;
3335 }
3336
3337 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3338 tg3_nvram_exec_cmd(tp, nvram_cmd);
3339
3340 kfree(tmp);
3341
3342 return ret;
3343 }
3344
3345 /* offset and length are dword aligned */
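/* Buffered parts are programmed one dword at a time. The controller
* still wants NVRAM_CMD_FIRST on the first dword of the transfer and
* at each page boundary, and NVRAM_CMD_LAST on the last dword of a
* page or of the transfer; e.g. a write straddling a page boundary
* tags the dword that ends the page LAST and the dword that begins
* the next page FIRST again.
*/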
3346 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3347 u8 *buf)
3348 {
3349 int i, ret = 0;
3350
3351 for (i = 0; i < len; i += 4, offset += 4) {
3352 u32 page_off, phy_addr, nvram_cmd;
3353 __be32 data;
3354
3355 memcpy(&data, buf + i, 4);
3356 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3357
3358 page_off = offset % tp->nvram_pagesize;
3359
3360 phy_addr = tg3_nvram_phys_addr(tp, offset);
3361
3362 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3363
3364 if (page_off == 0 || i == 0)
3365 nvram_cmd |= NVRAM_CMD_FIRST;
3366 if (page_off == (tp->nvram_pagesize - 4))
3367 nvram_cmd |= NVRAM_CMD_LAST;
3368
3369 if (i == (len - 4))
3370 nvram_cmd |= NVRAM_CMD_LAST;
3371
3372 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3373 !tg3_flag(tp, FLASH) ||
3374 !tg3_flag(tp, 57765_PLUS))
3375 tw32(NVRAM_ADDR, phy_addr);
3376
3377 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3378 !tg3_flag(tp, 5755_PLUS) &&
3379 (tp->nvram_jedecnum == JEDEC_ST) &&
3380 (nvram_cmd & NVRAM_CMD_FIRST)) {
3381 u32 cmd;
3382
3383 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3384 ret = tg3_nvram_exec_cmd(tp, cmd);
3385 if (ret)
3386 break;
3387 }
3388 if (!tg3_flag(tp, FLASH)) {
3389 /* We always do complete word writes to eeprom. */
3390 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3391 }
3392
3393 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3394 if (ret)
3395 break;
3396 }
3397 return ret;
3398 }
3399
3400 /* offset and length are dword aligned */
3401 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3402 {
3403 int ret;
3404
3405 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3406 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3407 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3408 udelay(40);
3409 }
3410
3411 if (!tg3_flag(tp, NVRAM)) {
3412 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3413 } else {
3414 u32 grc_mode;
3415
3416 ret = tg3_nvram_lock(tp);
3417 if (ret)
3418 return ret;
3419
3420 tg3_enable_nvram_access(tp);
3421 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3422 tw32(NVRAM_WRITE1, 0x406);
3423
3424 grc_mode = tr32(GRC_MODE);
3425 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3426
3427 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3428 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3429 buf);
3430 } else {
3431 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3432 buf);
3433 }
3434
3435 grc_mode = tr32(GRC_MODE);
3436 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3437
3438 tg3_disable_nvram_access(tp);
3439 tg3_nvram_unlock(tp);
3440 }
3441
3442 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3443 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3444 udelay(40);
3445 }
3446
3447 return ret;
3448 }
3449
3450 #define RX_CPU_SCRATCH_BASE 0x30000
3451 #define RX_CPU_SCRATCH_SIZE 0x04000
3452 #define TX_CPU_SCRATCH_BASE 0x34000
3453 #define TX_CPU_SCRATCH_SIZE 0x04000
3454
3455 /* tp->lock is held. */
3456 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3457 {
3458 int i;
3459 const int iters = 10000;
3460
3461 for (i = 0; i < iters; i++) {
3462 tw32(cpu_base + CPU_STATE, 0xffffffff);
3463 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3464 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3465 break;
3466 }
3467
3468 return (i == iters) ? -EBUSY : 0;
3469 }
3470
3471 /* tp->lock is held. */
3472 static int tg3_rxcpu_pause(struct tg3 *tp)
3473 {
3474 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3475
3476 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3477 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3478 udelay(10);
3479
3480 return rc;
3481 }
3482
3483 /* tp->lock is held. */
3484 static int tg3_txcpu_pause(struct tg3 *tp)
3485 {
3486 return tg3_pause_cpu(tp, TX_CPU_BASE);
3487 }
3488
3489 /* tp->lock is held. */
3490 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3491 {
3492 tw32(cpu_base + CPU_STATE, 0xffffffff);
3493 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3494 }
3495
3496 /* tp->lock is held. */
3497 static void tg3_rxcpu_resume(struct tg3 *tp)
3498 {
3499 tg3_resume_cpu(tp, RX_CPU_BASE);
3500 }
3501
3502 /* tp->lock is held. */
3503 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3504 {
3505 int rc;
3506
3507 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3508
3509 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3510 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3511
3512 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3513 return 0;
3514 }
3515 if (cpu_base == RX_CPU_BASE) {
3516 rc = tg3_rxcpu_pause(tp);
3517 } else {
3518 /*
3519 * There is only an Rx CPU for the 5750 derivative in the
3520 * BCM4785.
3521 */
3522 if (tg3_flag(tp, IS_SSB_CORE))
3523 return 0;
3524
3525 rc = tg3_txcpu_pause(tp);
3526 }
3527
3528 if (rc) {
3529 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3530 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3531 return -ENODEV;
3532 }
3533
3534 /* Clear firmware's nvram arbitration. */
3535 if (tg3_flag(tp, NVRAM))
3536 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3537 return 0;
3538 }
3539
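/* Returns the number of u32 data words in the image (or in the
* current fragment). With illustrative numbers: a non-fragmented
* blob with tp->fw->size = 0x1000 carries
* (0x1000 - TG3_FW_HDR_LEN) / 4 data words, while a fragment whose
* header reports len = 0x200 carries (0x200 - TG3_FW_HDR_LEN) / 4.
*/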
3540 static int tg3_fw_data_len(struct tg3 *tp,
3541 const struct tg3_firmware_hdr *fw_hdr)
3542 {
3543 int fw_len;
3544
3545 /* Non-fragmented firmware has a single firmware header followed by
3546 * a contiguous chunk of data to be written. The length field in that
3547 * header is not the length of the data to be written but the
3548 * complete length of the bss. The data length is determined from
3549 * tp->fw->size minus the header length.
3550 *
3551 * Fragmented firmware has a main header followed by multiple
3552 * fragments. Each fragment is identical to non-fragmented firmware,
3553 * with a firmware header followed by a contiguous chunk of data. In
3554 * the main header, the length field is unused and set to 0xffffffff.
3555 * In each fragment header the length is the entire size of that
3556 * fragment, i.e. fragment data plus header length. The data length
3557 * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3558 */
3559 if (tp->fw_len == 0xffffffff)
3560 fw_len = be32_to_cpu(fw_hdr->len);
3561 else
3562 fw_len = tp->fw->size;
3563
3564 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3565 }
3566
3567 /* tp->lock is held. */
3568 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3569 u32 cpu_scratch_base, int cpu_scratch_size,
3570 const struct tg3_firmware_hdr *fw_hdr)
3571 {
3572 int err, i;
3573 void (*write_op)(struct tg3 *, u32, u32);
3574 int total_len = tp->fw->size;
3575
3576 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3577 netdev_err(tp->dev,
3578 "%s: Trying to load TX cpu firmware on a 5705 or later chip\n",
3579 __func__);
3580 return -EINVAL;
3581 }
3582
3583 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3584 write_op = tg3_write_mem;
3585 else
3586 write_op = tg3_write_indirect_reg32;
3587
3588 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3589 /* It is possible that bootcode is still loading at this point.
3590 * Get the nvram lock first before halting the cpu.
3591 */
3592 int lock_err = tg3_nvram_lock(tp);
3593 err = tg3_halt_cpu(tp, cpu_base);
3594 if (!lock_err)
3595 tg3_nvram_unlock(tp);
3596 if (err)
3597 goto out;
3598
3599 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3600 write_op(tp, cpu_scratch_base + i, 0);
3601 tw32(cpu_base + CPU_STATE, 0xffffffff);
3602 tw32(cpu_base + CPU_MODE,
3603 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3604 } else {
3605 /* Subtract additional main header for fragmented firmware and
3606 * advance to the first fragment.
3607 */
3608 total_len -= TG3_FW_HDR_LEN;
3609 fw_hdr++;
3610 }
3611
3612 do {
3613 u32 *fw_data = (u32 *)(fw_hdr + 1);
3614 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3615 write_op(tp, cpu_scratch_base +
3616 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3617 (i * sizeof(u32)),
3618 be32_to_cpu(fw_data[i]));
3619
3620 total_len -= be32_to_cpu(fw_hdr->len);
3621
3622 /* Advance to next fragment */
3623 fw_hdr = (struct tg3_firmware_hdr *)
3624 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3625 } while (total_len > 0);
3626
3627 err = 0;
3628
3629 out:
3630 return err;
3631 }
3632
3633 /* tp->lock is held. */
3634 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3635 {
3636 int i;
3637 const int iters = 5;
3638
3639 tw32(cpu_base + CPU_STATE, 0xffffffff);
3640 tw32_f(cpu_base + CPU_PC, pc);
3641
3642 for (i = 0; i < iters; i++) {
3643 if (tr32(cpu_base + CPU_PC) == pc)
3644 break;
3645 tw32(cpu_base + CPU_STATE, 0xffffffff);
3646 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3647 tw32_f(cpu_base + CPU_PC, pc);
3648 udelay(1000);
3649 }
3650
3651 return (i == iters) ? -EBUSY : 0;
3652 }
3653
3654 /* tp->lock is held. */
3655 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3656 {
3657 const struct tg3_firmware_hdr *fw_hdr;
3658 int err;
3659
3660 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3661
3662 /* Firmware blob starts with version numbers, followed by
3663 * start address and length. We are setting complete length.
3664 * length = end_address_of_bss - start_address_of_text.
3665 * Remainder is the blob to be loaded contiguously
3666 * from start address. */
3667
3668 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3669 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3670 fw_hdr);
3671 if (err)
3672 return err;
3673
3674 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3675 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3676 fw_hdr);
3677 if (err)
3678 return err;
3679
3680 /* Now startup only the RX cpu. */
3681 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3682 be32_to_cpu(fw_hdr->base_addr));
3683 if (err) {
3684 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3685 "should be %08x\n", __func__,
3686 tr32(RX_CPU_BASE + CPU_PC),
3687 be32_to_cpu(fw_hdr->base_addr));
3688 return -ENODEV;
3689 }
3690
3691 tg3_rxcpu_resume(tp);
3692
3693 return 0;
3694 }
3695
3696 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3697 {
3698 const int iters = 1000;
3699 int i;
3700 u32 val;
3701
3702 /* Wait for boot code to complete initialization and enter service
3703 * loop. It is then safe to download service patches.
3704 */
3705 for (i = 0; i < iters; i++) {
3706 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3707 break;
3708
3709 udelay(10);
3710 }
3711
3712 if (i == iters) {
3713 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3714 return -EBUSY;
3715 }
3716
3717 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3718 if (val & 0xff) {
3719 netdev_warn(tp->dev,
3720 "Other patches exist. Not downloading EEE patch\n");
3721 return -EEXIST;
3722 }
3723
3724 return 0;
3725 }
3726
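/* A fragmented firmware image laid out in memory looks roughly like
* this (sizes illustrative):
*
*   main header:     version | base_addr | len = 0xffffffff
*   fragment 1 hdr:  version (unused) | base_addr | len1
*   fragment 1 data: len1 - TG3_FW_HDR_LEN bytes
*   fragment 2 hdr:  version (unused) | base_addr | len2
*   fragment 2 data: len2 - TG3_FW_HDR_LEN bytes
*   ...
*/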
3727 /* tp->lock is held. */
3728 static void tg3_load_57766_firmware(struct tg3 *tp)
3729 {
3730 struct tg3_firmware_hdr *fw_hdr;
3731
3732 if (!tg3_flag(tp, NO_NVRAM))
3733 return;
3734
3735 if (tg3_validate_rxcpu_state(tp))
3736 return;
3737
3738 if (!tp->fw)
3739 return;
3740
3741 /* This firmware blob has a different format than older firmware
3742 * releases, as described below. The main difference is that we have
3743 * fragmented data to be written to non-contiguous locations.
3744 *
3745 * In the beginning we have a firmware header identical to other
3746 * firmware, which consists of version, base addr and length. The
3747 * length here is unused and set to 0xffffffff.
3748 *
3749 * This is followed by a series of firmware fragments which are
3750 * individually identical to previous firmware, i.e. they have the
3751 * firmware header followed by the data for that fragment. The
3752 * version field of the individual fragment header is unused.
3753 */
3754
3755 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3756 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3757 return;
3758
3759 if (tg3_rxcpu_pause(tp))
3760 return;
3761
3762 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3763 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3764
3765 tg3_rxcpu_resume(tp);
3766 }
3767
3768 /* tp->lock is held. */
3769 static int tg3_load_tso_firmware(struct tg3 *tp)
3770 {
3771 const struct tg3_firmware_hdr *fw_hdr;
3772 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3773 int err;
3774
3775 if (!tg3_flag(tp, FW_TSO))
3776 return 0;
3777
3778 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3779
3780 /* Firmware blob starts with version numbers, followed by
3781 * start address and length. We are setting complete length.
3782 * length = end_address_of_bss - start_address_of_text.
3783 * Remainder is the blob to be loaded contiguously
3784 * from start address. */
3785
3786 cpu_scratch_size = tp->fw_len;
3787
3788 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3789 cpu_base = RX_CPU_BASE;
3790 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3791 } else {
3792 cpu_base = TX_CPU_BASE;
3793 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3794 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3795 }
3796
3797 err = tg3_load_firmware_cpu(tp, cpu_base,
3798 cpu_scratch_base, cpu_scratch_size,
3799 fw_hdr);
3800 if (err)
3801 return err;
3802
3803 /* Now startup the cpu. */
3804 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3805 be32_to_cpu(fw_hdr->base_addr));
3806 if (err) {
3807 netdev_err(tp->dev,
3808 "%s fails to set CPU PC, is %08x should be %08x\n",
3809 __func__, tr32(cpu_base + CPU_PC),
3810 be32_to_cpu(fw_hdr->base_addr));
3811 return -ENODEV;
3812 }
3813
3814 tg3_resume_cpu(tp, cpu_base);
3815 return 0;
3816 }
3817
3818
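/* The MAC address is split across two registers: MAC_ADDR_x_HIGH
* holds the first two octets and MAC_ADDR_x_LOW the remaining four.
* For a hypothetical address 00:10:18:aa:bb:cc this works out to
* addr_high = 0x0010 and addr_low = 0x18aabbcc. The TX backoff seed
* is just the sum of all six octets masked with TX_BACKOFF_SEED_MASK.
*/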
3819 /* tp->lock is held. */
3820 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3821 {
3822 u32 addr_high, addr_low;
3823 int i;
3824
3825 addr_high = ((tp->dev->dev_addr[0] << 8) |
3826 tp->dev->dev_addr[1]);
3827 addr_low = ((tp->dev->dev_addr[2] << 24) |
3828 (tp->dev->dev_addr[3] << 16) |
3829 (tp->dev->dev_addr[4] << 8) |
3830 (tp->dev->dev_addr[5] << 0));
3831 for (i = 0; i < 4; i++) {
3832 if (i == 1 && skip_mac_1)
3833 continue;
3834 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3835 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3836 }
3837
3838 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3839 tg3_asic_rev(tp) == ASIC_REV_5704) {
3840 for (i = 0; i < 12; i++) {
3841 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3842 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3843 }
3844 }
3845
3846 addr_high = (tp->dev->dev_addr[0] +
3847 tp->dev->dev_addr[1] +
3848 tp->dev->dev_addr[2] +
3849 tp->dev->dev_addr[3] +
3850 tp->dev->dev_addr[4] +
3851 tp->dev->dev_addr[5]) &
3852 TX_BACKOFF_SEED_MASK;
3853 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3854 }
3855
3856 static void tg3_enable_register_access(struct tg3 *tp)
3857 {
3858 /*
3859 * Make sure register accesses (indirect or otherwise) will function
3860 * correctly.
3861 */
3862 pci_write_config_dword(tp->pdev,
3863 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3864 }
3865
3866 static int tg3_power_up(struct tg3 *tp)
3867 {
3868 int err;
3869
3870 tg3_enable_register_access(tp);
3871
3872 err = pci_set_power_state(tp->pdev, PCI_D0);
3873 if (!err) {
3874 /* Switch out of Vaux if it is a NIC */
3875 tg3_pwrsrc_switch_to_vmain(tp);
3876 } else {
3877 netdev_err(tp->dev, "Transition to D0 failed\n");
3878 }
3879
3880 return err;
3881 }
3882
3883 static int tg3_setup_phy(struct tg3 *, int);
3884
3885 static int tg3_power_down_prepare(struct tg3 *tp)
3886 {
3887 u32 misc_host_ctrl;
3888 bool device_should_wake, do_low_power;
3889
3890 tg3_enable_register_access(tp);
3891
3892 /* Restore the CLKREQ setting. */
3893 if (tg3_flag(tp, CLKREQ_BUG))
3894 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3895 PCI_EXP_LNKCTL_CLKREQ_EN);
3896
3897 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3898 tw32(TG3PCI_MISC_HOST_CTRL,
3899 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3900
3901 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3902 tg3_flag(tp, WOL_ENABLE);
3903
3904 if (tg3_flag(tp, USE_PHYLIB)) {
3905 do_low_power = false;
3906 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3907 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3908 struct phy_device *phydev;
3909 u32 phyid, advertising;
3910
3911 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3912
3913 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3914
3915 tp->link_config.speed = phydev->speed;
3916 tp->link_config.duplex = phydev->duplex;
3917 tp->link_config.autoneg = phydev->autoneg;
3918 tp->link_config.advertising = phydev->advertising;
3919
3920 advertising = ADVERTISED_TP |
3921 ADVERTISED_Pause |
3922 ADVERTISED_Autoneg |
3923 ADVERTISED_10baseT_Half;
3924
3925 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3926 if (tg3_flag(tp, WOL_SPEED_100MB))
3927 advertising |=
3928 ADVERTISED_100baseT_Half |
3929 ADVERTISED_100baseT_Full |
3930 ADVERTISED_10baseT_Full;
3931 else
3932 advertising |= ADVERTISED_10baseT_Full;
3933 }
3934
3935 phydev->advertising = advertising;
3936
3937 phy_start_aneg(phydev);
3938
3939 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3940 if (phyid != PHY_ID_BCMAC131) {
3941 phyid &= PHY_BCM_OUI_MASK;
3942 if (phyid == PHY_BCM_OUI_1 ||
3943 phyid == PHY_BCM_OUI_2 ||
3944 phyid == PHY_BCM_OUI_3)
3945 do_low_power = true;
3946 }
3947 }
3948 } else {
3949 do_low_power = true;
3950
3951 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3952 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3953
3954 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3955 tg3_setup_phy(tp, 0);
3956 }
3957
3958 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3959 u32 val;
3960
3961 val = tr32(GRC_VCPU_EXT_CTRL);
3962 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3963 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3964 int i;
3965 u32 val;
3966
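		/* No ASF firmware is managing the device here, so poll the
		 * firmware status mailbox for up to ~200 ms; the magic value
		 * presumably indicates the boot code has finished and it is
		 * safe to continue powering down.
		 */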
3967 for (i = 0; i < 200; i++) {
3968 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3969 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3970 break;
3971 msleep(1);
3972 }
3973 }
3974 if (tg3_flag(tp, WOL_CAP))
3975 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3976 WOL_DRV_STATE_SHUTDOWN |
3977 WOL_DRV_WOL |
3978 WOL_SET_MAGIC_PKT);
3979
3980 if (device_should_wake) {
3981 u32 mac_mode;
3982
3983 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3984 if (do_low_power &&
3985 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3986 tg3_phy_auxctl_write(tp,
3987 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3988 MII_TG3_AUXCTL_PCTL_WOL_EN |
3989 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3990 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3991 udelay(40);
3992 }
3993
3994 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3995 mac_mode = MAC_MODE_PORT_MODE_GMII;
3996 else
3997 mac_mode = MAC_MODE_PORT_MODE_MII;
3998
3999 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4000 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4001 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4002 SPEED_100 : SPEED_10;
4003 if (tg3_5700_link_polarity(tp, speed))
4004 mac_mode |= MAC_MODE_LINK_POLARITY;
4005 else
4006 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4007 }
4008 } else {
4009 mac_mode = MAC_MODE_PORT_MODE_TBI;
4010 }
4011
4012 if (!tg3_flag(tp, 5750_PLUS))
4013 tw32(MAC_LED_CTRL, tp->led_ctrl);
4014
4015 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4016 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4017 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4018 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4019
4020 if (tg3_flag(tp, ENABLE_APE))
4021 mac_mode |= MAC_MODE_APE_TX_EN |
4022 MAC_MODE_APE_RX_EN |
4023 MAC_MODE_TDE_ENABLE;
4024
4025 tw32_f(MAC_MODE, mac_mode);
4026 udelay(100);
4027
4028 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4029 udelay(10);
4030 }
4031
4032 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4033 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4034 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4035 u32 base_val;
4036
4037 base_val = tp->pci_clock_ctrl;
4038 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4039 CLOCK_CTRL_TXCLK_DISABLE);
4040
4041 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4042 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4043 } else if (tg3_flag(tp, 5780_CLASS) ||
4044 tg3_flag(tp, CPMU_PRESENT) ||
4045 tg3_asic_rev(tp) == ASIC_REV_5906) {
4046 /* do nothing */
4047 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4048 u32 newbits1, newbits2;
4049
4050 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4051 tg3_asic_rev(tp) == ASIC_REV_5701) {
4052 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4053 CLOCK_CTRL_TXCLK_DISABLE |
4054 CLOCK_CTRL_ALTCLK);
4055 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4056 } else if (tg3_flag(tp, 5705_PLUS)) {
4057 newbits1 = CLOCK_CTRL_625_CORE;
4058 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4059 } else {
4060 newbits1 = CLOCK_CTRL_ALTCLK;
4061 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4062 }
4063
4064 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4065 40);
4066
4067 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4068 40);
4069
4070 if (!tg3_flag(tp, 5705_PLUS)) {
4071 u32 newbits3;
4072
4073 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4074 tg3_asic_rev(tp) == ASIC_REV_5701) {
4075 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4076 CLOCK_CTRL_TXCLK_DISABLE |
4077 CLOCK_CTRL_44MHZ_CORE);
4078 } else {
4079 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4080 }
4081
4082 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4083 tp->pci_clock_ctrl | newbits3, 40);
4084 }
4085 }
4086
4087 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4088 tg3_power_down_phy(tp, do_low_power);
4089
4090 tg3_frob_aux_power(tp, true);
4091
4092 /* Workaround for unstable PLL clock */
4093 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4094 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4095 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4096 u32 val = tr32(0x7d00);
4097
4098 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4099 tw32(0x7d00, val);
4100 if (!tg3_flag(tp, ENABLE_ASF)) {
4101 int err;
4102
4103 err = tg3_nvram_lock(tp);
4104 tg3_halt_cpu(tp, RX_CPU_BASE);
4105 if (!err)
4106 tg3_nvram_unlock(tp);
4107 }
4108 }
4109
4110 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4111
4112 return 0;
4113 }
4114
4115 static void tg3_power_down(struct tg3 *tp)
4116 {
4117 tg3_power_down_prepare(tp);
4118
4119 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4120 pci_set_power_state(tp->pdev, PCI_D3hot);
4121 }
4122
4123 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4124 {
4125 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4126 case MII_TG3_AUX_STAT_10HALF:
4127 *speed = SPEED_10;
4128 *duplex = DUPLEX_HALF;
4129 break;
4130
4131 case MII_TG3_AUX_STAT_10FULL:
4132 *speed = SPEED_10;
4133 *duplex = DUPLEX_FULL;
4134 break;
4135
4136 case MII_TG3_AUX_STAT_100HALF:
4137 *speed = SPEED_100;
4138 *duplex = DUPLEX_HALF;
4139 break;
4140
4141 case MII_TG3_AUX_STAT_100FULL:
4142 *speed = SPEED_100;
4143 *duplex = DUPLEX_FULL;
4144 break;
4145
4146 case MII_TG3_AUX_STAT_1000HALF:
4147 *speed = SPEED_1000;
4148 *duplex = DUPLEX_HALF;
4149 break;
4150
4151 case MII_TG3_AUX_STAT_1000FULL:
4152 *speed = SPEED_1000;
4153 *duplex = DUPLEX_FULL;
4154 break;
4155
4156 default:
4157 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4158 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4159 SPEED_10;
4160 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4161 DUPLEX_HALF;
4162 break;
4163 }
4164 *speed = SPEED_UNKNOWN;
4165 *duplex = DUPLEX_UNKNOWN;
4166 break;
4167 }
4168 }
4169
4170 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4171 {
4172 int err = 0;
4173 u32 val, new_adv;
4174
4175 new_adv = ADVERTISE_CSMA;
4176 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4177 new_adv |= mii_advertise_flowctrl(flowctrl);
4178
4179 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4180 if (err)
4181 goto done;
4182
4183 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4184 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4185
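		/* 5701 A0/B0 revisions apparently need the master/slave
		 * resolution forced: advertise as manual master
		 * (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER), most likely a
		 * workaround for a hardware erratum on those chips.
		 */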
4186 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4187 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4188 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4189
4190 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4191 if (err)
4192 goto done;
4193 }
4194
4195 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4196 goto done;
4197
4198 tw32(TG3_CPMU_EEE_MODE,
4199 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4200
4201 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4202 if (!err) {
4203 u32 err2;
4204
4205 val = 0;
4206 		/* Advertise 100BASE-TX EEE ability */
4207 if (advertise & ADVERTISED_100baseT_Full)
4208 val |= MDIO_AN_EEE_ADV_100TX;
4209 		/* Advertise 1000BASE-T EEE ability */
4210 if (advertise & ADVERTISED_1000baseT_Full)
4211 val |= MDIO_AN_EEE_ADV_1000T;
4212 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4213 if (err)
4214 val = 0;
4215
4216 switch (tg3_asic_rev(tp)) {
4217 case ASIC_REV_5717:
4218 case ASIC_REV_57765:
4219 case ASIC_REV_57766:
4220 case ASIC_REV_5719:
4221 			/* If we advertised any EEE abilities above... */
4222 if (val)
4223 val = MII_TG3_DSP_TAP26_ALNOKO |
4224 MII_TG3_DSP_TAP26_RMRXSTO |
4225 MII_TG3_DSP_TAP26_OPCSINPT;
4226 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4227 /* Fall through */
4228 case ASIC_REV_5720:
4229 case ASIC_REV_5762:
4230 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4231 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4232 MII_TG3_DSP_CH34TP2_HIBW01);
4233 }
4234
4235 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4236 if (!err)
4237 err = err2;
4238 }
4239
4240 done:
4241 return err;
4242 }
4243
4244 static void tg3_phy_copper_begin(struct tg3 *tp)
4245 {
4246 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4247 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4248 u32 adv, fc;
4249
4250 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4251 adv = ADVERTISED_10baseT_Half |
4252 ADVERTISED_10baseT_Full;
4253 if (tg3_flag(tp, WOL_SPEED_100MB))
4254 adv |= ADVERTISED_100baseT_Half |
4255 ADVERTISED_100baseT_Full;
4256
4257 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4258 } else {
4259 adv = tp->link_config.advertising;
4260 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4261 adv &= ~(ADVERTISED_1000baseT_Half |
4262 ADVERTISED_1000baseT_Full);
4263
4264 fc = tp->link_config.flowctrl;
4265 }
4266
4267 tg3_phy_autoneg_cfg(tp, adv, fc);
4268
4269 tg3_writephy(tp, MII_BMCR,
4270 BMCR_ANENABLE | BMCR_ANRESTART);
4271 } else {
4272 int i;
4273 u32 bmcr, orig_bmcr;
4274
4275 tp->link_config.active_speed = tp->link_config.speed;
4276 tp->link_config.active_duplex = tp->link_config.duplex;
4277
4278 bmcr = 0;
4279 switch (tp->link_config.speed) {
4280 default:
4281 case SPEED_10:
4282 break;
4283
4284 case SPEED_100:
4285 bmcr |= BMCR_SPEED100;
4286 break;
4287
4288 case SPEED_1000:
4289 bmcr |= BMCR_SPEED1000;
4290 break;
4291 }
4292
4293 if (tp->link_config.duplex == DUPLEX_FULL)
4294 bmcr |= BMCR_FULLDPLX;
4295
4296 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4297 (bmcr != orig_bmcr)) {
4298 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4299 for (i = 0; i < 1500; i++) {
4300 u32 tmp;
4301
4302 udelay(10);
4303 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4304 tg3_readphy(tp, MII_BMSR, &tmp))
4305 continue;
4306 if (!(tmp & BMSR_LSTATUS)) {
4307 udelay(40);
4308 break;
4309 }
4310 }
4311 tg3_writephy(tp, MII_BMCR, bmcr);
4312 udelay(40);
4313 }
4314 }
4315 }
4316
4317 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4318 {
4319 int err;
4320
4321 /* Turn off tap power management. */
4322 /* Set Extended packet length bit */
4323 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4324
4325 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4326 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4327 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4328 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4329 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4330
4331 udelay(40);
4332
4333 return err;
4334 }
4335
4336 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4337 {
4338 u32 advmsk, tgtadv, advertising;
4339
4340 advertising = tp->link_config.advertising;
4341 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4342
4343 advmsk = ADVERTISE_ALL;
4344 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4345 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4346 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4347 }
4348
4349 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4350 return false;
4351
4352 if ((*lcladv & advmsk) != tgtadv)
4353 return false;
4354
4355 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4356 u32 tg3_ctrl;
4357
4358 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4359
4360 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4361 return false;
4362
4363 if (tgtadv &&
4364 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4365 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4366 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4367 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4368 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4369 } else {
4370 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4371 }
4372
4373 if (tg3_ctrl != tgtadv)
4374 return false;
4375 }
4376
4377 return true;
4378 }
4379
4380 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4381 {
4382 u32 lpeth = 0;
4383
4384 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4385 u32 val;
4386
4387 if (tg3_readphy(tp, MII_STAT1000, &val))
4388 return false;
4389
4390 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4391 }
4392
4393 if (tg3_readphy(tp, MII_LPA, rmtadv))
4394 return false;
4395
4396 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4397 tp->link_config.rmt_adv = lpeth;
4398
4399 return true;
4400 }
4401
4402 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4403 {
4404 if (curr_link_up != tp->link_up) {
4405 if (curr_link_up) {
4406 tg3_carrier_on(tp);
4407 } else {
4408 tg3_carrier_off(tp);
4409 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4410 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4411 }
4412
4413 tg3_link_report(tp);
4414 return true;
4415 }
4416
4417 return false;
4418 }
4419
4420 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4421 {
4422 int current_link_up;
4423 u32 bmsr, val;
4424 u32 lcl_adv, rmt_adv;
4425 u16 current_speed;
4426 u8 current_duplex;
4427 int i, err;
4428
4429 tw32(MAC_EVENT, 0);
4430
4431 tw32_f(MAC_STATUS,
4432 (MAC_STATUS_SYNC_CHANGED |
4433 MAC_STATUS_CFG_CHANGED |
4434 MAC_STATUS_MI_COMPLETION |
4435 MAC_STATUS_LNKSTATE_CHANGED));
4436 udelay(40);
4437
4438 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4439 tw32_f(MAC_MI_MODE,
4440 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4441 udelay(80);
4442 }
4443
4444 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4445
4446 /* Some third-party PHYs need to be reset on link going
4447 * down.
4448 */
4449 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4450 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4451 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4452 tp->link_up) {
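		/* BMSR latches link-failure events: the first read returns the
		 * latched status and the second returns the current link
		 * state, which is why BMSR is read back to back here and
		 * throughout this function.
		 */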
4453 tg3_readphy(tp, MII_BMSR, &bmsr);
4454 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4455 !(bmsr & BMSR_LSTATUS))
4456 force_reset = 1;
4457 }
4458 if (force_reset)
4459 tg3_phy_reset(tp);
4460
4461 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4462 tg3_readphy(tp, MII_BMSR, &bmsr);
4463 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4464 !tg3_flag(tp, INIT_COMPLETE))
4465 bmsr = 0;
4466
4467 if (!(bmsr & BMSR_LSTATUS)) {
4468 err = tg3_init_5401phy_dsp(tp);
4469 if (err)
4470 return err;
4471
4472 tg3_readphy(tp, MII_BMSR, &bmsr);
4473 for (i = 0; i < 1000; i++) {
4474 udelay(10);
4475 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4476 (bmsr & BMSR_LSTATUS)) {
4477 udelay(40);
4478 break;
4479 }
4480 }
4481
4482 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4483 TG3_PHY_REV_BCM5401_B0 &&
4484 !(bmsr & BMSR_LSTATUS) &&
4485 tp->link_config.active_speed == SPEED_1000) {
4486 err = tg3_phy_reset(tp);
4487 if (!err)
4488 err = tg3_init_5401phy_dsp(tp);
4489 if (err)
4490 return err;
4491 }
4492 }
4493 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4494 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4495 /* 5701 {A0,B0} CRC bug workaround */
4496 tg3_writephy(tp, 0x15, 0x0a75);
4497 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4498 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4499 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4500 }
4501
4502 /* Clear pending interrupts... */
4503 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4504 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4505
4506 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4507 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4508 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4509 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4510
4511 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4512 tg3_asic_rev(tp) == ASIC_REV_5701) {
4513 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4514 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4515 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4516 else
4517 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4518 }
4519
4520 current_link_up = 0;
4521 current_speed = SPEED_UNKNOWN;
4522 current_duplex = DUPLEX_UNKNOWN;
4523 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4524 tp->link_config.rmt_adv = 0;
4525
4526 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4527 err = tg3_phy_auxctl_read(tp,
4528 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4529 &val);
4530 if (!err && !(val & (1 << 10))) {
4531 tg3_phy_auxctl_write(tp,
4532 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4533 val | (1 << 10));
4534 goto relink;
4535 }
4536 }
4537
4538 bmsr = 0;
4539 for (i = 0; i < 100; i++) {
4540 tg3_readphy(tp, MII_BMSR, &bmsr);
4541 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4542 (bmsr & BMSR_LSTATUS))
4543 break;
4544 udelay(40);
4545 }
4546
4547 if (bmsr & BMSR_LSTATUS) {
4548 u32 aux_stat, bmcr;
4549
4550 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4551 for (i = 0; i < 2000; i++) {
4552 udelay(10);
4553 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4554 aux_stat)
4555 break;
4556 }
4557
4558 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4559 &current_speed,
4560 &current_duplex);
4561
4562 bmcr = 0;
4563 for (i = 0; i < 200; i++) {
4564 tg3_readphy(tp, MII_BMCR, &bmcr);
4565 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4566 continue;
4567 if (bmcr && bmcr != 0x7fff)
4568 break;
4569 udelay(10);
4570 }
4571
4572 lcl_adv = 0;
4573 rmt_adv = 0;
4574
4575 tp->link_config.active_speed = current_speed;
4576 tp->link_config.active_duplex = current_duplex;
4577
4578 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4579 if ((bmcr & BMCR_ANENABLE) &&
4580 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4581 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4582 current_link_up = 1;
4583 } else {
4584 if (!(bmcr & BMCR_ANENABLE) &&
4585 tp->link_config.speed == current_speed &&
4586 tp->link_config.duplex == current_duplex &&
4587 tp->link_config.flowctrl ==
4588 tp->link_config.active_flowctrl) {
4589 current_link_up = 1;
4590 }
4591 }
4592
4593 if (current_link_up == 1 &&
4594 tp->link_config.active_duplex == DUPLEX_FULL) {
4595 u32 reg, bit;
4596
4597 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4598 reg = MII_TG3_FET_GEN_STAT;
4599 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4600 } else {
4601 reg = MII_TG3_EXT_STAT;
4602 bit = MII_TG3_EXT_STAT_MDIX;
4603 }
4604
4605 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4606 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4607
4608 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4609 }
4610 }
4611
4612 relink:
4613 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4614 tg3_phy_copper_begin(tp);
4615
4616 if (tg3_flag(tp, ROBOSWITCH)) {
4617 current_link_up = 1;
4618 			/* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4619 current_speed = SPEED_1000;
4620 current_duplex = DUPLEX_FULL;
4621 tp->link_config.active_speed = current_speed;
4622 tp->link_config.active_duplex = current_duplex;
4623 }
4624
4625 tg3_readphy(tp, MII_BMSR, &bmsr);
4626 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4627 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4628 current_link_up = 1;
4629 }
4630
4631 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4632 if (current_link_up == 1) {
4633 if (tp->link_config.active_speed == SPEED_100 ||
4634 tp->link_config.active_speed == SPEED_10)
4635 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4636 else
4637 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4638 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4639 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4640 else
4641 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4642
4643 /* In order for the 5750 core in BCM4785 chip to work properly
4644 * in RGMII mode, the Led Control Register must be set up.
4645 */
4646 if (tg3_flag(tp, RGMII_MODE)) {
4647 u32 led_ctrl = tr32(MAC_LED_CTRL);
4648 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4649
4650 if (tp->link_config.active_speed == SPEED_10)
4651 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4652 else if (tp->link_config.active_speed == SPEED_100)
4653 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4654 LED_CTRL_100MBPS_ON);
4655 else if (tp->link_config.active_speed == SPEED_1000)
4656 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4657 LED_CTRL_1000MBPS_ON);
4658
4659 tw32(MAC_LED_CTRL, led_ctrl);
4660 udelay(40);
4661 }
4662
4663 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4664 if (tp->link_config.active_duplex == DUPLEX_HALF)
4665 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4666
4667 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4668 if (current_link_up == 1 &&
4669 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4670 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4671 else
4672 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4673 }
4674
4675 /* ??? Without this setting Netgear GA302T PHY does not
4676 * ??? send/receive packets...
4677 */
4678 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4679 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4680 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4681 tw32_f(MAC_MI_MODE, tp->mi_mode);
4682 udelay(80);
4683 }
4684
4685 tw32_f(MAC_MODE, tp->mac_mode);
4686 udelay(40);
4687
4688 tg3_phy_eee_adjust(tp, current_link_up);
4689
4690 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4691 /* Polled via timer. */
4692 tw32_f(MAC_EVENT, 0);
4693 } else {
4694 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4695 }
4696 udelay(40);
4697
4698 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4699 current_link_up == 1 &&
4700 tp->link_config.active_speed == SPEED_1000 &&
4701 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4702 udelay(120);
4703 tw32_f(MAC_STATUS,
4704 (MAC_STATUS_SYNC_CHANGED |
4705 MAC_STATUS_CFG_CHANGED));
4706 udelay(40);
4707 tg3_write_mem(tp,
4708 NIC_SRAM_FIRMWARE_MBOX,
4709 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4710 }
4711
4712 /* Prevent send BD corruption. */
4713 if (tg3_flag(tp, CLKREQ_BUG)) {
4714 if (tp->link_config.active_speed == SPEED_100 ||
4715 tp->link_config.active_speed == SPEED_10)
4716 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4717 PCI_EXP_LNKCTL_CLKREQ_EN);
4718 else
4719 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4720 PCI_EXP_LNKCTL_CLKREQ_EN);
4721 }
4722
4723 tg3_test_and_report_link_chg(tp, current_link_up);
4724
4725 return 0;
4726 }
4727
4728 struct tg3_fiber_aneginfo {
4729 int state;
4730 #define ANEG_STATE_UNKNOWN 0
4731 #define ANEG_STATE_AN_ENABLE 1
4732 #define ANEG_STATE_RESTART_INIT 2
4733 #define ANEG_STATE_RESTART 3
4734 #define ANEG_STATE_DISABLE_LINK_OK 4
4735 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4736 #define ANEG_STATE_ABILITY_DETECT 6
4737 #define ANEG_STATE_ACK_DETECT_INIT 7
4738 #define ANEG_STATE_ACK_DETECT 8
4739 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4740 #define ANEG_STATE_COMPLETE_ACK 10
4741 #define ANEG_STATE_IDLE_DETECT_INIT 11
4742 #define ANEG_STATE_IDLE_DETECT 12
4743 #define ANEG_STATE_LINK_OK 13
4744 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4745 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4746
4747 u32 flags;
4748 #define MR_AN_ENABLE 0x00000001
4749 #define MR_RESTART_AN 0x00000002
4750 #define MR_AN_COMPLETE 0x00000004
4751 #define MR_PAGE_RX 0x00000008
4752 #define MR_NP_LOADED 0x00000010
4753 #define MR_TOGGLE_TX 0x00000020
4754 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4755 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4756 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4757 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4758 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4759 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4760 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4761 #define MR_TOGGLE_RX 0x00002000
4762 #define MR_NP_RX 0x00004000
4763
4764 #define MR_LINK_OK 0x80000000
4765
4766 unsigned long link_time, cur_time;
4767
4768 u32 ability_match_cfg;
4769 int ability_match_count;
4770
4771 char ability_match, idle_match, ack_match;
4772
4773 u32 txconfig, rxconfig;
4774 #define ANEG_CFG_NP 0x00000080
4775 #define ANEG_CFG_ACK 0x00000040
4776 #define ANEG_CFG_RF2 0x00000020
4777 #define ANEG_CFG_RF1 0x00000010
4778 #define ANEG_CFG_PS2 0x00000001
4779 #define ANEG_CFG_PS1 0x00008000
4780 #define ANEG_CFG_HD 0x00004000
4781 #define ANEG_CFG_FD 0x00002000
4782 #define ANEG_CFG_INVAL 0x00001f06
4783
4784 };
4785 #define ANEG_OK 0
4786 #define ANEG_DONE 1
4787 #define ANEG_TIMER_ENAB 2
4788 #define ANEG_FAILED -1
4789
4790 #define ANEG_STATE_SETTLE_TIME 10000
4791
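/* Software autonegotiation state machine for TBI fiber links. This appears
 * to follow the IEEE 802.3 Clause 37 arbitration process; it is stepped
 * roughly once per microsecond from fiber_autoneg(), with link_time and
 * cur_time counted in those ticks, so ANEG_STATE_SETTLE_TIME is ~10 ms.
 */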
4792 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4793 struct tg3_fiber_aneginfo *ap)
4794 {
4795 u16 flowctrl;
4796 unsigned long delta;
4797 u32 rx_cfg_reg;
4798 int ret;
4799
4800 if (ap->state == ANEG_STATE_UNKNOWN) {
4801 ap->rxconfig = 0;
4802 ap->link_time = 0;
4803 ap->cur_time = 0;
4804 ap->ability_match_cfg = 0;
4805 ap->ability_match_count = 0;
4806 ap->ability_match = 0;
4807 ap->idle_match = 0;
4808 ap->ack_match = 0;
4809 }
4810 ap->cur_time++;
4811
4812 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4813 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4814
4815 if (rx_cfg_reg != ap->ability_match_cfg) {
4816 ap->ability_match_cfg = rx_cfg_reg;
4817 ap->ability_match = 0;
4818 ap->ability_match_count = 0;
4819 } else {
4820 if (++ap->ability_match_count > 1) {
4821 ap->ability_match = 1;
4822 ap->ability_match_cfg = rx_cfg_reg;
4823 }
4824 }
4825 if (rx_cfg_reg & ANEG_CFG_ACK)
4826 ap->ack_match = 1;
4827 else
4828 ap->ack_match = 0;
4829
4830 ap->idle_match = 0;
4831 } else {
4832 ap->idle_match = 1;
4833 ap->ability_match_cfg = 0;
4834 ap->ability_match_count = 0;
4835 ap->ability_match = 0;
4836 ap->ack_match = 0;
4837
4838 rx_cfg_reg = 0;
4839 }
4840
4841 ap->rxconfig = rx_cfg_reg;
4842 ret = ANEG_OK;
4843
4844 switch (ap->state) {
4845 case ANEG_STATE_UNKNOWN:
4846 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4847 ap->state = ANEG_STATE_AN_ENABLE;
4848
4849 /* fallthru */
4850 case ANEG_STATE_AN_ENABLE:
4851 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4852 if (ap->flags & MR_AN_ENABLE) {
4853 ap->link_time = 0;
4854 ap->cur_time = 0;
4855 ap->ability_match_cfg = 0;
4856 ap->ability_match_count = 0;
4857 ap->ability_match = 0;
4858 ap->idle_match = 0;
4859 ap->ack_match = 0;
4860
4861 ap->state = ANEG_STATE_RESTART_INIT;
4862 } else {
4863 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4864 }
4865 break;
4866
4867 case ANEG_STATE_RESTART_INIT:
4868 ap->link_time = ap->cur_time;
4869 ap->flags &= ~(MR_NP_LOADED);
4870 ap->txconfig = 0;
4871 tw32(MAC_TX_AUTO_NEG, 0);
4872 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4873 tw32_f(MAC_MODE, tp->mac_mode);
4874 udelay(40);
4875
4876 ret = ANEG_TIMER_ENAB;
4877 ap->state = ANEG_STATE_RESTART;
4878
4879 /* fallthru */
4880 case ANEG_STATE_RESTART:
4881 delta = ap->cur_time - ap->link_time;
4882 if (delta > ANEG_STATE_SETTLE_TIME)
4883 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4884 else
4885 ret = ANEG_TIMER_ENAB;
4886 break;
4887
4888 case ANEG_STATE_DISABLE_LINK_OK:
4889 ret = ANEG_DONE;
4890 break;
4891
4892 case ANEG_STATE_ABILITY_DETECT_INIT:
4893 ap->flags &= ~(MR_TOGGLE_TX);
4894 ap->txconfig = ANEG_CFG_FD;
4895 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4896 if (flowctrl & ADVERTISE_1000XPAUSE)
4897 ap->txconfig |= ANEG_CFG_PS1;
4898 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4899 ap->txconfig |= ANEG_CFG_PS2;
4900 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4901 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4902 tw32_f(MAC_MODE, tp->mac_mode);
4903 udelay(40);
4904
4905 ap->state = ANEG_STATE_ABILITY_DETECT;
4906 break;
4907
4908 case ANEG_STATE_ABILITY_DETECT:
4909 if (ap->ability_match != 0 && ap->rxconfig != 0)
4910 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4911 break;
4912
4913 case ANEG_STATE_ACK_DETECT_INIT:
4914 ap->txconfig |= ANEG_CFG_ACK;
4915 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4916 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4917 tw32_f(MAC_MODE, tp->mac_mode);
4918 udelay(40);
4919
4920 ap->state = ANEG_STATE_ACK_DETECT;
4921
4922 /* fallthru */
4923 case ANEG_STATE_ACK_DETECT:
4924 if (ap->ack_match != 0) {
4925 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4926 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4927 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4928 } else {
4929 ap->state = ANEG_STATE_AN_ENABLE;
4930 }
4931 } else if (ap->ability_match != 0 &&
4932 ap->rxconfig == 0) {
4933 ap->state = ANEG_STATE_AN_ENABLE;
4934 }
4935 break;
4936
4937 case ANEG_STATE_COMPLETE_ACK_INIT:
4938 if (ap->rxconfig & ANEG_CFG_INVAL) {
4939 ret = ANEG_FAILED;
4940 break;
4941 }
4942 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4943 MR_LP_ADV_HALF_DUPLEX |
4944 MR_LP_ADV_SYM_PAUSE |
4945 MR_LP_ADV_ASYM_PAUSE |
4946 MR_LP_ADV_REMOTE_FAULT1 |
4947 MR_LP_ADV_REMOTE_FAULT2 |
4948 MR_LP_ADV_NEXT_PAGE |
4949 MR_TOGGLE_RX |
4950 MR_NP_RX);
4951 if (ap->rxconfig & ANEG_CFG_FD)
4952 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4953 if (ap->rxconfig & ANEG_CFG_HD)
4954 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4955 if (ap->rxconfig & ANEG_CFG_PS1)
4956 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4957 if (ap->rxconfig & ANEG_CFG_PS2)
4958 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4959 if (ap->rxconfig & ANEG_CFG_RF1)
4960 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4961 if (ap->rxconfig & ANEG_CFG_RF2)
4962 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4963 if (ap->rxconfig & ANEG_CFG_NP)
4964 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4965
4966 ap->link_time = ap->cur_time;
4967
4968 ap->flags ^= (MR_TOGGLE_TX);
4969 if (ap->rxconfig & 0x0008)
4970 ap->flags |= MR_TOGGLE_RX;
4971 if (ap->rxconfig & ANEG_CFG_NP)
4972 ap->flags |= MR_NP_RX;
4973 ap->flags |= MR_PAGE_RX;
4974
4975 ap->state = ANEG_STATE_COMPLETE_ACK;
4976 ret = ANEG_TIMER_ENAB;
4977 break;
4978
4979 case ANEG_STATE_COMPLETE_ACK:
4980 if (ap->ability_match != 0 &&
4981 ap->rxconfig == 0) {
4982 ap->state = ANEG_STATE_AN_ENABLE;
4983 break;
4984 }
4985 delta = ap->cur_time - ap->link_time;
4986 if (delta > ANEG_STATE_SETTLE_TIME) {
4987 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4988 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4989 } else {
4990 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4991 !(ap->flags & MR_NP_RX)) {
4992 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4993 } else {
4994 ret = ANEG_FAILED;
4995 }
4996 }
4997 }
4998 break;
4999
5000 case ANEG_STATE_IDLE_DETECT_INIT:
5001 ap->link_time = ap->cur_time;
5002 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5003 tw32_f(MAC_MODE, tp->mac_mode);
5004 udelay(40);
5005
5006 ap->state = ANEG_STATE_IDLE_DETECT;
5007 ret = ANEG_TIMER_ENAB;
5008 break;
5009
5010 case ANEG_STATE_IDLE_DETECT:
5011 if (ap->ability_match != 0 &&
5012 ap->rxconfig == 0) {
5013 ap->state = ANEG_STATE_AN_ENABLE;
5014 break;
5015 }
5016 delta = ap->cur_time - ap->link_time;
5017 if (delta > ANEG_STATE_SETTLE_TIME) {
5018 /* XXX another gem from the Broadcom driver :( */
5019 ap->state = ANEG_STATE_LINK_OK;
5020 }
5021 break;
5022
5023 case ANEG_STATE_LINK_OK:
5024 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5025 ret = ANEG_DONE;
5026 break;
5027
5028 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5029 /* ??? unimplemented */
5030 break;
5031
5032 case ANEG_STATE_NEXT_PAGE_WAIT:
5033 /* ??? unimplemented */
5034 break;
5035
5036 default:
5037 ret = ANEG_FAILED;
5038 break;
5039 }
5040
5041 return ret;
5042 }
5043
5044 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5045 {
5046 int res = 0;
5047 struct tg3_fiber_aneginfo aninfo;
5048 int status = ANEG_FAILED;
5049 unsigned int tick;
5050 u32 tmp;
5051
5052 tw32_f(MAC_TX_AUTO_NEG, 0);
5053
5054 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5055 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5056 udelay(40);
5057
5058 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5059 udelay(40);
5060
5061 memset(&aninfo, 0, sizeof(aninfo));
5062 aninfo.flags |= MR_AN_ENABLE;
5063 aninfo.state = ANEG_STATE_UNKNOWN;
5064 aninfo.cur_time = 0;
5065 tick = 0;
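	/* Step the state machine for up to ~195 ms (195000 iterations of
	 * roughly 1 us each) or until it reports completion or failure.
	 */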
5066 while (++tick < 195000) {
5067 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5068 if (status == ANEG_DONE || status == ANEG_FAILED)
5069 break;
5070
5071 udelay(1);
5072 }
5073
5074 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5075 tw32_f(MAC_MODE, tp->mac_mode);
5076 udelay(40);
5077
5078 *txflags = aninfo.txconfig;
5079 *rxflags = aninfo.flags;
5080
5081 if (status == ANEG_DONE &&
5082 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5083 MR_LP_ADV_FULL_DUPLEX)))
5084 res = 1;
5085
5086 return res;
5087 }
5088
5089 static void tg3_init_bcm8002(struct tg3 *tp)
5090 {
5091 u32 mac_status = tr32(MAC_STATUS);
5092 int i;
5093
5094 	/* Reset when initializing for the first time or when we have a link. */
5095 if (tg3_flag(tp, INIT_COMPLETE) &&
5096 !(mac_status & MAC_STATUS_PCS_SYNCED))
5097 return;
5098
5099 /* Set PLL lock range. */
5100 tg3_writephy(tp, 0x16, 0x8007);
5101
5102 /* SW reset */
5103 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5104
5105 /* Wait for reset to complete. */
5106 /* XXX schedule_timeout() ... */
5107 for (i = 0; i < 500; i++)
5108 udelay(10);
5109
5110 /* Config mode; select PMA/Ch 1 regs. */
5111 tg3_writephy(tp, 0x10, 0x8411);
5112
5113 /* Enable auto-lock and comdet, select txclk for tx. */
5114 tg3_writephy(tp, 0x11, 0x0a10);
5115
5116 tg3_writephy(tp, 0x18, 0x00a0);
5117 tg3_writephy(tp, 0x16, 0x41ff);
5118
5119 /* Assert and deassert POR. */
5120 tg3_writephy(tp, 0x13, 0x0400);
5121 udelay(40);
5122 tg3_writephy(tp, 0x13, 0x0000);
5123
5124 tg3_writephy(tp, 0x11, 0x0a50);
5125 udelay(40);
5126 tg3_writephy(tp, 0x11, 0x0a10);
5127
5128 /* Wait for signal to stabilize */
5129 /* XXX schedule_timeout() ... */
5130 for (i = 0; i < 15000; i++)
5131 udelay(10);
5132
5133 /* Deselect the channel register so we can read the PHYID
5134 * later.
5135 */
5136 tg3_writephy(tp, 0x10, 0x8011);
5137 }
5138
5139 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5140 {
5141 u16 flowctrl;
5142 u32 sg_dig_ctrl, sg_dig_status;
5143 u32 serdes_cfg, expected_sg_dig_ctrl;
5144 int workaround, port_a;
5145 int current_link_up;
5146
5147 serdes_cfg = 0;
5148 expected_sg_dig_ctrl = 0;
5149 workaround = 0;
5150 port_a = 1;
5151 current_link_up = 0;
5152
5153 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5154 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5155 workaround = 1;
5156 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5157 port_a = 0;
5158
5159 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5160 /* preserve bits 20-23 for voltage regulator */
5161 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5162 }
5163
5164 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5165
5166 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5167 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5168 if (workaround) {
5169 u32 val = serdes_cfg;
5170
5171 if (port_a)
5172 val |= 0xc010000;
5173 else
5174 val |= 0x4010000;
5175 tw32_f(MAC_SERDES_CFG, val);
5176 }
5177
5178 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5179 }
5180 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5181 tg3_setup_flow_control(tp, 0, 0);
5182 current_link_up = 1;
5183 }
5184 goto out;
5185 }
5186
5187 /* Want auto-negotiation. */
5188 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5189
5190 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5191 if (flowctrl & ADVERTISE_1000XPAUSE)
5192 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5193 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5194 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5195
5196 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5197 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5198 tp->serdes_counter &&
5199 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5200 MAC_STATUS_RCVD_CFG)) ==
5201 MAC_STATUS_PCS_SYNCED)) {
5202 tp->serdes_counter--;
5203 current_link_up = 1;
5204 goto out;
5205 }
5206 restart_autoneg:
5207 if (workaround)
5208 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5209 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5210 udelay(5);
5211 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5212
5213 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5214 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5215 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5216 MAC_STATUS_SIGNAL_DET)) {
5217 sg_dig_status = tr32(SG_DIG_STATUS);
5218 mac_status = tr32(MAC_STATUS);
5219
5220 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5221 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5222 u32 local_adv = 0, remote_adv = 0;
5223
5224 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5225 local_adv |= ADVERTISE_1000XPAUSE;
5226 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5227 local_adv |= ADVERTISE_1000XPSE_ASYM;
5228
5229 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5230 remote_adv |= LPA_1000XPAUSE;
5231 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5232 remote_adv |= LPA_1000XPAUSE_ASYM;
5233
5234 tp->link_config.rmt_adv =
5235 mii_adv_to_ethtool_adv_x(remote_adv);
5236
5237 tg3_setup_flow_control(tp, local_adv, remote_adv);
5238 current_link_up = 1;
5239 tp->serdes_counter = 0;
5240 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5241 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5242 if (tp->serdes_counter)
5243 tp->serdes_counter--;
5244 else {
5245 if (workaround) {
5246 u32 val = serdes_cfg;
5247
5248 if (port_a)
5249 val |= 0xc010000;
5250 else
5251 val |= 0x4010000;
5252
5253 tw32_f(MAC_SERDES_CFG, val);
5254 }
5255
5256 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5257 udelay(40);
5258
5259 				/* Link parallel detection: link is up
5260 				 * only if we have PCS_SYNC and are not
5261 				 * receiving config code words. */
5262 mac_status = tr32(MAC_STATUS);
5263 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5264 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5265 tg3_setup_flow_control(tp, 0, 0);
5266 current_link_up = 1;
5267 tp->phy_flags |=
5268 TG3_PHYFLG_PARALLEL_DETECT;
5269 tp->serdes_counter =
5270 SERDES_PARALLEL_DET_TIMEOUT;
5271 } else
5272 goto restart_autoneg;
5273 }
5274 }
5275 } else {
5276 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5277 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5278 }
5279
5280 out:
5281 return current_link_up;
5282 }
5283
5284 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5285 {
5286 int current_link_up = 0;
5287
5288 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5289 goto out;
5290
5291 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5292 u32 txflags, rxflags;
5293 int i;
5294
5295 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5296 u32 local_adv = 0, remote_adv = 0;
5297
5298 if (txflags & ANEG_CFG_PS1)
5299 local_adv |= ADVERTISE_1000XPAUSE;
5300 if (txflags & ANEG_CFG_PS2)
5301 local_adv |= ADVERTISE_1000XPSE_ASYM;
5302
5303 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5304 remote_adv |= LPA_1000XPAUSE;
5305 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5306 remote_adv |= LPA_1000XPAUSE_ASYM;
5307
5308 tp->link_config.rmt_adv =
5309 mii_adv_to_ethtool_adv_x(remote_adv);
5310
5311 tg3_setup_flow_control(tp, local_adv, remote_adv);
5312
5313 current_link_up = 1;
5314 }
5315 for (i = 0; i < 30; i++) {
5316 udelay(20);
5317 tw32_f(MAC_STATUS,
5318 (MAC_STATUS_SYNC_CHANGED |
5319 MAC_STATUS_CFG_CHANGED));
5320 udelay(40);
5321 if ((tr32(MAC_STATUS) &
5322 (MAC_STATUS_SYNC_CHANGED |
5323 MAC_STATUS_CFG_CHANGED)) == 0)
5324 break;
5325 }
5326
5327 mac_status = tr32(MAC_STATUS);
5328 if (current_link_up == 0 &&
5329 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5330 !(mac_status & MAC_STATUS_RCVD_CFG))
5331 current_link_up = 1;
5332 } else {
5333 tg3_setup_flow_control(tp, 0, 0);
5334
5335 /* Forcing 1000FD link up. */
5336 current_link_up = 1;
5337
5338 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5339 udelay(40);
5340
5341 tw32_f(MAC_MODE, tp->mac_mode);
5342 udelay(40);
5343 }
5344
5345 out:
5346 return current_link_up;
5347 }
5348
5349 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5350 {
5351 u32 orig_pause_cfg;
5352 u16 orig_active_speed;
5353 u8 orig_active_duplex;
5354 u32 mac_status;
5355 int current_link_up;
5356 int i;
5357
5358 orig_pause_cfg = tp->link_config.active_flowctrl;
5359 orig_active_speed = tp->link_config.active_speed;
5360 orig_active_duplex = tp->link_config.active_duplex;
5361
5362 if (!tg3_flag(tp, HW_AUTONEG) &&
5363 tp->link_up &&
5364 tg3_flag(tp, INIT_COMPLETE)) {
5365 mac_status = tr32(MAC_STATUS);
5366 mac_status &= (MAC_STATUS_PCS_SYNCED |
5367 MAC_STATUS_SIGNAL_DET |
5368 MAC_STATUS_CFG_CHANGED |
5369 MAC_STATUS_RCVD_CFG);
5370 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5371 MAC_STATUS_SIGNAL_DET)) {
5372 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5373 MAC_STATUS_CFG_CHANGED));
5374 return 0;
5375 }
5376 }
5377
5378 tw32_f(MAC_TX_AUTO_NEG, 0);
5379
5380 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5381 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5382 tw32_f(MAC_MODE, tp->mac_mode);
5383 udelay(40);
5384
5385 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5386 tg3_init_bcm8002(tp);
5387
5388 	/* Enable the link change event even when polling the serdes. */
5389 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5390 udelay(40);
5391
5392 current_link_up = 0;
5393 tp->link_config.rmt_adv = 0;
5394 mac_status = tr32(MAC_STATUS);
5395
5396 if (tg3_flag(tp, HW_AUTONEG))
5397 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5398 else
5399 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5400
5401 tp->napi[0].hw_status->status =
5402 (SD_STATUS_UPDATED |
5403 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5404
5405 for (i = 0; i < 100; i++) {
5406 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5407 MAC_STATUS_CFG_CHANGED));
5408 udelay(5);
5409 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5410 MAC_STATUS_CFG_CHANGED |
5411 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5412 break;
5413 }
5414
5415 mac_status = tr32(MAC_STATUS);
5416 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5417 current_link_up = 0;
5418 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5419 tp->serdes_counter == 0) {
5420 tw32_f(MAC_MODE, (tp->mac_mode |
5421 MAC_MODE_SEND_CONFIGS));
5422 udelay(1);
5423 tw32_f(MAC_MODE, tp->mac_mode);
5424 }
5425 }
5426
5427 if (current_link_up == 1) {
5428 tp->link_config.active_speed = SPEED_1000;
5429 tp->link_config.active_duplex = DUPLEX_FULL;
5430 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5431 LED_CTRL_LNKLED_OVERRIDE |
5432 LED_CTRL_1000MBPS_ON));
5433 } else {
5434 tp->link_config.active_speed = SPEED_UNKNOWN;
5435 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5436 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5437 LED_CTRL_LNKLED_OVERRIDE |
5438 LED_CTRL_TRAFFIC_OVERRIDE));
5439 }
5440
5441 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5442 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5443 if (orig_pause_cfg != now_pause_cfg ||
5444 orig_active_speed != tp->link_config.active_speed ||
5445 orig_active_duplex != tp->link_config.active_duplex)
5446 tg3_link_report(tp);
5447 }
5448
5449 return 0;
5450 }
5451
5452 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5453 {
5454 int current_link_up, err = 0;
5455 u32 bmsr, bmcr;
5456 u16 current_speed;
5457 u8 current_duplex;
5458 u32 local_adv, remote_adv;
5459
5460 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5461 tw32_f(MAC_MODE, tp->mac_mode);
5462 udelay(40);
5463
5464 tw32(MAC_EVENT, 0);
5465
5466 tw32_f(MAC_STATUS,
5467 (MAC_STATUS_SYNC_CHANGED |
5468 MAC_STATUS_CFG_CHANGED |
5469 MAC_STATUS_MI_COMPLETION |
5470 MAC_STATUS_LNKSTATE_CHANGED));
5471 udelay(40);
5472
5473 if (force_reset)
5474 tg3_phy_reset(tp);
5475
5476 current_link_up = 0;
5477 current_speed = SPEED_UNKNOWN;
5478 current_duplex = DUPLEX_UNKNOWN;
5479 tp->link_config.rmt_adv = 0;
5480
5481 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5482 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5483 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5484 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5485 bmsr |= BMSR_LSTATUS;
5486 else
5487 bmsr &= ~BMSR_LSTATUS;
5488 }
5489
5490 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5491
5492 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5493 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5494 /* do nothing, just check for link up at the end */
5495 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5496 u32 adv, newadv;
5497
5498 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5499 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5500 ADVERTISE_1000XPAUSE |
5501 ADVERTISE_1000XPSE_ASYM |
5502 ADVERTISE_SLCT);
5503
5504 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5505 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5506
5507 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5508 tg3_writephy(tp, MII_ADVERTISE, newadv);
5509 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5510 tg3_writephy(tp, MII_BMCR, bmcr);
5511
5512 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5513 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5514 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5515
5516 return err;
5517 }
5518 } else {
5519 u32 new_bmcr;
5520
5521 bmcr &= ~BMCR_SPEED1000;
5522 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5523
5524 if (tp->link_config.duplex == DUPLEX_FULL)
5525 new_bmcr |= BMCR_FULLDPLX;
5526
5527 if (new_bmcr != bmcr) {
5528 /* BMCR_SPEED1000 is a reserved bit that needs
5529 * to be set on write.
5530 */
5531 new_bmcr |= BMCR_SPEED1000;
5532
5533 /* Force a linkdown */
5534 if (tp->link_up) {
5535 u32 adv;
5536
5537 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5538 adv &= ~(ADVERTISE_1000XFULL |
5539 ADVERTISE_1000XHALF |
5540 ADVERTISE_SLCT);
5541 tg3_writephy(tp, MII_ADVERTISE, adv);
5542 tg3_writephy(tp, MII_BMCR, bmcr |
5543 BMCR_ANRESTART |
5544 BMCR_ANENABLE);
5545 udelay(10);
5546 tg3_carrier_off(tp);
5547 }
5548 tg3_writephy(tp, MII_BMCR, new_bmcr);
5549 bmcr = new_bmcr;
5550 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5551 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5552 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5553 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5554 bmsr |= BMSR_LSTATUS;
5555 else
5556 bmsr &= ~BMSR_LSTATUS;
5557 }
5558 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5559 }
5560 }
5561
5562 if (bmsr & BMSR_LSTATUS) {
5563 current_speed = SPEED_1000;
5564 current_link_up = 1;
5565 if (bmcr & BMCR_FULLDPLX)
5566 current_duplex = DUPLEX_FULL;
5567 else
5568 current_duplex = DUPLEX_HALF;
5569
5570 local_adv = 0;
5571 remote_adv = 0;
5572
5573 if (bmcr & BMCR_ANENABLE) {
5574 u32 common;
5575
5576 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5577 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5578 common = local_adv & remote_adv;
5579 if (common & (ADVERTISE_1000XHALF |
5580 ADVERTISE_1000XFULL)) {
5581 if (common & ADVERTISE_1000XFULL)
5582 current_duplex = DUPLEX_FULL;
5583 else
5584 current_duplex = DUPLEX_HALF;
5585
5586 tp->link_config.rmt_adv =
5587 mii_adv_to_ethtool_adv_x(remote_adv);
5588 } else if (!tg3_flag(tp, 5780_CLASS)) {
5589 /* Link is up via parallel detect */
5590 } else {
5591 current_link_up = 0;
5592 }
5593 }
5594 }
5595
5596 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5597 tg3_setup_flow_control(tp, local_adv, remote_adv);
5598
5599 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5600 if (tp->link_config.active_duplex == DUPLEX_HALF)
5601 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5602
5603 tw32_f(MAC_MODE, tp->mac_mode);
5604 udelay(40);
5605
5606 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5607
5608 tp->link_config.active_speed = current_speed;
5609 tp->link_config.active_duplex = current_duplex;
5610
5611 tg3_test_and_report_link_chg(tp, current_link_up);
5612 return err;
5613 }
5614
5615 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5616 {
5617 if (tp->serdes_counter) {
5618 /* Give autoneg time to complete. */
5619 tp->serdes_counter--;
5620 return;
5621 }
5622
5623 if (!tp->link_up &&
5624 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5625 u32 bmcr;
5626
5627 tg3_readphy(tp, MII_BMCR, &bmcr);
5628 if (bmcr & BMCR_ANENABLE) {
5629 u32 phy1, phy2;
5630
5631 /* Select shadow register 0x1f */
5632 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5633 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5634
5635 /* Select expansion interrupt status register */
5636 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5637 MII_TG3_DSP_EXP1_INT_STAT);
5638 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5639 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5640
5641 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5642 			/* We have signal detect and are not receiving
5643 			 * config code words, so the link is up by
5644 			 * parallel detection.
5645 			 */
5646
5647 bmcr &= ~BMCR_ANENABLE;
5648 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5649 tg3_writephy(tp, MII_BMCR, bmcr);
5650 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5651 }
5652 }
5653 } else if (tp->link_up &&
5654 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5655 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5656 u32 phy2;
5657
5658 /* Select expansion interrupt status register */
5659 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5660 MII_TG3_DSP_EXP1_INT_STAT);
5661 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5662 if (phy2 & 0x20) {
5663 u32 bmcr;
5664
5665 /* Config code words received, turn on autoneg. */
5666 tg3_readphy(tp, MII_BMCR, &bmcr);
5667 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5668
5669 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5670
5671 }
5672 }
5673 }
5674
5675 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5676 {
5677 u32 val;
5678 int err;
5679
5680 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5681 err = tg3_setup_fiber_phy(tp, force_reset);
5682 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5683 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5684 else
5685 err = tg3_setup_copper_phy(tp, force_reset);
5686
5687 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5688 u32 scale;
5689
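		/* On 5784 AX parts the GRC timer prescaler is rescaled to
		 * match the current MAC clock reported by CPMU_CLCK_STAT
		 * (62.5 MHz, 6.25 MHz, or presumably 12.5 MHz in the default
		 * case), keeping timer rates constant across clock switches.
		 */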
5690 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5691 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5692 scale = 65;
5693 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5694 scale = 6;
5695 else
5696 scale = 12;
5697
5698 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5699 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5700 tw32(GRC_MISC_CFG, val);
5701 }
5702
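	/* Rebuild MAC_TX_LENGTHS: program the IPG/CRS fields (2 and 6) and a
	 * slot time below. 1000 Mb/s half duplex gets the maximum slot time
	 * (0xff), presumably to account for carrier extension; all other
	 * modes use the standard 32 byte-time slot.
	 */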
5703 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5704 (6 << TX_LENGTHS_IPG_SHIFT);
5705 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5706 tg3_asic_rev(tp) == ASIC_REV_5762)
5707 val |= tr32(MAC_TX_LENGTHS) &
5708 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5709 TX_LENGTHS_CNT_DWN_VAL_MSK);
5710
5711 if (tp->link_config.active_speed == SPEED_1000 &&
5712 tp->link_config.active_duplex == DUPLEX_HALF)
5713 tw32(MAC_TX_LENGTHS, val |
5714 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5715 else
5716 tw32(MAC_TX_LENGTHS, val |
5717 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5718
5719 if (!tg3_flag(tp, 5705_PLUS)) {
5720 if (tp->link_up) {
5721 tw32(HOSTCC_STAT_COAL_TICKS,
5722 tp->coal.stats_block_coalesce_usecs);
5723 } else {
5724 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5725 }
5726 }
5727
5728 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5729 val = tr32(PCIE_PWR_MGMT_THRESH);
5730 if (!tp->link_up)
5731 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5732 tp->pwrmgmt_thresh;
5733 else
5734 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5735 tw32(PCIE_PWR_MGMT_THRESH, val);
5736 }
5737
5738 return err;
5739 }
5740
5741 /* tp->lock must be held */
5742 static u64 tg3_refclk_read(struct tg3 *tp)
5743 {
5744 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5745 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5746 }
5747
5748 /* tp->lock must be held */
5749 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5750 {
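	/* Halt the EAV reference clock while the 64-bit counter is loaded as
	 * two 32-bit halves, then resume it, so the hardware never runs with
	 * a half-written (torn) value.
	 */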
5751 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5752 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5753 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5754 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5755 }
5756
5757 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5758 static inline void tg3_full_unlock(struct tg3 *tp);
5759 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5760 {
5761 struct tg3 *tp = netdev_priv(dev);
5762
5763 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5764 SOF_TIMESTAMPING_RX_SOFTWARE |
5765 SOF_TIMESTAMPING_SOFTWARE |
5766 SOF_TIMESTAMPING_TX_HARDWARE |
5767 SOF_TIMESTAMPING_RX_HARDWARE |
5768 SOF_TIMESTAMPING_RAW_HARDWARE;
5769
5770 if (tp->ptp_clock)
5771 info->phc_index = ptp_clock_index(tp->ptp_clock);
5772 else
5773 info->phc_index = -1;
5774
5775 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5776
5777 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5778 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5779 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5780 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5781 return 0;
5782 }
5783
5784 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5785 {
5786 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5787 bool neg_adj = false;
5788 u32 correction = 0;
5789
5790 if (ppb < 0) {
5791 neg_adj = true;
5792 ppb = -ppb;
5793 }
5794
5795 	/* Frequency adjustment is performed in hardware with a 24 bit
5796 	 * accumulator and a programmable correction value. On each clock
5797 	 * cycle, the correction value is added to the accumulator and,
5798 	 * when it overflows, the time counter is incremented/decremented.
5799 	 *
5800 	 * So the conversion from ppb to the correction value is
5801 	 * ppb * (1 << 24) / 1000000000
5802 	 */
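	/* Worked example: ppb = 1000 (+1 ppm) gives
	 * correction = 1000 * (1 << 24) / 1000000000 = 16, so the 24-bit
	 * accumulator overflows roughly once every 2^24 / 16 = 2^20 clocks.
	 */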
5803 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5804 TG3_EAV_REF_CLK_CORRECT_MASK;
5805
5806 tg3_full_lock(tp, 0);
5807
5808 if (correction)
5809 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5810 TG3_EAV_REF_CLK_CORRECT_EN |
5811 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5812 else
5813 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5814
5815 tg3_full_unlock(tp);
5816
5817 return 0;
5818 }
5819
5820 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5821 {
5822 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5823
5824 tg3_full_lock(tp, 0);
5825 tp->ptp_adjust += delta;
5826 tg3_full_unlock(tp);
5827
5828 return 0;
5829 }
5830
5831 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5832 {
5833 u64 ns;
5834 u32 remainder;
5835 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5836
5837 tg3_full_lock(tp, 0);
5838 ns = tg3_refclk_read(tp);
5839 ns += tp->ptp_adjust;
5840 tg3_full_unlock(tp);
5841
5842 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5843 ts->tv_nsec = remainder;
5844
5845 return 0;
5846 }
5847
5848 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5849 const struct timespec *ts)
5850 {
5851 u64 ns;
5852 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5853
5854 ns = timespec_to_ns(ts);
5855
5856 tg3_full_lock(tp, 0);
5857 tg3_refclk_write(tp, ns);
5858 tp->ptp_adjust = 0;
5859 tg3_full_unlock(tp);
5860
5861 return 0;
5862 }
5863
5864 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5865 struct ptp_clock_request *rq, int on)
5866 {
5867 return -EOPNOTSUPP;
5868 }
5869
5870 static const struct ptp_clock_info tg3_ptp_caps = {
5871 .owner = THIS_MODULE,
5872 .name = "tg3 clock",
5873 .max_adj = 250000000,
5874 .n_alarm = 0,
5875 .n_ext_ts = 0,
5876 .n_per_out = 0,
5877 .pps = 0,
5878 .adjfreq = tg3_ptp_adjfreq,
5879 .adjtime = tg3_ptp_adjtime,
5880 .gettime = tg3_ptp_gettime,
5881 .settime = tg3_ptp_settime,
5882 .enable = tg3_ptp_enable,
5883 };
5884
5885 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5886 struct skb_shared_hwtstamps *timestamp)
5887 {
5888 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5889 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5890 tp->ptp_adjust);
5891 }
5892
5893 /* tp->lock must be held */
5894 static void tg3_ptp_init(struct tg3 *tp)
5895 {
5896 if (!tg3_flag(tp, PTP_CAPABLE))
5897 return;
5898
5899 /* Initialize the hardware clock to the system time. */
5900 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5901 tp->ptp_adjust = 0;
5902 tp->ptp_info = tg3_ptp_caps;
5903 }
5904
5905 /* tp->lock must be held */
5906 static void tg3_ptp_resume(struct tg3 *tp)
5907 {
5908 if (!tg3_flag(tp, PTP_CAPABLE))
5909 return;
5910
5911 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5912 tp->ptp_adjust = 0;
5913 }
5914
5915 static void tg3_ptp_fini(struct tg3 *tp)
5916 {
5917 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5918 return;
5919
5920 ptp_clock_unregister(tp->ptp_clock);
5921 tp->ptp_clock = NULL;
5922 tp->ptp_adjust = 0;
5923 }
5924
5925 static inline int tg3_irq_sync(struct tg3 *tp)
5926 {
5927 return tp->irq_sync;
5928 }
5929
5930 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5931 {
5932 int i;
5933
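	/* Offset the destination pointer by the register offset so that the
	 * snapshot buffer mirrors the device register layout: register
	 * 'off + i' always lands at byte offset 'off + i' of the dump.
	 */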
5934 dst = (u32 *)((u8 *)dst + off);
5935 for (i = 0; i < len; i += sizeof(u32))
5936 *dst++ = tr32(off + i);
5937 }
5938
5939 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5940 {
5941 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5942 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5943 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5944 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5945 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5946 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5947 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5948 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5949 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5950 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5951 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5952 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5953 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5954 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5955 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5956 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5957 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5958 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5959 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5960
5961 if (tg3_flag(tp, SUPPORT_MSIX))
5962 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5963
5964 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5965 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5966 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5967 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5968 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5969 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5970 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5971 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5972
5973 if (!tg3_flag(tp, 5705_PLUS)) {
5974 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5975 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5976 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5977 }
5978
5979 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5980 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5981 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5982 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5983 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5984
5985 if (tg3_flag(tp, NVRAM))
5986 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5987 }
5988
5989 static void tg3_dump_state(struct tg3 *tp)
5990 {
5991 int i;
5992 u32 *regs;
5993
5994 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5995 if (!regs)
5996 return;
5997
5998 if (tg3_flag(tp, PCI_EXPRESS)) {
5999 /* Read up to but not including private PCI registers */
6000 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6001 regs[i / sizeof(u32)] = tr32(i);
6002 } else
6003 tg3_dump_legacy_regs(tp, regs);
6004
6005 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6006 if (!regs[i + 0] && !regs[i + 1] &&
6007 !regs[i + 2] && !regs[i + 3])
6008 continue;
6009
6010 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6011 i * 4,
6012 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6013 }
6014
6015 kfree(regs);
6016
6017 for (i = 0; i < tp->irq_cnt; i++) {
6018 struct tg3_napi *tnapi = &tp->napi[i];
6019
6020 /* SW status block */
6021 netdev_err(tp->dev,
6022 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6023 i,
6024 tnapi->hw_status->status,
6025 tnapi->hw_status->status_tag,
6026 tnapi->hw_status->rx_jumbo_consumer,
6027 tnapi->hw_status->rx_consumer,
6028 tnapi->hw_status->rx_mini_consumer,
6029 tnapi->hw_status->idx[0].rx_producer,
6030 tnapi->hw_status->idx[0].tx_consumer);
6031
6032 netdev_err(tp->dev,
6033 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6034 i,
6035 tnapi->last_tag, tnapi->last_irq_tag,
6036 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6037 tnapi->rx_rcb_ptr,
6038 tnapi->prodring.rx_std_prod_idx,
6039 tnapi->prodring.rx_std_cons_idx,
6040 tnapi->prodring.rx_jmb_prod_idx,
6041 tnapi->prodring.rx_jmb_cons_idx);
6042 }
6043 }
6044
6045 /* This is called whenever we suspect that the system chipset is re-
6046 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6047 * is bogus tx completions. We try to recover by setting the
6048 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6049 * in the workqueue.
6050 */
6051 static void tg3_tx_recover(struct tg3 *tp)
6052 {
6053 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6054 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6055
6056 netdev_warn(tp->dev,
6057 "The system may be re-ordering memory-mapped I/O "
6058 "cycles to the network device, attempting to recover. "
6059 "Please report the problem to the driver maintainer "
6060 "and include system chipset information.\n");
6061
6062 spin_lock(&tp->lock);
6063 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6064 spin_unlock(&tp->lock);
6065 }
6066
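/* Descriptors still available in the tx ring.  TG3_TX_RING_SIZE is a
 * power of two, so the masked subtraction yields the in-flight count
 * even across index wraparound: with a 512-entry ring, tx_prod = 5
 * and tx_cons = 510 give (5 - 510) & 511 = 7 in flight, leaving
 * tx_pending - 7 slots usable.
 */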
6067 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6068 {
6069 /* Tell compiler to fetch tx indices from memory. */
6070 barrier();
6071 return tnapi->tx_pending -
6072 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6073 }
6074
6075 /* Tigon3 never reports partial packet sends. So we do not
6076 * need special logic to handle SKBs that have not had all
6077 * of their frags sent yet, like SunGEM does.
6078 */
6079 static void tg3_tx(struct tg3_napi *tnapi)
6080 {
6081 struct tg3 *tp = tnapi->tp;
6082 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6083 u32 sw_idx = tnapi->tx_cons;
6084 struct netdev_queue *txq;
6085 int index = tnapi - tp->napi;
6086 unsigned int pkts_compl = 0, bytes_compl = 0;
6087
6088 if (tg3_flag(tp, ENABLE_TSS))
6089 index--;
6090
6091 txq = netdev_get_tx_queue(tp->dev, index);
6092
6093 while (sw_idx != hw_idx) {
6094 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6095 struct sk_buff *skb = ri->skb;
6096 int i, tx_bug = 0;
6097
6098 if (unlikely(skb == NULL)) {
6099 tg3_tx_recover(tp);
6100 return;
6101 }
6102
6103 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6104 struct skb_shared_hwtstamps timestamp;
6105 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6106 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6107
6108 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6109
6110 skb_tstamp_tx(skb, &timestamp);
6111 }
6112
6113 pci_unmap_single(tp->pdev,
6114 dma_unmap_addr(ri, mapping),
6115 skb_headlen(skb),
6116 PCI_DMA_TODEVICE);
6117
6118 ri->skb = NULL;
6119
6120 while (ri->fragmented) {
6121 ri->fragmented = false;
6122 sw_idx = NEXT_TX(sw_idx);
6123 ri = &tnapi->tx_buffers[sw_idx];
6124 }
6125
6126 sw_idx = NEXT_TX(sw_idx);
6127
6128 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6129 ri = &tnapi->tx_buffers[sw_idx];
6130 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6131 tx_bug = 1;
6132
6133 pci_unmap_page(tp->pdev,
6134 dma_unmap_addr(ri, mapping),
6135 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6136 PCI_DMA_TODEVICE);
6137
6138 while (ri->fragmented) {
6139 ri->fragmented = false;
6140 sw_idx = NEXT_TX(sw_idx);
6141 ri = &tnapi->tx_buffers[sw_idx];
6142 }
6143
6144 sw_idx = NEXT_TX(sw_idx);
6145 }
6146
6147 pkts_compl++;
6148 bytes_compl += skb->len;
6149
6150 dev_kfree_skb(skb);
6151
6152 if (unlikely(tx_bug)) {
6153 tg3_tx_recover(tp);
6154 return;
6155 }
6156 }
6157
6158 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6159
6160 tnapi->tx_cons = sw_idx;
6161
6162 /* Need to make the tx_cons update visible to tg3_start_xmit()
6163 * before checking for netif_queue_stopped(). Without the
6164 * memory barrier, there is a small possibility that tg3_start_xmit()
6165 * will miss it and cause the queue to be stopped forever.
6166 */
6167 smp_mb();
6168
6169 if (unlikely(netif_tx_queue_stopped(txq) &&
6170 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6171 __netif_tx_lock(txq, smp_processor_id());
6172 if (netif_tx_queue_stopped(txq) &&
6173 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6174 netif_tx_wake_queue(txq);
6175 __netif_tx_unlock(txq);
6176 }
6177 }
6178
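/* Release an rx buffer through the allocator that produced it:
 * page-fragment buffers from netdev_alloc_frag() go back via
 * put_page(), kmalloc()ed buffers via kfree().  The is_frag flag
 * mirrors the size test made at allocation time.
 */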
6179 static void tg3_frag_free(bool is_frag, void *data)
6180 {
6181 if (is_frag)
6182 put_page(virt_to_head_page(data));
6183 else
6184 kfree(data);
6185 }
6186
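/* Unmap and free one rx buffer.  The allocation size is recomputed
 * from the map size so that the frag-vs-kmalloc decision here matches
 * the one made in tg3_alloc_rx_data().
 */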
6187 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6188 {
6189 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6190 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6191
6192 if (!ri->data)
6193 return;
6194
6195 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6196 map_sz, PCI_DMA_FROMDEVICE);
6197 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6198 ri->data = NULL;
6199 }
6200
6201
6202 /* Returns size of skb allocated or < 0 on error.
6203 *
6204 * We only need to fill in the address because the other members
6205 * of the RX descriptor are invariant, see tg3_init_rings.
6206 *
6207 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6208 * posting buffers we only dirty the first cache line of the RX
6209 * descriptor (containing the address). Whereas for the RX status
6210 * buffers the cpu only reads the last cacheline of the RX descriptor
6211 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6212 */
6213 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6214 u32 opaque_key, u32 dest_idx_unmasked,
6215 unsigned int *frag_size)
6216 {
6217 struct tg3_rx_buffer_desc *desc;
6218 struct ring_info *map;
6219 u8 *data;
6220 dma_addr_t mapping;
6221 int skb_size, data_size, dest_idx;
6222
6223 switch (opaque_key) {
6224 case RXD_OPAQUE_RING_STD:
6225 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6226 desc = &tpr->rx_std[dest_idx];
6227 map = &tpr->rx_std_buffers[dest_idx];
6228 data_size = tp->rx_pkt_map_sz;
6229 break;
6230
6231 case RXD_OPAQUE_RING_JUMBO:
6232 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6233 desc = &tpr->rx_jmb[dest_idx].std;
6234 map = &tpr->rx_jmb_buffers[dest_idx];
6235 data_size = TG3_RX_JMB_MAP_SZ;
6236 break;
6237
6238 default:
6239 return -EINVAL;
6240 }
6241
6242 /* Do not overwrite any of the map or rp information
6243 * until we are sure we can commit to a new buffer.
6244 *
6245 * Callers depend upon this behavior and assume that
6246 * we leave everything unchanged if we fail.
6247 */
6248 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6249 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6250 if (skb_size <= PAGE_SIZE) {
6251 data = netdev_alloc_frag(skb_size);
6252 *frag_size = skb_size;
6253 } else {
6254 data = kmalloc(skb_size, GFP_ATOMIC);
6255 *frag_size = 0;
6256 }
6257 if (!data)
6258 return -ENOMEM;
6259
6260 mapping = pci_map_single(tp->pdev,
6261 data + TG3_RX_OFFSET(tp),
6262 data_size,
6263 PCI_DMA_FROMDEVICE);
6264 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6265 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6266 return -EIO;
6267 }
6268
6269 map->data = data;
6270 dma_unmap_addr_set(map, mapping, mapping);
6271
6272 desc->addr_hi = ((u64)mapping >> 32);
6273 desc->addr_lo = ((u64)mapping & 0xffffffff);
6274
6275 return data_size;
6276 }
6277
6278 /* We only need to move over the address because the other
6279 * members of the RX descriptor are invariant. See notes above
6280 * tg3_alloc_rx_data for full details.
6281 */
6282 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6283 struct tg3_rx_prodring_set *dpr,
6284 u32 opaque_key, int src_idx,
6285 u32 dest_idx_unmasked)
6286 {
6287 struct tg3 *tp = tnapi->tp;
6288 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6289 struct ring_info *src_map, *dest_map;
6290 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6291 int dest_idx;
6292
6293 switch (opaque_key) {
6294 case RXD_OPAQUE_RING_STD:
6295 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6296 dest_desc = &dpr->rx_std[dest_idx];
6297 dest_map = &dpr->rx_std_buffers[dest_idx];
6298 src_desc = &spr->rx_std[src_idx];
6299 src_map = &spr->rx_std_buffers[src_idx];
6300 break;
6301
6302 case RXD_OPAQUE_RING_JUMBO:
6303 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6304 dest_desc = &dpr->rx_jmb[dest_idx].std;
6305 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6306 src_desc = &spr->rx_jmb[src_idx].std;
6307 src_map = &spr->rx_jmb_buffers[src_idx];
6308 break;
6309
6310 default:
6311 return;
6312 }
6313
6314 dest_map->data = src_map->data;
6315 dma_unmap_addr_set(dest_map, mapping,
6316 dma_unmap_addr(src_map, mapping));
6317 dest_desc->addr_hi = src_desc->addr_hi;
6318 dest_desc->addr_lo = src_desc->addr_lo;
6319
6320 /* Ensure that the update to the skb happens after the physical
6321 * addresses have been transferred to the new BD location.
6322 */
6323 smp_wmb();
6324
6325 src_map->data = NULL;
6326 }
6327
6328 /* The RX ring scheme is composed of multiple rings which post fresh
6329 * buffers to the chip, and one special ring the chip uses to report
6330 * status back to the host.
6331 *
6332 * The special ring reports the status of received packets to the
6333 * host. The chip does not write into the original descriptor the
6334 * RX buffer was obtained from. The chip simply takes the original
6335 * descriptor as provided by the host, updates the status and length
6336 * field, then writes this into the next status ring entry.
6337 *
6338 * Each ring the host uses to post buffers to the chip is described
6339 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6340 * it is first placed into the on-chip ram. When the packet's length
6341 * is known, it walks down the TG3_BDINFO entries to select the ring.
6342 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6343 * whose MAXLEN covers the new packet's length is chosen.
6344 *
6345 * The "separate ring for rx status" scheme may sound queer, but it makes
6346 * sense from a cache coherency perspective. If only the host writes
6347 * to the buffer post rings, and only the chip writes to the rx status
6348 * rings, then cache lines never move beyond shared-modified state.
6349 * If both the host and chip were to write into the same ring, cache line
6350 * eviction could occur since both entities want it in an exclusive state.
6351 */
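/* In short: tg3_rx() below walks the return ring from rx_rcb_ptr to
 * the hardware-reported producer index, hands each completed buffer
 * to the stack (or recycles it on error), and reposts fresh buffers
 * on the std/jumbo producer rings it consumed from.
 */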
6352 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6353 {
6354 struct tg3 *tp = tnapi->tp;
6355 u32 work_mask, rx_std_posted = 0;
6356 u32 std_prod_idx, jmb_prod_idx;
6357 u32 sw_idx = tnapi->rx_rcb_ptr;
6358 u16 hw_idx;
6359 int received;
6360 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6361
6362 hw_idx = *(tnapi->rx_rcb_prod_idx);
6363 /*
6364 * We need to order the read of hw_idx and the read of
6365 * the opaque cookie.
6366 */
6367 rmb();
6368 work_mask = 0;
6369 received = 0;
6370 std_prod_idx = tpr->rx_std_prod_idx;
6371 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6372 while (sw_idx != hw_idx && budget > 0) {
6373 struct ring_info *ri;
6374 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6375 unsigned int len;
6376 struct sk_buff *skb;
6377 dma_addr_t dma_addr;
6378 u32 opaque_key, desc_idx, *post_ptr;
6379 u8 *data;
6380 u64 tstamp = 0;
6381
6382 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6383 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6384 if (opaque_key == RXD_OPAQUE_RING_STD) {
6385 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6386 dma_addr = dma_unmap_addr(ri, mapping);
6387 data = ri->data;
6388 post_ptr = &std_prod_idx;
6389 rx_std_posted++;
6390 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6391 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6392 dma_addr = dma_unmap_addr(ri, mapping);
6393 data = ri->data;
6394 post_ptr = &jmb_prod_idx;
6395 } else
6396 goto next_pkt_nopost;
6397
6398 work_mask |= opaque_key;
6399
6400 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6401 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6402 drop_it:
6403 tg3_recycle_rx(tnapi, tpr, opaque_key,
6404 desc_idx, *post_ptr);
6405 drop_it_no_recycle:
6406 /* Other statistics are kept track of by the card. */
6407 tp->rx_dropped++;
6408 goto next_pkt;
6409 }
6410
6411 prefetch(data + TG3_RX_OFFSET(tp));
6412 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6413 ETH_FCS_LEN;
6414
6415 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6416 RXD_FLAG_PTPSTAT_PTPV1 ||
6417 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6418 RXD_FLAG_PTPSTAT_PTPV2) {
6419 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6420 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6421 }
6422
6423 if (len > TG3_RX_COPY_THRESH(tp)) {
6424 int skb_size;
6425 unsigned int frag_size;
6426
6427 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6428 *post_ptr, &frag_size);
6429 if (skb_size < 0)
6430 goto drop_it;
6431
6432 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6433 PCI_DMA_FROMDEVICE);
6434
6435 skb = build_skb(data, frag_size);
6436 if (!skb) {
6437 tg3_frag_free(frag_size != 0, data);
6438 goto drop_it_no_recycle;
6439 }
6440 skb_reserve(skb, TG3_RX_OFFSET(tp));
6441 /* Ensure that the update to the data happens
6442 * after the usage of the old DMA mapping.
6443 */
6444 smp_wmb();
6445
6446 ri->data = NULL;
6447
6448 } else {
6449 tg3_recycle_rx(tnapi, tpr, opaque_key,
6450 desc_idx, *post_ptr);
6451
6452 skb = netdev_alloc_skb(tp->dev,
6453 len + TG3_RAW_IP_ALIGN);
6454 if (skb == NULL)
6455 goto drop_it_no_recycle;
6456
6457 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6458 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6459 memcpy(skb->data,
6460 data + TG3_RX_OFFSET(tp),
6461 len);
6462 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6463 }
6464
6465 skb_put(skb, len);
6466 if (tstamp)
6467 tg3_hwclock_to_timestamp(tp, tstamp,
6468 skb_hwtstamps(skb));
6469
6470 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6471 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6472 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6473 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6474 skb->ip_summed = CHECKSUM_UNNECESSARY;
6475 else
6476 skb_checksum_none_assert(skb);
6477
6478 skb->protocol = eth_type_trans(skb, tp->dev);
6479
6480 if (len > (tp->dev->mtu + ETH_HLEN) &&
6481 skb->protocol != htons(ETH_P_8021Q)) {
6482 dev_kfree_skb(skb);
6483 goto drop_it_no_recycle;
6484 }
6485
6486 if (desc->type_flags & RXD_FLAG_VLAN &&
6487 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6488 __vlan_hwaccel_put_tag(skb,
6489 desc->err_vlan & RXD_VLAN_MASK);
6490
6491 napi_gro_receive(&tnapi->napi, skb);
6492
6493 received++;
6494 budget--;
6495
6496 next_pkt:
6497 (*post_ptr)++;
6498
6499 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6500 tpr->rx_std_prod_idx = std_prod_idx &
6501 tp->rx_std_ring_mask;
6502 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6503 tpr->rx_std_prod_idx);
6504 work_mask &= ~RXD_OPAQUE_RING_STD;
6505 rx_std_posted = 0;
6506 }
6507 next_pkt_nopost:
6508 sw_idx++;
6509 sw_idx &= tp->rx_ret_ring_mask;
6510
6511 /* Refresh hw_idx to see if there is new work */
6512 if (sw_idx == hw_idx) {
6513 hw_idx = *(tnapi->rx_rcb_prod_idx);
6514 rmb();
6515 }
6516 }
6517
6518 /* ACK the status ring. */
6519 tnapi->rx_rcb_ptr = sw_idx;
6520 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6521
6522 /* Refill RX ring(s). */
6523 if (!tg3_flag(tp, ENABLE_RSS)) {
6524 /* Sync BD data before updating mailbox */
6525 wmb();
6526
6527 if (work_mask & RXD_OPAQUE_RING_STD) {
6528 tpr->rx_std_prod_idx = std_prod_idx &
6529 tp->rx_std_ring_mask;
6530 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6531 tpr->rx_std_prod_idx);
6532 }
6533 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6534 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6535 tp->rx_jmb_ring_mask;
6536 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6537 tpr->rx_jmb_prod_idx);
6538 }
6539 mmiowb();
6540 } else if (work_mask) {
6541 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6542 * updated before the producer indices can be updated.
6543 */
6544 smp_wmb();
6545
6546 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6547 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6548
6549 if (tnapi != &tp->napi[1]) {
6550 tp->rx_refill = true;
6551 napi_schedule(&tp->napi[1].napi);
6552 }
6553 }
6554
6555 return received;
6556 }
6557
6558 static void tg3_poll_link(struct tg3 *tp)
6559 {
6560 /* handle link change and other phy events */
6561 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6562 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6563
6564 if (sblk->status & SD_STATUS_LINK_CHG) {
6565 sblk->status = SD_STATUS_UPDATED |
6566 (sblk->status & ~SD_STATUS_LINK_CHG);
6567 spin_lock(&tp->lock);
6568 if (tg3_flag(tp, USE_PHYLIB)) {
6569 tw32_f(MAC_STATUS,
6570 (MAC_STATUS_SYNC_CHANGED |
6571 MAC_STATUS_CFG_CHANGED |
6572 MAC_STATUS_MI_COMPLETION |
6573 MAC_STATUS_LNKSTATE_CHANGED));
6574 udelay(40);
6575 } else
6576 tg3_setup_phy(tp, 0);
6577 spin_unlock(&tp->lock);
6578 }
6579 }
6580 }
6581
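/* With RSS, buffers recycled onto a per-vector producer ring must be
 * migrated back to tp->napi[0]'s ring, the only one the hardware is
 * refilled from.  Copy as many contiguous entries as possible from
 * the source set (spr) into empty slots of the destination set (dpr);
 * -ENOSPC reports that occupied destination slots cut the copy short.
 */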
6582 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6583 struct tg3_rx_prodring_set *dpr,
6584 struct tg3_rx_prodring_set *spr)
6585 {
6586 u32 si, di, cpycnt, src_prod_idx;
6587 int i, err = 0;
6588
6589 while (1) {
6590 src_prod_idx = spr->rx_std_prod_idx;
6591
6592 /* Make sure updates to the rx_std_buffers[] entries and the
6593 * standard producer index are seen in the correct order.
6594 */
6595 smp_rmb();
6596
6597 if (spr->rx_std_cons_idx == src_prod_idx)
6598 break;
6599
6600 if (spr->rx_std_cons_idx < src_prod_idx)
6601 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6602 else
6603 cpycnt = tp->rx_std_ring_mask + 1 -
6604 spr->rx_std_cons_idx;
6605
6606 cpycnt = min(cpycnt,
6607 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6608
6609 si = spr->rx_std_cons_idx;
6610 di = dpr->rx_std_prod_idx;
6611
6612 for (i = di; i < di + cpycnt; i++) {
6613 if (dpr->rx_std_buffers[i].data) {
6614 cpycnt = i - di;
6615 err = -ENOSPC;
6616 break;
6617 }
6618 }
6619
6620 if (!cpycnt)
6621 break;
6622
6623 /* Ensure that updates to the rx_std_buffers ring and the
6624 * shadowed hardware producer ring from tg3_recycle_skb() are
6625 * ordered correctly WRT the skb check above.
6626 */
6627 smp_rmb();
6628
6629 memcpy(&dpr->rx_std_buffers[di],
6630 &spr->rx_std_buffers[si],
6631 cpycnt * sizeof(struct ring_info));
6632
6633 for (i = 0; i < cpycnt; i++, di++, si++) {
6634 struct tg3_rx_buffer_desc *sbd, *dbd;
6635 sbd = &spr->rx_std[si];
6636 dbd = &dpr->rx_std[di];
6637 dbd->addr_hi = sbd->addr_hi;
6638 dbd->addr_lo = sbd->addr_lo;
6639 }
6640
6641 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6642 tp->rx_std_ring_mask;
6643 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6644 tp->rx_std_ring_mask;
6645 }
6646
6647 while (1) {
6648 src_prod_idx = spr->rx_jmb_prod_idx;
6649
6650 /* Make sure updates to the rx_jmb_buffers[] entries and
6651 * the jumbo producer index are seen in the correct order.
6652 */
6653 smp_rmb();
6654
6655 if (spr->rx_jmb_cons_idx == src_prod_idx)
6656 break;
6657
6658 if (spr->rx_jmb_cons_idx < src_prod_idx)
6659 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6660 else
6661 cpycnt = tp->rx_jmb_ring_mask + 1 -
6662 spr->rx_jmb_cons_idx;
6663
6664 cpycnt = min(cpycnt,
6665 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6666
6667 si = spr->rx_jmb_cons_idx;
6668 di = dpr->rx_jmb_prod_idx;
6669
6670 for (i = di; i < di + cpycnt; i++) {
6671 if (dpr->rx_jmb_buffers[i].data) {
6672 cpycnt = i - di;
6673 err = -ENOSPC;
6674 break;
6675 }
6676 }
6677
6678 if (!cpycnt)
6679 break;
6680
6681 /* Ensure that updates to the rx_jmb_buffers ring and the
6682 * shadowed hardware producer ring from tg3_recycle_skb() are
6683 * ordered correctly WRT the skb check above.
6684 */
6685 smp_rmb();
6686
6687 memcpy(&dpr->rx_jmb_buffers[di],
6688 &spr->rx_jmb_buffers[si],
6689 cpycnt * sizeof(struct ring_info));
6690
6691 for (i = 0; i < cpycnt; i++, di++, si++) {
6692 struct tg3_rx_buffer_desc *sbd, *dbd;
6693 sbd = &spr->rx_jmb[si].std;
6694 dbd = &dpr->rx_jmb[di].std;
6695 dbd->addr_hi = sbd->addr_hi;
6696 dbd->addr_lo = sbd->addr_lo;
6697 }
6698
6699 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6700 tp->rx_jmb_ring_mask;
6701 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6702 tp->rx_jmb_ring_mask;
6703 }
6704
6705 return err;
6706 }
6707
6708 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6709 {
6710 struct tg3 *tp = tnapi->tp;
6711
6712 /* run TX completion thread */
6713 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6714 tg3_tx(tnapi);
6715 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6716 return work_done;
6717 }
6718
6719 if (!tnapi->rx_rcb_prod_idx)
6720 return work_done;
6721
6722 /* run RX thread, within the bounds set by NAPI.
6723 * All RX "locking" is done by ensuring outside
6724 * code synchronizes with tg3->napi.poll()
6725 */
6726 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6727 work_done += tg3_rx(tnapi, budget - work_done);
6728
6729 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6730 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6731 int i, err = 0;
6732 u32 std_prod_idx = dpr->rx_std_prod_idx;
6733 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6734
6735 tp->rx_refill = false;
6736 for (i = 1; i <= tp->rxq_cnt; i++)
6737 err |= tg3_rx_prodring_xfer(tp, dpr,
6738 &tp->napi[i].prodring);
6739
6740 wmb();
6741
6742 if (std_prod_idx != dpr->rx_std_prod_idx)
6743 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6744 dpr->rx_std_prod_idx);
6745
6746 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6747 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6748 dpr->rx_jmb_prod_idx);
6749
6750 mmiowb();
6751
6752 if (err)
6753 tw32_f(HOSTCC_MODE, tp->coal_now);
6754 }
6755
6756 return work_done;
6757 }
6758
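/* Schedule the chip-reset work item at most once: RESET_TASK_PENDING
 * is tested and set atomically, so concurrent callers cannot queue
 * the work twice.
 */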
6759 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6760 {
6761 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6762 schedule_work(&tp->reset_task);
6763 }
6764
6765 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6766 {
6767 cancel_work_sync(&tp->reset_task);
6768 tg3_flag_clear(tp, RESET_TASK_PENDING);
6769 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6770 }
6771
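/* NAPI poll handler for MSI-X vectors other than vector 0.  These
 * vectors carry only rx/tx work; link changes and error status are
 * delivered through vector 0 and handled in tg3_poll() below.
 */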
6772 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6773 {
6774 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6775 struct tg3 *tp = tnapi->tp;
6776 int work_done = 0;
6777 struct tg3_hw_status *sblk = tnapi->hw_status;
6778
6779 while (1) {
6780 work_done = tg3_poll_work(tnapi, work_done, budget);
6781
6782 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6783 goto tx_recovery;
6784
6785 if (unlikely(work_done >= budget))
6786 break;
6787
6788 /* tnapi->last_tag is used when re-enabling interrupts below
6789 * to tell the hw how much work has been processed,
6790 * so we must read it before checking for more work.
6791 */
6792 tnapi->last_tag = sblk->status_tag;
6793 tnapi->last_irq_tag = tnapi->last_tag;
6794 rmb();
6795
6796 /* check for RX/TX work to do */
6797 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6798 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6799
6800 /* This test is not race-free, but looping again
6801 * reduces the number of interrupts.
6802 */
6803 if (tnapi == &tp->napi[1] && tp->rx_refill)
6804 continue;
6805
6806 napi_complete(napi);
6807 /* Reenable interrupts. */
6808 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6809
6810 /* This test is synchronized by napi_schedule()
6811 * and napi_complete() to close the race condition.
6812 */
6813 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6814 tw32(HOSTCC_MODE, tp->coalesce_mode |
6815 HOSTCC_MODE_ENABLE |
6816 tnapi->coal_now);
6817 }
6818 mmiowb();
6819 break;
6820 }
6821 }
6822
6823 return work_done;
6824
6825 tx_recovery:
6826 /* work_done is guaranteed to be less than budget. */
6827 napi_complete(napi);
6828 tg3_reset_task_schedule(tp);
6829 return work_done;
6830 }
6831
6832 static void tg3_process_error(struct tg3 *tp)
6833 {
6834 u32 val;
6835 bool real_error = false;
6836
6837 if (tg3_flag(tp, ERROR_PROCESSED))
6838 return;
6839
6840 /* Check Flow Attention register */
6841 val = tr32(HOSTCC_FLOW_ATTN);
6842 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6843 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6844 real_error = true;
6845 }
6846
6847 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6848 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6849 real_error = true;
6850 }
6851
6852 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6853 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6854 real_error = true;
6855 }
6856
6857 if (!real_error)
6858 return;
6859
6860 tg3_dump_state(tp);
6861
6862 tg3_flag_set(tp, ERROR_PROCESSED);
6863 tg3_reset_task_schedule(tp);
6864 }
6865
6866 static int tg3_poll(struct napi_struct *napi, int budget)
6867 {
6868 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6869 struct tg3 *tp = tnapi->tp;
6870 int work_done = 0;
6871 struct tg3_hw_status *sblk = tnapi->hw_status;
6872
6873 while (1) {
6874 if (sblk->status & SD_STATUS_ERROR)
6875 tg3_process_error(tp);
6876
6877 tg3_poll_link(tp);
6878
6879 work_done = tg3_poll_work(tnapi, work_done, budget);
6880
6881 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6882 goto tx_recovery;
6883
6884 if (unlikely(work_done >= budget))
6885 break;
6886
6887 if (tg3_flag(tp, TAGGED_STATUS)) {
6888 /* tnapi->last_tag is used in tg3_int_reenable() below
6889 * to tell the hw how much work has been processed,
6890 * so we must read it before checking for more work.
6891 */
6892 tnapi->last_tag = sblk->status_tag;
6893 tnapi->last_irq_tag = tnapi->last_tag;
6894 rmb();
6895 } else
6896 sblk->status &= ~SD_STATUS_UPDATED;
6897
6898 if (likely(!tg3_has_work(tnapi))) {
6899 napi_complete(napi);
6900 tg3_int_reenable(tnapi);
6901 break;
6902 }
6903 }
6904
6905 return work_done;
6906
6907 tx_recovery:
6908 /* work_done is guaranteed to be less than budget. */
6909 napi_complete(napi);
6910 tg3_reset_task_schedule(tp);
6911 return work_done;
6912 }
6913
6914 static void tg3_napi_disable(struct tg3 *tp)
6915 {
6916 int i;
6917
6918 for (i = tp->irq_cnt - 1; i >= 0; i--)
6919 napi_disable(&tp->napi[i].napi);
6920 }
6921
6922 static void tg3_napi_enable(struct tg3 *tp)
6923 {
6924 int i;
6925
6926 for (i = 0; i < tp->irq_cnt; i++)
6927 napi_enable(&tp->napi[i].napi);
6928 }
6929
6930 static void tg3_napi_init(struct tg3 *tp)
6931 {
6932 int i;
6933
6934 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6935 for (i = 1; i < tp->irq_cnt; i++)
6936 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6937 }
6938
6939 static void tg3_napi_fini(struct tg3 *tp)
6940 {
6941 int i;
6942
6943 for (i = 0; i < tp->irq_cnt; i++)
6944 netif_napi_del(&tp->napi[i].napi);
6945 }
6946
6947 static inline void tg3_netif_stop(struct tg3 *tp)
6948 {
6949 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6950 tg3_napi_disable(tp);
6951 netif_carrier_off(tp->dev);
6952 netif_tx_disable(tp->dev);
6953 }
6954
6955 /* tp->lock must be held */
6956 static inline void tg3_netif_start(struct tg3 *tp)
6957 {
6958 tg3_ptp_resume(tp);
6959
6960 /* NOTE: unconditional netif_tx_wake_all_queues is only
6961 * appropriate so long as all callers are assured to
6962 * have free tx slots (such as after tg3_init_hw)
6963 */
6964 netif_tx_wake_all_queues(tp->dev);
6965
6966 if (tp->link_up)
6967 netif_carrier_on(tp->dev);
6968
6969 tg3_napi_enable(tp);
6970 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6971 tg3_enable_ints(tp);
6972 }
6973
6974 static void tg3_irq_quiesce(struct tg3 *tp)
6975 {
6976 int i;
6977
6978 BUG_ON(tp->irq_sync);
6979
6980 tp->irq_sync = 1;
6981 smp_mb();
6982
6983 for (i = 0; i < tp->irq_cnt; i++)
6984 synchronize_irq(tp->napi[i].irq_vec);
6985 }
6986
6987 /* Fully shut down all tg3 driver activity elsewhere in the system.
6988 * If irq_sync is non-zero, then the IRQ handlers are synchronized
6989 * as well. Most of the time, this is not necessary except when
6990 * shutting down the device.
6991 */
6992 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6993 {
6994 spin_lock_bh(&tp->lock);
6995 if (irq_sync)
6996 tg3_irq_quiesce(tp);
6997 }
6998
6999 static inline void tg3_full_unlock(struct tg3 *tp)
7000 {
7001 spin_unlock_bh(&tp->lock);
7002 }
7003
7004 /* One-shot MSI handler - the chip automatically disables the interrupt
7005 * after sending the MSI, so the driver doesn't have to do it.
7006 */
7007 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7008 {
7009 struct tg3_napi *tnapi = dev_id;
7010 struct tg3 *tp = tnapi->tp;
7011
7012 prefetch(tnapi->hw_status);
7013 if (tnapi->rx_rcb)
7014 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7015
7016 if (likely(!tg3_irq_sync(tp)))
7017 napi_schedule(&tnapi->napi);
7018
7019 return IRQ_HANDLED;
7020 }
7021
7022 /* MSI ISR - No need to check for interrupt sharing and no need to
7023 * flush status block and interrupt mailbox. PCI ordering rules
7024 * guarantee that MSI will arrive after the status block.
7025 */
7026 static irqreturn_t tg3_msi(int irq, void *dev_id)
7027 {
7028 struct tg3_napi *tnapi = dev_id;
7029 struct tg3 *tp = tnapi->tp;
7030
7031 prefetch(tnapi->hw_status);
7032 if (tnapi->rx_rcb)
7033 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7034 /*
7035 * Writing any value to intr-mbox-0 clears PCI INTA# and
7036 * chip-internal interrupt pending events.
7037 * Writing non-zero to intr-mbox-0 additionally tells the
7038 * NIC to stop sending us irqs, engaging "in-intr-handler"
7039 * event coalescing.
7040 */
7041 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7042 if (likely(!tg3_irq_sync(tp)))
7043 napi_schedule(&tnapi->napi);
7044
7045 return IRQ_RETVAL(1);
7046 }
7047
7048 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7049 {
7050 struct tg3_napi *tnapi = dev_id;
7051 struct tg3 *tp = tnapi->tp;
7052 struct tg3_hw_status *sblk = tnapi->hw_status;
7053 unsigned int handled = 1;
7054
7055 /* In INTx mode, it is possible for the interrupt to arrive at
7056 * the CPU before the status block that was posted prior to the interrupt.
7057 * Reading the PCI State register will confirm whether the
7058 * interrupt is ours and will flush the status block.
7059 */
7060 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7061 if (tg3_flag(tp, CHIP_RESETTING) ||
7062 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7063 handled = 0;
7064 goto out;
7065 }
7066 }
7067
7068 /*
7069 * Writing any value to intr-mbox-0 clears PCI INTA# and
7070 * chip-internal interrupt pending events.
7071 * Writing non-zero to intr-mbox-0 additionally tells the
7072 * NIC to stop sending us irqs, engaging "in-intr-handler"
7073 * event coalescing.
7074 *
7075 * Flush the mailbox to de-assert the IRQ immediately to prevent
7076 * spurious interrupts. The flush impacts performance but
7077 * excessive spurious interrupts can be worse in some cases.
7078 */
7079 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7080 if (tg3_irq_sync(tp))
7081 goto out;
7082 sblk->status &= ~SD_STATUS_UPDATED;
7083 if (likely(tg3_has_work(tnapi))) {
7084 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7085 napi_schedule(&tnapi->napi);
7086 } else {
7087 /* No work, shared interrupt perhaps? Re-enable
7088 * interrupts, and flush that PCI write
7089 */
7090 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7091 0x00000000);
7092 }
7093 out:
7094 return IRQ_RETVAL(handled);
7095 }
7096
7097 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7098 {
7099 struct tg3_napi *tnapi = dev_id;
7100 struct tg3 *tp = tnapi->tp;
7101 struct tg3_hw_status *sblk = tnapi->hw_status;
7102 unsigned int handled = 1;
7103
7104 /* In INTx mode, it is possible for the interrupt to arrive at
7105 * the CPU before the status block that was posted prior to the interrupt.
7106 * Reading the PCI State register will confirm whether the
7107 * interrupt is ours and will flush the status block.
7108 */
7109 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7110 if (tg3_flag(tp, CHIP_RESETTING) ||
7111 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7112 handled = 0;
7113 goto out;
7114 }
7115 }
7116
7117 /*
7118 * Writing any value to intr-mbox-0 clears PCI INTA# and
7119 * chip-internal interrupt pending events.
7120 * Writing non-zero to intr-mbox-0 additionally tells the
7121 * NIC to stop sending us irqs, engaging "in-intr-handler"
7122 * event coalescing.
7123 *
7124 * Flush the mailbox to de-assert the IRQ immediately to prevent
7125 * spurious interrupts. The flush impacts performance but
7126 * excessive spurious interrupts can be worse in some cases.
7127 */
7128 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7129
7130 /*
7131 * In a shared interrupt configuration, sometimes other devices'
7132 * interrupts will scream. We record the current status tag here
7133 * so that the above check can report that the screaming interrupts
7134 * are unhandled. Eventually they will be silenced.
7135 */
7136 tnapi->last_irq_tag = sblk->status_tag;
7137
7138 if (tg3_irq_sync(tp))
7139 goto out;
7140
7141 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7142
7143 napi_schedule(&tnapi->napi);
7144
7145 out:
7146 return IRQ_RETVAL(handled);
7147 }
7148
7149 /* ISR for interrupt test */
7150 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7151 {
7152 struct tg3_napi *tnapi = dev_id;
7153 struct tg3 *tp = tnapi->tp;
7154 struct tg3_hw_status *sblk = tnapi->hw_status;
7155
7156 if ((sblk->status & SD_STATUS_UPDATED) ||
7157 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7158 tg3_disable_ints(tp);
7159 return IRQ_RETVAL(1);
7160 }
7161 return IRQ_RETVAL(0);
7162 }
7163
7164 #ifdef CONFIG_NET_POLL_CONTROLLER
7165 static void tg3_poll_controller(struct net_device *dev)
7166 {
7167 int i;
7168 struct tg3 *tp = netdev_priv(dev);
7169
7170 if (tg3_irq_sync(tp))
7171 return;
7172
7173 for (i = 0; i < tp->irq_cnt; i++)
7174 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7175 }
7176 #endif
7177
7178 static void tg3_tx_timeout(struct net_device *dev)
7179 {
7180 struct tg3 *tp = netdev_priv(dev);
7181
7182 if (netif_msg_tx_err(tp)) {
7183 netdev_err(dev, "transmit timed out, resetting\n");
7184 tg3_dump_state(tp);
7185 }
7186
7187 tg3_reset_task_schedule(tp);
7188 }
7189
7190 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7191 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7192 {
7193 u32 base = (u32) mapping & 0xffffffff;
7194
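/* base > 0xffffdcc0 restricts the test to addresses within 9024
 * bytes below a 4GB boundary; base + len + 8 < base then detects
 * 32-bit wraparound.  For example, base = 0xffffff00 with
 * len = 0x200 gives base + len + 8 = 0x108 < base, flagging a
 * buffer that straddles the boundary.
 */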
7195 return (base > 0xffffdcc0) && (base + len + 8 < base);
7196 }
7197
7198 /* Test for DMA addresses > 40-bit */
7199 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7200 int len)
7201 {
7202 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7203 if (tg3_flag(tp, 40BIT_DMA_BUG))
7204 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7205 return 0;
7206 #else
7207 return 0;
7208 #endif
7209 }
7210
7211 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7212 dma_addr_t mapping, u32 len, u32 flags,
7213 u32 mss, u32 vlan)
7214 {
7215 txbd->addr_hi = ((u64) mapping >> 32);
7216 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7217 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7218 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7219 }
7220
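/* Fill one or more tx BDs for a single DMA mapping, splitting the
 * mapping into tp->dma_limit sized chunks when the hardware cannot
 * cover it with one descriptor.  Returns true if the mapping trips a
 * known DMA hardware bug, in which case the caller falls back to
 * tigon3_dma_hwbug_workaround().
 */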
7221 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7222 dma_addr_t map, u32 len, u32 flags,
7223 u32 mss, u32 vlan)
7224 {
7225 struct tg3 *tp = tnapi->tp;
7226 bool hwbug = false;
7227
7228 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7229 hwbug = true;
7230
7231 if (tg3_4g_overflow_test(map, len))
7232 hwbug = true;
7233
7234 if (tg3_40bit_overflow_test(tp, map, len))
7235 hwbug = true;
7236
7237 if (tp->dma_limit) {
7238 u32 prvidx = *entry;
7239 u32 tmp_flag = flags & ~TXD_FLAG_END;
7240 while (len > tp->dma_limit && *budget) {
7241 u32 frag_len = tp->dma_limit;
7242 len -= tp->dma_limit;
7243
7244 /* Avoid the 8-byte DMA problem */
7245 if (len <= 8) {
7246 len += tp->dma_limit / 2;
7247 frag_len = tp->dma_limit / 2;
7248 }
7249
7250 tnapi->tx_buffers[*entry].fragmented = true;
7251
7252 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7253 frag_len, tmp_flag, mss, vlan);
7254 *budget -= 1;
7255 prvidx = *entry;
7256 *entry = NEXT_TX(*entry);
7257
7258 map += frag_len;
7259 }
7260
7261 if (len) {
7262 if (*budget) {
7263 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7264 len, flags, mss, vlan);
7265 *budget -= 1;
7266 *entry = NEXT_TX(*entry);
7267 } else {
7268 hwbug = true;
7269 tnapi->tx_buffers[prvidx].fragmented = false;
7270 }
7271 }
7272 } else {
7273 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7274 len, flags, mss, vlan);
7275 *entry = NEXT_TX(*entry);
7276 }
7277
7278 return hwbug;
7279 }
7280
7281 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7282 {
7283 int i;
7284 struct sk_buff *skb;
7285 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7286
7287 skb = txb->skb;
7288 txb->skb = NULL;
7289
7290 pci_unmap_single(tnapi->tp->pdev,
7291 dma_unmap_addr(txb, mapping),
7292 skb_headlen(skb),
7293 PCI_DMA_TODEVICE);
7294
7295 while (txb->fragmented) {
7296 txb->fragmented = false;
7297 entry = NEXT_TX(entry);
7298 txb = &tnapi->tx_buffers[entry];
7299 }
7300
7301 for (i = 0; i <= last; i++) {
7302 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7303
7304 entry = NEXT_TX(entry);
7305 txb = &tnapi->tx_buffers[entry];
7306
7307 pci_unmap_page(tnapi->tp->pdev,
7308 dma_unmap_addr(txb, mapping),
7309 skb_frag_size(frag), PCI_DMA_TODEVICE);
7310
7311 while (txb->fragmented) {
7312 txb->fragmented = false;
7313 entry = NEXT_TX(entry);
7314 txb = &tnapi->tx_buffers[entry];
7315 }
7316 }
7317 }
7318
7319 /* Work around 4GB and 40-bit hardware DMA bugs. */
7320 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7321 struct sk_buff **pskb,
7322 u32 *entry, u32 *budget,
7323 u32 base_flags, u32 mss, u32 vlan)
7324 {
7325 struct tg3 *tp = tnapi->tp;
7326 struct sk_buff *new_skb, *skb = *pskb;
7327 dma_addr_t new_addr = 0;
7328 int ret = 0;
7329
7330 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7331 new_skb = skb_copy(skb, GFP_ATOMIC);
7332 else {
7333 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7334
7335 new_skb = skb_copy_expand(skb,
7336 skb_headroom(skb) + more_headroom,
7337 skb_tailroom(skb), GFP_ATOMIC);
7338 }
7339
7340 if (!new_skb) {
7341 ret = -1;
7342 } else {
7343 /* New SKB is guaranteed to be linear. */
7344 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7345 PCI_DMA_TODEVICE);
7346 /* Make sure the mapping succeeded */
7347 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7348 dev_kfree_skb(new_skb);
7349 ret = -1;
7350 } else {
7351 u32 save_entry = *entry;
7352
7353 base_flags |= TXD_FLAG_END;
7354
7355 tnapi->tx_buffers[*entry].skb = new_skb;
7356 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7357 mapping, new_addr);
7358
7359 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7360 new_skb->len, base_flags,
7361 mss, vlan)) {
7362 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7363 dev_kfree_skb(new_skb);
7364 ret = -1;
7365 }
7366 }
7367 }
7368
7369 dev_kfree_skb(skb);
7370 *pskb = new_skb;
7371 return ret;
7372 }
7373
7374 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7375
7376 /* Use GSO to work around a rare TSO bug that may be triggered when the
7377 * TSO header is greater than 80 bytes.
7378 */
7379 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7380 {
7381 struct sk_buff *segs, *nskb;
7382 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7383
7384 /* Estimate the number of fragments in the worst case */
7385 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7386 netif_stop_queue(tp->dev);
7387
7388 /* netif_tx_stop_queue() must be done before checking
7389 * the tx index in tg3_tx_avail() below, because in
7390 * tg3_tx(), we update tx index before checking for
7391 * netif_tx_queue_stopped().
7392 */
7393 smp_mb();
7394 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7395 return NETDEV_TX_BUSY;
7396
7397 netif_wake_queue(tp->dev);
7398 }
7399
7400 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7401 if (IS_ERR(segs))
7402 goto tg3_tso_bug_end;
7403
7404 do {
7405 nskb = segs;
7406 segs = segs->next;
7407 nskb->next = NULL;
7408 tg3_start_xmit(nskb, tp->dev);
7409 } while (segs);
7410
7411 tg3_tso_bug_end:
7412 dev_kfree_skb(skb);
7413
7414 return NETDEV_TX_OK;
7415 }
7416
7417 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7418 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7419 */
7420 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7421 {
7422 struct tg3 *tp = netdev_priv(dev);
7423 u32 len, entry, base_flags, mss, vlan = 0;
7424 u32 budget;
7425 int i = -1, would_hit_hwbug;
7426 dma_addr_t mapping;
7427 struct tg3_napi *tnapi;
7428 struct netdev_queue *txq;
7429 unsigned int last;
7430
7431 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7432 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7433 if (tg3_flag(tp, ENABLE_TSS))
7434 tnapi++;
7435
7436 budget = tg3_tx_avail(tnapi);
7437
7438 /* We are running in BH disabled context with netif_tx_lock
7439 * and TX reclaim runs via tp->napi.poll inside of a software
7440 * interrupt. Furthermore, IRQ processing runs lockless so we have
7441 * no IRQ context deadlocks to worry about either. Rejoice!
7442 */
7443 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7444 if (!netif_tx_queue_stopped(txq)) {
7445 netif_tx_stop_queue(txq);
7446
7447 /* This is a hard error, log it. */
7448 netdev_err(dev,
7449 "BUG! Tx Ring full when queue awake!\n");
7450 }
7451 return NETDEV_TX_BUSY;
7452 }
7453
7454 entry = tnapi->tx_prod;
7455 base_flags = 0;
7456 if (skb->ip_summed == CHECKSUM_PARTIAL)
7457 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7458
7459 mss = skb_shinfo(skb)->gso_size;
7460 if (mss) {
7461 struct iphdr *iph;
7462 u32 tcp_opt_len, hdr_len;
7463
7464 if (skb_header_cloned(skb) &&
7465 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7466 goto drop;
7467
7468 iph = ip_hdr(skb);
7469 tcp_opt_len = tcp_optlen(skb);
7470
7471 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7472
7473 if (!skb_is_gso_v6(skb)) {
7474 iph->check = 0;
7475 iph->tot_len = htons(mss + hdr_len);
7476 }
7477
7478 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7479 tg3_flag(tp, TSO_BUG))
7480 return tg3_tso_bug(tp, skb);
7481
7482 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7483 TXD_FLAG_CPU_POST_DMA);
7484
7485 if (tg3_flag(tp, HW_TSO_1) ||
7486 tg3_flag(tp, HW_TSO_2) ||
7487 tg3_flag(tp, HW_TSO_3)) {
7488 tcp_hdr(skb)->check = 0;
7489 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7490 } else
7491 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7492 iph->daddr, 0,
7493 IPPROTO_TCP,
7494 0);
7495
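/* Each hardware TSO generation encodes the header length into spare
 * bits of the mss/flags words in its own layout; the per-chip
 * encodings follow.
 */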
7496 if (tg3_flag(tp, HW_TSO_3)) {
7497 mss |= (hdr_len & 0xc) << 12;
7498 if (hdr_len & 0x10)
7499 base_flags |= 0x00000010;
7500 base_flags |= (hdr_len & 0x3e0) << 5;
7501 } else if (tg3_flag(tp, HW_TSO_2))
7502 mss |= hdr_len << 9;
7503 else if (tg3_flag(tp, HW_TSO_1) ||
7504 tg3_asic_rev(tp) == ASIC_REV_5705) {
7505 if (tcp_opt_len || iph->ihl > 5) {
7506 int tsflags;
7507
7508 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7509 mss |= (tsflags << 11);
7510 }
7511 } else {
7512 if (tcp_opt_len || iph->ihl > 5) {
7513 int tsflags;
7514
7515 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7516 base_flags |= tsflags << 12;
7517 }
7518 }
7519 }
7520
7521 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7522 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7523 base_flags |= TXD_FLAG_JMB_PKT;
7524
7525 if (vlan_tx_tag_present(skb)) {
7526 base_flags |= TXD_FLAG_VLAN;
7527 vlan = vlan_tx_tag_get(skb);
7528 }
7529
7530 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7531 tg3_flag(tp, TX_TSTAMP_EN)) {
7532 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7533 base_flags |= TXD_FLAG_HWTSTAMP;
7534 }
7535
7536 len = skb_headlen(skb);
7537
7538 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7539 if (pci_dma_mapping_error(tp->pdev, mapping))
7540 goto drop;
7541
7542
7543 tnapi->tx_buffers[entry].skb = skb;
7544 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7545
7546 would_hit_hwbug = 0;
7547
7548 if (tg3_flag(tp, 5701_DMA_BUG))
7549 would_hit_hwbug = 1;
7550
7551 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7552 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7553 mss, vlan)) {
7554 would_hit_hwbug = 1;
7555 } else if (skb_shinfo(skb)->nr_frags > 0) {
7556 u32 tmp_mss = mss;
7557
7558 if (!tg3_flag(tp, HW_TSO_1) &&
7559 !tg3_flag(tp, HW_TSO_2) &&
7560 !tg3_flag(tp, HW_TSO_3))
7561 tmp_mss = 0;
7562
7563 /* Now loop through additional data
7564 * fragments, and queue them.
7565 */
7566 last = skb_shinfo(skb)->nr_frags - 1;
7567 for (i = 0; i <= last; i++) {
7568 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7569
7570 len = skb_frag_size(frag);
7571 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7572 len, DMA_TO_DEVICE);
7573
7574 tnapi->tx_buffers[entry].skb = NULL;
7575 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7576 mapping);
7577 if (dma_mapping_error(&tp->pdev->dev, mapping))
7578 goto dma_error;
7579
7580 if (!budget ||
7581 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7582 len, base_flags |
7583 ((i == last) ? TXD_FLAG_END : 0),
7584 tmp_mss, vlan)) {
7585 would_hit_hwbug = 1;
7586 break;
7587 }
7588 }
7589 }
7590
7591 if (would_hit_hwbug) {
7592 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7593
7594 /* If the workaround fails due to memory/mapping
7595 * failure, silently drop this packet.
7596 */
7597 entry = tnapi->tx_prod;
7598 budget = tg3_tx_avail(tnapi);
7599 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7600 base_flags, mss, vlan))
7601 goto drop_nofree;
7602 }
7603
7604 skb_tx_timestamp(skb);
7605 netdev_tx_sent_queue(txq, skb->len);
7606
7607 /* Sync BD data before updating mailbox */
7608 wmb();
7609
7610 /* Packets are ready, update Tx producer idx locally and on card. */
7611 tw32_tx_mbox(tnapi->prodmbox, entry);
7612
7613 tnapi->tx_prod = entry;
7614 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7615 netif_tx_stop_queue(txq);
7616
7617 /* netif_tx_stop_queue() must be done before checking
7618 * the tx index in tg3_tx_avail() below, because in
7619 * tg3_tx(), we update tx index before checking for
7620 * netif_tx_queue_stopped().
7621 */
7622 smp_mb();
7623 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7624 netif_tx_wake_queue(txq);
7625 }
7626
7627 mmiowb();
7628 return NETDEV_TX_OK;
7629
7630 dma_error:
7631 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7632 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7633 drop:
7634 dev_kfree_skb(skb);
7635 drop_nofree:
7636 tp->tx_dropped++;
7637 return NETDEV_TX_OK;
7638 }
7639
7640 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7641 {
7642 if (enable) {
7643 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7644 MAC_MODE_PORT_MODE_MASK);
7645
7646 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7647
7648 if (!tg3_flag(tp, 5705_PLUS))
7649 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7650
7651 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7652 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7653 else
7654 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7655 } else {
7656 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7657
7658 if (tg3_flag(tp, 5705_PLUS) ||
7659 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7660 tg3_asic_rev(tp) == ASIC_REV_5700)
7661 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7662 }
7663
7664 tw32(MAC_MODE, tp->mac_mode);
7665 udelay(40);
7666 }
7667
7668 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7669 {
7670 u32 val, bmcr, mac_mode, ptest = 0;
7671
7672 tg3_phy_toggle_apd(tp, false);
7673 tg3_phy_toggle_automdix(tp, 0);
7674
7675 if (extlpbk && tg3_phy_set_extloopbk(tp))
7676 return -EIO;
7677
7678 bmcr = BMCR_FULLDPLX;
7679 switch (speed) {
7680 case SPEED_10:
7681 break;
7682 case SPEED_100:
7683 bmcr |= BMCR_SPEED100;
7684 break;
7685 case SPEED_1000:
7686 default:
7687 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7688 speed = SPEED_100;
7689 bmcr |= BMCR_SPEED100;
7690 } else {
7691 speed = SPEED_1000;
7692 bmcr |= BMCR_SPEED1000;
7693 }
7694 }
7695
7696 if (extlpbk) {
7697 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7698 tg3_readphy(tp, MII_CTRL1000, &val);
7699 val |= CTL1000_AS_MASTER |
7700 CTL1000_ENABLE_MASTER;
7701 tg3_writephy(tp, MII_CTRL1000, val);
7702 } else {
7703 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7704 MII_TG3_FET_PTEST_TRIM_2;
7705 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7706 }
7707 } else
7708 bmcr |= BMCR_LOOPBACK;
7709
7710 tg3_writephy(tp, MII_BMCR, bmcr);
7711
7712 /* The write needs to be flushed for the FETs */
7713 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7714 tg3_readphy(tp, MII_BMCR, &bmcr);
7715
7716 udelay(40);
7717
7718 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7719 tg3_asic_rev(tp) == ASIC_REV_5785) {
7720 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7721 MII_TG3_FET_PTEST_FRC_TX_LINK |
7722 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7723
7724 /* The write needs to be flushed for the AC131 */
7725 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7726 }
7727
7728 /* Reset to prevent intermittently losing the 1st rx packet */
7729 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7730 tg3_flag(tp, 5780_CLASS)) {
7731 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7732 udelay(10);
7733 tw32_f(MAC_RX_MODE, tp->rx_mode);
7734 }
7735
7736 mac_mode = tp->mac_mode &
7737 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7738 if (speed == SPEED_1000)
7739 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7740 else
7741 mac_mode |= MAC_MODE_PORT_MODE_MII;
7742
7743 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7744 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7745
7746 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7747 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7748 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7749 mac_mode |= MAC_MODE_LINK_POLARITY;
7750
7751 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7752 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7753 }
7754
7755 tw32(MAC_MODE, mac_mode);
7756 udelay(40);
7757
7758 return 0;
7759 }
7760
7761 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7762 {
7763 struct tg3 *tp = netdev_priv(dev);
7764
7765 if (features & NETIF_F_LOOPBACK) {
7766 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7767 return;
7768
7769 spin_lock_bh(&tp->lock);
7770 tg3_mac_loopback(tp, true);
7771 netif_carrier_on(tp->dev);
7772 spin_unlock_bh(&tp->lock);
7773 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7774 } else {
7775 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7776 return;
7777
7778 spin_lock_bh(&tp->lock);
7779 tg3_mac_loopback(tp, false);
7780 /* Force link status check */
7781 tg3_setup_phy(tp, 1);
7782 spin_unlock_bh(&tp->lock);
7783 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7784 }
7785 }
7786
7787 static netdev_features_t tg3_fix_features(struct net_device *dev,
7788 netdev_features_t features)
7789 {
7790 struct tg3 *tp = netdev_priv(dev);
7791
7792 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7793 features &= ~NETIF_F_ALL_TSO;
7794
7795 return features;
7796 }
7797
7798 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7799 {
7800 netdev_features_t changed = dev->features ^ features;
7801
7802 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7803 tg3_set_loopback(dev, features);
7804
7805 return 0;
7806 }
7807
7808 static void tg3_rx_prodring_free(struct tg3 *tp,
7809 struct tg3_rx_prodring_set *tpr)
7810 {
7811 int i;
7812
7813 if (tpr != &tp->napi[0].prodring) {
7814 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7815 i = (i + 1) & tp->rx_std_ring_mask)
7816 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7817 tp->rx_pkt_map_sz);
7818
7819 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7820 for (i = tpr->rx_jmb_cons_idx;
7821 i != tpr->rx_jmb_prod_idx;
7822 i = (i + 1) & tp->rx_jmb_ring_mask) {
7823 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7824 TG3_RX_JMB_MAP_SZ);
7825 }
7826 }
7827
7828 return;
7829 }
7830
7831 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7832 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7833 tp->rx_pkt_map_sz);
7834
7835 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7836 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7837 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7838 TG3_RX_JMB_MAP_SZ);
7839 }
7840 }
7841
7842 /* Initialize rx rings for packet processing.
7843 *
7844 * The chip has been shut down and the driver detached from
7845 * the network stack, so no interrupts or new tx packets will
7846 * end up in the driver. tp->{tx,}lock are held and thus
7847 * we may not sleep.
7848 */
7849 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7850 struct tg3_rx_prodring_set *tpr)
7851 {
7852 u32 i, rx_pkt_dma_sz;
7853
7854 tpr->rx_std_cons_idx = 0;
7855 tpr->rx_std_prod_idx = 0;
7856 tpr->rx_jmb_cons_idx = 0;
7857 tpr->rx_jmb_prod_idx = 0;
7858
7859 if (tpr != &tp->napi[0].prodring) {
7860 memset(&tpr->rx_std_buffers[0], 0,
7861 TG3_RX_STD_BUFF_RING_SIZE(tp));
7862 if (tpr->rx_jmb_buffers)
7863 memset(&tpr->rx_jmb_buffers[0], 0,
7864 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7865 goto done;
7866 }
7867
7868 /* Zero out all descriptors. */
7869 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7870
7871 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7872 if (tg3_flag(tp, 5780_CLASS) &&
7873 tp->dev->mtu > ETH_DATA_LEN)
7874 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7875 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7876
7877 /* Initialize invariants of the rings; we only set this
7878 * stuff once. This works because the card does not
7879 * write into the rx buffer posting rings.
7880 */
7881 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7882 struct tg3_rx_buffer_desc *rxd;
7883
7884 rxd = &tpr->rx_std[i];
7885 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7886 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7887 rxd->opaque = (RXD_OPAQUE_RING_STD |
7888 (i << RXD_OPAQUE_INDEX_SHIFT));
7889 }
7890
7891 /* Now allocate fresh SKBs for each rx ring. */
7892 for (i = 0; i < tp->rx_pending; i++) {
7893 unsigned int frag_size;
7894
7895 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7896 &frag_size) < 0) {
7897 netdev_warn(tp->dev,
7898 "Using a smaller RX standard ring. Only "
7899 "%d out of %d buffers were allocated "
7900 "successfully\n", i, tp->rx_pending);
7901 if (i == 0)
7902 goto initfail;
7903 tp->rx_pending = i;
7904 break;
7905 }
7906 }
7907
7908 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7909 goto done;
7910
7911 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7912
7913 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7914 goto done;
7915
7916 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7917 struct tg3_rx_buffer_desc *rxd;
7918
7919 rxd = &tpr->rx_jmb[i].std;
7920 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7921 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7922 RXD_FLAG_JUMBO;
7923 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7924 (i << RXD_OPAQUE_INDEX_SHIFT));
7925 }
7926
7927 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7928 unsigned int frag_size;
7929
7930 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7931 &frag_size) < 0) {
7932 netdev_warn(tp->dev,
7933 "Using a smaller RX jumbo ring. Only %d "
7934 "out of %d buffers were allocated "
7935 "successfully\n", i, tp->rx_jumbo_pending);
7936 if (i == 0)
7937 goto initfail;
7938 tp->rx_jumbo_pending = i;
7939 break;
7940 }
7941 }
7942
7943 done:
7944 return 0;
7945
7946 initfail:
7947 tg3_rx_prodring_free(tp, tpr);
7948 return -ENOMEM;
7949 }
7950
7951 static void tg3_rx_prodring_fini(struct tg3 *tp,
7952 struct tg3_rx_prodring_set *tpr)
7953 {
7954 kfree(tpr->rx_std_buffers);
7955 tpr->rx_std_buffers = NULL;
7956 kfree(tpr->rx_jmb_buffers);
7957 tpr->rx_jmb_buffers = NULL;
7958 if (tpr->rx_std) {
7959 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7960 tpr->rx_std, tpr->rx_std_mapping);
7961 tpr->rx_std = NULL;
7962 }
7963 if (tpr->rx_jmb) {
7964 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7965 tpr->rx_jmb, tpr->rx_jmb_mapping);
7966 tpr->rx_jmb = NULL;
7967 }
7968 }
7969
7970 static int tg3_rx_prodring_init(struct tg3 *tp,
7971 struct tg3_rx_prodring_set *tpr)
7972 {
7973 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7974 GFP_KERNEL);
7975 if (!tpr->rx_std_buffers)
7976 return -ENOMEM;
7977
7978 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7979 TG3_RX_STD_RING_BYTES(tp),
7980 &tpr->rx_std_mapping,
7981 GFP_KERNEL);
7982 if (!tpr->rx_std)
7983 goto err_out;
7984
7985 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7986 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7987 GFP_KERNEL);
7988 if (!tpr->rx_jmb_buffers)
7989 goto err_out;
7990
7991 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7992 TG3_RX_JMB_RING_BYTES(tp),
7993 &tpr->rx_jmb_mapping,
7994 GFP_KERNEL);
7995 if (!tpr->rx_jmb)
7996 goto err_out;
7997 }
7998
7999 return 0;
8000
8001 err_out:
8002 tg3_rx_prodring_fini(tp, tpr);
8003 return -ENOMEM;
8004 }
8005
8006 /* Free up pending packets in all rx/tx rings.
8007 *
8008 * The chip has been shut down and the driver detached from
8009  * the networking stack, so no interrupts or new tx packets will
8010 * end up in the driver. tp->{tx,}lock is not held and we are not
8011 * in an interrupt context and thus may sleep.
8012 */
8013 static void tg3_free_rings(struct tg3 *tp)
8014 {
8015 int i, j;
8016
8017 for (j = 0; j < tp->irq_cnt; j++) {
8018 struct tg3_napi *tnapi = &tp->napi[j];
8019
8020 tg3_rx_prodring_free(tp, &tnapi->prodring);
8021
8022 if (!tnapi->tx_buffers)
8023 continue;
8024
8025 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8026 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8027
8028 if (!skb)
8029 continue;
8030
8031 tg3_tx_skb_unmap(tnapi, i,
8032 skb_shinfo(skb)->nr_frags - 1);
8033
8034 dev_kfree_skb_any(skb);
8035 }
8036 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8037 }
8038 }
8039
8040 /* Initialize tx/rx rings for packet processing.
8041 *
8042 * The chip has been shut down and the driver detached from
8043  * the networking stack, so no interrupts or new tx packets will
8044 * end up in the driver. tp->{tx,}lock are held and thus
8045 * we may not sleep.
8046 */
8047 static int tg3_init_rings(struct tg3 *tp)
8048 {
8049 int i;
8050
8051 /* Free up all the SKBs. */
8052 tg3_free_rings(tp);
8053
8054 for (i = 0; i < tp->irq_cnt; i++) {
8055 struct tg3_napi *tnapi = &tp->napi[i];
8056
8057 tnapi->last_tag = 0;
8058 tnapi->last_irq_tag = 0;
8059 tnapi->hw_status->status = 0;
8060 tnapi->hw_status->status_tag = 0;
8061 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8062
8063 tnapi->tx_prod = 0;
8064 tnapi->tx_cons = 0;
8065 if (tnapi->tx_ring)
8066 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8067
8068 tnapi->rx_rcb_ptr = 0;
8069 if (tnapi->rx_rcb)
8070 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8071
8072 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8073 tg3_free_rings(tp);
8074 return -ENOMEM;
8075 }
8076 }
8077
8078 return 0;
8079 }
8080
8081 static void tg3_mem_tx_release(struct tg3 *tp)
8082 {
8083 int i;
8084
8085 for (i = 0; i < tp->irq_max; i++) {
8086 struct tg3_napi *tnapi = &tp->napi[i];
8087
8088 if (tnapi->tx_ring) {
8089 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8090 tnapi->tx_ring, tnapi->tx_desc_mapping);
8091 tnapi->tx_ring = NULL;
8092 }
8093
8094 kfree(tnapi->tx_buffers);
8095 tnapi->tx_buffers = NULL;
8096 }
8097 }
8098
8099 static int tg3_mem_tx_acquire(struct tg3 *tp)
8100 {
8101 int i;
8102 struct tg3_napi *tnapi = &tp->napi[0];
8103
8104 /* If multivector TSS is enabled, vector 0 does not handle
8105 * tx interrupts. Don't allocate any resources for it.
8106 */
8107 if (tg3_flag(tp, ENABLE_TSS))
8108 tnapi++;
8109
8110 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8111 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8112 TG3_TX_RING_SIZE, GFP_KERNEL);
8113 if (!tnapi->tx_buffers)
8114 goto err_out;
8115
8116 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8117 TG3_TX_RING_BYTES,
8118 &tnapi->tx_desc_mapping,
8119 GFP_KERNEL);
8120 if (!tnapi->tx_ring)
8121 goto err_out;
8122 }
8123
8124 return 0;
8125
8126 err_out:
8127 tg3_mem_tx_release(tp);
8128 return -ENOMEM;
8129 }
8130
8131 static void tg3_mem_rx_release(struct tg3 *tp)
8132 {
8133 int i;
8134
8135 for (i = 0; i < tp->irq_max; i++) {
8136 struct tg3_napi *tnapi = &tp->napi[i];
8137
8138 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8139
8140 if (!tnapi->rx_rcb)
8141 continue;
8142
8143 dma_free_coherent(&tp->pdev->dev,
8144 TG3_RX_RCB_RING_BYTES(tp),
8145 tnapi->rx_rcb,
8146 tnapi->rx_rcb_mapping);
8147 tnapi->rx_rcb = NULL;
8148 }
8149 }
8150
8151 static int tg3_mem_rx_acquire(struct tg3 *tp)
8152 {
8153 unsigned int i, limit;
8154
8155 limit = tp->rxq_cnt;
8156
8157 /* If RSS is enabled, we need a (dummy) producer ring
8158 * set on vector zero. This is the true hw prodring.
8159 */
8160 if (tg3_flag(tp, ENABLE_RSS))
8161 limit++;
8162
8163 for (i = 0; i < limit; i++) {
8164 struct tg3_napi *tnapi = &tp->napi[i];
8165
8166 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8167 goto err_out;
8168
8169 /* If multivector RSS is enabled, vector 0
8170 * does not handle rx or tx interrupts.
8171 * Don't allocate any resources for it.
8172 */
8173 if (!i && tg3_flag(tp, ENABLE_RSS))
8174 continue;
8175
8176 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8177 TG3_RX_RCB_RING_BYTES(tp),
8178 &tnapi->rx_rcb_mapping,
8179 GFP_KERNEL);
8180 if (!tnapi->rx_rcb)
8181 goto err_out;
8182
8183 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8184 }
8185
8186 return 0;
8187
8188 err_out:
8189 tg3_mem_rx_release(tp);
8190 return -ENOMEM;
8191 }
8192
8193 /*
8194 * Must not be invoked with interrupt sources disabled and
8195  * the hardware shut down.
8196 */
8197 static void tg3_free_consistent(struct tg3 *tp)
8198 {
8199 int i;
8200
8201 for (i = 0; i < tp->irq_cnt; i++) {
8202 struct tg3_napi *tnapi = &tp->napi[i];
8203
8204 if (tnapi->hw_status) {
8205 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8206 tnapi->hw_status,
8207 tnapi->status_mapping);
8208 tnapi->hw_status = NULL;
8209 }
8210 }
8211
8212 tg3_mem_rx_release(tp);
8213 tg3_mem_tx_release(tp);
8214
8215 if (tp->hw_stats) {
8216 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8217 tp->hw_stats, tp->stats_mapping);
8218 tp->hw_stats = NULL;
8219 }
8220 }
8221
8222 /*
8223 * Must not be invoked with interrupt sources disabled and
8224  * the hardware shut down.  Can sleep.
8225 */
8226 static int tg3_alloc_consistent(struct tg3 *tp)
8227 {
8228 int i;
8229
8230 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8231 sizeof(struct tg3_hw_stats),
8232 &tp->stats_mapping,
8233 GFP_KERNEL);
8234 if (!tp->hw_stats)
8235 goto err_out;
8236
8237 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8238
8239 for (i = 0; i < tp->irq_cnt; i++) {
8240 struct tg3_napi *tnapi = &tp->napi[i];
8241 struct tg3_hw_status *sblk;
8242
8243 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8244 TG3_HW_STATUS_SIZE,
8245 &tnapi->status_mapping,
8246 GFP_KERNEL);
8247 if (!tnapi->hw_status)
8248 goto err_out;
8249
8250 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8251 sblk = tnapi->hw_status;
8252
8253 if (tg3_flag(tp, ENABLE_RSS)) {
8254 u16 *prodptr = NULL;
8255
8256 /*
8257 * When RSS is enabled, the status block format changes
8258 * slightly. The "rx_jumbo_consumer", "reserved",
8259 * and "rx_mini_consumer" members get mapped to the
8260 * other three rx return ring producer indexes.
8261 */
8262 switch (i) {
8263 case 1:
8264 prodptr = &sblk->idx[0].rx_producer;
8265 break;
8266 case 2:
8267 prodptr = &sblk->rx_jumbo_consumer;
8268 break;
8269 case 3:
8270 prodptr = &sblk->reserved;
8271 break;
8272 case 4:
8273 prodptr = &sblk->rx_mini_consumer;
8274 break;
8275 }
8276 tnapi->rx_rcb_prod_idx = prodptr;
8277 } else {
8278 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8279 }
8280 }
8281
8282 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8283 goto err_out;
8284
8285 return 0;
8286
8287 err_out:
8288 tg3_free_consistent(tp);
8289 return -ENOMEM;
8290 }
8291
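/* The quiesce loops below poll in 100 usec steps (udelay(100)), so
 * MAX_WAIT_CNT bounds each wait at roughly 100 msec.
 */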
8292 #define MAX_WAIT_CNT 1000
8293
8294 /* To stop a block, clear the enable bit and poll until it
8295 * clears. tp->lock is held.
8296 */
8297 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8298 {
8299 unsigned int i;
8300 u32 val;
8301
8302 if (tg3_flag(tp, 5705_PLUS)) {
8303 switch (ofs) {
8304 case RCVLSC_MODE:
8305 case DMAC_MODE:
8306 case MBFREE_MODE:
8307 case BUFMGR_MODE:
8308 case MEMARB_MODE:
8309 /* We can't enable/disable these bits of the
8310 			 * 5705/5750; just say success.
8311 */
8312 return 0;
8313
8314 default:
8315 break;
8316 }
8317 }
8318
8319 val = tr32(ofs);
8320 val &= ~enable_bit;
8321 tw32_f(ofs, val);
8322
8323 for (i = 0; i < MAX_WAIT_CNT; i++) {
8324 udelay(100);
8325 val = tr32(ofs);
8326 if ((val & enable_bit) == 0)
8327 break;
8328 }
8329
8330 if (i == MAX_WAIT_CNT && !silent) {
8331 dev_err(&tp->pdev->dev,
8332 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8333 ofs, enable_bit);
8334 return -ENODEV;
8335 }
8336
8337 return 0;
8338 }
8339
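/* Quiesce the chip ahead of a reset.  The ordering below appears
 * deliberate: the receive-side blocks are stopped first so no new
 * DMA traffic arrives, then the send-side blocks and DMA engines,
 * with the buffer manager and memory arbiter last.
 */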
8340 /* tp->lock is held. */
8341 static int tg3_abort_hw(struct tg3 *tp, int silent)
8342 {
8343 int i, err;
8344
8345 tg3_disable_ints(tp);
8346
8347 tp->rx_mode &= ~RX_MODE_ENABLE;
8348 tw32_f(MAC_RX_MODE, tp->rx_mode);
8349 udelay(10);
8350
8351 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8352 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8353 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8354 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8355 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8356 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8357
8358 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8359 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8360 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8361 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8362 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8363 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8364 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8365
8366 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8367 tw32_f(MAC_MODE, tp->mac_mode);
8368 udelay(40);
8369
8370 tp->tx_mode &= ~TX_MODE_ENABLE;
8371 tw32_f(MAC_TX_MODE, tp->tx_mode);
8372
8373 for (i = 0; i < MAX_WAIT_CNT; i++) {
8374 udelay(100);
8375 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8376 break;
8377 }
8378 if (i >= MAX_WAIT_CNT) {
8379 dev_err(&tp->pdev->dev,
8380 "%s timed out, TX_MODE_ENABLE will not clear "
8381 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8382 err |= -ENODEV;
8383 }
8384
8385 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8386 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8387 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8388
8389 tw32(FTQ_RESET, 0xffffffff);
8390 tw32(FTQ_RESET, 0x00000000);
8391
8392 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8393 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8394
8395 for (i = 0; i < tp->irq_cnt; i++) {
8396 struct tg3_napi *tnapi = &tp->napi[i];
8397 if (tnapi->hw_status)
8398 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8399 }
8400
8401 return err;
8402 }
8403
8404 /* Save PCI command register before chip reset */
8405 static void tg3_save_pci_state(struct tg3 *tp)
8406 {
8407 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8408 }
8409
8410 /* Restore PCI state after chip reset */
8411 static void tg3_restore_pci_state(struct tg3 *tp)
8412 {
8413 u32 val;
8414
8415 /* Re-enable indirect register accesses. */
8416 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8417 tp->misc_host_ctrl);
8418
8419 /* Set MAX PCI retry to zero. */
8420 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8421 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8422 tg3_flag(tp, PCIX_MODE))
8423 val |= PCISTATE_RETRY_SAME_DMA;
8424 /* Allow reads and writes to the APE register and memory space. */
8425 if (tg3_flag(tp, ENABLE_APE))
8426 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8427 PCISTATE_ALLOW_APE_SHMEM_WR |
8428 PCISTATE_ALLOW_APE_PSPACE_WR;
8429 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8430
8431 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8432
8433 if (!tg3_flag(tp, PCI_EXPRESS)) {
8434 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8435 tp->pci_cacheline_sz);
8436 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8437 tp->pci_lat_timer);
8438 }
8439
8440 /* Make sure PCI-X relaxed ordering bit is clear. */
8441 if (tg3_flag(tp, PCIX_MODE)) {
8442 u16 pcix_cmd;
8443
8444 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8445 &pcix_cmd);
8446 pcix_cmd &= ~PCI_X_CMD_ERO;
8447 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8448 pcix_cmd);
8449 }
8450
8451 if (tg3_flag(tp, 5780_CLASS)) {
8452
8453 /* Chip reset on 5780 will reset MSI enable bit,
8454 		 * so we need to restore it.
8455 */
8456 if (tg3_flag(tp, USING_MSI)) {
8457 u16 ctrl;
8458
8459 pci_read_config_word(tp->pdev,
8460 tp->msi_cap + PCI_MSI_FLAGS,
8461 &ctrl);
8462 pci_write_config_word(tp->pdev,
8463 tp->msi_cap + PCI_MSI_FLAGS,
8464 ctrl | PCI_MSI_FLAGS_ENABLE);
8465 val = tr32(MSGINT_MODE);
8466 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8467 }
8468 }
8469 }
8470
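/* Full chip reset via the GRC core-clock reset bit.  PCI
 * configuration state is saved and restored around the reset, and
 * the irq handlers are quiesced first because config-space and MMIO
 * accesses are unreliable while the reset is in flight.
 */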
8471 /* tp->lock is held. */
8472 static int tg3_chip_reset(struct tg3 *tp)
8473 {
8474 u32 val;
8475 void (*write_op)(struct tg3 *, u32, u32);
8476 int i, err;
8477
8478 tg3_nvram_lock(tp);
8479
8480 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8481
8482 /* No matching tg3_nvram_unlock() after this because
8483 * chip reset below will undo the nvram lock.
8484 */
8485 tp->nvram_lock_cnt = 0;
8486
8487 /* GRC_MISC_CFG core clock reset will clear the memory
8488 * enable bit in PCI register 4 and the MSI enable bit
8489 * on some chips, so we save relevant registers here.
8490 */
8491 tg3_save_pci_state(tp);
8492
8493 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8494 tg3_flag(tp, 5755_PLUS))
8495 tw32(GRC_FASTBOOT_PC, 0);
8496
8497 /*
8498 * We must avoid the readl() that normally takes place.
8499 * It locks machines, causes machine checks, and other
8500 * fun things. So, temporarily disable the 5701
8501 	 * hardware workaround while we do the reset.
8502 */
8503 write_op = tp->write32;
8504 if (write_op == tg3_write_flush_reg32)
8505 tp->write32 = tg3_write32;
8506
8507 /* Prevent the irq handler from reading or writing PCI registers
8508 * during chip reset when the memory enable bit in the PCI command
8509 * register may be cleared. The chip does not generate interrupt
8510 * at this time, but the irq handler may still be called due to irq
8511 * sharing or irqpoll.
8512 */
8513 tg3_flag_set(tp, CHIP_RESETTING);
8514 for (i = 0; i < tp->irq_cnt; i++) {
8515 struct tg3_napi *tnapi = &tp->napi[i];
8516 if (tnapi->hw_status) {
8517 tnapi->hw_status->status = 0;
8518 tnapi->hw_status->status_tag = 0;
8519 }
8520 tnapi->last_tag = 0;
8521 tnapi->last_irq_tag = 0;
8522 }
8523 smp_mb();
8524
8525 for (i = 0; i < tp->irq_cnt; i++)
8526 synchronize_irq(tp->napi[i].irq_vec);
8527
8528 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8529 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8530 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8531 }
8532
8533 /* do the reset */
8534 val = GRC_MISC_CFG_CORECLK_RESET;
8535
8536 if (tg3_flag(tp, PCI_EXPRESS)) {
8537 /* Force PCIe 1.0a mode */
8538 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8539 !tg3_flag(tp, 57765_PLUS) &&
8540 tr32(TG3_PCIE_PHY_TSTCTL) ==
8541 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8542 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8543
8544 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8545 tw32(GRC_MISC_CFG, (1 << 29));
8546 val |= (1 << 29);
8547 }
8548 }
8549
8550 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8551 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8552 tw32(GRC_VCPU_EXT_CTRL,
8553 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8554 }
8555
8556 	/* Manage GPHY power for all PCIe devices lacking a CPMU. */
8557 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8558 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8559
8560 tw32(GRC_MISC_CFG, val);
8561
8562 /* restore 5701 hardware bug workaround write method */
8563 tp->write32 = write_op;
8564
8565 /* Unfortunately, we have to delay before the PCI read back.
8566 	 * Some 575X chips will not even respond to a PCI cfg access
8567 * when the reset command is given to the chip.
8568 *
8569 * How do these hardware designers expect things to work
8570 * properly if the PCI write is posted for a long period
8571 	 * of time?  It is always necessary to have some method by
8572 	 * which a register read-back can occur to push out the
8573 	 * write that does the reset.
8574 *
8575 	 * For most tg3 variants the trick below has worked.
8576 * Ho hum...
8577 */
8578 udelay(120);
8579
8580 /* Flush PCI posted writes. The normal MMIO registers
8581 * are inaccessible at this time so this is the only
8582 	 * way to do this reliably (actually, this is no longer
8583 * the case, see above). I tried to use indirect
8584 * register read/write but this upset some 5701 variants.
8585 */
8586 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8587
8588 udelay(120);
8589
8590 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8591 u16 val16;
8592
8593 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8594 int j;
8595 u32 cfg_val;
8596
8597 /* Wait for link training to complete. */
8598 for (j = 0; j < 5000; j++)
8599 udelay(100);
8600
8601 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8602 pci_write_config_dword(tp->pdev, 0xc4,
8603 cfg_val | (1 << 15));
8604 }
8605
8606 /* Clear the "no snoop" and "relaxed ordering" bits. */
8607 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8608 /*
8609 * Older PCIe devices only support the 128 byte
8610 * MPS setting. Enforce the restriction.
8611 */
8612 if (!tg3_flag(tp, CPMU_PRESENT))
8613 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8614 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8615
8616 /* Clear error status */
8617 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8618 PCI_EXP_DEVSTA_CED |
8619 PCI_EXP_DEVSTA_NFED |
8620 PCI_EXP_DEVSTA_FED |
8621 PCI_EXP_DEVSTA_URD);
8622 }
8623
8624 tg3_restore_pci_state(tp);
8625
8626 tg3_flag_clear(tp, CHIP_RESETTING);
8627 tg3_flag_clear(tp, ERROR_PROCESSED);
8628
8629 val = 0;
8630 if (tg3_flag(tp, 5780_CLASS))
8631 val = tr32(MEMARB_MODE);
8632 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8633
8634 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8635 tg3_stop_fw(tp);
8636 tw32(0x5000, 0x400);
8637 }
8638
8639 if (tg3_flag(tp, IS_SSB_CORE)) {
8640 /*
8641 		 * BCM4785: the internal ROM may be defective, so stop
8642 		 * the Rx RISC CPU, which is not required for operation,
8643 		 * to avoid any fallout from using it.
8644 */
8645 tg3_stop_fw(tp);
8646 tg3_halt_cpu(tp, RX_CPU_BASE);
8647 }
8648
8649 tw32(GRC_MODE, tp->grc_mode);
8650
8651 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8652 val = tr32(0xc4);
8653
8654 tw32(0xc4, val | (1 << 15));
8655 }
8656
8657 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8658 tg3_asic_rev(tp) == ASIC_REV_5705) {
8659 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8660 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8661 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8662 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8663 }
8664
8665 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8666 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8667 val = tp->mac_mode;
8668 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8669 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8670 val = tp->mac_mode;
8671 } else
8672 val = 0;
8673
8674 tw32_f(MAC_MODE, val);
8675 udelay(40);
8676
8677 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8678
8679 err = tg3_poll_fw(tp);
8680 if (err)
8681 return err;
8682
8683 tg3_mdio_start(tp);
8684
8685 if (tg3_flag(tp, PCI_EXPRESS) &&
8686 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8687 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8688 !tg3_flag(tp, 57765_PLUS)) {
8689 val = tr32(0x7c00);
8690
8691 tw32(0x7c00, val | (1 << 25));
8692 }
8693
8694 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8695 val = tr32(TG3_CPMU_CLCK_ORIDE);
8696 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8697 }
8698
8699 /* Reprobe ASF enable state. */
8700 tg3_flag_clear(tp, ENABLE_ASF);
8701 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8702 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8703 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8704 u32 nic_cfg;
8705
8706 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8707 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8708 tg3_flag_set(tp, ENABLE_ASF);
8709 tp->last_event_jiffies = jiffies;
8710 if (tg3_flag(tp, 5750_PLUS))
8711 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8712 }
8713 }
8714
8715 return 0;
8716 }
8717
8718 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8719 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8720
8721 /* tp->lock is held. */
8722 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8723 {
8724 int err;
8725
8726 tg3_stop_fw(tp);
8727
8728 tg3_write_sig_pre_reset(tp, kind);
8729
8730 tg3_abort_hw(tp, silent);
8731 err = tg3_chip_reset(tp);
8732
8733 __tg3_set_mac_addr(tp, 0);
8734
8735 tg3_write_sig_legacy(tp, kind);
8736 tg3_write_sig_post_reset(tp, kind);
8737
8738 if (tp->hw_stats) {
8739 /* Save the stats across chip resets... */
8740 tg3_get_nstats(tp, &tp->net_stats_prev);
8741 tg3_get_estats(tp, &tp->estats_prev);
8742
8743 /* And make sure the next sample is new data */
8744 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8745 }
8746
8747 if (err)
8748 return err;
8749
8750 return 0;
8751 }
8752
8753 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8754 {
8755 struct tg3 *tp = netdev_priv(dev);
8756 struct sockaddr *addr = p;
8757 int err = 0, skip_mac_1 = 0;
8758
8759 if (!is_valid_ether_addr(addr->sa_data))
8760 return -EADDRNOTAVAIL;
8761
8762 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8763
8764 if (!netif_running(dev))
8765 return 0;
8766
8767 if (tg3_flag(tp, ENABLE_ASF)) {
8768 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8769
8770 addr0_high = tr32(MAC_ADDR_0_HIGH);
8771 addr0_low = tr32(MAC_ADDR_0_LOW);
8772 addr1_high = tr32(MAC_ADDR_1_HIGH);
8773 addr1_low = tr32(MAC_ADDR_1_LOW);
8774
8775 /* Skip MAC addr 1 if ASF is using it. */
8776 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8777 !(addr1_high == 0 && addr1_low == 0))
8778 skip_mac_1 = 1;
8779 }
8780 spin_lock_bh(&tp->lock);
8781 __tg3_set_mac_addr(tp, skip_mac_1);
8782 spin_unlock_bh(&tp->lock);
8783
8784 return err;
8785 }
8786
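/* Write one TG3_BDINFO control block in NIC SRAM: the 64-bit host
 * DMA address of the ring (as high/low halves), the maxlen/flags
 * word, and, on pre-5705 chips only, the ring's NIC SRAM address.
 */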
8787 /* tp->lock is held. */
8788 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8789 dma_addr_t mapping, u32 maxlen_flags,
8790 u32 nic_addr)
8791 {
8792 tg3_write_mem(tp,
8793 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8794 ((u64) mapping >> 32));
8795 tg3_write_mem(tp,
8796 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8797 ((u64) mapping & 0xffffffff));
8798 tg3_write_mem(tp,
8799 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8800 maxlen_flags);
8801
8802 if (!tg3_flag(tp, 5705_PLUS))
8803 tg3_write_mem(tp,
8804 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8805 nic_addr);
8806 }
8807
8808
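/* Program tx host coalescing.  With TSS, each tx queue (vector 1
 * onward) has its own register triplet; the per-vector registers
 * start at the *_VEC1 addresses and are spaced 0x18 bytes apart.
 * Unused vectors are zeroed so they never coalesce anything.
 * tg3_coal_rx_init() below mirrors this for the rx side.
 */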
8809 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8810 {
8811 int i = 0;
8812
8813 if (!tg3_flag(tp, ENABLE_TSS)) {
8814 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8815 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8816 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8817 } else {
8818 tw32(HOSTCC_TXCOL_TICKS, 0);
8819 tw32(HOSTCC_TXMAX_FRAMES, 0);
8820 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8821
8822 for (; i < tp->txq_cnt; i++) {
8823 u32 reg;
8824
8825 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8826 tw32(reg, ec->tx_coalesce_usecs);
8827 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8828 tw32(reg, ec->tx_max_coalesced_frames);
8829 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8830 tw32(reg, ec->tx_max_coalesced_frames_irq);
8831 }
8832 }
8833
8834 for (; i < tp->irq_max - 1; i++) {
8835 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8836 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8837 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8838 }
8839 }
8840
8841 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8842 {
8843 int i = 0;
8844 u32 limit = tp->rxq_cnt;
8845
8846 if (!tg3_flag(tp, ENABLE_RSS)) {
8847 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8848 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8849 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8850 limit--;
8851 } else {
8852 tw32(HOSTCC_RXCOL_TICKS, 0);
8853 tw32(HOSTCC_RXMAX_FRAMES, 0);
8854 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8855 }
8856
8857 for (; i < limit; i++) {
8858 u32 reg;
8859
8860 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8861 tw32(reg, ec->rx_coalesce_usecs);
8862 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8863 tw32(reg, ec->rx_max_coalesced_frames);
8864 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8865 tw32(reg, ec->rx_max_coalesced_frames_irq);
8866 }
8867
8868 for (; i < tp->irq_max - 1; i++) {
8869 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8870 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8871 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8872 }
8873 }
8874
8875 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8876 {
8877 tg3_coal_tx_init(tp, ec);
8878 tg3_coal_rx_init(tp, ec);
8879
8880 if (!tg3_flag(tp, 5705_PLUS)) {
8881 u32 val = ec->stats_block_coalesce_usecs;
8882
8883 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8884 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8885
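		/* Stats block coalescing is pointless while the link is down */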
8886 if (!tp->link_up)
8887 val = 0;
8888
8889 tw32(HOSTCC_STAT_COAL_TICKS, val);
8890 }
8891 }
8892
8893 /* tp->lock is held. */
8894 static void tg3_rings_reset(struct tg3 *tp)
8895 {
8896 int i;
8897 u32 stblk, txrcb, rxrcb, limit;
8898 struct tg3_napi *tnapi = &tp->napi[0];
8899
8900 /* Disable all transmit rings but the first. */
8901 if (!tg3_flag(tp, 5705_PLUS))
8902 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8903 else if (tg3_flag(tp, 5717_PLUS))
8904 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8905 else if (tg3_flag(tp, 57765_CLASS) ||
8906 tg3_asic_rev(tp) == ASIC_REV_5762)
8907 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8908 else
8909 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8910
8911 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8912 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8913 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8914 BDINFO_FLAGS_DISABLED);
8915
8916
8917 /* Disable all receive return rings but the first. */
8918 if (tg3_flag(tp, 5717_PLUS))
8919 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8920 else if (!tg3_flag(tp, 5705_PLUS))
8921 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8922 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8923 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8924 tg3_flag(tp, 57765_CLASS))
8925 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8926 else
8927 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8928
8929 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8930 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8931 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8932 BDINFO_FLAGS_DISABLED);
8933
8934 /* Disable interrupts */
8935 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8936 tp->napi[0].chk_msi_cnt = 0;
8937 tp->napi[0].last_rx_cons = 0;
8938 tp->napi[0].last_tx_cons = 0;
8939
8940 /* Zero mailbox registers. */
8941 if (tg3_flag(tp, SUPPORT_MSIX)) {
8942 for (i = 1; i < tp->irq_max; i++) {
8943 tp->napi[i].tx_prod = 0;
8944 tp->napi[i].tx_cons = 0;
8945 if (tg3_flag(tp, ENABLE_TSS))
8946 tw32_mailbox(tp->napi[i].prodmbox, 0);
8947 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8948 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8949 tp->napi[i].chk_msi_cnt = 0;
8950 tp->napi[i].last_rx_cons = 0;
8951 tp->napi[i].last_tx_cons = 0;
8952 }
8953 if (!tg3_flag(tp, ENABLE_TSS))
8954 tw32_mailbox(tp->napi[0].prodmbox, 0);
8955 } else {
8956 tp->napi[0].tx_prod = 0;
8957 tp->napi[0].tx_cons = 0;
8958 tw32_mailbox(tp->napi[0].prodmbox, 0);
8959 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8960 }
8961
8962 /* Make sure the NIC-based send BD rings are disabled. */
8963 if (!tg3_flag(tp, 5705_PLUS)) {
8964 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8965 for (i = 0; i < 16; i++)
8966 tw32_tx_mbox(mbox + i * 8, 0);
8967 }
8968
8969 txrcb = NIC_SRAM_SEND_RCB;
8970 rxrcb = NIC_SRAM_RCV_RET_RCB;
8971
8972 	/* Clear the status block in RAM. */
8973 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8974
8975 /* Set status block DMA address */
8976 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8977 ((u64) tnapi->status_mapping >> 32));
8978 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8979 ((u64) tnapi->status_mapping & 0xffffffff));
8980
8981 if (tnapi->tx_ring) {
8982 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8983 (TG3_TX_RING_SIZE <<
8984 BDINFO_FLAGS_MAXLEN_SHIFT),
8985 NIC_SRAM_TX_BUFFER_DESC);
8986 txrcb += TG3_BDINFO_SIZE;
8987 }
8988
8989 if (tnapi->rx_rcb) {
8990 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8991 (tp->rx_ret_ring_mask + 1) <<
8992 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8993 rxrcb += TG3_BDINFO_SIZE;
8994 }
8995
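	/* Each additional vector's status block host address goes into a
	 * consecutive 8-byte (high/low) register pair starting at ring 1.
	 */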
8996 stblk = HOSTCC_STATBLCK_RING1;
8997
8998 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8999 u64 mapping = (u64)tnapi->status_mapping;
9000 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9001 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9002
9003 		/* Clear the status block in RAM. */
9004 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9005
9006 if (tnapi->tx_ring) {
9007 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9008 (TG3_TX_RING_SIZE <<
9009 BDINFO_FLAGS_MAXLEN_SHIFT),
9010 NIC_SRAM_TX_BUFFER_DESC);
9011 txrcb += TG3_BDINFO_SIZE;
9012 }
9013
9014 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9015 ((tp->rx_ret_ring_mask + 1) <<
9016 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9017
9018 stblk += 8;
9019 rxrcb += TG3_BDINFO_SIZE;
9020 }
9021 }
9022
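/* Pick the rx buffer-descriptor replenish thresholds: the NIC-side
 * threshold is capped at half the on-chip BD cache and at
 * rx_std_max_post, the host-side threshold at one eighth of the
 * pending count, and the smaller of the two is programmed.
 */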
9023 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9024 {
9025 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9026
9027 if (!tg3_flag(tp, 5750_PLUS) ||
9028 tg3_flag(tp, 5780_CLASS) ||
9029 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9030 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9031 tg3_flag(tp, 57765_PLUS))
9032 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9033 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9034 tg3_asic_rev(tp) == ASIC_REV_5787)
9035 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9036 else
9037 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9038
9039 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9040 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9041
9042 val = min(nic_rep_thresh, host_rep_thresh);
9043 tw32(RCVBDI_STD_THRESH, val);
9044
9045 if (tg3_flag(tp, 57765_PLUS))
9046 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9047
9048 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9049 return;
9050
9051 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9052
9053 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9054
9055 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9056 tw32(RCVBDI_JUMBO_THRESH, val);
9057
9058 if (tg3_flag(tp, 57765_PLUS))
9059 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9060 }
9061
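/* Bit-reflected CRC-32 (polynomial 0xedb88320), the same CRC used
 * for the Ethernet FCS.  The multicast hash filter below keys off
 * the low 7 bits of the inverted result.
 */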
9062 static inline u32 calc_crc(unsigned char *buf, int len)
9063 {
9064 u32 reg;
9065 u32 tmp;
9066 int j, k;
9067
9068 reg = 0xffffffff;
9069
9070 for (j = 0; j < len; j++) {
9071 reg ^= buf[j];
9072
9073 for (k = 0; k < 8; k++) {
9074 tmp = reg & 0x01;
9075
9076 reg >>= 1;
9077
9078 if (tmp)
9079 reg ^= 0xedb88320;
9080 }
9081 }
9082
9083 return ~reg;
9084 }
9085
9086 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9087 {
9088 /* accept or reject all multicast frames */
9089 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9090 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9091 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9092 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9093 }
9094
9095 static void __tg3_set_rx_mode(struct net_device *dev)
9096 {
9097 struct tg3 *tp = netdev_priv(dev);
9098 u32 rx_mode;
9099
9100 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9101 RX_MODE_KEEP_VLAN_TAG);
9102
9103 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9104 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9105 * flag clear.
9106 */
9107 if (!tg3_flag(tp, ENABLE_ASF))
9108 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9109 #endif
9110
9111 if (dev->flags & IFF_PROMISC) {
9112 /* Promiscuous mode. */
9113 rx_mode |= RX_MODE_PROMISC;
9114 } else if (dev->flags & IFF_ALLMULTI) {
9115 /* Accept all multicast. */
9116 tg3_set_multi(tp, 1);
9117 } else if (netdev_mc_empty(dev)) {
9118 /* Reject all multicast. */
9119 tg3_set_multi(tp, 0);
9120 } else {
9121 /* Accept one or more multicast(s). */
9122 struct netdev_hw_addr *ha;
9123 u32 mc_filter[4] = { 0, };
9124 u32 regidx;
9125 u32 bit;
9126 u32 crc;
9127
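		/* Hash each address into one of 128 filter bits: bits 6:5
		 * of the 7-bit hash select one of the four MAC_HASH_REG_*
		 * registers, bits 4:0 select the bit within it.
		 */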
9128 netdev_for_each_mc_addr(ha, dev) {
9129 crc = calc_crc(ha->addr, ETH_ALEN);
9130 bit = ~crc & 0x7f;
9131 regidx = (bit & 0x60) >> 5;
9132 bit &= 0x1f;
9133 mc_filter[regidx] |= (1 << bit);
9134 }
9135
9136 tw32(MAC_HASH_REG_0, mc_filter[0]);
9137 tw32(MAC_HASH_REG_1, mc_filter[1]);
9138 tw32(MAC_HASH_REG_2, mc_filter[2]);
9139 tw32(MAC_HASH_REG_3, mc_filter[3]);
9140 }
9141
9142 if (rx_mode != tp->rx_mode) {
9143 tp->rx_mode = rx_mode;
9144 tw32_f(MAC_RX_MODE, rx_mode);
9145 udelay(10);
9146 }
9147 }
9148
9149 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9150 {
9151 int i;
9152
9153 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9154 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9155 }
9156
9157 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9158 {
9159 int i;
9160
9161 if (!tg3_flag(tp, SUPPORT_MSIX))
9162 return;
9163
9164 if (tp->rxq_cnt == 1) {
9165 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9166 return;
9167 }
9168
9169 	/* Validate the table against the current rx queue count */
9170 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9171 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9172 break;
9173 }
9174
9175 if (i != TG3_RSS_INDIR_TBL_SIZE)
9176 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9177 }
9178
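/* Write the RSS indirection table to the MAC.  Each 32-bit register
 * packs eight consecutive 4-bit table entries, first entry in the
 * most significant nibble, so the table occupies
 * TG3_RSS_INDIR_TBL_SIZE / 8 registers starting at
 * MAC_RSS_INDIR_TBL_0.
 */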
9179 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9180 {
9181 int i = 0;
9182 u32 reg = MAC_RSS_INDIR_TBL_0;
9183
9184 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9185 u32 val = tp->rss_ind_tbl[i];
9186 i++;
9187 for (; i % 8; i++) {
9188 val <<= 4;
9189 val |= tp->rss_ind_tbl[i];
9190 }
9191 tw32(reg, val);
9192 reg += 4;
9193 }
9194 }
9195
9196 /* tp->lock is held. */
9197 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9198 {
9199 u32 val, rdmac_mode;
9200 int i, err, limit;
9201 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9202
9203 tg3_disable_ints(tp);
9204
9205 tg3_stop_fw(tp);
9206
9207 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9208
9209 if (tg3_flag(tp, INIT_COMPLETE))
9210 tg3_abort_hw(tp, 1);
9211
9212 /* Enable MAC control of LPI */
9213 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9214 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9215 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9216 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9217 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9218
9219 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9220
9221 tw32_f(TG3_CPMU_EEE_CTRL,
9222 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9223
9224 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9225 TG3_CPMU_EEEMD_LPI_IN_TX |
9226 TG3_CPMU_EEEMD_LPI_IN_RX |
9227 TG3_CPMU_EEEMD_EEE_ENABLE;
9228
9229 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9230 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9231
9232 if (tg3_flag(tp, ENABLE_APE))
9233 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9234
9235 tw32_f(TG3_CPMU_EEE_MODE, val);
9236
9237 tw32_f(TG3_CPMU_EEE_DBTMR1,
9238 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9239 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9240
9241 tw32_f(TG3_CPMU_EEE_DBTMR2,
9242 TG3_CPMU_DBTMR2_APE_TX_2047US |
9243 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9244 }
9245
9246 if (reset_phy)
9247 tg3_phy_reset(tp);
9248
9249 err = tg3_chip_reset(tp);
9250 if (err)
9251 return err;
9252
9253 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9254
9255 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9256 val = tr32(TG3_CPMU_CTRL);
9257 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9258 tw32(TG3_CPMU_CTRL, val);
9259
9260 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9261 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9262 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9263 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9264
9265 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9266 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9267 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9268 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9269
9270 val = tr32(TG3_CPMU_HST_ACC);
9271 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9272 val |= CPMU_HST_ACC_MACCLK_6_25;
9273 tw32(TG3_CPMU_HST_ACC, val);
9274 }
9275
9276 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9277 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9278 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9279 PCIE_PWR_MGMT_L1_THRESH_4MS;
9280 tw32(PCIE_PWR_MGMT_THRESH, val);
9281
9282 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9283 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9284
9285 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9286
9287 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9288 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9289 }
9290
9291 if (tg3_flag(tp, L1PLLPD_EN)) {
9292 u32 grc_mode = tr32(GRC_MODE);
9293
9294 /* Access the lower 1K of PL PCIE block registers. */
9295 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9296 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9297
9298 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9299 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9300 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9301
9302 tw32(GRC_MODE, grc_mode);
9303 }
9304
9305 if (tg3_flag(tp, 57765_CLASS)) {
9306 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9307 u32 grc_mode = tr32(GRC_MODE);
9308
9309 /* Access the lower 1K of PL PCIE block registers. */
9310 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9311 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9312
9313 val = tr32(TG3_PCIE_TLDLPL_PORT +
9314 TG3_PCIE_PL_LO_PHYCTL5);
9315 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9316 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9317
9318 tw32(GRC_MODE, grc_mode);
9319 }
9320
9321 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9322 u32 grc_mode;
9323
9324 /* Fix transmit hangs */
9325 val = tr32(TG3_CPMU_PADRNG_CTL);
9326 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9327 tw32(TG3_CPMU_PADRNG_CTL, val);
9328
9329 grc_mode = tr32(GRC_MODE);
9330
9331 /* Access the lower 1K of DL PCIE block registers. */
9332 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9333 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9334
9335 val = tr32(TG3_PCIE_TLDLPL_PORT +
9336 TG3_PCIE_DL_LO_FTSMAX);
9337 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9338 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9339 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9340
9341 tw32(GRC_MODE, grc_mode);
9342 }
9343
9344 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9345 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9346 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9347 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9348 }
9349
9350 /* This works around an issue with Athlon chipsets on
9351 * B3 tigon3 silicon. This bit has no effect on any
9352 * other revision. But do not set this on PCI Express
9353 * chips and don't even touch the clocks if the CPMU is present.
9354 */
9355 if (!tg3_flag(tp, CPMU_PRESENT)) {
9356 if (!tg3_flag(tp, PCI_EXPRESS))
9357 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9358 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9359 }
9360
9361 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9362 tg3_flag(tp, PCIX_MODE)) {
9363 val = tr32(TG3PCI_PCISTATE);
9364 val |= PCISTATE_RETRY_SAME_DMA;
9365 tw32(TG3PCI_PCISTATE, val);
9366 }
9367
9368 if (tg3_flag(tp, ENABLE_APE)) {
9369 /* Allow reads and writes to the
9370 * APE register and memory space.
9371 */
9372 val = tr32(TG3PCI_PCISTATE);
9373 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9374 PCISTATE_ALLOW_APE_SHMEM_WR |
9375 PCISTATE_ALLOW_APE_PSPACE_WR;
9376 tw32(TG3PCI_PCISTATE, val);
9377 }
9378
9379 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9380 /* Enable some hw fixes. */
9381 val = tr32(TG3PCI_MSI_DATA);
9382 val |= (1 << 26) | (1 << 28) | (1 << 29);
9383 tw32(TG3PCI_MSI_DATA, val);
9384 }
9385
9386 /* Descriptor ring init may make accesses to the
9387 	 * NIC SRAM area to set up the TX descriptors, so we
9388 * can only do this after the hardware has been
9389 * successfully reset.
9390 */
9391 err = tg3_init_rings(tp);
9392 if (err)
9393 return err;
9394
9395 if (tg3_flag(tp, 57765_PLUS)) {
9396 val = tr32(TG3PCI_DMA_RW_CTRL) &
9397 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9398 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9399 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9400 if (!tg3_flag(tp, 57765_CLASS) &&
9401 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9402 tg3_asic_rev(tp) != ASIC_REV_5762)
9403 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9404 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9405 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9406 tg3_asic_rev(tp) != ASIC_REV_5761) {
9407 		/* This value is determined during the probe-time DMA
9408 		 * engine test, tg3_test_dma().
9409 */
9410 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9411 }
9412
9413 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9414 GRC_MODE_4X_NIC_SEND_RINGS |
9415 GRC_MODE_NO_TX_PHDR_CSUM |
9416 GRC_MODE_NO_RX_PHDR_CSUM);
9417 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9418
9419 /* Pseudo-header checksum is done by hardware logic and not
9420 	 * the offload processors, so make the chip do the pseudo-
9421 * header checksums on receive. For transmit it is more
9422 * convenient to do the pseudo-header checksum in software
9423 * as Linux does that on transmit for us in all cases.
9424 */
9425 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9426
9427 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9428 if (tp->rxptpctl)
9429 tw32(TG3_RX_PTP_CTL,
9430 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9431
9432 if (tg3_flag(tp, PTP_CAPABLE))
9433 val |= GRC_MODE_TIME_SYNC_ENABLE;
9434
9435 tw32(GRC_MODE, tp->grc_mode | val);
9436
9437 	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
9438 val = tr32(GRC_MISC_CFG);
9439 val &= ~0xff;
9440 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9441 tw32(GRC_MISC_CFG, val);
9442
9443 /* Initialize MBUF/DESC pool. */
9444 if (tg3_flag(tp, 5750_PLUS)) {
9445 /* Do nothing. */
9446 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9447 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9448 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9449 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9450 else
9451 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9452 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9453 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9454 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9455 int fw_len;
9456
9457 fw_len = tp->fw_len;
9458 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9459 tw32(BUFMGR_MB_POOL_ADDR,
9460 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9461 tw32(BUFMGR_MB_POOL_SIZE,
9462 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9463 }
9464
9465 if (tp->dev->mtu <= ETH_DATA_LEN) {
9466 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9467 tp->bufmgr_config.mbuf_read_dma_low_water);
9468 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9469 tp->bufmgr_config.mbuf_mac_rx_low_water);
9470 tw32(BUFMGR_MB_HIGH_WATER,
9471 tp->bufmgr_config.mbuf_high_water);
9472 } else {
9473 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9474 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9475 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9476 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9477 tw32(BUFMGR_MB_HIGH_WATER,
9478 tp->bufmgr_config.mbuf_high_water_jumbo);
9479 }
9480 tw32(BUFMGR_DMA_LOW_WATER,
9481 tp->bufmgr_config.dma_low_water);
9482 tw32(BUFMGR_DMA_HIGH_WATER,
9483 tp->bufmgr_config.dma_high_water);
9484
9485 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9486 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9487 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9488 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9489 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9490 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9491 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9492 tw32(BUFMGR_MODE, val);
9493 for (i = 0; i < 2000; i++) {
9494 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9495 break;
9496 udelay(10);
9497 }
9498 if (i >= 2000) {
9499 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9500 return -ENODEV;
9501 }
9502
9503 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9504 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9505
9506 tg3_setup_rxbd_thresholds(tp);
9507
9508 /* Initialize TG3_BDINFO's at:
9509 * RCVDBDI_STD_BD: standard eth size rx ring
9510 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9511 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9512 *
9513 * like so:
9514 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9515 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9516 * ring attribute flags
9517 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9518 *
9519 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9520 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9521 *
9522 * The size of each ring is fixed in the firmware, but the location is
9523 * configurable.
9524 */
9525 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9526 ((u64) tpr->rx_std_mapping >> 32));
9527 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9528 ((u64) tpr->rx_std_mapping & 0xffffffff));
9529 if (!tg3_flag(tp, 5717_PLUS))
9530 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9531 NIC_SRAM_RX_BUFFER_DESC);
9532
9533 /* Disable the mini ring */
9534 if (!tg3_flag(tp, 5705_PLUS))
9535 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9536 BDINFO_FLAGS_DISABLED);
9537
9538 /* Program the jumbo buffer descriptor ring control
9539 * blocks on those devices that have them.
9540 */
9541 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9542 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9543
9544 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9545 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9546 ((u64) tpr->rx_jmb_mapping >> 32));
9547 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9548 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9549 val = TG3_RX_JMB_RING_SIZE(tp) <<
9550 BDINFO_FLAGS_MAXLEN_SHIFT;
9551 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9552 val | BDINFO_FLAGS_USE_EXT_RECV);
9553 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9554 tg3_flag(tp, 57765_CLASS) ||
9555 tg3_asic_rev(tp) == ASIC_REV_5762)
9556 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9557 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9558 } else {
9559 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9560 BDINFO_FLAGS_DISABLED);
9561 }
9562
9563 if (tg3_flag(tp, 57765_PLUS)) {
9564 val = TG3_RX_STD_RING_SIZE(tp);
9565 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9566 val |= (TG3_RX_STD_DMA_SZ << 2);
9567 } else
9568 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9569 } else
9570 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9571
9572 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9573
9574 tpr->rx_std_prod_idx = tp->rx_pending;
9575 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9576
9577 tpr->rx_jmb_prod_idx =
9578 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9579 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9580
9581 tg3_rings_reset(tp);
9582
9583 /* Initialize MAC address and backoff seed. */
9584 __tg3_set_mac_addr(tp, 0);
9585
9586 /* MTU + ethernet header + FCS + optional VLAN tag */
9587 tw32(MAC_RX_MTU_SIZE,
9588 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9589
9590 /* The slot time is changed by tg3_setup_phy if we
9591 * run at gigabit with half duplex.
9592 */
9593 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9594 (6 << TX_LENGTHS_IPG_SHIFT) |
9595 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9596
9597 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9598 tg3_asic_rev(tp) == ASIC_REV_5762)
9599 val |= tr32(MAC_TX_LENGTHS) &
9600 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9601 TX_LENGTHS_CNT_DWN_VAL_MSK);
9602
9603 tw32(MAC_TX_LENGTHS, val);
9604
9605 /* Receive rules. */
9606 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9607 tw32(RCVLPC_CONFIG, 0x0181);
9608
9609 	/* Calculate the RDMAC_MODE setting early; we need it to
9610 	 * determine the RCVLPC_STATS_ENABLE mask.
9611 */
9612 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9613 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9614 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9615 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9616 RDMAC_MODE_LNGREAD_ENAB);
9617
9618 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9619 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9620
9621 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9622 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9623 tg3_asic_rev(tp) == ASIC_REV_57780)
9624 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9625 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9626 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9627
9628 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9629 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9630 if (tg3_flag(tp, TSO_CAPABLE) &&
9631 tg3_asic_rev(tp) == ASIC_REV_5705) {
9632 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9633 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9634 !tg3_flag(tp, IS_5788)) {
9635 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9636 }
9637 }
9638
9639 if (tg3_flag(tp, PCI_EXPRESS))
9640 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9641
9642 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9643 tp->dma_limit = 0;
9644 if (tp->dev->mtu <= ETH_DATA_LEN) {
9645 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9646 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9647 }
9648 }
9649
9650 if (tg3_flag(tp, HW_TSO_1) ||
9651 tg3_flag(tp, HW_TSO_2) ||
9652 tg3_flag(tp, HW_TSO_3))
9653 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9654
9655 if (tg3_flag(tp, 57765_PLUS) ||
9656 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9657 tg3_asic_rev(tp) == ASIC_REV_57780)
9658 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9659
9660 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9661 tg3_asic_rev(tp) == ASIC_REV_5762)
9662 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9663
9664 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9665 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9666 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9667 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9668 tg3_flag(tp, 57765_PLUS)) {
9669 u32 tgtreg;
9670
9671 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9672 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9673 else
9674 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9675
9676 val = tr32(tgtreg);
9677 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9678 tg3_asic_rev(tp) == ASIC_REV_5762) {
9679 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9680 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9681 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9682 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9683 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9684 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9685 }
9686 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9687 }
9688
9689 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9690 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9691 tg3_asic_rev(tp) == ASIC_REV_5762) {
9692 u32 tgtreg;
9693
9694 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9695 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9696 else
9697 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9698
9699 val = tr32(tgtreg);
9700 tw32(tgtreg, val |
9701 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9702 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9703 }
9704
9705 /* Receive/send statistics. */
9706 if (tg3_flag(tp, 5750_PLUS)) {
9707 val = tr32(RCVLPC_STATS_ENABLE);
9708 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9709 tw32(RCVLPC_STATS_ENABLE, val);
9710 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9711 tg3_flag(tp, TSO_CAPABLE)) {
9712 val = tr32(RCVLPC_STATS_ENABLE);
9713 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9714 tw32(RCVLPC_STATS_ENABLE, val);
9715 } else {
9716 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9717 }
9718 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9719 tw32(SNDDATAI_STATSENAB, 0xffffff);
9720 tw32(SNDDATAI_STATSCTRL,
9721 (SNDDATAI_SCTRL_ENABLE |
9722 SNDDATAI_SCTRL_FASTUPD));
9723
9724 	/* Set up the host coalescing engine. */
9725 tw32(HOSTCC_MODE, 0);
9726 for (i = 0; i < 2000; i++) {
9727 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9728 break;
9729 udelay(10);
9730 }
9731
9732 __tg3_set_coalesce(tp, &tp->coal);
9733
9734 if (!tg3_flag(tp, 5705_PLUS)) {
9735 /* Status/statistics block address. See tg3_timer,
9736 * the tg3_periodic_fetch_stats call there, and
9737 * tg3_get_stats to see how this works for 5705/5750 chips.
9738 */
9739 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9740 ((u64) tp->stats_mapping >> 32));
9741 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9742 ((u64) tp->stats_mapping & 0xffffffff));
9743 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9744
9745 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9746
9747 /* Clear statistics and status block memory areas */
9748 for (i = NIC_SRAM_STATS_BLK;
9749 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9750 i += sizeof(u32)) {
9751 tg3_write_mem(tp, i, 0);
9752 udelay(40);
9753 }
9754 }
9755
9756 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9757
9758 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9759 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9760 if (!tg3_flag(tp, 5705_PLUS))
9761 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9762
9763 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9764 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9765 		/* Reset to avoid intermittently losing the first rx packet */
9766 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9767 udelay(10);
9768 }
9769
9770 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9771 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9772 MAC_MODE_FHDE_ENABLE;
9773 if (tg3_flag(tp, ENABLE_APE))
9774 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9775 if (!tg3_flag(tp, 5705_PLUS) &&
9776 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9777 tg3_asic_rev(tp) != ASIC_REV_5700)
9778 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9779 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9780 udelay(40);
9781
9782 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9783 * If TG3_FLAG_IS_NIC is zero, we should read the
9784 * register to preserve the GPIO settings for LOMs. The GPIOs,
9785 * whether used as inputs or outputs, are set by boot code after
9786 * reset.
9787 */
9788 if (!tg3_flag(tp, IS_NIC)) {
9789 u32 gpio_mask;
9790
9791 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9792 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9793 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9794
9795 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9796 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9797 GRC_LCLCTRL_GPIO_OUTPUT3;
9798
9799 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9800 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9801
9802 tp->grc_local_ctrl &= ~gpio_mask;
9803 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9804
9805 /* GPIO1 must be driven high for eeprom write protect */
9806 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9807 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9808 GRC_LCLCTRL_GPIO_OUTPUT1);
9809 }
9810 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9811 udelay(100);
9812
9813 if (tg3_flag(tp, USING_MSIX)) {
9814 val = tr32(MSGINT_MODE);
9815 val |= MSGINT_MODE_ENABLE;
9816 if (tp->irq_cnt > 1)
9817 val |= MSGINT_MODE_MULTIVEC_EN;
9818 if (!tg3_flag(tp, 1SHOT_MSI))
9819 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9820 tw32(MSGINT_MODE, val);
9821 }
9822
9823 if (!tg3_flag(tp, 5705_PLUS)) {
9824 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9825 udelay(40);
9826 }
9827
9828 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9829 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9830 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9831 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9832 WDMAC_MODE_LNGREAD_ENAB);
9833
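/* 5705 workaround: RX_ACCEL may be enabled below only on parts that
 * are not TSO-capable A1/A2 revisions, not on a high-speed bus, and
 * not 5788-class hardware.
 */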
9834 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9835 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9836 if (tg3_flag(tp, TSO_CAPABLE) &&
9837 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9838 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9839 /* nothing */
9840 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9841 !tg3_flag(tp, IS_5788)) {
9842 val |= WDMAC_MODE_RX_ACCEL;
9843 }
9844 }
9845
9846 /* Enable host coalescing bug fix */
9847 if (tg3_flag(tp, 5755_PLUS))
9848 val |= WDMAC_MODE_STATUS_TAG_FIX;
9849
9850 if (tg3_asic_rev(tp) == ASIC_REV_5785)
9851 val |= WDMAC_MODE_BURST_ALL_DATA;
9852
9853 tw32_f(WDMAC_MODE, val);
9854 udelay(40);
9855
9856 if (tg3_flag(tp, PCIX_MODE)) {
9857 u16 pcix_cmd;
9858
9859 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9860 &pcix_cmd);
9861 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9862 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9863 pcix_cmd |= PCI_X_CMD_READ_2K;
9864 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9865 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9866 pcix_cmd |= PCI_X_CMD_READ_2K;
9867 }
9868 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9869 pcix_cmd);
9870 }
9871
9872 tw32_f(RDMAC_MODE, rdmac_mode);
9873 udelay(40);
9874
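/* 5719 workaround: if any RDMA channel reports a length above the
 * current MTU, enable the TX length fix. The 5719_RDMA_BUG flag is
 * cleared again in tg3_periodic_fetch_stats() once enough packets
 * have gone out.
 */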
9875 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9876 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9877 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9878 break;
9879 }
9880 if (i < TG3_NUM_RDMA_CHANNELS) {
9881 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9882 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9883 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9884 tg3_flag_set(tp, 5719_RDMA_BUG);
9885 }
9886 }
9887
9888 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9889 if (!tg3_flag(tp, 5705_PLUS))
9890 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9891
9892 if (tg3_asic_rev(tp) == ASIC_REV_5761)
9893 tw32(SNDDATAC_MODE,
9894 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9895 else
9896 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9897
9898 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9899 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9900 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9901 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9902 val |= RCVDBDI_MODE_LRG_RING_SZ;
9903 tw32(RCVDBDI_MODE, val);
9904 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9905 if (tg3_flag(tp, HW_TSO_1) ||
9906 tg3_flag(tp, HW_TSO_2) ||
9907 tg3_flag(tp, HW_TSO_3))
9908 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9909 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9910 if (tg3_flag(tp, ENABLE_TSS))
9911 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9912 tw32(SNDBDI_MODE, val);
9913 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9914
9915 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9916 err = tg3_load_5701_a0_firmware_fix(tp);
9917 if (err)
9918 return err;
9919 }
9920
9921 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9922 /* Ignore any errors for the firmware download. If download
9923 * fails, the device will operate with EEE disabled.
9924 */
9925 tg3_load_57766_firmware(tp);
9926 }
9927
9928 if (tg3_flag(tp, TSO_CAPABLE)) {
9929 err = tg3_load_tso_firmware(tp);
9930 if (err)
9931 return err;
9932 }
9933
9934 tp->tx_mode = TX_MODE_ENABLE;
9935
9936 if (tg3_flag(tp, 5755_PLUS) ||
9937 tg3_asic_rev(tp) == ASIC_REV_5906)
9938 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9939
9940 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9941 tg3_asic_rev(tp) == ASIC_REV_5762) {
9942 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9943 tp->tx_mode &= ~val;
9944 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9945 }
9946
9947 tw32_f(MAC_TX_MODE, tp->tx_mode);
9948 udelay(100);
9949
9950 if (tg3_flag(tp, ENABLE_RSS)) {
9951 tg3_rss_write_indir_tbl(tp);
9952
9953 /* Setup the "secret" hash key. */
9954 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9955 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9956 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9957 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9958 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9959 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9960 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9961 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9962 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9963 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9964 }
9965
9966 tp->rx_mode = RX_MODE_ENABLE;
9967 if (tg3_flag(tp, 5755_PLUS))
9968 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9969
9970 if (tg3_flag(tp, ENABLE_RSS))
9971 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9972 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9973 RX_MODE_RSS_IPV6_HASH_EN |
9974 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9975 RX_MODE_RSS_IPV4_HASH_EN |
9976 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9977
9978 tw32_f(MAC_RX_MODE, tp->rx_mode);
9979 udelay(10);
9980
9981 tw32(MAC_LED_CTRL, tp->led_ctrl);
9982
9983 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9984 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9985 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9986 udelay(10);
9987 }
9988 tw32_f(MAC_RX_MODE, tp->rx_mode);
9989 udelay(10);
9990
9991 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9992 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9993 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9994 /* Set drive transmission level to 1.2V */
9995 /* only if the signal pre-emphasis bit is not set */
9996 val = tr32(MAC_SERDES_CFG);
9997 val &= 0xfffff000;
9998 val |= 0x880;
9999 tw32(MAC_SERDES_CFG, val);
10000 }
10001 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10002 tw32(MAC_SERDES_CFG, 0x616000);
10003 }
10004
10005 /* Prevent chip from dropping frames when flow control
10006 * is enabled.
10007 */
10008 if (tg3_flag(tp, 57765_CLASS))
10009 val = 1;
10010 else
10011 val = 2;
10012 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10013
10014 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10015 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10016 /* Use hardware link auto-negotiation */
10017 tg3_flag_set(tp, HW_AUTONEG);
10018 }
10019
10020 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10021 tg3_asic_rev(tp) == ASIC_REV_5714) {
10022 u32 tmp;
10023
10024 tmp = tr32(SERDES_RX_CTRL);
10025 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10026 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10027 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10028 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10029 }
10030
10031 if (!tg3_flag(tp, USE_PHYLIB)) {
10032 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10033 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10034
10035 err = tg3_setup_phy(tp, 0);
10036 if (err)
10037 return err;
10038
10039 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10040 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10041 u32 tmp;
10042
10043 /* Clear CRC stats. */
10044 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10045 tg3_writephy(tp, MII_TG3_TEST1,
10046 tmp | MII_TG3_TEST1_CRC_EN);
10047 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10048 }
10049 }
10050 }
10051
10052 __tg3_set_rx_mode(tp->dev);
10053
10054 /* Initialize receive rules. */
10055 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10056 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10057 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10058 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10059
10060 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10061 limit = 8;
10062 else
10063 limit = 16;
10064 if (tg3_flag(tp, ENABLE_ASF))
10065 limit -= 4;
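/* Each case falls through, zeroing every rule from limit - 1 down
 * to rule 4. Rules 0 and 1 were programmed above; 2 and 3 are left
 * untouched.
 */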
10066 switch (limit) {
10067 case 16:
10068 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10069 case 15:
10070 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10071 case 14:
10072 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10073 case 13:
10074 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10075 case 12:
10076 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10077 case 11:
10078 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10079 case 10:
10080 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10081 case 9:
10082 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10083 case 8:
10084 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10085 case 7:
10086 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10087 case 6:
10088 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10089 case 5:
10090 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10091 case 4:
10092 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10093 case 3:
10094 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10095 case 2:
10096 case 1:
10097
10098 default:
10099 break;
10100 }
10101
10102 if (tg3_flag(tp, ENABLE_APE))
10103 /* Write our heartbeat update interval to APE. */
10104 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10105 APE_HOST_HEARTBEAT_INT_DISABLE);
10106
10107 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10108
10109 return 0;
10110 }
10111
10112 /* Called at device open time to get the chip ready for
10113 * packet processing. Invoked with tp->lock held.
10114 */
10115 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
10116 {
10117 tg3_switch_clocks(tp);
10118
10119 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10120
10121 return tg3_reset_hw(tp, reset_phy);
10122 }
10123
10124 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10125 {
10126 int i;
10127
10128 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10129 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10130
10131 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10132 off += len;
10133
10134 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10135 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10136 memset(ocir, 0, TG3_OCIR_LEN);
10137 }
10138 }
10139
10140 /* sysfs attributes for hwmon */
10141 static ssize_t tg3_show_temp(struct device *dev,
10142 struct device_attribute *devattr, char *buf)
10143 {
10144 struct pci_dev *pdev = to_pci_dev(dev);
10145 struct net_device *netdev = pci_get_drvdata(pdev);
10146 struct tg3 *tp = netdev_priv(netdev);
10147 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10148 u32 temperature;
10149
10150 spin_lock_bh(&tp->lock);
10151 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10152 sizeof(temperature));
10153 spin_unlock_bh(&tp->lock);
10154 return sprintf(buf, "%u\n", temperature);
10155 }
10156
10158 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10159 TG3_TEMP_SENSOR_OFFSET);
10160 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10161 TG3_TEMP_CAUTION_OFFSET);
10162 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10163 TG3_TEMP_MAX_OFFSET);
10164
10165 static struct attribute *tg3_attributes[] = {
10166 &sensor_dev_attr_temp1_input.dev_attr.attr,
10167 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10168 &sensor_dev_attr_temp1_max.dev_attr.attr,
10169 NULL
10170 };
10171
10172 static const struct attribute_group tg3_group = {
10173 .attrs = tg3_attributes,
10174 };
10175
10176 static void tg3_hwmon_close(struct tg3 *tp)
10177 {
10178 if (tp->hwmon_dev) {
10179 hwmon_device_unregister(tp->hwmon_dev);
10180 tp->hwmon_dev = NULL;
10181 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10182 }
10183 }
10184
10185 static void tg3_hwmon_open(struct tg3 *tp)
10186 {
10187 int i, err;
10188 u32 size = 0;
10189 struct pci_dev *pdev = tp->pdev;
10190 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10191
10192 tg3_sd_scan_scratchpad(tp, ocirs);
10193
10194 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10195 if (!ocirs[i].src_data_length)
10196 continue;
10197
10198 size += ocirs[i].src_hdr_length;
10199 size += ocirs[i].src_data_length;
10200 }
10201
10202 if (!size)
10203 return;
10204
10205 /* Register hwmon sysfs hooks */
10206 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10207 if (err) {
10208 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10209 return;
10210 }
10211
10212 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10213 if (IS_ERR(tp->hwmon_dev)) {
10214 tp->hwmon_dev = NULL;
10215 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10216 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10217 }
10218 }
10219
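/* Fold a 32-bit hardware counter into a 64-bit high/low pair,
 * bumping the high word whenever the low-word addition wraps.
 */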
10221 #define TG3_STAT_ADD32(PSTAT, REG) \
10222 do { u32 __val = tr32(REG); \
10223 (PSTAT)->low += __val; \
10224 if ((PSTAT)->low < __val) \
10225 (PSTAT)->high += 1; \
10226 } while (0)
10227
10228 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10229 {
10230 struct tg3_hw_stats *sp = tp->hw_stats;
10231
10232 if (!tp->link_up)
10233 return;
10234
10235 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10236 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10237 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10238 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10239 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10240 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10241 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10242 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10243 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10244 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10245 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10246 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10247 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10248 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10249 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10250 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10251 u32 val;
10252
10253 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10254 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10255 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10256 tg3_flag_clear(tp, 5719_RDMA_BUG);
10257 }
10258
10259 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10260 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10261 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10262 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10263 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10264 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10265 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10266 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10267 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10268 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10269 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10270 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10271 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10272 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10273
10274 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10275 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10276 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10277 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10278 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10279 } else {
10280 u32 val = tr32(HOSTCC_FLOW_ATTN);
10281 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10282 if (val) {
10283 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10284 sp->rx_discards.low += val;
10285 if (sp->rx_discards.low < val)
10286 sp->rx_discards.high += 1;
10287 }
10288 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10289 }
10290 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10291 }
10292
10293 static void tg3_chk_missed_msi(struct tg3 *tp)
10294 {
10295 u32 i;
10296
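/* If a vector has work pending but its consumer indices have not
 * moved since the previous tick, assume the MSI was missed and
 * invoke the handler directly.
 */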
10297 for (i = 0; i < tp->irq_cnt; i++) {
10298 struct tg3_napi *tnapi = &tp->napi[i];
10299
10300 if (tg3_has_work(tnapi)) {
10301 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10302 tnapi->last_tx_cons == tnapi->tx_cons) {
10303 if (tnapi->chk_msi_cnt < 1) {
10304 tnapi->chk_msi_cnt++;
10305 return;
10306 }
10307 tg3_msi(0, tnapi);
10308 }
10309 }
10310 tnapi->chk_msi_cnt = 0;
10311 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10312 tnapi->last_tx_cons = tnapi->tx_cons;
10313 }
10314 }
10315
10316 static void tg3_timer(unsigned long __opaque)
10317 {
10318 struct tg3 *tp = (struct tg3 *) __opaque;
10319
10320 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10321 goto restart_timer;
10322
10323 spin_lock(&tp->lock);
10324
10325 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10326 tg3_flag(tp, 57765_CLASS))
10327 tg3_chk_missed_msi(tp);
10328
10329 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10330 /* BCM4785: Flush posted writes from GbE to host memory. */
10331 tr32(HOSTCC_MODE);
10332 }
10333
10334 if (!tg3_flag(tp, TAGGED_STATUS)) {
10335 /* All of this garbage is because, when using non-tagged
10336 * IRQ status, the mailbox/status_block protocol the chip
10337 * uses with the CPU is race prone.
10338 */
10339 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10340 tw32(GRC_LOCAL_CTRL,
10341 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10342 } else {
10343 tw32(HOSTCC_MODE, tp->coalesce_mode |
10344 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10345 }
10346
10347 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10348 spin_unlock(&tp->lock);
10349 tg3_reset_task_schedule(tp);
10350 goto restart_timer;
10351 }
10352 }
10353
10354 /* This part only runs once per second. */
10355 if (!--tp->timer_counter) {
10356 if (tg3_flag(tp, 5705_PLUS))
10357 tg3_periodic_fetch_stats(tp);
10358
10359 if (tp->setlpicnt && !--tp->setlpicnt)
10360 tg3_phy_eee_enable(tp);
10361
10362 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10363 u32 mac_stat;
10364 int phy_event;
10365
10366 mac_stat = tr32(MAC_STATUS);
10367
10368 phy_event = 0;
10369 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10370 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10371 phy_event = 1;
10372 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10373 phy_event = 1;
10374
10375 if (phy_event)
10376 tg3_setup_phy(tp, 0);
10377 } else if (tg3_flag(tp, POLL_SERDES)) {
10378 u32 mac_stat = tr32(MAC_STATUS);
10379 int need_setup = 0;
10380
10381 if (tp->link_up &&
10382 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10383 need_setup = 1;
10384 }
10385 if (!tp->link_up &&
10386 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10387 MAC_STATUS_SIGNAL_DET))) {
10388 need_setup = 1;
10389 }
10390 if (need_setup) {
10391 if (!tp->serdes_counter) {
10392 tw32_f(MAC_MODE,
10393 (tp->mac_mode &
10394 ~MAC_MODE_PORT_MODE_MASK));
10395 udelay(40);
10396 tw32_f(MAC_MODE, tp->mac_mode);
10397 udelay(40);
10398 }
10399 tg3_setup_phy(tp, 0);
10400 }
10401 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10402 tg3_flag(tp, 5780_CLASS)) {
10403 tg3_serdes_parallel_detect(tp);
10404 }
10405
10406 tp->timer_counter = tp->timer_multiplier;
10407 }
10408
10409 /* Heartbeat is only sent once every 2 seconds.
10410 *
10411 * The heartbeat is to tell the ASF firmware that the host
10412 * driver is still alive. In the event that the OS crashes,
10413 * ASF needs to reset the hardware to free up the FIFO space
10414 * that may be filled with rx packets destined for the host.
10415 * If the FIFO is full, ASF will no longer function properly.
10416 *
10417 * Unintended resets have been reported on real time kernels
10418 * where the timer doesn't run on time. Netpoll will also have
10419 * the same problem.
10420 *
10421 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10422 * to check the ring condition when the heartbeat is expiring
10423 * before doing the reset. This will prevent most unintended
10424 * resets.
10425 */
10426 if (!--tp->asf_counter) {
10427 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10428 tg3_wait_for_event_ack(tp);
10429
10430 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10431 FWCMD_NICDRV_ALIVE3);
10432 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10433 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10434 TG3_FW_UPDATE_TIMEOUT_SEC);
10435
10436 tg3_generate_fw_event(tp);
10437 }
10438 tp->asf_counter = tp->asf_multiplier;
10439 }
10440
10441 spin_unlock(&tp->lock);
10442
10443 restart_timer:
10444 tp->timer.expires = jiffies + tp->timer_offset;
10445 add_timer(&tp->timer);
10446 }
10447
10448 static void tg3_timer_init(struct tg3 *tp)
10449 {
10450 if (tg3_flag(tp, TAGGED_STATUS) &&
10451 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10452 !tg3_flag(tp, 57765_CLASS))
10453 tp->timer_offset = HZ;
10454 else
10455 tp->timer_offset = HZ / 10;
10456
10457 BUG_ON(tp->timer_offset > HZ);
10458
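/* timer_multiplier converts timer ticks into one-second events;
 * asf_multiplier converts them into the ASF heartbeat period.
 */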
10459 tp->timer_multiplier = (HZ / tp->timer_offset);
10460 tp->asf_multiplier = (HZ / tp->timer_offset) *
10461 TG3_FW_UPDATE_FREQ_SEC;
10462
10463 init_timer(&tp->timer);
10464 tp->timer.data = (unsigned long) tp;
10465 tp->timer.function = tg3_timer;
10466 }
10467
10468 static void tg3_timer_start(struct tg3 *tp)
10469 {
10470 tp->asf_counter = tp->asf_multiplier;
10471 tp->timer_counter = tp->timer_multiplier;
10472
10473 tp->timer.expires = jiffies + tp->timer_offset;
10474 add_timer(&tp->timer);
10475 }
10476
10477 static void tg3_timer_stop(struct tg3 *tp)
10478 {
10479 del_timer_sync(&tp->timer);
10480 }
10481
10482 /* Restart hardware after configuration changes, self-test, etc.
10483 * Invoked with tp->lock held.
10484 */
10485 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10486 __releases(tp->lock)
10487 __acquires(tp->lock)
10488 {
10489 int err;
10490
10491 err = tg3_init_hw(tp, reset_phy);
10492 if (err) {
10493 netdev_err(tp->dev,
10494 "Failed to re-initialize device, aborting\n");
10495 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10496 tg3_full_unlock(tp);
10497 tg3_timer_stop(tp);
10498 tp->irq_sync = 0;
10499 tg3_napi_enable(tp);
10500 dev_close(tp->dev);
10501 tg3_full_lock(tp, 0);
10502 }
10503 return err;
10504 }
10505
10506 static void tg3_reset_task(struct work_struct *work)
10507 {
10508 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10509 int err;
10510
10511 tg3_full_lock(tp, 0);
10512
10513 if (!netif_running(tp->dev)) {
10514 tg3_flag_clear(tp, RESET_TASK_PENDING);
10515 tg3_full_unlock(tp);
10516 return;
10517 }
10518
10519 tg3_full_unlock(tp);
10520
10521 tg3_phy_stop(tp);
10522
10523 tg3_netif_stop(tp);
10524
10525 tg3_full_lock(tp, 1);
10526
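/* A TX timeout may be caused by posted mailbox writes arriving out
 * of order; from here on use flushed mailbox writes instead.
 */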
10527 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10528 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10529 tp->write32_rx_mbox = tg3_write_flush_reg32;
10530 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10531 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10532 }
10533
10534 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10535 err = tg3_init_hw(tp, 1);
10536 if (err)
10537 goto out;
10538
10539 tg3_netif_start(tp);
10540
10541 out:
10542 tg3_full_unlock(tp);
10543
10544 if (!err)
10545 tg3_phy_start(tp);
10546
10547 tg3_flag_clear(tp, RESET_TASK_PENDING);
10548 }
10549
10550 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10551 {
10552 irq_handler_t fn;
10553 unsigned long flags;
10554 char *name;
10555 struct tg3_napi *tnapi = &tp->napi[irq_num];
10556
10557 if (tp->irq_cnt == 1)
10558 name = tp->dev->name;
10559 else {
10560 name = &tnapi->irq_lbl[0];
10561 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10562 name[IFNAMSIZ-1] = 0;
10563 }
10564
10565 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10566 fn = tg3_msi;
10567 if (tg3_flag(tp, 1SHOT_MSI))
10568 fn = tg3_msi_1shot;
10569 flags = 0;
10570 } else {
10571 fn = tg3_interrupt;
10572 if (tg3_flag(tp, TAGGED_STATUS))
10573 fn = tg3_interrupt_tagged;
10574 flags = IRQF_SHARED;
10575 }
10576
10577 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10578 }
10579
10580 static int tg3_test_interrupt(struct tg3 *tp)
10581 {
10582 struct tg3_napi *tnapi = &tp->napi[0];
10583 struct net_device *dev = tp->dev;
10584 int err, i, intr_ok = 0;
10585 u32 val;
10586
10587 if (!netif_running(dev))
10588 return -ENODEV;
10589
10590 tg3_disable_ints(tp);
10591
10592 free_irq(tnapi->irq_vec, tnapi);
10593
10594 /*
10595 * Turn off MSI one shot mode. Otherwise this test has no
10596 * way to observe whether the interrupt was delivered.
10597 */
10598 if (tg3_flag(tp, 57765_PLUS)) {
10599 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10600 tw32(MSGINT_MODE, val);
10601 }
10602
10603 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10604 IRQF_SHARED, dev->name, tnapi);
10605 if (err)
10606 return err;
10607
10608 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10609 tg3_enable_ints(tp);
10610
10611 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10612 tnapi->coal_now);
10613
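/* Poll for up to ~50 ms: a non-zero interrupt mailbox, or a masked
 * PCI interrupt line, indicates the test interrupt was delivered.
 */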
10614 for (i = 0; i < 5; i++) {
10615 u32 int_mbox, misc_host_ctrl;
10616
10617 int_mbox = tr32_mailbox(tnapi->int_mbox);
10618 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10619
10620 if ((int_mbox != 0) ||
10621 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10622 intr_ok = 1;
10623 break;
10624 }
10625
10626 if (tg3_flag(tp, 57765_PLUS) &&
10627 tnapi->hw_status->status_tag != tnapi->last_tag)
10628 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10629
10630 msleep(10);
10631 }
10632
10633 tg3_disable_ints(tp);
10634
10635 free_irq(tnapi->irq_vec, tnapi);
10636
10637 err = tg3_request_irq(tp, 0);
10638
10639 if (err)
10640 return err;
10641
10642 if (intr_ok) {
10643 /* Reenable MSI one shot mode. */
10644 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10645 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10646 tw32(MSGINT_MODE, val);
10647 }
10648 return 0;
10649 }
10650
10651 return -EIO;
10652 }
10653
10654 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
10655 * is successfully restored.
10656 */
10657 static int tg3_test_msi(struct tg3 *tp)
10658 {
10659 int err;
10660 u16 pci_cmd;
10661
10662 if (!tg3_flag(tp, USING_MSI))
10663 return 0;
10664
10665 /* Turn off SERR reporting in case MSI terminates with Master
10666 * Abort.
10667 */
10668 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10669 pci_write_config_word(tp->pdev, PCI_COMMAND,
10670 pci_cmd & ~PCI_COMMAND_SERR);
10671
10672 err = tg3_test_interrupt(tp);
10673
10674 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10675
10676 if (!err)
10677 return 0;
10678
10679 /* other failures */
10680 if (err != -EIO)
10681 return err;
10682
10683 /* MSI test failed, go back to INTx mode */
10684 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10685 "to INTx mode. Please report this failure to the PCI "
10686 "maintainer and include system chipset information\n");
10687
10688 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10689
10690 pci_disable_msi(tp->pdev);
10691
10692 tg3_flag_clear(tp, USING_MSI);
10693 tp->napi[0].irq_vec = tp->pdev->irq;
10694
10695 err = tg3_request_irq(tp, 0);
10696 if (err)
10697 return err;
10698
10699 /* Need to reset the chip because the MSI cycle may have terminated
10700 * with Master Abort.
10701 */
10702 tg3_full_lock(tp, 1);
10703
10704 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10705 err = tg3_init_hw(tp, 1);
10706
10707 tg3_full_unlock(tp);
10708
10709 if (err)
10710 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10711
10712 return err;
10713 }
10714
10715 static int tg3_request_firmware(struct tg3 *tp)
10716 {
10717 const struct tg3_firmware_hdr *fw_hdr;
10718
10719 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10720 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10721 tp->fw_needed);
10722 return -ENOENT;
10723 }
10724
10725 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10726
10727 /* Firmware blob starts with version numbers, followed by
10728 * start address and _full_ length including BSS sections
10729 * (which must be longer than the actual data, of course).
10730 */
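/* Layout sketch, per struct tg3_firmware_hdr in tg3.h:
 *   __be32 version; __be32 base_addr; __be32 len;
 * with the firmware payload following immediately after.
 */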
10731
10732 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10733 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10734 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10735 tp->fw_len, tp->fw_needed);
10736 release_firmware(tp->fw);
10737 tp->fw = NULL;
10738 return -EINVAL;
10739 }
10740
10741 /* We no longer need firmware; we have it. */
10742 tp->fw_needed = NULL;
10743 return 0;
10744 }
10745
10746 static u32 tg3_irq_count(struct tg3 *tp)
10747 {
10748 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10749
10750 if (irq_cnt > 1) {
10751 /* We want as many rx rings enabled as there are cpus.
10752 * In multiqueue MSI-X mode, the first MSI-X vector
10753 * only deals with link interrupts, etc, so we add
10754 * one to the number of vectors we are requesting.
10755 */
10756 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10757 }
10758
10759 return irq_cnt;
10760 }
10761
10762 static bool tg3_enable_msix(struct tg3 *tp)
10763 {
10764 int i, rc;
10765 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10766
10767 tp->txq_cnt = tp->txq_req;
10768 tp->rxq_cnt = tp->rxq_req;
10769 if (!tp->rxq_cnt)
10770 tp->rxq_cnt = netif_get_num_default_rss_queues();
10771 if (tp->rxq_cnt > tp->rxq_max)
10772 tp->rxq_cnt = tp->rxq_max;
10773
10774 /* Disable multiple TX rings by default. Simple round-robin hardware
10775 * scheduling of the TX rings can cause starvation of rings with
10776 * small packets when other rings have TSO or jumbo packets.
10777 */
10778 if (!tp->txq_req)
10779 tp->txq_cnt = 1;
10780
10781 tp->irq_cnt = tg3_irq_count(tp);
10782
10783 for (i = 0; i < tp->irq_max; i++) {
10784 msix_ent[i].entry = i;
10785 msix_ent[i].vector = 0;
10786 }
10787
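/* pci_enable_msix() returns 0 on success, a negative errno on
 * failure, or a positive count of vectors actually available; in
 * the last case, retry with that reduced count.
 */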
10788 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10789 if (rc < 0) {
10790 return false;
10791 } else if (rc != 0) {
10792 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10793 return false;
10794 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10795 tp->irq_cnt, rc);
10796 tp->irq_cnt = rc;
10797 tp->rxq_cnt = max(rc - 1, 1);
10798 if (tp->txq_cnt)
10799 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10800 }
10801
10802 for (i = 0; i < tp->irq_max; i++)
10803 tp->napi[i].irq_vec = msix_ent[i].vector;
10804
10805 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10806 pci_disable_msix(tp->pdev);
10807 return false;
10808 }
10809
10810 if (tp->irq_cnt == 1)
10811 return true;
10812
10813 tg3_flag_set(tp, ENABLE_RSS);
10814
10815 if (tp->txq_cnt > 1)
10816 tg3_flag_set(tp, ENABLE_TSS);
10817
10818 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10819
10820 return true;
10821 }
10822
10823 static void tg3_ints_init(struct tg3 *tp)
10824 {
10825 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10826 !tg3_flag(tp, TAGGED_STATUS)) {
10827 /* All MSI-supporting chips should support tagged
10828 * status. Warn and fall back to INTx if not.
10829 */
10830 netdev_warn(tp->dev,
10831 "MSI without TAGGED_STATUS? Not using MSI\n");
10832 goto defcfg;
10833 }
10834
10835 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10836 tg3_flag_set(tp, USING_MSIX);
10837 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10838 tg3_flag_set(tp, USING_MSI);
10839
10840 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10841 u32 msi_mode = tr32(MSGINT_MODE);
10842 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10843 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10844 if (!tg3_flag(tp, 1SHOT_MSI))
10845 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10846 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10847 }
10848 defcfg:
10849 if (!tg3_flag(tp, USING_MSIX)) {
10850 tp->irq_cnt = 1;
10851 tp->napi[0].irq_vec = tp->pdev->irq;
10852 }
10853
10854 if (tp->irq_cnt == 1) {
10855 tp->txq_cnt = 1;
10856 tp->rxq_cnt = 1;
10857 netif_set_real_num_tx_queues(tp->dev, 1);
10858 netif_set_real_num_rx_queues(tp->dev, 1);
10859 }
10860 }
10861
10862 static void tg3_ints_fini(struct tg3 *tp)
10863 {
10864 if (tg3_flag(tp, USING_MSIX))
10865 pci_disable_msix(tp->pdev);
10866 else if (tg3_flag(tp, USING_MSI))
10867 pci_disable_msi(tp->pdev);
10868 tg3_flag_clear(tp, USING_MSI);
10869 tg3_flag_clear(tp, USING_MSIX);
10870 tg3_flag_clear(tp, ENABLE_RSS);
10871 tg3_flag_clear(tp, ENABLE_TSS);
10872 }
10873
10874 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10875 bool init)
10876 {
10877 struct net_device *dev = tp->dev;
10878 int i, err;
10879
10880 /*
10881 * Setup interrupts first so we know how
10882 * many NAPI resources to allocate
10883 */
10884 tg3_ints_init(tp);
10885
10886 tg3_rss_check_indir_tbl(tp);
10887
10888 /* The placement of this call is tied
10889 * to the setup and use of Host TX descriptors.
10890 */
10891 err = tg3_alloc_consistent(tp);
10892 if (err)
10893 goto err_out1;
10894
10895 tg3_napi_init(tp);
10896
10897 tg3_napi_enable(tp);
10898
10899 for (i = 0; i < tp->irq_cnt; i++) {
10900 struct tg3_napi *tnapi = &tp->napi[i];
10901 err = tg3_request_irq(tp, i);
10902 if (err) {
10903 for (i--; i >= 0; i--) {
10904 tnapi = &tp->napi[i];
10905 free_irq(tnapi->irq_vec, tnapi);
10906 }
10907 goto err_out2;
10908 }
10909 }
10910
10911 tg3_full_lock(tp, 0);
10912
10913 err = tg3_init_hw(tp, reset_phy);
10914 if (err) {
10915 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10916 tg3_free_rings(tp);
10917 }
10918
10919 tg3_full_unlock(tp);
10920
10921 if (err)
10922 goto err_out3;
10923
10924 if (test_irq && tg3_flag(tp, USING_MSI)) {
10925 err = tg3_test_msi(tp);
10926
10927 if (err) {
10928 tg3_full_lock(tp, 0);
10929 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10930 tg3_free_rings(tp);
10931 tg3_full_unlock(tp);
10932
10933 goto err_out2;
10934 }
10935
10936 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10937 u32 val = tr32(PCIE_TRANSACTION_CFG);
10938
10939 tw32(PCIE_TRANSACTION_CFG,
10940 val | PCIE_TRANS_CFG_1SHOT_MSI);
10941 }
10942 }
10943
10944 tg3_phy_start(tp);
10945
10946 tg3_hwmon_open(tp);
10947
10948 tg3_full_lock(tp, 0);
10949
10950 tg3_timer_start(tp);
10951 tg3_flag_set(tp, INIT_COMPLETE);
10952 tg3_enable_ints(tp);
10953
10954 if (init)
10955 tg3_ptp_init(tp);
10956 else
10957 tg3_ptp_resume(tp);
10958
10960 tg3_full_unlock(tp);
10961
10962 netif_tx_start_all_queues(dev);
10963
10964 /*
10965 * Reset the loopback feature if it was turned on while the device was
10966 * down, to make sure that it's installed properly now.
10967 */
10968 if (dev->features & NETIF_F_LOOPBACK)
10969 tg3_set_loopback(dev, dev->features);
10970
10971 return 0;
10972
10973 err_out3:
10974 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10975 struct tg3_napi *tnapi = &tp->napi[i];
10976 free_irq(tnapi->irq_vec, tnapi);
10977 }
10978
10979 err_out2:
10980 tg3_napi_disable(tp);
10981 tg3_napi_fini(tp);
10982 tg3_free_consistent(tp);
10983
10984 err_out1:
10985 tg3_ints_fini(tp);
10986
10987 return err;
10988 }
10989
10990 static void tg3_stop(struct tg3 *tp)
10991 {
10992 int i;
10993
10994 tg3_reset_task_cancel(tp);
10995 tg3_netif_stop(tp);
10996
10997 tg3_timer_stop(tp);
10998
10999 tg3_hwmon_close(tp);
11000
11001 tg3_phy_stop(tp);
11002
11003 tg3_full_lock(tp, 1);
11004
11005 tg3_disable_ints(tp);
11006
11007 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11008 tg3_free_rings(tp);
11009 tg3_flag_clear(tp, INIT_COMPLETE);
11010
11011 tg3_full_unlock(tp);
11012
11013 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11014 struct tg3_napi *tnapi = &tp->napi[i];
11015 free_irq(tnapi->irq_vec, tnapi);
11016 }
11017
11018 tg3_ints_fini(tp);
11019
11020 tg3_napi_fini(tp);
11021
11022 tg3_free_consistent(tp);
11023 }
11024
11025 static int tg3_open(struct net_device *dev)
11026 {
11027 struct tg3 *tp = netdev_priv(dev);
11028 int err;
11029
11030 if (tp->fw_needed) {
11031 err = tg3_request_firmware(tp);
11032 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11033 if (err) {
11034 netdev_warn(tp->dev, "EEE capability disabled\n");
11035 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11036 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11037 netdev_warn(tp->dev, "EEE capability restored\n");
11038 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11039 }
11040 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11041 if (err)
11042 return err;
11043 } else if (err) {
11044 netdev_warn(tp->dev, "TSO capability disabled\n");
11045 tg3_flag_clear(tp, TSO_CAPABLE);
11046 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11047 netdev_notice(tp->dev, "TSO capability restored\n");
11048 tg3_flag_set(tp, TSO_CAPABLE);
11049 }
11050 }
11051
11052 tg3_carrier_off(tp);
11053
11054 err = tg3_power_up(tp);
11055 if (err)
11056 return err;
11057
11058 tg3_full_lock(tp, 0);
11059
11060 tg3_disable_ints(tp);
11061 tg3_flag_clear(tp, INIT_COMPLETE);
11062
11063 tg3_full_unlock(tp);
11064
11065 err = tg3_start(tp, true, true, true);
11066 if (err) {
11067 tg3_frob_aux_power(tp, false);
11068 pci_set_power_state(tp->pdev, PCI_D3hot);
11069 }
11070
11071 if (tg3_flag(tp, PTP_CAPABLE)) {
11072 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11073 &tp->pdev->dev);
11074 if (IS_ERR(tp->ptp_clock))
11075 tp->ptp_clock = NULL;
11076 }
11077
11078 return err;
11079 }
11080
11081 static int tg3_close(struct net_device *dev)
11082 {
11083 struct tg3 *tp = netdev_priv(dev);
11084
11085 tg3_ptp_fini(tp);
11086
11087 tg3_stop(tp);
11088
11089 /* Clear stats across close / open calls */
11090 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11091 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11092
11093 tg3_power_down(tp);
11094
11095 tg3_carrier_off(tp);
11096
11097 return 0;
11098 }
11099
11100 static inline u64 get_stat64(tg3_stat64_t *val)
11101 {
11102 return ((u64)val->high << 32) | ((u64)val->low);
11103 }
11104
11105 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11106 {
11107 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11108
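/* 5700/5701 copper devices count CRC errors in a PHY test register
 * rather than in the MAC statistics block.
 */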
11109 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11110 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11111 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11112 u32 val;
11113
11114 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11115 tg3_writephy(tp, MII_TG3_TEST1,
11116 val | MII_TG3_TEST1_CRC_EN);
11117 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11118 } else
11119 val = 0;
11120
11121 tp->phy_crc_errors += val;
11122
11123 return tp->phy_crc_errors;
11124 }
11125
11126 return get_stat64(&hw_stats->rx_fcs_errors);
11127 }
11128
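/* Fold the live hardware counter into the running total carried in
 * tp->estats_prev across chip resets.
 */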
11129 #define ESTAT_ADD(member) \
11130 estats->member = old_estats->member + \
11131 get_stat64(&hw_stats->member)
11132
11133 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11134 {
11135 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11136 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11137
11138 ESTAT_ADD(rx_octets);
11139 ESTAT_ADD(rx_fragments);
11140 ESTAT_ADD(rx_ucast_packets);
11141 ESTAT_ADD(rx_mcast_packets);
11142 ESTAT_ADD(rx_bcast_packets);
11143 ESTAT_ADD(rx_fcs_errors);
11144 ESTAT_ADD(rx_align_errors);
11145 ESTAT_ADD(rx_xon_pause_rcvd);
11146 ESTAT_ADD(rx_xoff_pause_rcvd);
11147 ESTAT_ADD(rx_mac_ctrl_rcvd);
11148 ESTAT_ADD(rx_xoff_entered);
11149 ESTAT_ADD(rx_frame_too_long_errors);
11150 ESTAT_ADD(rx_jabbers);
11151 ESTAT_ADD(rx_undersize_packets);
11152 ESTAT_ADD(rx_in_length_errors);
11153 ESTAT_ADD(rx_out_length_errors);
11154 ESTAT_ADD(rx_64_or_less_octet_packets);
11155 ESTAT_ADD(rx_65_to_127_octet_packets);
11156 ESTAT_ADD(rx_128_to_255_octet_packets);
11157 ESTAT_ADD(rx_256_to_511_octet_packets);
11158 ESTAT_ADD(rx_512_to_1023_octet_packets);
11159 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11160 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11161 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11162 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11163 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11164
11165 ESTAT_ADD(tx_octets);
11166 ESTAT_ADD(tx_collisions);
11167 ESTAT_ADD(tx_xon_sent);
11168 ESTAT_ADD(tx_xoff_sent);
11169 ESTAT_ADD(tx_flow_control);
11170 ESTAT_ADD(tx_mac_errors);
11171 ESTAT_ADD(tx_single_collisions);
11172 ESTAT_ADD(tx_mult_collisions);
11173 ESTAT_ADD(tx_deferred);
11174 ESTAT_ADD(tx_excessive_collisions);
11175 ESTAT_ADD(tx_late_collisions);
11176 ESTAT_ADD(tx_collide_2times);
11177 ESTAT_ADD(tx_collide_3times);
11178 ESTAT_ADD(tx_collide_4times);
11179 ESTAT_ADD(tx_collide_5times);
11180 ESTAT_ADD(tx_collide_6times);
11181 ESTAT_ADD(tx_collide_7times);
11182 ESTAT_ADD(tx_collide_8times);
11183 ESTAT_ADD(tx_collide_9times);
11184 ESTAT_ADD(tx_collide_10times);
11185 ESTAT_ADD(tx_collide_11times);
11186 ESTAT_ADD(tx_collide_12times);
11187 ESTAT_ADD(tx_collide_13times);
11188 ESTAT_ADD(tx_collide_14times);
11189 ESTAT_ADD(tx_collide_15times);
11190 ESTAT_ADD(tx_ucast_packets);
11191 ESTAT_ADD(tx_mcast_packets);
11192 ESTAT_ADD(tx_bcast_packets);
11193 ESTAT_ADD(tx_carrier_sense_errors);
11194 ESTAT_ADD(tx_discards);
11195 ESTAT_ADD(tx_errors);
11196
11197 ESTAT_ADD(dma_writeq_full);
11198 ESTAT_ADD(dma_write_prioq_full);
11199 ESTAT_ADD(rxbds_empty);
11200 ESTAT_ADD(rx_discards);
11201 ESTAT_ADD(rx_errors);
11202 ESTAT_ADD(rx_threshold_hit);
11203
11204 ESTAT_ADD(dma_readq_full);
11205 ESTAT_ADD(dma_read_prioq_full);
11206 ESTAT_ADD(tx_comp_queue_full);
11207
11208 ESTAT_ADD(ring_set_send_prod_index);
11209 ESTAT_ADD(ring_status_update);
11210 ESTAT_ADD(nic_irqs);
11211 ESTAT_ADD(nic_avoided_irqs);
11212 ESTAT_ADD(nic_tx_threshold_hit);
11213
11214 ESTAT_ADD(mbuf_lwm_thresh_hit);
11215 }
11216
11217 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11218 {
11219 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11220 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11221
11222 stats->rx_packets = old_stats->rx_packets +
11223 get_stat64(&hw_stats->rx_ucast_packets) +
11224 get_stat64(&hw_stats->rx_mcast_packets) +
11225 get_stat64(&hw_stats->rx_bcast_packets);
11226
11227 stats->tx_packets = old_stats->tx_packets +
11228 get_stat64(&hw_stats->tx_ucast_packets) +
11229 get_stat64(&hw_stats->tx_mcast_packets) +
11230 get_stat64(&hw_stats->tx_bcast_packets);
11231
11232 stats->rx_bytes = old_stats->rx_bytes +
11233 get_stat64(&hw_stats->rx_octets);
11234 stats->tx_bytes = old_stats->tx_bytes +
11235 get_stat64(&hw_stats->tx_octets);
11236
11237 stats->rx_errors = old_stats->rx_errors +
11238 get_stat64(&hw_stats->rx_errors);
11239 stats->tx_errors = old_stats->tx_errors +
11240 get_stat64(&hw_stats->tx_errors) +
11241 get_stat64(&hw_stats->tx_mac_errors) +
11242 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11243 get_stat64(&hw_stats->tx_discards);
11244
11245 stats->multicast = old_stats->multicast +
11246 get_stat64(&hw_stats->rx_mcast_packets);
11247 stats->collisions = old_stats->collisions +
11248 get_stat64(&hw_stats->tx_collisions);
11249
11250 stats->rx_length_errors = old_stats->rx_length_errors +
11251 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11252 get_stat64(&hw_stats->rx_undersize_packets);
11253
11254 stats->rx_over_errors = old_stats->rx_over_errors +
11255 get_stat64(&hw_stats->rxbds_empty);
11256 stats->rx_frame_errors = old_stats->rx_frame_errors +
11257 get_stat64(&hw_stats->rx_align_errors);
11258 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11259 get_stat64(&hw_stats->tx_discards);
11260 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11261 get_stat64(&hw_stats->tx_carrier_sense_errors);
11262
11263 stats->rx_crc_errors = old_stats->rx_crc_errors +
11264 tg3_calc_crc_errors(tp);
11265
11266 stats->rx_missed_errors = old_stats->rx_missed_errors +
11267 get_stat64(&hw_stats->rx_discards);
11268
11269 stats->rx_dropped = tp->rx_dropped;
11270 stats->tx_dropped = tp->tx_dropped;
11271 }
11272
11273 static int tg3_get_regs_len(struct net_device *dev)
11274 {
11275 return TG3_REG_BLK_SIZE;
11276 }
11277
11278 static void tg3_get_regs(struct net_device *dev,
11279 struct ethtool_regs *regs, void *_p)
11280 {
11281 struct tg3 *tp = netdev_priv(dev);
11282
11283 regs->version = 0;
11284
11285 memset(_p, 0, TG3_REG_BLK_SIZE);
11286
11287 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11288 return;
11289
11290 tg3_full_lock(tp, 0);
11291
11292 tg3_dump_legacy_regs(tp, (u32 *)_p);
11293
11294 tg3_full_unlock(tp);
11295 }
11296
11297 static int tg3_get_eeprom_len(struct net_device *dev)
11298 {
11299 struct tg3 *tp = netdev_priv(dev);
11300
11301 return tp->nvram_size;
11302 }
11303
11304 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11305 {
11306 struct tg3 *tp = netdev_priv(dev);
11307 int ret;
11308 u8 *pd;
11309 u32 i, offset, len, b_offset, b_count;
11310 __be32 val;
11311
11312 if (tg3_flag(tp, NO_NVRAM))
11313 return -EINVAL;
11314
11315 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11316 return -EAGAIN;
11317
11318 offset = eeprom->offset;
11319 len = eeprom->len;
11320 eeprom->len = 0;
11321
11322 eeprom->magic = TG3_EEPROM_MAGIC;
11323
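/* NVRAM is read in 4-byte words: handle a misaligned head first,
 * then whole words, then any misaligned tail.
 */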
11324 if (offset & 3) {
11325 /* adjustments to start on required 4 byte boundary */
11326 b_offset = offset & 3;
11327 b_count = 4 - b_offset;
11328 if (b_count > len) {
11329 /* i.e. offset=1 len=2 */
11330 b_count = len;
11331 }
11332 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11333 if (ret)
11334 return ret;
11335 memcpy(data, ((char *)&val) + b_offset, b_count);
11336 len -= b_count;
11337 offset += b_count;
11338 eeprom->len += b_count;
11339 }
11340
11341 /* read bytes up to the last 4 byte boundary */
11342 pd = &data[eeprom->len];
11343 for (i = 0; i < (len - (len & 3)); i += 4) {
11344 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11345 if (ret) {
11346 eeprom->len += i;
11347 return ret;
11348 }
11349 memcpy(pd + i, &val, 4);
11350 }
11351 eeprom->len += i;
11352
11353 if (len & 3) {
11354 /* read last bytes not ending on 4 byte boundary */
11355 pd = &data[eeprom->len];
11356 b_count = len & 3;
11357 b_offset = offset + len - b_count;
11358 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11359 if (ret)
11360 return ret;
11361 memcpy(pd, &val, b_count);
11362 eeprom->len += b_count;
11363 }
11364 return 0;
11365 }
11366
11367 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11368 {
11369 struct tg3 *tp = netdev_priv(dev);
11370 int ret;
11371 u32 offset, len, b_offset, odd_len;
11372 u8 *buf;
11373 __be32 start, end;
11374
11375 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11376 return -EAGAIN;
11377
11378 if (tg3_flag(tp, NO_NVRAM) ||
11379 eeprom->magic != TG3_EEPROM_MAGIC)
11380 return -EINVAL;
11381
11382 offset = eeprom->offset;
11383 len = eeprom->len;
11384
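/* Writes must be 4-byte aligned too: pre-read the partial word at
 * either end and splice the caller's bytes into a scratch buffer.
 */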
11385 if ((b_offset = (offset & 3))) {
11386 /* adjustments to start on required 4 byte boundary */
11387 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11388 if (ret)
11389 return ret;
11390 len += b_offset;
11391 offset &= ~3;
11392 if (len < 4)
11393 len = 4;
11394 }
11395
11396 odd_len = 0;
11397 if (len & 3) {
11398 /* adjustments to end on required 4 byte boundary */
11399 odd_len = 1;
11400 len = (len + 3) & ~3;
11401 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11402 if (ret)
11403 return ret;
11404 }
11405
11406 buf = data;
11407 if (b_offset || odd_len) {
11408 buf = kmalloc(len, GFP_KERNEL);
11409 if (!buf)
11410 return -ENOMEM;
11411 if (b_offset)
11412 memcpy(buf, &start, 4);
11413 if (odd_len)
11414 memcpy(buf+len-4, &end, 4);
11415 memcpy(buf + b_offset, data, eeprom->len);
11416 }
11417
11418 ret = tg3_nvram_write_block(tp, offset, len, buf);
11419
11420 if (buf != data)
11421 kfree(buf);
11422
11423 return ret;
11424 }
11425
11426 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11427 {
11428 struct tg3 *tp = netdev_priv(dev);
11429
11430 if (tg3_flag(tp, USE_PHYLIB)) {
11431 struct phy_device *phydev;
11432 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11433 return -EAGAIN;
11434 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11435 return phy_ethtool_gset(phydev, cmd);
11436 }
11437
11438 cmd->supported = (SUPPORTED_Autoneg);
11439
11440 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11441 cmd->supported |= (SUPPORTED_1000baseT_Half |
11442 SUPPORTED_1000baseT_Full);
11443
11444 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11445 cmd->supported |= (SUPPORTED_100baseT_Half |
11446 SUPPORTED_100baseT_Full |
11447 SUPPORTED_10baseT_Half |
11448 SUPPORTED_10baseT_Full |
11449 SUPPORTED_TP);
11450 cmd->port = PORT_TP;
11451 } else {
11452 cmd->supported |= SUPPORTED_FIBRE;
11453 cmd->port = PORT_FIBRE;
11454 }
11455
11456 cmd->advertising = tp->link_config.advertising;
11457 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11458 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11459 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11460 cmd->advertising |= ADVERTISED_Pause;
11461 } else {
11462 cmd->advertising |= ADVERTISED_Pause |
11463 ADVERTISED_Asym_Pause;
11464 }
11465 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11466 cmd->advertising |= ADVERTISED_Asym_Pause;
11467 }
11468 }
11469 if (netif_running(dev) && tp->link_up) {
11470 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11471 cmd->duplex = tp->link_config.active_duplex;
11472 cmd->lp_advertising = tp->link_config.rmt_adv;
11473 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11474 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11475 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11476 else
11477 cmd->eth_tp_mdix = ETH_TP_MDI;
11478 }
11479 } else {
11480 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11481 cmd->duplex = DUPLEX_UNKNOWN;
11482 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11483 }
11484 cmd->phy_address = tp->phy_addr;
11485 cmd->transceiver = XCVR_INTERNAL;
11486 cmd->autoneg = tp->link_config.autoneg;
11487 cmd->maxtxpkt = 0;
11488 cmd->maxrxpkt = 0;
11489 return 0;
11490 }
11491
11492 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11493 {
11494 struct tg3 *tp = netdev_priv(dev);
11495 u32 speed = ethtool_cmd_speed(cmd);
11496
11497 if (tg3_flag(tp, USE_PHYLIB)) {
11498 struct phy_device *phydev;
11499 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11500 return -EAGAIN;
11501 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11502 return phy_ethtool_sset(phydev, cmd);
11503 }
11504
11505 if (cmd->autoneg != AUTONEG_ENABLE &&
11506 cmd->autoneg != AUTONEG_DISABLE)
11507 return -EINVAL;
11508
11509 if (cmd->autoneg == AUTONEG_DISABLE &&
11510 cmd->duplex != DUPLEX_FULL &&
11511 cmd->duplex != DUPLEX_HALF)
11512 return -EINVAL;
11513
11514 if (cmd->autoneg == AUTONEG_ENABLE) {
11515 u32 mask = ADVERTISED_Autoneg |
11516 ADVERTISED_Pause |
11517 ADVERTISED_Asym_Pause;
11518
11519 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11520 mask |= ADVERTISED_1000baseT_Half |
11521 ADVERTISED_1000baseT_Full;
11522
11523 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11524 mask |= ADVERTISED_100baseT_Half |
11525 ADVERTISED_100baseT_Full |
11526 ADVERTISED_10baseT_Half |
11527 ADVERTISED_10baseT_Full |
11528 ADVERTISED_TP;
11529 else
11530 mask |= ADVERTISED_FIBRE;
11531
11532 if (cmd->advertising & ~mask)
11533 return -EINVAL;
11534
11535 mask &= (ADVERTISED_1000baseT_Half |
11536 ADVERTISED_1000baseT_Full |
11537 ADVERTISED_100baseT_Half |
11538 ADVERTISED_100baseT_Full |
11539 ADVERTISED_10baseT_Half |
11540 ADVERTISED_10baseT_Full);
11541
11542 cmd->advertising &= mask;
11543 } else {
11544 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11545 if (speed != SPEED_1000)
11546 return -EINVAL;
11547
11548 if (cmd->duplex != DUPLEX_FULL)
11549 return -EINVAL;
11550 } else {
11551 if (speed != SPEED_100 &&
11552 speed != SPEED_10)
11553 return -EINVAL;
11554 }
11555 }
11556
11557 tg3_full_lock(tp, 0);
11558
11559 tp->link_config.autoneg = cmd->autoneg;
11560 if (cmd->autoneg == AUTONEG_ENABLE) {
11561 tp->link_config.advertising = (cmd->advertising |
11562 ADVERTISED_Autoneg);
11563 tp->link_config.speed = SPEED_UNKNOWN;
11564 tp->link_config.duplex = DUPLEX_UNKNOWN;
11565 } else {
11566 tp->link_config.advertising = 0;
11567 tp->link_config.speed = speed;
11568 tp->link_config.duplex = cmd->duplex;
11569 }
11570
11571 if (netif_running(dev))
11572 tg3_setup_phy(tp, 1);
11573
11574 tg3_full_unlock(tp);
11575
11576 return 0;
11577 }
11578
11579 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11580 {
11581 struct tg3 *tp = netdev_priv(dev);
11582
11583 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11584 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11585 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11586 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11587 }
11588
11589 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11590 {
11591 struct tg3 *tp = netdev_priv(dev);
11592
11593 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11594 wol->supported = WAKE_MAGIC;
11595 else
11596 wol->supported = 0;
11597 wol->wolopts = 0;
11598 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11599 wol->wolopts = WAKE_MAGIC;
11600 memset(&wol->sopass, 0, sizeof(wol->sopass));
11601 }
11602
11603 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11604 {
11605 struct tg3 *tp = netdev_priv(dev);
11606 struct device *dp = &tp->pdev->dev;
11607
11608 if (wol->wolopts & ~WAKE_MAGIC)
11609 return -EINVAL;
11610 if ((wol->wolopts & WAKE_MAGIC) &&
11611 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11612 return -EINVAL;
11613
11614 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11615
11616 spin_lock_bh(&tp->lock);
11617 if (device_may_wakeup(dp))
11618 tg3_flag_set(tp, WOL_ENABLE);
11619 else
11620 tg3_flag_clear(tp, WOL_ENABLE);
11621 spin_unlock_bh(&tp->lock);
11622
11623 return 0;
11624 }
11625
11626 static u32 tg3_get_msglevel(struct net_device *dev)
11627 {
11628 struct tg3 *tp = netdev_priv(dev);
11629 return tp->msg_enable;
11630 }
11631
11632 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11633 {
11634 struct tg3 *tp = netdev_priv(dev);
11635 tp->msg_enable = value;
11636 }
11637
11638 static int tg3_nway_reset(struct net_device *dev)
11639 {
11640 struct tg3 *tp = netdev_priv(dev);
11641 int r;
11642
11643 if (!netif_running(dev))
11644 return -EAGAIN;
11645
11646 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11647 return -EINVAL;
11648
11649 if (tg3_flag(tp, USE_PHYLIB)) {
11650 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11651 return -EAGAIN;
11652 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11653 } else {
11654 u32 bmcr;
11655
11656 spin_lock_bh(&tp->lock);
11657 r = -EINVAL;
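/* Read BMCR twice; the first result is discarded, presumably to
 * flush a stale value, and the fresh second read is used below.
 */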
11658 tg3_readphy(tp, MII_BMCR, &bmcr);
11659 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11660 ((bmcr & BMCR_ANENABLE) ||
11661 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11662 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11663 BMCR_ANENABLE);
11664 r = 0;
11665 }
11666 spin_unlock_bh(&tp->lock);
11667 }
11668
11669 return r;
11670 }
11671
11672 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11673 {
11674 struct tg3 *tp = netdev_priv(dev);
11675
11676 ering->rx_max_pending = tp->rx_std_ring_mask;
11677 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11678 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11679 else
11680 ering->rx_jumbo_max_pending = 0;
11681
11682 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11683
11684 ering->rx_pending = tp->rx_pending;
11685 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11686 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11687 else
11688 ering->rx_jumbo_pending = 0;
11689
11690 ering->tx_pending = tp->napi[0].tx_pending;
11691 }
11692
11693 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11694 {
11695 struct tg3 *tp = netdev_priv(dev);
11696 int i, irq_sync = 0, err = 0;
11697
11698 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11699 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11700 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11701 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11702 (tg3_flag(tp, TSO_BUG) &&
11703 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11704 return -EINVAL;
11705
11706 if (netif_running(dev)) {
11707 tg3_phy_stop(tp);
11708 tg3_netif_stop(tp);
11709 irq_sync = 1;
11710 }
11711
11712 tg3_full_lock(tp, irq_sync);
11713
11714 tp->rx_pending = ering->rx_pending;
11715
11716 if (tg3_flag(tp, MAX_RXPEND_64) &&
11717 tp->rx_pending > 63)
11718 tp->rx_pending = 63;
11719 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11720
11721 for (i = 0; i < tp->irq_max; i++)
11722 tp->napi[i].tx_pending = ering->tx_pending;
11723
11724 if (netif_running(dev)) {
11725 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11726 err = tg3_restart_hw(tp, 1);
11727 if (!err)
11728 tg3_netif_start(tp);
11729 }
11730
11731 tg3_full_unlock(tp);
11732
11733 if (irq_sync && !err)
11734 tg3_phy_start(tp);
11735
11736 return err;
11737 }
11738
11739 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11740 {
11741 struct tg3 *tp = netdev_priv(dev);
11742
11743 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11744
11745 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11746 epause->rx_pause = 1;
11747 else
11748 epause->rx_pause = 0;
11749
11750 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11751 epause->tx_pause = 1;
11752 else
11753 epause->tx_pause = 0;
11754 }
11755
11756 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11757 {
11758 struct tg3 *tp = netdev_priv(dev);
11759 int err = 0;
11760
11761 if (tg3_flag(tp, USE_PHYLIB)) {
11762 u32 newadv;
11763 struct phy_device *phydev;
11764
11765 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11766
11767 if (!(phydev->supported & SUPPORTED_Pause) ||
11768 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11769 (epause->rx_pause != epause->tx_pause)))
11770 return -EINVAL;
11771
11772 tp->link_config.flowctrl = 0;
11773 if (epause->rx_pause) {
11774 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11775
11776 if (epause->tx_pause) {
11777 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11778 newadv = ADVERTISED_Pause;
11779 } else
11780 newadv = ADVERTISED_Pause |
11781 ADVERTISED_Asym_Pause;
11782 } else if (epause->tx_pause) {
11783 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11784 newadv = ADVERTISED_Asym_Pause;
11785 } else
11786 newadv = 0;
11787
11788 if (epause->autoneg)
11789 tg3_flag_set(tp, PAUSE_AUTONEG);
11790 else
11791 tg3_flag_clear(tp, PAUSE_AUTONEG);
11792
11793 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11794 u32 oldadv = phydev->advertising &
11795 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11796 if (oldadv != newadv) {
11797 phydev->advertising &=
11798 ~(ADVERTISED_Pause |
11799 ADVERTISED_Asym_Pause);
11800 phydev->advertising |= newadv;
11801 if (phydev->autoneg) {
11802 /*
11803 * Always renegotiate the link to
11804 * inform our link partner of our
11805 * flow control settings, even if the
11806 * flow control is forced. Let
11807 * tg3_adjust_link() do the final
11808 * flow control setup.
11809 */
11810 return phy_start_aneg(phydev);
11811 }
11812 }
11813
11814 if (!epause->autoneg)
11815 tg3_setup_flow_control(tp, 0, 0);
11816 } else {
11817 tp->link_config.advertising &=
11818 ~(ADVERTISED_Pause |
11819 ADVERTISED_Asym_Pause);
11820 tp->link_config.advertising |= newadv;
11821 }
11822 } else {
11823 int irq_sync = 0;
11824
11825 if (netif_running(dev)) {
11826 tg3_netif_stop(tp);
11827 irq_sync = 1;
11828 }
11829
11830 tg3_full_lock(tp, irq_sync);
11831
11832 if (epause->autoneg)
11833 tg3_flag_set(tp, PAUSE_AUTONEG);
11834 else
11835 tg3_flag_clear(tp, PAUSE_AUTONEG);
11836 if (epause->rx_pause)
11837 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11838 else
11839 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11840 if (epause->tx_pause)
11841 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11842 else
11843 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11844
11845 if (netif_running(dev)) {
11846 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11847 err = tg3_restart_hw(tp, 1);
11848 if (!err)
11849 tg3_netif_start(tp);
11850 }
11851
11852 tg3_full_unlock(tp);
11853 }
11854
11855 return err;
11856 }
11857
11858 static int tg3_get_sset_count(struct net_device *dev, int sset)
11859 {
11860 switch (sset) {
11861 case ETH_SS_TEST:
11862 return TG3_NUM_TEST;
11863 case ETH_SS_STATS:
11864 return TG3_NUM_STATS;
11865 default:
11866 return -EOPNOTSUPP;
11867 }
11868 }
11869
11870 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11871 u32 *rules __always_unused)
11872 {
11873 struct tg3 *tp = netdev_priv(dev);
11874
11875 if (!tg3_flag(tp, SUPPORT_MSIX))
11876 return -EOPNOTSUPP;
11877
11878 switch (info->cmd) {
11879 case ETHTOOL_GRXRINGS:
11880 if (netif_running(tp->dev))
11881 info->data = tp->rxq_cnt;
11882 else {
11883 info->data = num_online_cpus();
11884 if (info->data > TG3_RSS_MAX_NUM_QS)
11885 info->data = TG3_RSS_MAX_NUM_QS;
11886 }
11887
11888 /* The first interrupt vector only
11889 * handles link interrupts.
11890 */
11891 info->data -= 1;
11892 return 0;
11893
11894 default:
11895 return -EOPNOTSUPP;
11896 }
11897 }
11898
11899 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11900 {
11901 u32 size = 0;
11902 struct tg3 *tp = netdev_priv(dev);
11903
11904 if (tg3_flag(tp, SUPPORT_MSIX))
11905 size = TG3_RSS_INDIR_TBL_SIZE;
11906
11907 return size;
11908 }
11909
11910 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11911 {
11912 struct tg3 *tp = netdev_priv(dev);
11913 int i;
11914
11915 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11916 indir[i] = tp->rss_ind_tbl[i];
11917
11918 return 0;
11919 }
11920
11921 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11922 {
11923 struct tg3 *tp = netdev_priv(dev);
11924 size_t i;
11925
11926 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11927 tp->rss_ind_tbl[i] = indir[i];
11928
11929 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11930 return 0;
11931
11932 /* It is legal to write the indirection
11933 * table while the device is running.
11934 */
11935 tg3_full_lock(tp, 0);
11936 tg3_rss_write_indir_tbl(tp);
11937 tg3_full_unlock(tp);
11938
11939 return 0;
11940 }
11941
11942 static void tg3_get_channels(struct net_device *dev,
11943 struct ethtool_channels *channel)
11944 {
11945 struct tg3 *tp = netdev_priv(dev);
11946 u32 deflt_qs = netif_get_num_default_rss_queues();
11947
11948 channel->max_rx = tp->rxq_max;
11949 channel->max_tx = tp->txq_max;
11950
11951 if (netif_running(dev)) {
11952 channel->rx_count = tp->rxq_cnt;
11953 channel->tx_count = tp->txq_cnt;
11954 } else {
11955 if (tp->rxq_req)
11956 channel->rx_count = tp->rxq_req;
11957 else
11958 channel->rx_count = min(deflt_qs, tp->rxq_max);
11959
11960 if (tp->txq_req)
11961 channel->tx_count = tp->txq_req;
11962 else
11963 channel->tx_count = min(deflt_qs, tp->txq_max);
11964 }
11965 }
11966
11967 static int tg3_set_channels(struct net_device *dev,
11968 struct ethtool_channels *channel)
11969 {
11970 struct tg3 *tp = netdev_priv(dev);
11971
11972 if (!tg3_flag(tp, SUPPORT_MSIX))
11973 return -EOPNOTSUPP;
11974
11975 if (channel->rx_count > tp->rxq_max ||
11976 channel->tx_count > tp->txq_max)
11977 return -EINVAL;
11978
11979 tp->rxq_req = channel->rx_count;
11980 tp->txq_req = channel->tx_count;
11981
11982 if (!netif_running(dev))
11983 return 0;
11984
11985 tg3_stop(tp);
11986
11987 tg3_carrier_off(tp);
11988
11989 tg3_start(tp, true, false, false);
11990
11991 return 0;
11992 }
11993
11994 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11995 {
11996 switch (stringset) {
11997 case ETH_SS_STATS:
11998 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11999 break;
12000 case ETH_SS_TEST:
12001 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12002 break;
12003 default:
12004 		WARN_ON(1);	/* unhandled string set */
12005 break;
12006 }
12007 }
12008
12009 static int tg3_set_phys_id(struct net_device *dev,
12010 enum ethtool_phys_id_state state)
12011 {
12012 struct tg3 *tp = netdev_priv(dev);
12013
12014 if (!netif_running(tp->dev))
12015 return -EAGAIN;
12016
12017 switch (state) {
12018 case ETHTOOL_ID_ACTIVE:
12019 return 1; /* cycle on/off once per second */
12020
12021 case ETHTOOL_ID_ON:
12022 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12023 LED_CTRL_1000MBPS_ON |
12024 LED_CTRL_100MBPS_ON |
12025 LED_CTRL_10MBPS_ON |
12026 LED_CTRL_TRAFFIC_OVERRIDE |
12027 LED_CTRL_TRAFFIC_BLINK |
12028 LED_CTRL_TRAFFIC_LED);
12029 break;
12030
12031 case ETHTOOL_ID_OFF:
12032 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12033 LED_CTRL_TRAFFIC_OVERRIDE);
12034 break;
12035
12036 case ETHTOOL_ID_INACTIVE:
12037 tw32(MAC_LED_CTRL, tp->led_ctrl);
12038 break;
12039 }
12040
12041 return 0;
12042 }
12043
12044 static void tg3_get_ethtool_stats(struct net_device *dev,
12045 struct ethtool_stats *estats, u64 *tmp_stats)
12046 {
12047 struct tg3 *tp = netdev_priv(dev);
12048
12049 if (tp->hw_stats)
12050 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12051 else
12052 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12053 }
12054
12055 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12056 {
12057 int i;
12058 __be32 *buf;
12059 u32 offset = 0, len = 0;
12060 u32 magic, val;
12061
12062 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12063 return NULL;
12064
12065 if (magic == TG3_EEPROM_MAGIC) {
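/* Walk the NVRAM directory looking for an extended VPD entry;
 * if one is found, its length and offset override the defaults
 * used below.
 */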
12066 for (offset = TG3_NVM_DIR_START;
12067 offset < TG3_NVM_DIR_END;
12068 offset += TG3_NVM_DIRENT_SIZE) {
12069 if (tg3_nvram_read(tp, offset, &val))
12070 return NULL;
12071
12072 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12073 TG3_NVM_DIRTYPE_EXTVPD)
12074 break;
12075 }
12076
12077 if (offset != TG3_NVM_DIR_END) {
12078 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12079 if (tg3_nvram_read(tp, offset + 4, &offset))
12080 return NULL;
12081
12082 offset = tg3_nvram_logical_addr(tp, offset);
12083 }
12084 }
12085
12086 if (!offset || !len) {
12087 offset = TG3_NVM_VPD_OFF;
12088 len = TG3_NVM_VPD_LEN;
12089 }
12090
12091 buf = kmalloc(len, GFP_KERNEL);
12092 if (buf == NULL)
12093 return NULL;
12094
12095 if (magic == TG3_EEPROM_MAGIC) {
12096 for (i = 0; i < len; i += 4) {
12097 /* The data is in little-endian format in NVRAM.
12098 * Use the big-endian read routines to preserve
12099 * the byte order as it exists in NVRAM.
12100 */
12101 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12102 goto error;
12103 }
12104 } else {
12105 u8 *ptr;
12106 ssize_t cnt;
12107 unsigned int pos = 0;
12108
12109 ptr = (u8 *)&buf[0];
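/* Read the VPD in up to three chunked attempts; a timeout or
 * signal counts as a zero-byte read and consumes one attempt.
 */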
12110 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12111 cnt = pci_read_vpd(tp->pdev, pos,
12112 len - pos, ptr);
12113 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12114 cnt = 0;
12115 else if (cnt < 0)
12116 goto error;
12117 }
12118 if (pos != len)
12119 goto error;
12120 }
12121
12122 *vpdlen = len;
12123
12124 return buf;
12125
12126 error:
12127 kfree(buf);
12128 return NULL;
12129 }
12130
12131 #define NVRAM_TEST_SIZE 0x100
12132 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12133 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12134 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12135 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12136 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12137 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12138 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12139 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12140
12141 static int tg3_test_nvram(struct tg3 *tp)
12142 {
12143 u32 csum, magic, len;
12144 __be32 *buf;
12145 int i, j, k, err = 0, size;
12146
12147 if (tg3_flag(tp, NO_NVRAM))
12148 return 0;
12149
12150 if (tg3_nvram_read(tp, 0, &magic) != 0)
12151 return -EIO;
12152
12153 if (magic == TG3_EEPROM_MAGIC)
12154 size = NVRAM_TEST_SIZE;
12155 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12156 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12157 TG3_EEPROM_SB_FORMAT_1) {
12158 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12159 case TG3_EEPROM_SB_REVISION_0:
12160 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12161 break;
12162 case TG3_EEPROM_SB_REVISION_2:
12163 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12164 break;
12165 case TG3_EEPROM_SB_REVISION_3:
12166 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12167 break;
12168 case TG3_EEPROM_SB_REVISION_4:
12169 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12170 break;
12171 case TG3_EEPROM_SB_REVISION_5:
12172 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12173 break;
12174 case TG3_EEPROM_SB_REVISION_6:
12175 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12176 break;
12177 default:
12178 return -EIO;
12179 }
12180 } else
12181 return 0;
12182 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12183 size = NVRAM_SELFBOOT_HW_SIZE;
12184 else
12185 return -EIO;
12186
12187 buf = kmalloc(size, GFP_KERNEL);
12188 if (buf == NULL)
12189 return -ENOMEM;
12190
12191 err = -EIO;
12192 for (i = 0, j = 0; i < size; i += 4, j++) {
12193 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12194 if (err)
12195 break;
12196 }
12197 if (i < size)
12198 goto out;
12199
12200 /* Selfboot format */
12201 magic = be32_to_cpu(buf[0]);
12202 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12203 TG3_EEPROM_MAGIC_FW) {
12204 u8 *buf8 = (u8 *) buf, csum8 = 0;
12205
12206 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12207 TG3_EEPROM_SB_REVISION_2) {
12208 			/* For rev 2, the csum doesn't include the MBA (Multi-Boot Agent) word. */
12209 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12210 csum8 += buf8[i];
12211 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12212 csum8 += buf8[i];
12213 } else {
12214 for (i = 0; i < size; i++)
12215 csum8 += buf8[i];
12216 }
12217
12218 if (csum8 == 0) {
12219 err = 0;
12220 goto out;
12221 }
12222
12223 err = -EIO;
12224 goto out;
12225 }
12226
12227 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12228 TG3_EEPROM_MAGIC_HW) {
12229 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12230 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12231 u8 *buf8 = (u8 *) buf;
12232
12233 /* Separate the parity bits and the data bytes. */
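/* Bytes 0 and 8 each hold 7 parity bits, byte 16 holds 6 and
 * byte 17 a further 8, covering the 28 data bytes that make up
 * the rest of the 32-byte block.
 */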
12234 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12235 if ((i == 0) || (i == 8)) {
12236 int l;
12237 u8 msk;
12238
12239 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12240 parity[k++] = buf8[i] & msk;
12241 i++;
12242 } else if (i == 16) {
12243 int l;
12244 u8 msk;
12245
12246 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12247 parity[k++] = buf8[i] & msk;
12248 i++;
12249
12250 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12251 parity[k++] = buf8[i] & msk;
12252 i++;
12253 }
12254 data[j++] = buf8[i];
12255 }
12256
12257 err = -EIO;
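/* Each data byte combined with its parity bit must have odd
 * overall parity; a mismatch in either direction fails the test.
 */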
12258 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12259 u8 hw8 = hweight8(data[i]);
12260
12261 if ((hw8 & 0x1) && parity[i])
12262 goto out;
12263 else if (!(hw8 & 0x1) && !parity[i])
12264 goto out;
12265 }
12266 err = 0;
12267 goto out;
12268 }
12269
12270 err = -EIO;
12271
12272 /* Bootstrap checksum at offset 0x10 */
12273 csum = calc_crc((unsigned char *) buf, 0x10);
12274 if (csum != le32_to_cpu(buf[0x10/4]))
12275 goto out;
12276
12277 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12278 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12279 if (csum != le32_to_cpu(buf[0xfc/4]))
12280 goto out;
12281
12282 kfree(buf);
12283
12284 buf = tg3_vpd_readblock(tp, &len);
12285 if (!buf)
12286 return -ENOMEM;
12287
12288 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12289 if (i > 0) {
12290 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12291 if (j < 0)
12292 goto out;
12293
12294 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12295 goto out;
12296
12297 i += PCI_VPD_LRDT_TAG_SIZE;
12298 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12299 PCI_VPD_RO_KEYWORD_CHKSUM);
12300 if (j > 0) {
12301 u8 csum8 = 0;
12302
12303 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12304
12305 for (i = 0; i <= j; i++)
12306 csum8 += ((u8 *)buf)[i];
12307
12308 if (csum8)
12309 goto out;
12310 }
12311 }
12312
12313 err = 0;
12314
12315 out:
12316 kfree(buf);
12317 return err;
12318 }
12319
12320 #define TG3_SERDES_TIMEOUT_SEC 2
12321 #define TG3_COPPER_TIMEOUT_SEC 6
12322
12323 static int tg3_test_link(struct tg3 *tp)
12324 {
12325 int i, max;
12326
12327 if (!netif_running(tp->dev))
12328 return -ENODEV;
12329
12330 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12331 max = TG3_SERDES_TIMEOUT_SEC;
12332 else
12333 max = TG3_COPPER_TIMEOUT_SEC;
12334
12335 for (i = 0; i < max; i++) {
12336 if (tp->link_up)
12337 return 0;
12338
12339 if (msleep_interruptible(1000))
12340 break;
12341 }
12342
12343 return -EIO;
12344 }
12345
12346 /* Only test the commonly used registers */
12347 static int tg3_test_registers(struct tg3 *tp)
12348 {
12349 int i, is_5705, is_5750;
12350 u32 offset, read_mask, write_mask, val, save_val, read_val;
12351 static struct {
12352 u16 offset;
12353 u16 flags;
12354 #define TG3_FL_5705 0x1
12355 #define TG3_FL_NOT_5705 0x2
12356 #define TG3_FL_NOT_5788 0x4
12357 #define TG3_FL_NOT_5750 0x8
12358 u32 read_mask;
12359 u32 write_mask;
12360 } reg_tbl[] = {
12361 /* MAC Control Registers */
12362 { MAC_MODE, TG3_FL_NOT_5705,
12363 0x00000000, 0x00ef6f8c },
12364 { MAC_MODE, TG3_FL_5705,
12365 0x00000000, 0x01ef6b8c },
12366 { MAC_STATUS, TG3_FL_NOT_5705,
12367 0x03800107, 0x00000000 },
12368 { MAC_STATUS, TG3_FL_5705,
12369 0x03800100, 0x00000000 },
12370 { MAC_ADDR_0_HIGH, 0x0000,
12371 0x00000000, 0x0000ffff },
12372 { MAC_ADDR_0_LOW, 0x0000,
12373 0x00000000, 0xffffffff },
12374 { MAC_RX_MTU_SIZE, 0x0000,
12375 0x00000000, 0x0000ffff },
12376 { MAC_TX_MODE, 0x0000,
12377 0x00000000, 0x00000070 },
12378 { MAC_TX_LENGTHS, 0x0000,
12379 0x00000000, 0x00003fff },
12380 { MAC_RX_MODE, TG3_FL_NOT_5705,
12381 0x00000000, 0x000007fc },
12382 { MAC_RX_MODE, TG3_FL_5705,
12383 0x00000000, 0x000007dc },
12384 { MAC_HASH_REG_0, 0x0000,
12385 0x00000000, 0xffffffff },
12386 { MAC_HASH_REG_1, 0x0000,
12387 0x00000000, 0xffffffff },
12388 { MAC_HASH_REG_2, 0x0000,
12389 0x00000000, 0xffffffff },
12390 { MAC_HASH_REG_3, 0x0000,
12391 0x00000000, 0xffffffff },
12392
12393 /* Receive Data and Receive BD Initiator Control Registers. */
12394 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12395 0x00000000, 0xffffffff },
12396 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12397 0x00000000, 0xffffffff },
12398 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12399 0x00000000, 0x00000003 },
12400 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12401 0x00000000, 0xffffffff },
12402 { RCVDBDI_STD_BD+0, 0x0000,
12403 0x00000000, 0xffffffff },
12404 { RCVDBDI_STD_BD+4, 0x0000,
12405 0x00000000, 0xffffffff },
12406 { RCVDBDI_STD_BD+8, 0x0000,
12407 0x00000000, 0xffff0002 },
12408 { RCVDBDI_STD_BD+0xc, 0x0000,
12409 0x00000000, 0xffffffff },
12410
12411 /* Receive BD Initiator Control Registers. */
12412 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12413 0x00000000, 0xffffffff },
12414 { RCVBDI_STD_THRESH, TG3_FL_5705,
12415 0x00000000, 0x000003ff },
12416 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12417 0x00000000, 0xffffffff },
12418
12419 /* Host Coalescing Control Registers. */
12420 { HOSTCC_MODE, TG3_FL_NOT_5705,
12421 0x00000000, 0x00000004 },
12422 { HOSTCC_MODE, TG3_FL_5705,
12423 0x00000000, 0x000000f6 },
12424 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12425 0x00000000, 0xffffffff },
12426 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12427 0x00000000, 0x000003ff },
12428 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12429 0x00000000, 0xffffffff },
12430 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12431 0x00000000, 0x000003ff },
12432 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12433 0x00000000, 0xffffffff },
12434 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12435 0x00000000, 0x000000ff },
12436 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12437 0x00000000, 0xffffffff },
12438 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12439 0x00000000, 0x000000ff },
12440 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12441 0x00000000, 0xffffffff },
12442 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12443 0x00000000, 0xffffffff },
12444 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12445 0x00000000, 0xffffffff },
12446 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12447 0x00000000, 0x000000ff },
12448 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12449 0x00000000, 0xffffffff },
12450 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12451 0x00000000, 0x000000ff },
12452 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12453 0x00000000, 0xffffffff },
12454 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12455 0x00000000, 0xffffffff },
12456 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12457 0x00000000, 0xffffffff },
12458 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12459 0x00000000, 0xffffffff },
12460 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12461 0x00000000, 0xffffffff },
12462 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12463 0xffffffff, 0x00000000 },
12464 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12465 0xffffffff, 0x00000000 },
12466
12467 /* Buffer Manager Control Registers. */
12468 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12469 0x00000000, 0x007fff80 },
12470 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12471 0x00000000, 0x007fffff },
12472 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12473 0x00000000, 0x0000003f },
12474 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12475 0x00000000, 0x000001ff },
12476 { BUFMGR_MB_HIGH_WATER, 0x0000,
12477 0x00000000, 0x000001ff },
12478 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12479 0xffffffff, 0x00000000 },
12480 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12481 0xffffffff, 0x00000000 },
12482
12483 /* Mailbox Registers */
12484 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12485 0x00000000, 0x000001ff },
12486 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12487 0x00000000, 0x000001ff },
12488 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12489 0x00000000, 0x000007ff },
12490 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12491 0x00000000, 0x000001ff },
12492
12493 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12494 };
12495
12496 is_5705 = is_5750 = 0;
12497 if (tg3_flag(tp, 5705_PLUS)) {
12498 is_5705 = 1;
12499 if (tg3_flag(tp, 5750_PLUS))
12500 is_5750 = 1;
12501 }
12502
12503 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12504 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12505 continue;
12506
12507 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12508 continue;
12509
12510 if (tg3_flag(tp, IS_5788) &&
12511 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12512 continue;
12513
12514 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12515 continue;
12516
12517 offset = (u32) reg_tbl[i].offset;
12518 read_mask = reg_tbl[i].read_mask;
12519 write_mask = reg_tbl[i].write_mask;
12520
12521 /* Save the original register content */
12522 save_val = tr32(offset);
12523
12524 /* Determine the read-only value. */
12525 read_val = save_val & read_mask;
12526
12527 /* Write zero to the register, then make sure the read-only bits
12528 * are not changed and the read/write bits are all zeros.
12529 */
12530 tw32(offset, 0);
12531
12532 val = tr32(offset);
12533
12534 /* Test the read-only and read/write bits. */
12535 if (((val & read_mask) != read_val) || (val & write_mask))
12536 goto out;
12537
12538 		/* Write ones to all the bits defined by read_mask and
12539 		 * write_mask, then make sure the read-only bits are not
12540 		 * changed and the read/write bits are all ones.
12541 		 */
12542 tw32(offset, read_mask | write_mask);
12543
12544 val = tr32(offset);
12545
12546 /* Test the read-only bits. */
12547 if ((val & read_mask) != read_val)
12548 goto out;
12549
12550 /* Test the read/write bits. */
12551 if ((val & write_mask) != write_mask)
12552 goto out;
12553
12554 tw32(offset, save_val);
12555 }
12556
12557 return 0;
12558
12559 out:
12560 if (netif_msg_hw(tp))
12561 netdev_err(tp->dev,
12562 "Register test failed at offset %x\n", offset);
12563 tw32(offset, save_val);
12564 return -EIO;
12565 }
12566
12567 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12568 {
12569 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12570 int i;
12571 u32 j;
12572
12573 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12574 for (j = 0; j < len; j += 4) {
12575 u32 val;
12576
12577 tg3_write_mem(tp, offset + j, test_pattern[i]);
12578 tg3_read_mem(tp, offset + j, &val);
12579 if (val != test_pattern[i])
12580 return -EIO;
12581 }
12582 }
12583 return 0;
12584 }
12585
12586 static int tg3_test_memory(struct tg3 *tp)
12587 {
12588 static struct mem_entry {
12589 u32 offset;
12590 u32 len;
12591 } mem_tbl_570x[] = {
12592 { 0x00000000, 0x00b50},
12593 { 0x00002000, 0x1c000},
12594 { 0xffffffff, 0x00000}
12595 }, mem_tbl_5705[] = {
12596 { 0x00000100, 0x0000c},
12597 { 0x00000200, 0x00008},
12598 { 0x00004000, 0x00800},
12599 { 0x00006000, 0x01000},
12600 { 0x00008000, 0x02000},
12601 { 0x00010000, 0x0e000},
12602 { 0xffffffff, 0x00000}
12603 }, mem_tbl_5755[] = {
12604 { 0x00000200, 0x00008},
12605 { 0x00004000, 0x00800},
12606 { 0x00006000, 0x00800},
12607 { 0x00008000, 0x02000},
12608 { 0x00010000, 0x0c000},
12609 { 0xffffffff, 0x00000}
12610 }, mem_tbl_5906[] = {
12611 { 0x00000200, 0x00008},
12612 { 0x00004000, 0x00400},
12613 { 0x00006000, 0x00400},
12614 { 0x00008000, 0x01000},
12615 { 0x00010000, 0x01000},
12616 { 0xffffffff, 0x00000}
12617 }, mem_tbl_5717[] = {
12618 { 0x00000200, 0x00008},
12619 { 0x00010000, 0x0a000},
12620 { 0x00020000, 0x13c00},
12621 { 0xffffffff, 0x00000}
12622 }, mem_tbl_57765[] = {
12623 { 0x00000200, 0x00008},
12624 { 0x00004000, 0x00800},
12625 { 0x00006000, 0x09800},
12626 { 0x00010000, 0x0a000},
12627 { 0xffffffff, 0x00000}
12628 };
12629 struct mem_entry *mem_tbl;
12630 int err = 0;
12631 int i;
12632
12633 if (tg3_flag(tp, 5717_PLUS))
12634 mem_tbl = mem_tbl_5717;
12635 else if (tg3_flag(tp, 57765_CLASS) ||
12636 tg3_asic_rev(tp) == ASIC_REV_5762)
12637 mem_tbl = mem_tbl_57765;
12638 else if (tg3_flag(tp, 5755_PLUS))
12639 mem_tbl = mem_tbl_5755;
12640 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12641 mem_tbl = mem_tbl_5906;
12642 else if (tg3_flag(tp, 5705_PLUS))
12643 mem_tbl = mem_tbl_5705;
12644 else
12645 mem_tbl = mem_tbl_570x;
12646
12647 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12648 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12649 if (err)
12650 break;
12651 }
12652
12653 return err;
12654 }
12655
12656 #define TG3_TSO_MSS 500
12657
12658 #define TG3_TSO_IP_HDR_LEN 20
12659 #define TG3_TSO_TCP_HDR_LEN 20
12660 #define TG3_TSO_TCP_OPT_LEN 12
12661
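/* Canned header for the TSO loopback test: the IP ethertype (08 00),
 * a 20-byte IPv4 header (protocol TCP, 10.0.0.1 -> 10.0.0.2), and a
 * 32-byte TCP header whose 12 option bytes are two NOPs plus a
 * timestamp option.
 */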
12662 static const u8 tg3_tso_header[] = {
12663 0x08, 0x00,
12664 0x45, 0x00, 0x00, 0x00,
12665 0x00, 0x00, 0x40, 0x00,
12666 0x40, 0x06, 0x00, 0x00,
12667 0x0a, 0x00, 0x00, 0x01,
12668 0x0a, 0x00, 0x00, 0x02,
12669 0x0d, 0x00, 0xe0, 0x00,
12670 0x00, 0x00, 0x01, 0x00,
12671 0x00, 0x00, 0x02, 0x00,
12672 0x80, 0x10, 0x10, 0x00,
12673 0x14, 0x09, 0x00, 0x00,
12674 0x01, 0x01, 0x08, 0x0a,
12675 0x11, 0x11, 0x11, 0x11,
12676 0x11, 0x11, 0x11, 0x11,
12677 };
12678
12679 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12680 {
12681 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12682 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12683 u32 budget;
12684 struct sk_buff *skb;
12685 u8 *tx_data, *rx_data;
12686 dma_addr_t map;
12687 int num_pkts, tx_len, rx_len, i, err;
12688 struct tg3_rx_buffer_desc *desc;
12689 struct tg3_napi *tnapi, *rnapi;
12690 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12691
12692 tnapi = &tp->napi[0];
12693 rnapi = &tp->napi[0];
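/* With RSS the first vector handles only link events, so rx
 * completions land on ring 1; likewise for tx when TSS is on.
 */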
12694 if (tp->irq_cnt > 1) {
12695 if (tg3_flag(tp, ENABLE_RSS))
12696 rnapi = &tp->napi[1];
12697 if (tg3_flag(tp, ENABLE_TSS))
12698 tnapi = &tp->napi[1];
12699 }
12700 coal_now = tnapi->coal_now | rnapi->coal_now;
12701
12702 err = -EIO;
12703
12704 tx_len = pktsz;
12705 skb = netdev_alloc_skb(tp->dev, tx_len);
12706 if (!skb)
12707 return -ENOMEM;
12708
12709 tx_data = skb_put(skb, tx_len);
12710 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
12711 	memset(tx_data + ETH_ALEN, 0x0, 8);
12712
12713 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12714
12715 if (tso_loopback) {
12716 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12717
12718 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12719 TG3_TSO_TCP_OPT_LEN;
12720
12721 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12722 sizeof(tg3_tso_header));
12723 mss = TG3_TSO_MSS;
12724
12725 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12726 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12727
12728 /* Set the total length field in the IP header */
12729 iph->tot_len = htons((u16)(mss + hdr_len));
12730
12731 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12732 TXD_FLAG_CPU_POST_DMA);
12733
12734 if (tg3_flag(tp, HW_TSO_1) ||
12735 tg3_flag(tp, HW_TSO_2) ||
12736 tg3_flag(tp, HW_TSO_3)) {
12737 struct tcphdr *th;
12738 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12739 th = (struct tcphdr *)&tx_data[val];
12740 th->check = 0;
12741 } else
12742 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12743
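/* Pack hdr_len into the mss field and base_flags in the layout
 * each hardware TSO generation expects.
 */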
12744 if (tg3_flag(tp, HW_TSO_3)) {
12745 mss |= (hdr_len & 0xc) << 12;
12746 if (hdr_len & 0x10)
12747 base_flags |= 0x00000010;
12748 base_flags |= (hdr_len & 0x3e0) << 5;
12749 } else if (tg3_flag(tp, HW_TSO_2))
12750 mss |= hdr_len << 9;
12751 else if (tg3_flag(tp, HW_TSO_1) ||
12752 tg3_asic_rev(tp) == ASIC_REV_5705) {
12753 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12754 } else {
12755 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12756 }
12757
12758 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12759 } else {
12760 num_pkts = 1;
12761 data_off = ETH_HLEN;
12762
12763 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12764 tx_len > VLAN_ETH_FRAME_LEN)
12765 base_flags |= TXD_FLAG_JMB_PKT;
12766 }
12767
12768 for (i = data_off; i < tx_len; i++)
12769 tx_data[i] = (u8) (i & 0xff);
12770
12771 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12772 if (pci_dma_mapping_error(tp->pdev, map)) {
12773 dev_kfree_skb(skb);
12774 return -EIO;
12775 }
12776
12777 val = tnapi->tx_prod;
12778 tnapi->tx_buffers[val].skb = skb;
12779 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12780
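/* Force a coalescing cycle so the status block's rx producer
 * index is up to date before we snapshot it.
 */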
12781 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12782 rnapi->coal_now);
12783
12784 udelay(10);
12785
12786 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12787
12788 budget = tg3_tx_avail(tnapi);
12789 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12790 base_flags | TXD_FLAG_END, mss, 0)) {
12791 tnapi->tx_buffers[val].skb = NULL;
12792 dev_kfree_skb(skb);
12793 return -EIO;
12794 }
12795
12796 tnapi->tx_prod++;
12797
12798 /* Sync BD data before updating mailbox */
12799 wmb();
12800
12801 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12802 tr32_mailbox(tnapi->prodmbox);
12803
12804 udelay(10);
12805
12806 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12807 for (i = 0; i < 35; i++) {
12808 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12809 coal_now);
12810
12811 udelay(10);
12812
12813 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12814 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12815 if ((tx_idx == tnapi->tx_prod) &&
12816 (rx_idx == (rx_start_idx + num_pkts)))
12817 break;
12818 }
12819
12820 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12821 dev_kfree_skb(skb);
12822
12823 if (tx_idx != tnapi->tx_prod)
12824 goto out;
12825
12826 if (rx_idx != rx_start_idx + num_pkts)
12827 goto out;
12828
12829 val = data_off;
12830 while (rx_idx != rx_start_idx) {
12831 desc = &rnapi->rx_rcb[rx_start_idx++];
12832 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12833 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12834
12835 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12836 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12837 goto out;
12838
12839 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12840 - ETH_FCS_LEN;
12841
12842 if (!tso_loopback) {
12843 if (rx_len != tx_len)
12844 goto out;
12845
12846 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12847 if (opaque_key != RXD_OPAQUE_RING_STD)
12848 goto out;
12849 } else {
12850 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12851 goto out;
12852 }
12853 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12854 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12855 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12856 goto out;
12857 }
12858
12859 if (opaque_key == RXD_OPAQUE_RING_STD) {
12860 rx_data = tpr->rx_std_buffers[desc_idx].data;
12861 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12862 mapping);
12863 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12864 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12865 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12866 mapping);
12867 } else
12868 goto out;
12869
12870 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12871 PCI_DMA_FROMDEVICE);
12872
12873 rx_data += TG3_RX_OFFSET(tp);
12874 for (i = data_off; i < rx_len; i++, val++) {
12875 if (*(rx_data + i) != (u8) (val & 0xff))
12876 goto out;
12877 }
12878 }
12879
12880 err = 0;
12881
12882 /* tg3_free_rings will unmap and free the rx_data */
12883 out:
12884 return err;
12885 }
12886
12887 #define TG3_STD_LOOPBACK_FAILED 1
12888 #define TG3_JMB_LOOPBACK_FAILED 2
12889 #define TG3_TSO_LOOPBACK_FAILED 4
12890 #define TG3_LOOPBACK_FAILED \
12891 (TG3_STD_LOOPBACK_FAILED | \
12892 TG3_JMB_LOOPBACK_FAILED | \
12893 TG3_TSO_LOOPBACK_FAILED)
12894
12895 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12896 {
12897 int err = -EIO;
12898 u32 eee_cap;
12899 u32 jmb_pkt_sz = 9000;
12900
12901 if (tp->dma_limit)
12902 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12903
12904 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12905 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12906
12907 if (!netif_running(tp->dev)) {
12908 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12909 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12910 if (do_extlpbk)
12911 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12912 goto done;
12913 }
12914
12915 err = tg3_reset_hw(tp, 1);
12916 if (err) {
12917 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12918 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12919 if (do_extlpbk)
12920 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12921 goto done;
12922 }
12923
12924 if (tg3_flag(tp, ENABLE_RSS)) {
12925 int i;
12926
12927 /* Reroute all rx packets to the 1st queue */
12928 for (i = MAC_RSS_INDIR_TBL_0;
12929 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12930 tw32(i, 0x0);
12931 }
12932
12933 	/* HW erratum - MAC loopback fails in some cases on 5780.
12934 	 * Normal traffic and PHY loopback are not affected by
12935 	 * this erratum. Also, the MAC loopback test is deprecated
12936 	 * for all newer ASIC revisions.
12937 	 */
12938 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12939 !tg3_flag(tp, CPMU_PRESENT)) {
12940 tg3_mac_loopback(tp, true);
12941
12942 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12943 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12944
12945 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12946 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12947 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12948
12949 tg3_mac_loopback(tp, false);
12950 }
12951
12952 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12953 !tg3_flag(tp, USE_PHYLIB)) {
12954 int i;
12955
12956 tg3_phy_lpbk_set(tp, 0, false);
12957
12958 /* Wait for link */
12959 for (i = 0; i < 100; i++) {
12960 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12961 break;
12962 mdelay(1);
12963 }
12964
12965 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12966 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12967 if (tg3_flag(tp, TSO_CAPABLE) &&
12968 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12969 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12970 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12971 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12972 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12973
12974 if (do_extlpbk) {
12975 tg3_phy_lpbk_set(tp, 0, true);
12976
12977 /* All link indications report up, but the hardware
12978 * isn't really ready for about 20 msec. Double it
12979 * to be sure.
12980 */
12981 mdelay(40);
12982
12983 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12984 data[TG3_EXT_LOOPB_TEST] |=
12985 TG3_STD_LOOPBACK_FAILED;
12986 if (tg3_flag(tp, TSO_CAPABLE) &&
12987 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12988 data[TG3_EXT_LOOPB_TEST] |=
12989 TG3_TSO_LOOPBACK_FAILED;
12990 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12991 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12992 data[TG3_EXT_LOOPB_TEST] |=
12993 TG3_JMB_LOOPBACK_FAILED;
12994 }
12995
12996 /* Re-enable gphy autopowerdown. */
12997 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12998 tg3_phy_toggle_apd(tp, true);
12999 }
13000
13001 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13002 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13003
13004 done:
13005 tp->phy_flags |= eee_cap;
13006
13007 return err;
13008 }
13009
13010 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13011 u64 *data)
13012 {
13013 struct tg3 *tp = netdev_priv(dev);
13014 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13015
13016 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13017 tg3_power_up(tp)) {
13018 etest->flags |= ETH_TEST_FL_FAILED;
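/* Any nonzero value flags a failed test, so filling every
 * byte with 1 is sufficient here.
 */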
13019 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13020 return;
13021 }
13022
13023 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13024
13025 if (tg3_test_nvram(tp) != 0) {
13026 etest->flags |= ETH_TEST_FL_FAILED;
13027 data[TG3_NVRAM_TEST] = 1;
13028 }
13029 if (!doextlpbk && tg3_test_link(tp)) {
13030 etest->flags |= ETH_TEST_FL_FAILED;
13031 data[TG3_LINK_TEST] = 1;
13032 }
13033 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13034 int err, err2 = 0, irq_sync = 0;
13035
13036 if (netif_running(dev)) {
13037 tg3_phy_stop(tp);
13038 tg3_netif_stop(tp);
13039 irq_sync = 1;
13040 }
13041
13042 tg3_full_lock(tp, irq_sync);
13043 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13044 err = tg3_nvram_lock(tp);
13045 tg3_halt_cpu(tp, RX_CPU_BASE);
13046 if (!tg3_flag(tp, 5705_PLUS))
13047 tg3_halt_cpu(tp, TX_CPU_BASE);
13048 if (!err)
13049 tg3_nvram_unlock(tp);
13050
13051 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13052 tg3_phy_reset(tp);
13053
13054 if (tg3_test_registers(tp) != 0) {
13055 etest->flags |= ETH_TEST_FL_FAILED;
13056 data[TG3_REGISTER_TEST] = 1;
13057 }
13058
13059 if (tg3_test_memory(tp) != 0) {
13060 etest->flags |= ETH_TEST_FL_FAILED;
13061 data[TG3_MEMORY_TEST] = 1;
13062 }
13063
13064 if (doextlpbk)
13065 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13066
13067 if (tg3_test_loopback(tp, data, doextlpbk))
13068 etest->flags |= ETH_TEST_FL_FAILED;
13069
13070 tg3_full_unlock(tp);
13071
13072 if (tg3_test_interrupt(tp) != 0) {
13073 etest->flags |= ETH_TEST_FL_FAILED;
13074 data[TG3_INTERRUPT_TEST] = 1;
13075 }
13076
13077 tg3_full_lock(tp, 0);
13078
13079 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13080 if (netif_running(dev)) {
13081 tg3_flag_set(tp, INIT_COMPLETE);
13082 err2 = tg3_restart_hw(tp, 1);
13083 if (!err2)
13084 tg3_netif_start(tp);
13085 }
13086
13087 tg3_full_unlock(tp);
13088
13089 if (irq_sync && !err2)
13090 tg3_phy_start(tp);
13091 }
13092 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13093 tg3_power_down(tp);
13094
13095 }
13096
13097 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13098 struct ifreq *ifr, int cmd)
13099 {
13100 struct tg3 *tp = netdev_priv(dev);
13101 struct hwtstamp_config stmpconf;
13102
13103 if (!tg3_flag(tp, PTP_CAPABLE))
13104 return -EINVAL;
13105
13106 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13107 return -EFAULT;
13108
13109 if (stmpconf.flags)
13110 return -EINVAL;
13111
13112 switch (stmpconf.tx_type) {
13113 case HWTSTAMP_TX_ON:
13114 tg3_flag_set(tp, TX_TSTAMP_EN);
13115 break;
13116 case HWTSTAMP_TX_OFF:
13117 tg3_flag_clear(tp, TX_TSTAMP_EN);
13118 break;
13119 default:
13120 return -ERANGE;
13121 }
13122
13123 switch (stmpconf.rx_filter) {
13124 case HWTSTAMP_FILTER_NONE:
13125 tp->rxptpctl = 0;
13126 break;
13127 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13128 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13129 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13130 break;
13131 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13132 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13133 TG3_RX_PTP_CTL_SYNC_EVNT;
13134 break;
13135 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13136 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13137 TG3_RX_PTP_CTL_DELAY_REQ;
13138 break;
13139 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13140 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13141 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13142 break;
13143 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13144 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13145 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13146 break;
13147 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13148 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13149 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13150 break;
13151 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13152 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13153 TG3_RX_PTP_CTL_SYNC_EVNT;
13154 break;
13155 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13156 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13157 TG3_RX_PTP_CTL_SYNC_EVNT;
13158 break;
13159 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13160 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13161 TG3_RX_PTP_CTL_SYNC_EVNT;
13162 break;
13163 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13164 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13165 TG3_RX_PTP_CTL_DELAY_REQ;
13166 break;
13167 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13168 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13169 TG3_RX_PTP_CTL_DELAY_REQ;
13170 break;
13171 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13172 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13173 TG3_RX_PTP_CTL_DELAY_REQ;
13174 break;
13175 default:
13176 return -ERANGE;
13177 }
13178
13179 if (netif_running(dev) && tp->rxptpctl)
13180 tw32(TG3_RX_PTP_CTL,
13181 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13182
13183 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13184 -EFAULT : 0;
13185 }
13186
13187 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13188 {
13189 struct mii_ioctl_data *data = if_mii(ifr);
13190 struct tg3 *tp = netdev_priv(dev);
13191 int err;
13192
13193 if (tg3_flag(tp, USE_PHYLIB)) {
13194 struct phy_device *phydev;
13195 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13196 return -EAGAIN;
13197 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13198 return phy_mii_ioctl(phydev, ifr, cmd);
13199 }
13200
13201 switch (cmd) {
13202 case SIOCGMIIPHY:
13203 data->phy_id = tp->phy_addr;
13204
13205 /* fallthru */
13206 case SIOCGMIIREG: {
13207 u32 mii_regval;
13208
13209 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13210 break; /* We have no PHY */
13211
13212 if (!netif_running(dev))
13213 return -EAGAIN;
13214
13215 spin_lock_bh(&tp->lock);
13216 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13217 data->reg_num & 0x1f, &mii_regval);
13218 spin_unlock_bh(&tp->lock);
13219
13220 data->val_out = mii_regval;
13221
13222 return err;
13223 }
13224
13225 case SIOCSMIIREG:
13226 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13227 break; /* We have no PHY */
13228
13229 if (!netif_running(dev))
13230 return -EAGAIN;
13231
13232 spin_lock_bh(&tp->lock);
13233 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13234 data->reg_num & 0x1f, data->val_in);
13235 spin_unlock_bh(&tp->lock);
13236
13237 return err;
13238
13239 case SIOCSHWTSTAMP:
13240 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13241
13242 default:
13243 /* do nothing */
13244 break;
13245 }
13246 return -EOPNOTSUPP;
13247 }
13248
13249 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13250 {
13251 struct tg3 *tp = netdev_priv(dev);
13252
13253 memcpy(ec, &tp->coal, sizeof(*ec));
13254 return 0;
13255 }
13256
13257 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13258 {
13259 struct tg3 *tp = netdev_priv(dev);
13260 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13261 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13262
13263 if (!tg3_flag(tp, 5705_PLUS)) {
13264 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13265 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13266 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13267 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13268 }
13269
13270 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13271 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13272 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13273 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13274 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13275 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13276 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13277 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13278 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13279 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13280 return -EINVAL;
13281
13282 /* No rx interrupts will be generated if both are zero */
13283 if ((ec->rx_coalesce_usecs == 0) &&
13284 (ec->rx_max_coalesced_frames == 0))
13285 return -EINVAL;
13286
13287 /* No tx interrupts will be generated if both are zero */
13288 if ((ec->tx_coalesce_usecs == 0) &&
13289 (ec->tx_max_coalesced_frames == 0))
13290 return -EINVAL;
13291
13292 /* Only copy relevant parameters, ignore all others. */
13293 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13294 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13295 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13296 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13297 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13298 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13299 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13300 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13301 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13302
13303 if (netif_running(dev)) {
13304 tg3_full_lock(tp, 0);
13305 __tg3_set_coalesce(tp, &tp->coal);
13306 tg3_full_unlock(tp);
13307 }
13308 return 0;
13309 }
13310
13311 static const struct ethtool_ops tg3_ethtool_ops = {
13312 .get_settings = tg3_get_settings,
13313 .set_settings = tg3_set_settings,
13314 .get_drvinfo = tg3_get_drvinfo,
13315 .get_regs_len = tg3_get_regs_len,
13316 .get_regs = tg3_get_regs,
13317 .get_wol = tg3_get_wol,
13318 .set_wol = tg3_set_wol,
13319 .get_msglevel = tg3_get_msglevel,
13320 .set_msglevel = tg3_set_msglevel,
13321 .nway_reset = tg3_nway_reset,
13322 .get_link = ethtool_op_get_link,
13323 .get_eeprom_len = tg3_get_eeprom_len,
13324 .get_eeprom = tg3_get_eeprom,
13325 .set_eeprom = tg3_set_eeprom,
13326 .get_ringparam = tg3_get_ringparam,
13327 .set_ringparam = tg3_set_ringparam,
13328 .get_pauseparam = tg3_get_pauseparam,
13329 .set_pauseparam = tg3_set_pauseparam,
13330 .self_test = tg3_self_test,
13331 .get_strings = tg3_get_strings,
13332 .set_phys_id = tg3_set_phys_id,
13333 .get_ethtool_stats = tg3_get_ethtool_stats,
13334 .get_coalesce = tg3_get_coalesce,
13335 .set_coalesce = tg3_set_coalesce,
13336 .get_sset_count = tg3_get_sset_count,
13337 .get_rxnfc = tg3_get_rxnfc,
13338 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13339 .get_rxfh_indir = tg3_get_rxfh_indir,
13340 .set_rxfh_indir = tg3_set_rxfh_indir,
13341 .get_channels = tg3_get_channels,
13342 .set_channels = tg3_set_channels,
13343 .get_ts_info = tg3_get_ts_info,
13344 };
13345
13346 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13347 struct rtnl_link_stats64 *stats)
13348 {
13349 struct tg3 *tp = netdev_priv(dev);
13350
13351 spin_lock_bh(&tp->lock);
13352 if (!tp->hw_stats) {
13353 spin_unlock_bh(&tp->lock);
13354 return &tp->net_stats_prev;
13355 }
13356
13357 tg3_get_nstats(tp, stats);
13358 spin_unlock_bh(&tp->lock);
13359
13360 return stats;
13361 }
13362
13363 static void tg3_set_rx_mode(struct net_device *dev)
13364 {
13365 struct tg3 *tp = netdev_priv(dev);
13366
13367 if (!netif_running(dev))
13368 return;
13369
13370 tg3_full_lock(tp, 0);
13371 __tg3_set_rx_mode(dev);
13372 tg3_full_unlock(tp);
13373 }
13374
13375 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13376 int new_mtu)
13377 {
13378 dev->mtu = new_mtu;
13379
13380 if (new_mtu > ETH_DATA_LEN) {
13381 if (tg3_flag(tp, 5780_CLASS)) {
13382 netdev_update_features(dev);
13383 tg3_flag_clear(tp, TSO_CAPABLE);
13384 } else {
13385 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13386 }
13387 } else {
13388 if (tg3_flag(tp, 5780_CLASS)) {
13389 tg3_flag_set(tp, TSO_CAPABLE);
13390 netdev_update_features(dev);
13391 }
13392 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13393 }
13394 }
13395
13396 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13397 {
13398 struct tg3 *tp = netdev_priv(dev);
13399 int err, reset_phy = 0;
13400
13401 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13402 return -EINVAL;
13403
13404 if (!netif_running(dev)) {
13405 		/* We'll just catch it later when the
13406 		 * device is brought up.
13407 		 */
13408 tg3_set_mtu(dev, tp, new_mtu);
13409 return 0;
13410 }
13411
13412 tg3_phy_stop(tp);
13413
13414 tg3_netif_stop(tp);
13415
13416 tg3_full_lock(tp, 1);
13417
13418 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13419
13420 tg3_set_mtu(dev, tp, new_mtu);
13421
13422 	/* Reset the PHY, otherwise the read DMA engine will be left in
13423 	 * a mode that breaks all DMA requests up into 256-byte chunks.
13424 	 */
13425 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13426 reset_phy = 1;
13427
13428 err = tg3_restart_hw(tp, reset_phy);
13429
13430 if (!err)
13431 tg3_netif_start(tp);
13432
13433 tg3_full_unlock(tp);
13434
13435 if (!err)
13436 tg3_phy_start(tp);
13437
13438 return err;
13439 }
13440
13441 static const struct net_device_ops tg3_netdev_ops = {
13442 .ndo_open = tg3_open,
13443 .ndo_stop = tg3_close,
13444 .ndo_start_xmit = tg3_start_xmit,
13445 .ndo_get_stats64 = tg3_get_stats64,
13446 .ndo_validate_addr = eth_validate_addr,
13447 .ndo_set_rx_mode = tg3_set_rx_mode,
13448 .ndo_set_mac_address = tg3_set_mac_addr,
13449 .ndo_do_ioctl = tg3_ioctl,
13450 .ndo_tx_timeout = tg3_tx_timeout,
13451 .ndo_change_mtu = tg3_change_mtu,
13452 .ndo_fix_features = tg3_fix_features,
13453 .ndo_set_features = tg3_set_features,
13454 #ifdef CONFIG_NET_POLL_CONTROLLER
13455 .ndo_poll_controller = tg3_poll_controller,
13456 #endif
13457 };
13458
13459 static void tg3_get_eeprom_size(struct tg3 *tp)
13460 {
13461 u32 cursize, val, magic;
13462
13463 tp->nvram_size = EEPROM_CHIP_SIZE;
13464
13465 if (tg3_nvram_read(tp, 0, &magic) != 0)
13466 return;
13467
13468 if ((magic != TG3_EEPROM_MAGIC) &&
13469 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13470 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13471 return;
13472
13473 /*
13474 * Size the chip by reading offsets at increasing powers of two.
13475 * When we encounter our validation signature, we know the addressing
13476 * has wrapped around, and thus have our chip size.
13477 */
13478 cursize = 0x10;
13479
13480 while (cursize < tp->nvram_size) {
13481 if (tg3_nvram_read(tp, cursize, &val) != 0)
13482 return;
13483
13484 if (val == magic)
13485 break;
13486
13487 cursize <<= 1;
13488 }
13489
13490 tp->nvram_size = cursize;
13491 }
13492
13493 static void tg3_get_nvram_size(struct tg3 *tp)
13494 {
13495 u32 val;
13496
13497 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13498 return;
13499
13500 /* Selfboot format */
13501 if (val != TG3_EEPROM_MAGIC) {
13502 tg3_get_eeprom_size(tp);
13503 return;
13504 }
13505
13506 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13507 if (val != 0) {
13508 /* This is confusing. We want to operate on the
13509 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13510 * call will read from NVRAM and byteswap the data
13511 * according to the byteswapping settings for all
13512 * other register accesses. This ensures the data we
13513 * want will always reside in the lower 16-bits.
13514 * However, the data in NVRAM is in LE format, which
13515 * means the data from the NVRAM read will always be
13516 * opposite the endianness of the CPU. The 16-bit
13517 * byteswap then brings the data to CPU endianness.
13518 */
13519 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13520 return;
13521 }
13522 }
13523 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13524 }
13525
13526 static void tg3_get_nvram_info(struct tg3 *tp)
13527 {
13528 u32 nvcfg1;
13529
13530 nvcfg1 = tr32(NVRAM_CFG1);
13531 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13532 tg3_flag_set(tp, FLASH);
13533 } else {
13534 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13535 tw32(NVRAM_CFG1, nvcfg1);
13536 }
13537
13538 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13539 tg3_flag(tp, 5780_CLASS)) {
13540 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13541 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13542 tp->nvram_jedecnum = JEDEC_ATMEL;
13543 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13544 tg3_flag_set(tp, NVRAM_BUFFERED);
13545 break;
13546 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13547 tp->nvram_jedecnum = JEDEC_ATMEL;
13548 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13549 break;
13550 case FLASH_VENDOR_ATMEL_EEPROM:
13551 tp->nvram_jedecnum = JEDEC_ATMEL;
13552 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13553 tg3_flag_set(tp, NVRAM_BUFFERED);
13554 break;
13555 case FLASH_VENDOR_ST:
13556 tp->nvram_jedecnum = JEDEC_ST;
13557 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13558 tg3_flag_set(tp, NVRAM_BUFFERED);
13559 break;
13560 case FLASH_VENDOR_SAIFUN:
13561 tp->nvram_jedecnum = JEDEC_SAIFUN;
13562 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13563 break;
13564 case FLASH_VENDOR_SST_SMALL:
13565 case FLASH_VENDOR_SST_LARGE:
13566 tp->nvram_jedecnum = JEDEC_SST;
13567 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13568 break;
13569 }
13570 } else {
13571 tp->nvram_jedecnum = JEDEC_ATMEL;
13572 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13573 tg3_flag_set(tp, NVRAM_BUFFERED);
13574 }
13575 }
13576
13577 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13578 {
13579 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13580 case FLASH_5752PAGE_SIZE_256:
13581 tp->nvram_pagesize = 256;
13582 break;
13583 case FLASH_5752PAGE_SIZE_512:
13584 tp->nvram_pagesize = 512;
13585 break;
13586 case FLASH_5752PAGE_SIZE_1K:
13587 tp->nvram_pagesize = 1024;
13588 break;
13589 case FLASH_5752PAGE_SIZE_2K:
13590 tp->nvram_pagesize = 2048;
13591 break;
13592 case FLASH_5752PAGE_SIZE_4K:
13593 tp->nvram_pagesize = 4096;
13594 break;
13595 case FLASH_5752PAGE_SIZE_264:
13596 tp->nvram_pagesize = 264;
13597 break;
13598 case FLASH_5752PAGE_SIZE_528:
13599 tp->nvram_pagesize = 528;
13600 break;
13601 }
13602 }
13603
13604 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13605 {
13606 u32 nvcfg1;
13607
13608 nvcfg1 = tr32(NVRAM_CFG1);
13609
13610 /* NVRAM protection for TPM */
13611 if (nvcfg1 & (1 << 27))
13612 tg3_flag_set(tp, PROTECTED_NVRAM);
13613
13614 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13615 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13616 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13617 tp->nvram_jedecnum = JEDEC_ATMEL;
13618 tg3_flag_set(tp, NVRAM_BUFFERED);
13619 break;
13620 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13621 tp->nvram_jedecnum = JEDEC_ATMEL;
13622 tg3_flag_set(tp, NVRAM_BUFFERED);
13623 tg3_flag_set(tp, FLASH);
13624 break;
13625 case FLASH_5752VENDOR_ST_M45PE10:
13626 case FLASH_5752VENDOR_ST_M45PE20:
13627 case FLASH_5752VENDOR_ST_M45PE40:
13628 tp->nvram_jedecnum = JEDEC_ST;
13629 tg3_flag_set(tp, NVRAM_BUFFERED);
13630 tg3_flag_set(tp, FLASH);
13631 break;
13632 }
13633
13634 if (tg3_flag(tp, FLASH)) {
13635 tg3_nvram_get_pagesize(tp, nvcfg1);
13636 } else {
13637 /* For eeprom, set pagesize to maximum eeprom size */
13638 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13639
13640 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13641 tw32(NVRAM_CFG1, nvcfg1);
13642 }
13643 }
13644
13645 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13646 {
13647 u32 nvcfg1, protect = 0;
13648
13649 nvcfg1 = tr32(NVRAM_CFG1);
13650
13651 /* NVRAM protection for TPM */
13652 if (nvcfg1 & (1 << 27)) {
13653 tg3_flag_set(tp, PROTECTED_NVRAM);
13654 protect = 1;
13655 }
13656
13657 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13658 switch (nvcfg1) {
13659 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13660 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13661 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13662 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13663 tp->nvram_jedecnum = JEDEC_ATMEL;
13664 tg3_flag_set(tp, NVRAM_BUFFERED);
13665 tg3_flag_set(tp, FLASH);
13666 tp->nvram_pagesize = 264;
13667 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13668 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13669 tp->nvram_size = (protect ? 0x3e200 :
13670 TG3_NVRAM_SIZE_512KB);
13671 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13672 tp->nvram_size = (protect ? 0x1f200 :
13673 TG3_NVRAM_SIZE_256KB);
13674 else
13675 tp->nvram_size = (protect ? 0x1f200 :
13676 TG3_NVRAM_SIZE_128KB);
13677 break;
13678 case FLASH_5752VENDOR_ST_M45PE10:
13679 case FLASH_5752VENDOR_ST_M45PE20:
13680 case FLASH_5752VENDOR_ST_M45PE40:
13681 tp->nvram_jedecnum = JEDEC_ST;
13682 tg3_flag_set(tp, NVRAM_BUFFERED);
13683 tg3_flag_set(tp, FLASH);
13684 tp->nvram_pagesize = 256;
13685 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13686 tp->nvram_size = (protect ?
13687 TG3_NVRAM_SIZE_64KB :
13688 TG3_NVRAM_SIZE_128KB);
13689 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13690 tp->nvram_size = (protect ?
13691 TG3_NVRAM_SIZE_64KB :
13692 TG3_NVRAM_SIZE_256KB);
13693 else
13694 tp->nvram_size = (protect ?
13695 TG3_NVRAM_SIZE_128KB :
13696 TG3_NVRAM_SIZE_512KB);
13697 break;
13698 }
13699 }
13700
13701 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13702 {
13703 u32 nvcfg1;
13704
13705 nvcfg1 = tr32(NVRAM_CFG1);
13706
13707 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13708 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13709 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13710 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13711 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13712 tp->nvram_jedecnum = JEDEC_ATMEL;
13713 tg3_flag_set(tp, NVRAM_BUFFERED);
13714 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13715
13716 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13717 tw32(NVRAM_CFG1, nvcfg1);
13718 break;
13719 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13720 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13721 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13722 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13723 tp->nvram_jedecnum = JEDEC_ATMEL;
13724 tg3_flag_set(tp, NVRAM_BUFFERED);
13725 tg3_flag_set(tp, FLASH);
13726 tp->nvram_pagesize = 264;
13727 break;
13728 case FLASH_5752VENDOR_ST_M45PE10:
13729 case FLASH_5752VENDOR_ST_M45PE20:
13730 case FLASH_5752VENDOR_ST_M45PE40:
13731 tp->nvram_jedecnum = JEDEC_ST;
13732 tg3_flag_set(tp, NVRAM_BUFFERED);
13733 tg3_flag_set(tp, FLASH);
13734 tp->nvram_pagesize = 256;
13735 break;
13736 }
13737 }
13738
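/* Like the 5755 variant, but protected 5761 parts report their usable
 * size through the NVRAM_ADDR_LOCKOUT register instead of a hardcoded
 * per-part value.
 */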
13739 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13740 {
13741 u32 nvcfg1, protect = 0;
13742
13743 nvcfg1 = tr32(NVRAM_CFG1);
13744
13745 /* NVRAM protection for TPM */
13746 if (nvcfg1 & (1 << 27)) {
13747 tg3_flag_set(tp, PROTECTED_NVRAM);
13748 protect = 1;
13749 }
13750
13751 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13752 switch (nvcfg1) {
13753 case FLASH_5761VENDOR_ATMEL_ADB021D:
13754 case FLASH_5761VENDOR_ATMEL_ADB041D:
13755 case FLASH_5761VENDOR_ATMEL_ADB081D:
13756 case FLASH_5761VENDOR_ATMEL_ADB161D:
13757 case FLASH_5761VENDOR_ATMEL_MDB021D:
13758 case FLASH_5761VENDOR_ATMEL_MDB041D:
13759 case FLASH_5761VENDOR_ATMEL_MDB081D:
13760 case FLASH_5761VENDOR_ATMEL_MDB161D:
13761 tp->nvram_jedecnum = JEDEC_ATMEL;
13762 tg3_flag_set(tp, NVRAM_BUFFERED);
13763 tg3_flag_set(tp, FLASH);
13764 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13765 tp->nvram_pagesize = 256;
13766 break;
13767 case FLASH_5761VENDOR_ST_A_M45PE20:
13768 case FLASH_5761VENDOR_ST_A_M45PE40:
13769 case FLASH_5761VENDOR_ST_A_M45PE80:
13770 case FLASH_5761VENDOR_ST_A_M45PE16:
13771 case FLASH_5761VENDOR_ST_M_M45PE20:
13772 case FLASH_5761VENDOR_ST_M_M45PE40:
13773 case FLASH_5761VENDOR_ST_M_M45PE80:
13774 case FLASH_5761VENDOR_ST_M_M45PE16:
13775 tp->nvram_jedecnum = JEDEC_ST;
13776 tg3_flag_set(tp, NVRAM_BUFFERED);
13777 tg3_flag_set(tp, FLASH);
13778 tp->nvram_pagesize = 256;
13779 break;
13780 }
13781
13782 if (protect) {
13783 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13784 } else {
13785 switch (nvcfg1) {
13786 case FLASH_5761VENDOR_ATMEL_ADB161D:
13787 case FLASH_5761VENDOR_ATMEL_MDB161D:
13788 case FLASH_5761VENDOR_ST_A_M45PE16:
13789 case FLASH_5761VENDOR_ST_M_M45PE16:
13790 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13791 break;
13792 case FLASH_5761VENDOR_ATMEL_ADB081D:
13793 case FLASH_5761VENDOR_ATMEL_MDB081D:
13794 case FLASH_5761VENDOR_ST_A_M45PE80:
13795 case FLASH_5761VENDOR_ST_M_M45PE80:
13796 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13797 break;
13798 case FLASH_5761VENDOR_ATMEL_ADB041D:
13799 case FLASH_5761VENDOR_ATMEL_MDB041D:
13800 case FLASH_5761VENDOR_ST_A_M45PE40:
13801 case FLASH_5761VENDOR_ST_M_M45PE40:
13802 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13803 break;
13804 case FLASH_5761VENDOR_ATMEL_ADB021D:
13805 case FLASH_5761VENDOR_ATMEL_MDB021D:
13806 case FLASH_5761VENDOR_ST_A_M45PE20:
13807 case FLASH_5761VENDOR_ST_M_M45PE20:
13808 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13809 break;
13810 }
13811 }
13812 }
13813
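/* The 5906 always carries a buffered Atmel EEPROM, so there is
 * nothing to decode from NVRAM_CFG1.
 */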
13814 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13815 {
13816 tp->nvram_jedecnum = JEDEC_ATMEL;
13817 tg3_flag_set(tp, NVRAM_BUFFERED);
13818 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13819 }
13820
13821 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13822 {
13823 u32 nvcfg1;
13824
13825 nvcfg1 = tr32(NVRAM_CFG1);
13826
13827 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13828 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13829 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13830 tp->nvram_jedecnum = JEDEC_ATMEL;
13831 tg3_flag_set(tp, NVRAM_BUFFERED);
13832 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13833
13834 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13835 tw32(NVRAM_CFG1, nvcfg1);
13836 return;
13837 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13838 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13839 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13840 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13841 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13842 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13843 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13844 tp->nvram_jedecnum = JEDEC_ATMEL;
13845 tg3_flag_set(tp, NVRAM_BUFFERED);
13846 tg3_flag_set(tp, FLASH);
13847
13848 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13849 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13850 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13851 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13852 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13853 break;
13854 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13855 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13856 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13857 break;
13858 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13859 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13860 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13861 break;
13862 }
13863 break;
13864 case FLASH_5752VENDOR_ST_M45PE10:
13865 case FLASH_5752VENDOR_ST_M45PE20:
13866 case FLASH_5752VENDOR_ST_M45PE40:
13867 tp->nvram_jedecnum = JEDEC_ST;
13868 tg3_flag_set(tp, NVRAM_BUFFERED);
13869 tg3_flag_set(tp, FLASH);
13870
13871 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13872 case FLASH_5752VENDOR_ST_M45PE10:
13873 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13874 break;
13875 case FLASH_5752VENDOR_ST_M45PE20:
13876 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13877 break;
13878 case FLASH_5752VENDOR_ST_M45PE40:
13879 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13880 break;
13881 }
13882 break;
13883 default:
13884 tg3_flag_set(tp, NO_NVRAM);
13885 return;
13886 }
13887
13888 tg3_nvram_get_pagesize(tp, nvcfg1);
13889 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13890 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13891 }
13892
13893
13894 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13895 {
13896 u32 nvcfg1;
13897
13898 nvcfg1 = tr32(NVRAM_CFG1);
13899
13900 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13901 case FLASH_5717VENDOR_ATMEL_EEPROM:
13902 case FLASH_5717VENDOR_MICRO_EEPROM:
13903 tp->nvram_jedecnum = JEDEC_ATMEL;
13904 tg3_flag_set(tp, NVRAM_BUFFERED);
13905 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13906
13907 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13908 tw32(NVRAM_CFG1, nvcfg1);
13909 return;
13910 case FLASH_5717VENDOR_ATMEL_MDB011D:
13911 case FLASH_5717VENDOR_ATMEL_ADB011B:
13912 case FLASH_5717VENDOR_ATMEL_ADB011D:
13913 case FLASH_5717VENDOR_ATMEL_MDB021D:
13914 case FLASH_5717VENDOR_ATMEL_ADB021B:
13915 case FLASH_5717VENDOR_ATMEL_ADB021D:
13916 case FLASH_5717VENDOR_ATMEL_45USPT:
13917 tp->nvram_jedecnum = JEDEC_ATMEL;
13918 tg3_flag_set(tp, NVRAM_BUFFERED);
13919 tg3_flag_set(tp, FLASH);
13920
13921 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13922 case FLASH_5717VENDOR_ATMEL_MDB021D:
13923 			/* Detect size with tg3_get_nvram_size() */
13924 break;
13925 case FLASH_5717VENDOR_ATMEL_ADB021B:
13926 case FLASH_5717VENDOR_ATMEL_ADB021D:
13927 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13928 break;
13929 default:
13930 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13931 break;
13932 }
13933 break;
13934 case FLASH_5717VENDOR_ST_M_M25PE10:
13935 case FLASH_5717VENDOR_ST_A_M25PE10:
13936 case FLASH_5717VENDOR_ST_M_M45PE10:
13937 case FLASH_5717VENDOR_ST_A_M45PE10:
13938 case FLASH_5717VENDOR_ST_M_M25PE20:
13939 case FLASH_5717VENDOR_ST_A_M25PE20:
13940 case FLASH_5717VENDOR_ST_M_M45PE20:
13941 case FLASH_5717VENDOR_ST_A_M45PE20:
13942 case FLASH_5717VENDOR_ST_25USPT:
13943 case FLASH_5717VENDOR_ST_45USPT:
13944 tp->nvram_jedecnum = JEDEC_ST;
13945 tg3_flag_set(tp, NVRAM_BUFFERED);
13946 tg3_flag_set(tp, FLASH);
13947
13948 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13949 case FLASH_5717VENDOR_ST_M_M25PE20:
13950 case FLASH_5717VENDOR_ST_M_M45PE20:
13951 			/* Detect size with tg3_get_nvram_size() */
13952 break;
13953 case FLASH_5717VENDOR_ST_A_M25PE20:
13954 case FLASH_5717VENDOR_ST_A_M45PE20:
13955 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13956 break;
13957 default:
13958 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13959 break;
13960 }
13961 break;
13962 default:
13963 tg3_flag_set(tp, NO_NVRAM);
13964 return;
13965 }
13966
13967 tg3_nvram_get_pagesize(tp, nvcfg1);
13968 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13969 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13970 }
13971
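/* Covers both the 5720 and the 5762.  The 5762 reuses the 5720 tables
 * after remapping its EEPROM strap values and additionally validates
 * the NVRAM magic before trusting the device contents.
 */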
13972 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13973 {
13974 u32 nvcfg1, nvmpinstrp;
13975
13976 nvcfg1 = tr32(NVRAM_CFG1);
13977 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13978
13979 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13980 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13981 tg3_flag_set(tp, NO_NVRAM);
13982 return;
13983 }
13984
13985 switch (nvmpinstrp) {
13986 case FLASH_5762_EEPROM_HD:
13987 nvmpinstrp = FLASH_5720_EEPROM_HD;
13988 break;
13989 case FLASH_5762_EEPROM_LD:
13990 nvmpinstrp = FLASH_5720_EEPROM_LD;
13991 break;
13992 }
13993 }
13994
13995 switch (nvmpinstrp) {
13996 case FLASH_5720_EEPROM_HD:
13997 case FLASH_5720_EEPROM_LD:
13998 tp->nvram_jedecnum = JEDEC_ATMEL;
13999 tg3_flag_set(tp, NVRAM_BUFFERED);
14000
14001 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14002 tw32(NVRAM_CFG1, nvcfg1);
14003 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14004 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14005 else
14006 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14007 return;
14008 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14009 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14010 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14011 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14012 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14013 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14014 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14015 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14016 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14017 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14018 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14019 case FLASH_5720VENDOR_ATMEL_45USPT:
14020 tp->nvram_jedecnum = JEDEC_ATMEL;
14021 tg3_flag_set(tp, NVRAM_BUFFERED);
14022 tg3_flag_set(tp, FLASH);
14023
14024 switch (nvmpinstrp) {
14025 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14026 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14027 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14028 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14029 break;
14030 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14031 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14032 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14033 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14034 break;
14035 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14036 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14037 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14038 break;
14039 default:
14040 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14041 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14042 break;
14043 }
14044 break;
14045 case FLASH_5720VENDOR_M_ST_M25PE10:
14046 case FLASH_5720VENDOR_M_ST_M45PE10:
14047 case FLASH_5720VENDOR_A_ST_M25PE10:
14048 case FLASH_5720VENDOR_A_ST_M45PE10:
14049 case FLASH_5720VENDOR_M_ST_M25PE20:
14050 case FLASH_5720VENDOR_M_ST_M45PE20:
14051 case FLASH_5720VENDOR_A_ST_M25PE20:
14052 case FLASH_5720VENDOR_A_ST_M45PE20:
14053 case FLASH_5720VENDOR_M_ST_M25PE40:
14054 case FLASH_5720VENDOR_M_ST_M45PE40:
14055 case FLASH_5720VENDOR_A_ST_M25PE40:
14056 case FLASH_5720VENDOR_A_ST_M45PE40:
14057 case FLASH_5720VENDOR_M_ST_M25PE80:
14058 case FLASH_5720VENDOR_M_ST_M45PE80:
14059 case FLASH_5720VENDOR_A_ST_M25PE80:
14060 case FLASH_5720VENDOR_A_ST_M45PE80:
14061 case FLASH_5720VENDOR_ST_25USPT:
14062 case FLASH_5720VENDOR_ST_45USPT:
14063 tp->nvram_jedecnum = JEDEC_ST;
14064 tg3_flag_set(tp, NVRAM_BUFFERED);
14065 tg3_flag_set(tp, FLASH);
14066
14067 switch (nvmpinstrp) {
14068 case FLASH_5720VENDOR_M_ST_M25PE20:
14069 case FLASH_5720VENDOR_M_ST_M45PE20:
14070 case FLASH_5720VENDOR_A_ST_M25PE20:
14071 case FLASH_5720VENDOR_A_ST_M45PE20:
14072 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14073 break;
14074 case FLASH_5720VENDOR_M_ST_M25PE40:
14075 case FLASH_5720VENDOR_M_ST_M45PE40:
14076 case FLASH_5720VENDOR_A_ST_M25PE40:
14077 case FLASH_5720VENDOR_A_ST_M45PE40:
14078 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14079 break;
14080 case FLASH_5720VENDOR_M_ST_M25PE80:
14081 case FLASH_5720VENDOR_M_ST_M45PE80:
14082 case FLASH_5720VENDOR_A_ST_M25PE80:
14083 case FLASH_5720VENDOR_A_ST_M45PE80:
14084 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14085 break;
14086 default:
14087 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14088 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14089 break;
14090 }
14091 break;
14092 default:
14093 tg3_flag_set(tp, NO_NVRAM);
14094 return;
14095 }
14096
14097 tg3_nvram_get_pagesize(tp, nvcfg1);
14098 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14099 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14100
14101 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14102 u32 val;
14103
14104 if (tg3_nvram_read(tp, 0, &val))
14105 return;
14106
14107 if (val != TG3_EEPROM_MAGIC &&
14108 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14109 tg3_flag_set(tp, NO_NVRAM);
14110 }
14111 }
14112
14113 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14114 static void tg3_nvram_init(struct tg3 *tp)
14115 {
14116 if (tg3_flag(tp, IS_SSB_CORE)) {
14117 		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14118 tg3_flag_clear(tp, NVRAM);
14119 tg3_flag_clear(tp, NVRAM_BUFFERED);
14120 tg3_flag_set(tp, NO_NVRAM);
14121 return;
14122 }
14123
14124 tw32_f(GRC_EEPROM_ADDR,
14125 (EEPROM_ADDR_FSM_RESET |
14126 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14127 EEPROM_ADDR_CLKPERD_SHIFT)));
14128
14129 msleep(1);
14130
14131 	/* Enable serial EEPROM accesses. */
14132 tw32_f(GRC_LOCAL_CTRL,
14133 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14134 udelay(100);
14135
14136 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14137 tg3_asic_rev(tp) != ASIC_REV_5701) {
14138 tg3_flag_set(tp, NVRAM);
14139
14140 if (tg3_nvram_lock(tp)) {
14141 netdev_warn(tp->dev,
14142 "Cannot get nvram lock, %s failed\n",
14143 __func__);
14144 return;
14145 }
14146 tg3_enable_nvram_access(tp);
14147
14148 tp->nvram_size = 0;
14149
14150 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14151 tg3_get_5752_nvram_info(tp);
14152 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14153 tg3_get_5755_nvram_info(tp);
14154 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14155 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14156 tg3_asic_rev(tp) == ASIC_REV_5785)
14157 tg3_get_5787_nvram_info(tp);
14158 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14159 tg3_get_5761_nvram_info(tp);
14160 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14161 tg3_get_5906_nvram_info(tp);
14162 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14163 tg3_flag(tp, 57765_CLASS))
14164 tg3_get_57780_nvram_info(tp);
14165 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14166 tg3_asic_rev(tp) == ASIC_REV_5719)
14167 tg3_get_5717_nvram_info(tp);
14168 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14169 tg3_asic_rev(tp) == ASIC_REV_5762)
14170 tg3_get_5720_nvram_info(tp);
14171 else
14172 tg3_get_nvram_info(tp);
14173
14174 if (tp->nvram_size == 0)
14175 tg3_get_nvram_size(tp);
14176
14177 tg3_disable_nvram_access(tp);
14178 tg3_nvram_unlock(tp);
14179
14180 } else {
14181 tg3_flag_clear(tp, NVRAM);
14182 tg3_flag_clear(tp, NVRAM_BUFFERED);
14183
14184 tg3_get_eeprom_size(tp);
14185 }
14186 }
14187
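/* Table mapping PCI subsystem IDs to PHY IDs, used as a last-resort
 * lookup when neither the MII registers nor the EEPROM yield a usable
 * PHY ID (see tg3_phy_probe()).
 */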
14188 struct subsys_tbl_ent {
14189 u16 subsys_vendor, subsys_devid;
14190 u32 phy_id;
14191 };
14192
14193 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14194 /* Broadcom boards. */
14195 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14196 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14197 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14198 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14199 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14200 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14201 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14202 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14203 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14204 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14205 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14206 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14207 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14208 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14209 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14210 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14211 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14212 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14213 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14214 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14215 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14216 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14217
14218 /* 3com boards. */
14219 { TG3PCI_SUBVENDOR_ID_3COM,
14220 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14221 { TG3PCI_SUBVENDOR_ID_3COM,
14222 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14223 { TG3PCI_SUBVENDOR_ID_3COM,
14224 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14225 { TG3PCI_SUBVENDOR_ID_3COM,
14226 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14227 { TG3PCI_SUBVENDOR_ID_3COM,
14228 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14229
14230 /* DELL boards. */
14231 { TG3PCI_SUBVENDOR_ID_DELL,
14232 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14233 { TG3PCI_SUBVENDOR_ID_DELL,
14234 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14235 { TG3PCI_SUBVENDOR_ID_DELL,
14236 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14237 { TG3PCI_SUBVENDOR_ID_DELL,
14238 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14239
14240 /* Compaq boards. */
14241 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14242 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14243 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14244 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14245 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14246 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14247 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14248 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14249 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14250 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14251
14252 /* IBM boards. */
14253 { TG3PCI_SUBVENDOR_ID_IBM,
14254 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14255 };
14256
14257 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14258 {
14259 int i;
14260
14261 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14262 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14263 tp->pdev->subsystem_vendor) &&
14264 (subsys_id_to_phy_id[i].subsys_devid ==
14265 tp->pdev->subsystem_device))
14266 return &subsys_id_to_phy_id[i];
14267 }
14268 return NULL;
14269 }
14270
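/* Pull the board configuration out of NIC SRAM shared memory: PHY ID,
 * LED mode, WOL capability, and the ASF/APE management flags.
 * Conservative defaults are used when no valid signature is found.
 */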
14271 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14272 {
14273 u32 val;
14274
14275 tp->phy_id = TG3_PHY_ID_INVALID;
14276 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14277
14278 /* Assume an onboard device and WOL capable by default. */
14279 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14280 tg3_flag_set(tp, WOL_CAP);
14281
14282 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14283 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14284 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14285 tg3_flag_set(tp, IS_NIC);
14286 }
14287 val = tr32(VCPU_CFGSHDW);
14288 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14289 tg3_flag_set(tp, ASPM_WORKAROUND);
14290 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14291 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14292 tg3_flag_set(tp, WOL_ENABLE);
14293 device_set_wakeup_enable(&tp->pdev->dev, true);
14294 }
14295 goto done;
14296 }
14297
14298 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14299 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14300 u32 nic_cfg, led_cfg;
14301 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14302 int eeprom_phy_serdes = 0;
14303
14304 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14305 tp->nic_sram_data_cfg = nic_cfg;
14306
14307 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14308 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14309 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14310 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14311 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14312 (ver > 0) && (ver < 0x100))
14313 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14314
14315 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14316 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14317
14318 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14319 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14320 eeprom_phy_serdes = 1;
14321
14322 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14323 if (nic_phy_id != 0) {
14324 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14325 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14326
14327 eeprom_phy_id = (id1 >> 16) << 10;
14328 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14329 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14330 } else
14331 eeprom_phy_id = 0;
14332
14333 tp->phy_id = eeprom_phy_id;
14334 if (eeprom_phy_serdes) {
14335 if (!tg3_flag(tp, 5705_PLUS))
14336 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14337 else
14338 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14339 }
14340
14341 if (tg3_flag(tp, 5750_PLUS))
14342 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14343 SHASTA_EXT_LED_MODE_MASK);
14344 else
14345 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14346
14347 switch (led_cfg) {
14348 default:
14349 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14350 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14351 break;
14352
14353 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14354 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14355 break;
14356
14357 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14358 tp->led_ctrl = LED_CTRL_MODE_MAC;
14359
14360 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14361 			 * read, as happens with some older 5700/5701 bootcode.
14362 */
14363 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14364 tg3_asic_rev(tp) == ASIC_REV_5701)
14365 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14366
14367 break;
14368
14369 case SHASTA_EXT_LED_SHARED:
14370 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14371 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14372 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14373 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14374 LED_CTRL_MODE_PHY_2);
14375 break;
14376
14377 case SHASTA_EXT_LED_MAC:
14378 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14379 break;
14380
14381 case SHASTA_EXT_LED_COMBO:
14382 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14383 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14384 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14385 LED_CTRL_MODE_PHY_2);
14386 break;
14387
14388 }
14389
14390 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14391 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14392 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14393 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14394
14395 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14396 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14397
14398 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14399 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14400 if ((tp->pdev->subsystem_vendor ==
14401 PCI_VENDOR_ID_ARIMA) &&
14402 (tp->pdev->subsystem_device == 0x205a ||
14403 tp->pdev->subsystem_device == 0x2063))
14404 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14405 } else {
14406 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14407 tg3_flag_set(tp, IS_NIC);
14408 }
14409
14410 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14411 tg3_flag_set(tp, ENABLE_ASF);
14412 if (tg3_flag(tp, 5750_PLUS))
14413 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14414 }
14415
14416 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14417 tg3_flag(tp, 5750_PLUS))
14418 tg3_flag_set(tp, ENABLE_APE);
14419
14420 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14421 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14422 tg3_flag_clear(tp, WOL_CAP);
14423
14424 if (tg3_flag(tp, WOL_CAP) &&
14425 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14426 tg3_flag_set(tp, WOL_ENABLE);
14427 device_set_wakeup_enable(&tp->pdev->dev, true);
14428 }
14429
14430 if (cfg2 & (1 << 17))
14431 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14432
14433 		/* Serdes signal pre-emphasis in register 0x590 is set
14434 		 * by the bootcode if bit 18 is set. */
14435 if (cfg2 & (1 << 18))
14436 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14437
14438 if ((tg3_flag(tp, 57765_PLUS) ||
14439 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14440 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14441 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14442 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14443
14444 if (tg3_flag(tp, PCI_EXPRESS) &&
14445 tg3_asic_rev(tp) != ASIC_REV_5785 &&
14446 !tg3_flag(tp, 57765_PLUS)) {
14447 u32 cfg3;
14448
14449 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14450 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14451 tg3_flag_set(tp, ASPM_WORKAROUND);
14452 }
14453
14454 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14455 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14456 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14457 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14458 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14459 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14460 }
14461 done:
14462 if (tg3_flag(tp, WOL_CAP))
14463 device_set_wakeup_enable(&tp->pdev->dev,
14464 tg3_flag(tp, WOL_ENABLE));
14465 else
14466 device_set_wakeup_capable(&tp->pdev->dev, false);
14467 }
14468
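/* Read one 32-bit word from the APE OTP region, polling the status
 * register for up to 1 ms.  The NVRAM lock serializes this against
 * other OTP/NVRAM users.
 */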
14469 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14470 {
14471 int i, err;
14472 u32 val2, off = offset * 8;
14473
14474 err = tg3_nvram_lock(tp);
14475 if (err)
14476 return err;
14477
14478 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14479 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14480 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14481 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14482 udelay(10);
14483
14484 for (i = 0; i < 100; i++) {
14485 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14486 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14487 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14488 break;
14489 }
14490 udelay(10);
14491 }
14492
14493 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14494
14495 tg3_nvram_unlock(tp);
14496 if (val2 & APE_OTP_STATUS_CMD_DONE)
14497 return 0;
14498
14499 return -EBUSY;
14500 }
14501
14502 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14503 {
14504 int i;
14505 u32 val;
14506
14507 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14508 tw32(OTP_CTRL, cmd);
14509
14510 /* Wait for up to 1 ms for command to execute. */
14511 for (i = 0; i < 100; i++) {
14512 val = tr32(OTP_STATUS);
14513 if (val & OTP_STATUS_CMD_DONE)
14514 break;
14515 udelay(10);
14516 }
14517
14518 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14519 }
14520
14521 /* Read the gphy configuration from the OTP region of the chip. The gphy
14522 * configuration is a 32-bit value that straddles the alignment boundary.
14523 * We do two 32-bit reads and then shift and merge the results.
14524 */
14525 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14526 {
14527 u32 bhalf_otp, thalf_otp;
14528
14529 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14530
14531 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14532 return 0;
14533
14534 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14535
14536 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14537 return 0;
14538
14539 thalf_otp = tr32(OTP_READ_DATA);
14540
14541 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14542
14543 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14544 return 0;
14545
14546 bhalf_otp = tr32(OTP_READ_DATA);
14547
14548 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14549 }
14550
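/* Seed the link configuration with everything the PHY can advertise
 * and leave speed/duplex unknown until autonegotiation resolves them.
 */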
14551 static void tg3_phy_init_link_config(struct tg3 *tp)
14552 {
14553 u32 adv = ADVERTISED_Autoneg;
14554
14555 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14556 adv |= ADVERTISED_1000baseT_Half |
14557 ADVERTISED_1000baseT_Full;
14558
14559 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14560 adv |= ADVERTISED_100baseT_Half |
14561 ADVERTISED_100baseT_Full |
14562 ADVERTISED_10baseT_Half |
14563 ADVERTISED_10baseT_Full |
14564 ADVERTISED_TP;
14565 else
14566 adv |= ADVERTISED_FIBRE;
14567
14568 tp->link_config.advertising = adv;
14569 tp->link_config.speed = SPEED_UNKNOWN;
14570 tp->link_config.duplex = DUPLEX_UNKNOWN;
14571 tp->link_config.autoneg = AUTONEG_ENABLE;
14572 tp->link_config.active_speed = SPEED_UNKNOWN;
14573 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14574
14575 tp->old_link = -1;
14576 }
14577
14578 static int tg3_phy_probe(struct tg3 *tp)
14579 {
14580 u32 hw_phy_id_1, hw_phy_id_2;
14581 u32 hw_phy_id, hw_phy_id_masked;
14582 int err;
14583
14584 /* flow control autonegotiation is default behavior */
14585 tg3_flag_set(tp, PAUSE_AUTONEG);
14586 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14587
14588 if (tg3_flag(tp, ENABLE_APE)) {
14589 switch (tp->pci_fn) {
14590 case 0:
14591 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14592 break;
14593 case 1:
14594 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14595 break;
14596 case 2:
14597 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14598 break;
14599 case 3:
14600 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14601 break;
14602 }
14603 }
14604
14605 if (tg3_flag(tp, USE_PHYLIB))
14606 return tg3_phy_init(tp);
14607
14608 /* Reading the PHY ID register can conflict with ASF
14609 * firmware access to the PHY hardware.
14610 */
14611 err = 0;
14612 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14613 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14614 } else {
14615 /* Now read the physical PHY_ID from the chip and verify
14616 * that it is sane. If it doesn't look good, we fall back
14617 		 * to the PHY_ID found in the eeprom area and, failing
14618 		 * that, to the hard-coded subsystem-ID table.
14619 */
14620 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14621 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14622
14623 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14624 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14625 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14626
14627 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14628 }
14629
14630 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14631 tp->phy_id = hw_phy_id;
14632 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14633 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14634 else
14635 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14636 } else {
14637 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14638 /* Do nothing, phy ID already set up in
14639 * tg3_get_eeprom_hw_cfg().
14640 */
14641 } else {
14642 struct subsys_tbl_ent *p;
14643
14644 /* No eeprom signature? Try the hardcoded
14645 * subsys device table.
14646 */
14647 p = tg3_lookup_by_subsys(tp);
14648 if (p) {
14649 tp->phy_id = p->phy_id;
14650 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14651 				/* So far we have seen the IDs 0xbc050cd0,
14652 				 * 0xbc050f80 and 0xbc050c30 on devices
14653 				 * connected to a BCM4785, and there are
14654 				 * probably more.  For now, just assume the
14655 				 * phy is supported whenever it is connected
14656 				 * to an SSB core.
14657 */
14658 return -ENODEV;
14659 }
14660
14661 if (!tp->phy_id ||
14662 tp->phy_id == TG3_PHY_ID_BCM8002)
14663 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14664 }
14665 }
14666
14667 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14668 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14669 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14670 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14671 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14672 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14673 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14674 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14675 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14676 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14677
14678 tg3_phy_init_link_config(tp);
14679
14680 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14681 !tg3_flag(tp, ENABLE_APE) &&
14682 !tg3_flag(tp, ENABLE_ASF)) {
14683 u32 bmsr, dummy;
14684
14685 tg3_readphy(tp, MII_BMSR, &bmsr);
14686 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14687 (bmsr & BMSR_LSTATUS))
14688 goto skip_phy_reset;
14689
14690 err = tg3_phy_reset(tp);
14691 if (err)
14692 return err;
14693
14694 tg3_phy_set_wirespeed(tp);
14695
14696 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14697 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14698 tp->link_config.flowctrl);
14699
14700 tg3_writephy(tp, MII_BMCR,
14701 BMCR_ANENABLE | BMCR_ANRESTART);
14702 }
14703 }
14704
14705 skip_phy_reset:
14706 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14707 err = tg3_init_5401phy_dsp(tp);
14708 if (err)
14709 return err;
14710
14711 err = tg3_init_5401phy_dsp(tp);
14712 }
14713
14714 return err;
14715 }
14716
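/* Parse the PCI VPD read-only section: a Dell ("1028") keyword may
 * carry a firmware version, and the part-number keyword names the
 * board.  Without usable VPD, fall back to a name derived from the
 * PCI device ID.
 */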
14717 static void tg3_read_vpd(struct tg3 *tp)
14718 {
14719 u8 *vpd_data;
14720 unsigned int block_end, rosize, len;
14721 u32 vpdlen;
14722 int j, i = 0;
14723
14724 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14725 if (!vpd_data)
14726 goto out_no_vpd;
14727
14728 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14729 if (i < 0)
14730 goto out_not_found;
14731
14732 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14733 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14734 i += PCI_VPD_LRDT_TAG_SIZE;
14735
14736 if (block_end > vpdlen)
14737 goto out_not_found;
14738
14739 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14740 PCI_VPD_RO_KEYWORD_MFR_ID);
14741 if (j > 0) {
14742 len = pci_vpd_info_field_size(&vpd_data[j]);
14743
14744 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14745 if (j + len > block_end || len != 4 ||
14746 memcmp(&vpd_data[j], "1028", 4))
14747 goto partno;
14748
14749 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14750 PCI_VPD_RO_KEYWORD_VENDOR0);
14751 if (j < 0)
14752 goto partno;
14753
14754 len = pci_vpd_info_field_size(&vpd_data[j]);
14755
14756 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14757 if (j + len > block_end)
14758 goto partno;
14759
14760 memcpy(tp->fw_ver, &vpd_data[j], len);
14761 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14762 }
14763
14764 partno:
14765 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14766 PCI_VPD_RO_KEYWORD_PARTNO);
14767 if (i < 0)
14768 goto out_not_found;
14769
14770 len = pci_vpd_info_field_size(&vpd_data[i]);
14771
14772 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14773 if (len > TG3_BPN_SIZE ||
14774 (len + i) > vpdlen)
14775 goto out_not_found;
14776
14777 memcpy(tp->board_part_number, &vpd_data[i], len);
14778
14779 out_not_found:
14780 kfree(vpd_data);
14781 if (tp->board_part_number[0])
14782 return;
14783
14784 out_no_vpd:
14785 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14786 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14787 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14788 strcpy(tp->board_part_number, "BCM5717");
14789 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14790 strcpy(tp->board_part_number, "BCM5718");
14791 else
14792 goto nomatch;
14793 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14794 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14795 strcpy(tp->board_part_number, "BCM57780");
14796 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14797 strcpy(tp->board_part_number, "BCM57760");
14798 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14799 strcpy(tp->board_part_number, "BCM57790");
14800 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14801 strcpy(tp->board_part_number, "BCM57788");
14802 else
14803 goto nomatch;
14804 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14805 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14806 strcpy(tp->board_part_number, "BCM57761");
14807 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14808 strcpy(tp->board_part_number, "BCM57765");
14809 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14810 strcpy(tp->board_part_number, "BCM57781");
14811 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14812 strcpy(tp->board_part_number, "BCM57785");
14813 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14814 strcpy(tp->board_part_number, "BCM57791");
14815 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14816 strcpy(tp->board_part_number, "BCM57795");
14817 else
14818 goto nomatch;
14819 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14820 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14821 strcpy(tp->board_part_number, "BCM57762");
14822 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14823 strcpy(tp->board_part_number, "BCM57766");
14824 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14825 strcpy(tp->board_part_number, "BCM57782");
14826 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14827 strcpy(tp->board_part_number, "BCM57786");
14828 else
14829 goto nomatch;
14830 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14831 strcpy(tp->board_part_number, "BCM95906");
14832 } else {
14833 nomatch:
14834 strcpy(tp->board_part_number, "none");
14835 }
14836 }
14837
14838 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14839 {
14840 u32 val;
14841
14842 if (tg3_nvram_read(tp, offset, &val) ||
14843 (val & 0xfc000000) != 0x0c000000 ||
14844 tg3_nvram_read(tp, offset + 4, &val) ||
14845 val != 0)
14846 return 0;
14847
14848 return 1;
14849 }
14850
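/* Append the bootcode version to tp->fw_ver.  Newer images carry a
 * 16-byte ASCII string inside the image; older ones only provide a
 * packed major/minor word in the NVRAM directory.
 */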
14851 static void tg3_read_bc_ver(struct tg3 *tp)
14852 {
14853 u32 val, offset, start, ver_offset;
14854 int i, dst_off;
14855 bool newver = false;
14856
14857 if (tg3_nvram_read(tp, 0xc, &offset) ||
14858 tg3_nvram_read(tp, 0x4, &start))
14859 return;
14860
14861 offset = tg3_nvram_logical_addr(tp, offset);
14862
14863 if (tg3_nvram_read(tp, offset, &val))
14864 return;
14865
14866 if ((val & 0xfc000000) == 0x0c000000) {
14867 if (tg3_nvram_read(tp, offset + 4, &val))
14868 return;
14869
14870 if (val == 0)
14871 newver = true;
14872 }
14873
14874 dst_off = strlen(tp->fw_ver);
14875
14876 if (newver) {
14877 if (TG3_VER_SIZE - dst_off < 16 ||
14878 tg3_nvram_read(tp, offset + 8, &ver_offset))
14879 return;
14880
14881 offset = offset + ver_offset - start;
14882 for (i = 0; i < 16; i += 4) {
14883 __be32 v;
14884 if (tg3_nvram_read_be32(tp, offset + i, &v))
14885 return;
14886
14887 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14888 }
14889 } else {
14890 u32 major, minor;
14891
14892 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14893 return;
14894
14895 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14896 TG3_NVM_BCVER_MAJSFT;
14897 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14898 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14899 "v%d.%02d", major, minor);
14900 }
14901 }
14902
14903 static void tg3_read_hwsb_ver(struct tg3 *tp)
14904 {
14905 u32 val, major, minor;
14906
14907 /* Use native endian representation */
14908 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14909 return;
14910
14911 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14912 TG3_NVM_HWSB_CFG1_MAJSFT;
14913 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14914 TG3_NVM_HWSB_CFG1_MINSFT;
14915
14916 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14917 }
14918
14919 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14920 {
14921 u32 offset, major, minor, build;
14922
14923 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14924
14925 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14926 return;
14927
14928 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14929 case TG3_EEPROM_SB_REVISION_0:
14930 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14931 break;
14932 case TG3_EEPROM_SB_REVISION_2:
14933 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14934 break;
14935 case TG3_EEPROM_SB_REVISION_3:
14936 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14937 break;
14938 case TG3_EEPROM_SB_REVISION_4:
14939 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14940 break;
14941 case TG3_EEPROM_SB_REVISION_5:
14942 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14943 break;
14944 case TG3_EEPROM_SB_REVISION_6:
14945 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14946 break;
14947 default:
14948 return;
14949 }
14950
14951 if (tg3_nvram_read(tp, offset, &val))
14952 return;
14953
14954 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14955 TG3_EEPROM_SB_EDH_BLD_SHFT;
14956 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14957 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14958 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14959
14960 if (minor > 99 || build > 26)
14961 return;
14962
14963 offset = strlen(tp->fw_ver);
14964 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14965 " v%d.%02d", major, minor);
14966
14967 if (build > 0) {
14968 offset = strlen(tp->fw_ver);
14969 if (offset < TG3_VER_SIZE - 1)
14970 tp->fw_ver[offset] = 'a' + build - 1;
14971 }
14972 }
14973
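/* Locate the ASF initialization entry in the NVRAM directory and, if
 * the firmware image validates, append its version to tp->fw_ver.
 */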
14974 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14975 {
14976 u32 val, offset, start;
14977 int i, vlen;
14978
14979 for (offset = TG3_NVM_DIR_START;
14980 offset < TG3_NVM_DIR_END;
14981 offset += TG3_NVM_DIRENT_SIZE) {
14982 if (tg3_nvram_read(tp, offset, &val))
14983 return;
14984
14985 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14986 break;
14987 }
14988
14989 if (offset == TG3_NVM_DIR_END)
14990 return;
14991
14992 if (!tg3_flag(tp, 5705_PLUS))
14993 start = 0x08000000;
14994 else if (tg3_nvram_read(tp, offset - 4, &start))
14995 return;
14996
14997 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14998 !tg3_fw_img_is_valid(tp, offset) ||
14999 tg3_nvram_read(tp, offset + 8, &val))
15000 return;
15001
15002 offset += val - start;
15003
15004 vlen = strlen(tp->fw_ver);
15005
15006 tp->fw_ver[vlen++] = ',';
15007 tp->fw_ver[vlen++] = ' ';
15008
15009 for (i = 0; i < 4; i++) {
15010 __be32 v;
15011 if (tg3_nvram_read_be32(tp, offset, &v))
15012 return;
15013
15014 offset += sizeof(v);
15015
15016 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15017 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15018 break;
15019 }
15020
15021 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15022 vlen += sizeof(v);
15023 }
15024 }
15025
15026 static void tg3_probe_ncsi(struct tg3 *tp)
15027 {
15028 u32 apedata;
15029
15030 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15031 if (apedata != APE_SEG_SIG_MAGIC)
15032 return;
15033
15034 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15035 if (!(apedata & APE_FW_STATUS_READY))
15036 return;
15037
15038 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15039 tg3_flag_set(tp, APE_HAS_NCSI);
15040 }
15041
15042 static void tg3_read_dash_ver(struct tg3 *tp)
15043 {
15044 int vlen;
15045 u32 apedata;
15046 char *fwtype;
15047
15048 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15049
15050 if (tg3_flag(tp, APE_HAS_NCSI))
15051 fwtype = "NCSI";
15052 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15053 fwtype = "SMASH";
15054 else
15055 fwtype = "DASH";
15056
15057 vlen = strlen(tp->fw_ver);
15058
15059 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15060 fwtype,
15061 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15062 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15063 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15064 (apedata & APE_FW_VERSION_BLDMSK));
15065 }
15066
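/* On 5762 devices a version byte lives in OTP; append it as a
 * two-digit suffix once the OTP magic validates.
 */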
15067 static void tg3_read_otp_ver(struct tg3 *tp)
15068 {
15069 u32 val, val2;
15070
15071 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15072 return;
15073
15074 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15075 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15076 TG3_OTP_MAGIC0_VALID(val)) {
15077 u64 val64 = (u64) val << 32 | val2;
15078 u32 ver = 0;
15079 int i, vlen;
15080
15081 for (i = 0; i < 7; i++) {
15082 if ((val64 & 0xff) == 0)
15083 break;
15084 ver = val64 & 0xff;
15085 val64 >>= 8;
15086 }
15087 vlen = strlen(tp->fw_ver);
15088 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15089 }
15090 }
15091
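/* Assemble the composite firmware version string: VPD data first,
 * then the bootcode or selfboot version from NVRAM, and finally any
 * management firmware (ASF/DASH/NCSI) version.
 */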
15092 static void tg3_read_fw_ver(struct tg3 *tp)
15093 {
15094 u32 val;
15095 bool vpd_vers = false;
15096
15097 if (tp->fw_ver[0] != 0)
15098 vpd_vers = true;
15099
15100 if (tg3_flag(tp, NO_NVRAM)) {
15101 strcat(tp->fw_ver, "sb");
15102 tg3_read_otp_ver(tp);
15103 return;
15104 }
15105
15106 if (tg3_nvram_read(tp, 0, &val))
15107 return;
15108
15109 if (val == TG3_EEPROM_MAGIC)
15110 tg3_read_bc_ver(tp);
15111 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15112 tg3_read_sb_ver(tp, val);
15113 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15114 tg3_read_hwsb_ver(tp);
15115
15116 if (tg3_flag(tp, ENABLE_ASF)) {
15117 if (tg3_flag(tp, ENABLE_APE)) {
15118 tg3_probe_ncsi(tp);
15119 if (!vpd_vers)
15120 tg3_read_dash_ver(tp);
15121 } else if (!vpd_vers) {
15122 tg3_read_mgmtfw_ver(tp);
15123 }
15124 }
15125
15126 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15127 }
15128
15129 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15130 {
15131 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15132 return TG3_RX_RET_MAX_SIZE_5717;
15133 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15134 return TG3_RX_RET_MAX_SIZE_5700;
15135 else
15136 return TG3_RX_RET_MAX_SIZE_5705;
15137 }
15138
15139 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15140 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15141 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15142 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15143 { },
15144 };
15145
15146 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15147 {
15148 struct pci_dev *peer;
15149 unsigned int func, devnr = tp->pdev->devfn & ~7;
15150
15151 for (func = 0; func < 8; func++) {
15152 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15153 if (peer && peer != tp->pdev)
15154 break;
15155 pci_dev_put(peer);
15156 }
15157 	/* 5704 can be configured in single-port mode; set peer to
15158 * tp->pdev in that case.
15159 */
15160 if (!peer) {
15161 peer = tp->pdev;
15162 return peer;
15163 }
15164
15165 /*
15166 * We don't need to keep the refcount elevated; there's no way
15167 	 * to remove one half of this device without removing the other.
15168 */
15169 pci_dev_put(peer);
15170
15171 return peer;
15172 }
15173
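/* Derive the chip revision ID, consulting the product ID registers on
 * devices that use the alternate location, then translate it into the
 * generation/class capability flags used throughout the driver.
 */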
15174 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15175 {
15176 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15177 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15178 u32 reg;
15179
15180 /* All devices that use the alternate
15181 * ASIC REV location have a CPMU.
15182 */
15183 tg3_flag_set(tp, CPMU_PRESENT);
15184
15185 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15186 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15187 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15188 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15189 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15190 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15191 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15192 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15193 reg = TG3PCI_GEN2_PRODID_ASICREV;
15194 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15195 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15196 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15197 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15198 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15199 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15200 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15201 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15202 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15203 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15204 reg = TG3PCI_GEN15_PRODID_ASICREV;
15205 else
15206 reg = TG3PCI_PRODID_ASICREV;
15207
15208 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15209 }
15210
15211 /* Wrong chip ID in 5752 A0. This code can be removed later
15212 * as A0 is not in production.
15213 */
15214 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15215 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15216
15217 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15218 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15219
15220 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15221 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15222 tg3_asic_rev(tp) == ASIC_REV_5720)
15223 tg3_flag_set(tp, 5717_PLUS);
15224
15225 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15226 tg3_asic_rev(tp) == ASIC_REV_57766)
15227 tg3_flag_set(tp, 57765_CLASS);
15228
15229 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15230 tg3_asic_rev(tp) == ASIC_REV_5762)
15231 tg3_flag_set(tp, 57765_PLUS);
15232
15233 /* Intentionally exclude ASIC_REV_5906 */
15234 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15235 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15236 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15237 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15238 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15239 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15240 tg3_flag(tp, 57765_PLUS))
15241 tg3_flag_set(tp, 5755_PLUS);
15242
15243 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15244 tg3_asic_rev(tp) == ASIC_REV_5714)
15245 tg3_flag_set(tp, 5780_CLASS);
15246
15247 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15248 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15249 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15250 tg3_flag(tp, 5755_PLUS) ||
15251 tg3_flag(tp, 5780_CLASS))
15252 tg3_flag_set(tp, 5750_PLUS);
15253
15254 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15255 tg3_flag(tp, 5750_PLUS))
15256 tg3_flag_set(tp, 5705_PLUS);
15257 }
15258
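/* A device is 10/100-only if it uses a FET PHY, is a 5703 strapped as
 * 10/100 by its board ID, or is flagged 10/100-only in the PCI device
 * table (with an extra qualifier for 5705 devices).
 */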
15259 static bool tg3_10_100_only_device(struct tg3 *tp,
15260 const struct pci_device_id *ent)
15261 {
15262 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15263
15264 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15265 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15266 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15267 return true;
15268
15269 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15270 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15271 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15272 return true;
15273 } else {
15274 return true;
15275 }
15276 }
15277
15278 return false;
15279 }
15280
15281 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15282 {
15283 u32 misc_ctrl_reg;
15284 u32 pci_state_reg, grc_misc_cfg;
15285 u32 val;
15286 u16 pci_cmd;
15287 int err;
15288
15289 /* Force memory write invalidate off. If we leave it on,
15290 * then on 5700_BX chips we have to enable a workaround.
15291 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15292 	 * to match the cacheline size.  The Broadcom driver has this
15293 	 * workaround but turns MWI off all the time and thus never uses
15294 	 * it.  This seems to suggest that the workaround is insufficient.
15295 */
15296 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15297 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15298 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15299
15300 /* Important! -- Make sure register accesses are byteswapped
15301 * correctly. Also, for those chips that require it, make
15302 * sure that indirect register accesses are enabled before
15303 * the first operation.
15304 */
15305 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15306 &misc_ctrl_reg);
15307 tp->misc_host_ctrl |= (misc_ctrl_reg &
15308 MISC_HOST_CTRL_CHIPREV);
15309 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15310 tp->misc_host_ctrl);
15311
15312 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15313
15314 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15315 * we need to disable memory and use config. cycles
15316 * only to access all registers. The 5702/03 chips
15317 * can mistakenly decode the special cycles from the
15318 * ICH chipsets as memory write cycles, causing corruption
15319 * of register and memory space. Only certain ICH bridges
15320 * will drive special cycles with non-zero data during the
15321 * address phase which can fall within the 5703's address
15322 * range. This is not an ICH bug as the PCI spec allows
15323 * non-zero address during special cycles. However, only
15324 * these ICH bridges are known to drive non-zero addresses
15325 * during special cycles.
15326 *
15327 * Since special cycles do not cross PCI bridges, we only
15328 * enable this workaround if the 5703 is on the secondary
15329 * bus of these ICH bridges.
15330 */
15331 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15332 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15333 static struct tg3_dev_id {
15334 u32 vendor;
15335 u32 device;
15336 u32 rev;
15337 } ich_chipsets[] = {
15338 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15339 PCI_ANY_ID },
15340 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15341 PCI_ANY_ID },
15342 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15343 0xa },
15344 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15345 PCI_ANY_ID },
15346 { },
15347 };
15348 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15349 struct pci_dev *bridge = NULL;
15350
15351 while (pci_id->vendor != 0) {
15352 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15353 bridge);
15354 if (!bridge) {
15355 pci_id++;
15356 continue;
15357 }
15358 if (pci_id->rev != PCI_ANY_ID) {
15359 if (bridge->revision > pci_id->rev)
15360 continue;
15361 }
15362 if (bridge->subordinate &&
15363 (bridge->subordinate->number ==
15364 tp->pdev->bus->number)) {
15365 tg3_flag_set(tp, ICH_WORKAROUND);
15366 pci_dev_put(bridge);
15367 break;
15368 }
15369 }
15370 }
15371
15372 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15373 static struct tg3_dev_id {
15374 u32 vendor;
15375 u32 device;
15376 } bridge_chipsets[] = {
15377 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15378 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15379 { },
15380 };
15381 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15382 struct pci_dev *bridge = NULL;
15383
15384 while (pci_id->vendor != 0) {
15385 bridge = pci_get_device(pci_id->vendor,
15386 pci_id->device,
15387 bridge);
15388 if (!bridge) {
15389 pci_id++;
15390 continue;
15391 }
15392 if (bridge->subordinate &&
15393 (bridge->subordinate->number <=
15394 tp->pdev->bus->number) &&
15395 (bridge->subordinate->busn_res.end >=
15396 tp->pdev->bus->number)) {
15397 tg3_flag_set(tp, 5701_DMA_BUG);
15398 pci_dev_put(bridge);
15399 break;
15400 }
15401 }
15402 }
15403
15404 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15405 	 * DMA addresses > 40-bit.  This bridge may have additional 57xx
15406 	 * devices behind it in some 4-port NIC designs, for example.
15407 * Any tg3 device found behind the bridge will also need the 40-bit
15408 * DMA workaround.
15409 */
15410 if (tg3_flag(tp, 5780_CLASS)) {
15411 tg3_flag_set(tp, 40BIT_DMA_BUG);
15412 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15413 } else {
15414 struct pci_dev *bridge = NULL;
15415
15416 do {
15417 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15418 PCI_DEVICE_ID_SERVERWORKS_EPB,
15419 bridge);
15420 if (bridge && bridge->subordinate &&
15421 (bridge->subordinate->number <=
15422 tp->pdev->bus->number) &&
15423 (bridge->subordinate->busn_res.end >=
15424 tp->pdev->bus->number)) {
15425 tg3_flag_set(tp, 40BIT_DMA_BUG);
15426 pci_dev_put(bridge);
15427 break;
15428 }
15429 } while (bridge);
15430 }
15431
15432 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15433 tg3_asic_rev(tp) == ASIC_REV_5714)
15434 tp->pdev_peer = tg3_find_peer(tp);
15435
15436 /* Determine TSO capabilities */
15437 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15438 ; /* Do nothing. HW bug. */
15439 else if (tg3_flag(tp, 57765_PLUS))
15440 tg3_flag_set(tp, HW_TSO_3);
15441 else if (tg3_flag(tp, 5755_PLUS) ||
15442 tg3_asic_rev(tp) == ASIC_REV_5906)
15443 tg3_flag_set(tp, HW_TSO_2);
15444 else if (tg3_flag(tp, 5750_PLUS)) {
15445 tg3_flag_set(tp, HW_TSO_1);
15446 tg3_flag_set(tp, TSO_BUG);
15447 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15448 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15449 tg3_flag_clear(tp, TSO_BUG);
15450 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15451 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15452 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15453 tg3_flag_set(tp, FW_TSO);
15454 tg3_flag_set(tp, TSO_BUG);
15455 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15456 tp->fw_needed = FIRMWARE_TG3TSO5;
15457 else
15458 tp->fw_needed = FIRMWARE_TG3TSO;
15459 }
15460
15461 /* Selectively allow TSO based on operating conditions */
15462 if (tg3_flag(tp, HW_TSO_1) ||
15463 tg3_flag(tp, HW_TSO_2) ||
15464 tg3_flag(tp, HW_TSO_3) ||
15465 tg3_flag(tp, FW_TSO)) {
15466 /* For firmware TSO, assume ASF is disabled.
15467 * We'll disable TSO later if we discover ASF
15468 * is enabled in tg3_get_eeprom_hw_cfg().
15469 */
15470 tg3_flag_set(tp, TSO_CAPABLE);
15471 } else {
15472 tg3_flag_clear(tp, TSO_CAPABLE);
15473 tg3_flag_clear(tp, TSO_BUG);
15474 tp->fw_needed = NULL;
15475 }
15476
15477 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15478 tp->fw_needed = FIRMWARE_TG3;
15479
15480 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15481 tp->fw_needed = FIRMWARE_TG357766;
15482
15483 tp->irq_max = 1;
15484
15485 if (tg3_flag(tp, 5750_PLUS)) {
15486 tg3_flag_set(tp, SUPPORT_MSI);
15487 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15488 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15489 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15490 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15491 tp->pdev_peer == tp->pdev))
15492 tg3_flag_clear(tp, SUPPORT_MSI);
15493
15494 if (tg3_flag(tp, 5755_PLUS) ||
15495 tg3_asic_rev(tp) == ASIC_REV_5906) {
15496 tg3_flag_set(tp, 1SHOT_MSI);
15497 }
15498
15499 if (tg3_flag(tp, 57765_PLUS)) {
15500 tg3_flag_set(tp, SUPPORT_MSIX);
15501 tp->irq_max = TG3_IRQ_MAX_VECS;
15502 }
15503 }
15504
15505 tp->txq_max = 1;
15506 tp->rxq_max = 1;
15507 if (tp->irq_max > 1) {
15508 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15509 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15510
15511 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15512 tg3_asic_rev(tp) == ASIC_REV_5720)
15513 tp->txq_max = tp->irq_max - 1;
15514 }
15515
15516 if (tg3_flag(tp, 5755_PLUS) ||
15517 tg3_asic_rev(tp) == ASIC_REV_5906)
15518 tg3_flag_set(tp, SHORT_DMA_BUG);
15519
15520 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15521 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15522
15523 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15524 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15525 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15526 tg3_asic_rev(tp) == ASIC_REV_5762)
15527 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15528
15529 if (tg3_flag(tp, 57765_PLUS) &&
15530 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15531 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15532
15533 if (!tg3_flag(tp, 5705_PLUS) ||
15534 tg3_flag(tp, 5780_CLASS) ||
15535 tg3_flag(tp, USE_JUMBO_BDFLAG))
15536 tg3_flag_set(tp, JUMBO_CAPABLE);
15537
15538 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15539 &pci_state_reg);
15540
15541 if (pci_is_pcie(tp->pdev)) {
15542 u16 lnkctl;
15543
15544 tg3_flag_set(tp, PCI_EXPRESS);
15545
15546 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15547 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15548 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15549 tg3_flag_clear(tp, HW_TSO_2);
15550 tg3_flag_clear(tp, TSO_CAPABLE);
15551 }
15552 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15553 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15554 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15555 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15556 tg3_flag_set(tp, CLKREQ_BUG);
15557 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15558 tg3_flag_set(tp, L1PLLPD_EN);
15559 }
15560 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15561 /* BCM5785 devices are effectively PCIe devices, and should
15562 * follow PCIe codepaths, but do not have a PCIe capabilities
15563 * section.
15564 */
15565 tg3_flag_set(tp, PCI_EXPRESS);
15566 } else if (!tg3_flag(tp, 5705_PLUS) ||
15567 tg3_flag(tp, 5780_CLASS)) {
15568 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15569 if (!tp->pcix_cap) {
15570 dev_err(&tp->pdev->dev,
15571 "Cannot find PCI-X capability, aborting\n");
15572 return -EIO;
15573 }
15574
15575 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15576 tg3_flag_set(tp, PCIX_MODE);
15577 }
15578
15579 /* If we have an AMD 762 or VIA K8T800 chipset, write
15580 * reordering to the mailbox registers done by the host
15581 * controller can cause major troubles. We read back from
15582 * every mailbox register write to force the writes to be
15583 * posted to the chip in order.
15584 */
15585 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15586 !tg3_flag(tp, PCI_EXPRESS))
15587 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15588
15589 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15590 &tp->pci_cacheline_sz);
15591 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15592 &tp->pci_lat_timer);
15593 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15594 tp->pci_lat_timer < 64) {
15595 tp->pci_lat_timer = 64;
15596 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15597 tp->pci_lat_timer);
15598 }
15599
15600 /* Important! -- It is critical that the PCI-X hw workaround
15601 * situation is decided before the first MMIO register access.
15602 */
15603 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15604 /* 5700 BX chips need to have their TX producer index
15605 		 * mailboxes written twice to work around a bug.
15606 */
15607 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15608
15609 /* If we are in PCI-X mode, enable register write workaround.
15610 *
15611 * The workaround is to use indirect register accesses
15612 * for all chip writes not to mailbox registers.
15613 */
15614 if (tg3_flag(tp, PCIX_MODE)) {
15615 u32 pm_reg;
15616
15617 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15618
15619 			/* The chip can have its power management PCI config
15620 * space registers clobbered due to this bug.
15621 * So explicitly force the chip into D0 here.
15622 */
15623 pci_read_config_dword(tp->pdev,
15624 tp->pm_cap + PCI_PM_CTRL,
15625 &pm_reg);
15626 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15627 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15628 pci_write_config_dword(tp->pdev,
15629 tp->pm_cap + PCI_PM_CTRL,
15630 pm_reg);
15631
15632 /* Also, force SERR#/PERR# in PCI command. */
15633 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15634 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15635 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15636 }
15637 }
15638
15639 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15640 tg3_flag_set(tp, PCI_HIGH_SPEED);
15641 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15642 tg3_flag_set(tp, PCI_32BIT);
15643
15644 /* Chip-specific fixup from Broadcom driver */
15645 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15646 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15647 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15648 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15649 }
15650
15651 /* Default fast path register access methods */
15652 tp->read32 = tg3_read32;
15653 tp->write32 = tg3_write32;
15654 tp->read32_mbox = tg3_read32;
15655 tp->write32_mbox = tg3_write32;
15656 tp->write32_tx_mbox = tg3_write32;
15657 tp->write32_rx_mbox = tg3_write32;
15658
15659 /* Various workaround register access methods */
15660 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15661 tp->write32 = tg3_write_indirect_reg32;
15662 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15663 (tg3_flag(tp, PCI_EXPRESS) &&
15664 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15665 /*
15666 * Back to back register writes can cause problems on these
15667 * chips, the workaround is to read back all reg writes
15668 * except those to mailbox regs.
15669 *
15670 * See tg3_write_indirect_reg32().
15671 */
15672 tp->write32 = tg3_write_flush_reg32;
15673 }
15674
15675 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15676 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15677 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15678 tp->write32_rx_mbox = tg3_write_flush_reg32;
15679 }
15680
15681 if (tg3_flag(tp, ICH_WORKAROUND)) {
15682 tp->read32 = tg3_read_indirect_reg32;
15683 tp->write32 = tg3_write_indirect_reg32;
15684 tp->read32_mbox = tg3_read_indirect_mbox;
15685 tp->write32_mbox = tg3_write_indirect_mbox;
15686 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15687 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15688
15689 iounmap(tp->regs);
15690 tp->regs = NULL;
15691
15692 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15693 pci_cmd &= ~PCI_COMMAND_MEMORY;
15694 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15695 }
15696 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15697 tp->read32_mbox = tg3_read32_mbox_5906;
15698 tp->write32_mbox = tg3_write32_mbox_5906;
15699 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15700 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15701 }
15702
15703 if (tp->write32 == tg3_write_indirect_reg32 ||
15704 (tg3_flag(tp, PCIX_MODE) &&
15705 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15706 tg3_asic_rev(tp) == ASIC_REV_5701)))
15707 tg3_flag_set(tp, SRAM_USE_CONFIG);
15708
15709 /* The memory arbiter has to be enabled in order for SRAM accesses
15710 * to succeed. Normally on powerup the tg3 chip firmware will make
15711 * sure it is enabled, but other entities such as system netboot
15712 * code might disable it.
15713 */
15714 val = tr32(MEMARB_MODE);
15715 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15716
15717 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15718 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15719 tg3_flag(tp, 5780_CLASS)) {
15720 if (tg3_flag(tp, PCIX_MODE)) {
15721 pci_read_config_dword(tp->pdev,
15722 tp->pcix_cap + PCI_X_STATUS,
15723 &val);
15724 tp->pci_fn = val & 0x7;
15725 }
15726 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15727 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15728 tg3_asic_rev(tp) == ASIC_REV_5720) {
15729 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15730 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15731 val = tr32(TG3_CPMU_STATUS);
15732
15733 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15734 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15735 else
15736 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15737 TG3_CPMU_STATUS_FSHFT_5719;
15738 }
15739
15740 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15741 tp->write32_tx_mbox = tg3_write_flush_reg32;
15742 tp->write32_rx_mbox = tg3_write_flush_reg32;
15743 }
15744
15745 /* Get eeprom hw config before calling tg3_set_power_state().
15746 * In particular, the TG3_FLAG_IS_NIC flag must be
15747 * determined before calling tg3_set_power_state() so that
15748 * we know whether or not to switch out of Vaux power.
15749 * When the flag is set, it means that GPIO1 is used for eeprom
15750 * write protect and also implies that it is a LOM where GPIOs
15751 * are not used to switch power.
15752 */
15753 tg3_get_eeprom_hw_cfg(tp);
15754
15755 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15756 tg3_flag_clear(tp, TSO_CAPABLE);
15757 tg3_flag_clear(tp, TSO_BUG);
15758 tp->fw_needed = NULL;
15759 }
15760
15761 if (tg3_flag(tp, ENABLE_APE)) {
15762 /* Allow reads and writes to the
15763 * APE register and memory space.
15764 */
15765 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15766 PCISTATE_ALLOW_APE_SHMEM_WR |
15767 PCISTATE_ALLOW_APE_PSPACE_WR;
15768 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15769 pci_state_reg);
15770
15771 tg3_ape_lock_init(tp);
15772 }
15773
15774 /* Set up tp->grc_local_ctrl before calling
15775 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15776 * will bring 5700's external PHY out of reset.
15777 * It is also used as eeprom write protect on LOMs.
15778 */
15779 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15780 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15781 tg3_flag(tp, EEPROM_WRITE_PROT))
15782 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15783 GRC_LCLCTRL_GPIO_OUTPUT1);
15784 /* Unused GPIO3 must be driven as output on 5752 because there
15785 * are no pull-up resistors on unused GPIO pins.
15786 */
15787 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15788 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15789
15790 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15791 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15792 tg3_flag(tp, 57765_CLASS))
15793 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15794
15795 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15796 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15797 /* Turn off the debug UART. */
15798 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15799 if (tg3_flag(tp, IS_NIC))
15800 /* Keep VMain power. */
15801 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15802 GRC_LCLCTRL_GPIO_OUTPUT0;
15803 }
15804
15805 if (tg3_asic_rev(tp) == ASIC_REV_5762)
15806 tp->grc_local_ctrl |=
15807 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15808
15809 /* Switch out of Vaux if it is a NIC */
15810 tg3_pwrsrc_switch_to_vmain(tp);
15811
15812 /* Derive initial jumbo mode from MTU assigned in
15813 * ether_setup() via the alloc_etherdev() call
15814 */
15815 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15816 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15817
15818 /* Determine WakeOnLan speed to use. */
15819 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15820 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15821 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15822 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15823 tg3_flag_clear(tp, WOL_SPEED_100MB);
15824 } else {
15825 tg3_flag_set(tp, WOL_SPEED_100MB);
15826 }
15827
15828 if (tg3_asic_rev(tp) == ASIC_REV_5906)
15829 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15830
15831 /* A few boards don't want Ethernet@WireSpeed phy feature */
15832 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15833 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15834 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15835 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15836 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15837 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15838 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15839
15840 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15841 tg3_chip_rev(tp) == CHIPREV_5704_AX)
15842 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15843 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15844 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15845
15846 if (tg3_flag(tp, 5705_PLUS) &&
15847 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15848 tg3_asic_rev(tp) != ASIC_REV_5785 &&
15849 tg3_asic_rev(tp) != ASIC_REV_57780 &&
15850 !tg3_flag(tp, 57765_PLUS)) {
15851 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15852 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15853 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15854 tg3_asic_rev(tp) == ASIC_REV_5761) {
15855 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15856 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15857 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15858 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15859 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15860 } else
15861 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15862 }
15863
15864 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15865 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15866 tp->phy_otp = tg3_read_otp_phycfg(tp);
15867 if (tp->phy_otp == 0)
15868 tp->phy_otp = TG3_OTP_DEFAULT;
15869 }
15870
15871 if (tg3_flag(tp, CPMU_PRESENT))
15872 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15873 else
15874 tp->mi_mode = MAC_MI_MODE_BASE;
15875
15876 tp->coalesce_mode = 0;
15877 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15878 tg3_chip_rev(tp) != CHIPREV_5700_BX)
15879 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15880
15881 /* Set these bits to enable statistics workaround. */
15882 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15883 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15884 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15885 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15886 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15887 }
15888
15889 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15890 tg3_asic_rev(tp) == ASIC_REV_57780)
15891 tg3_flag_set(tp, USE_PHYLIB);
15892
15893 err = tg3_mdio_init(tp);
15894 if (err)
15895 return err;
15896
15897 /* Initialize data/descriptor byte/word swapping. */
15898 val = tr32(GRC_MODE);
15899 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15900 tg3_asic_rev(tp) == ASIC_REV_5762)
15901 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15902 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15903 GRC_MODE_B2HRX_ENABLE |
15904 GRC_MODE_HTX2B_ENABLE |
15905 GRC_MODE_HOST_STACKUP);
15906 else
15907 val &= GRC_MODE_HOST_STACKUP;
15908
15909 tw32(GRC_MODE, val | tp->grc_mode);
15910
15911 tg3_switch_clocks(tp);
15912
15913 /* Clear this out for sanity. */
15914 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15915
15916 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15917 &pci_state_reg);
15918 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15919 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15920 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15921 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15922 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15923 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15924 void __iomem *sram_base;
15925
15926 /* Write some dummy words into the SRAM status block
15927 * area, see if it reads back correctly. If the return
15928 * value is bad, force enable the PCIX workaround.
15929 */
15930 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15931
15932 writel(0x00000000, sram_base);
15933 writel(0x00000000, sram_base + 4);
15934 writel(0xffffffff, sram_base + 4);
15935 if (readl(sram_base) != 0x00000000)
15936 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15937 }
15938 }
15939
15940 udelay(50);
15941 tg3_nvram_init(tp);
15942
15943 /* If the device has an NVRAM, no need to load patch firmware */
15944 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
15945 !tg3_flag(tp, NO_NVRAM))
15946 tp->fw_needed = NULL;
15947
15948 grc_misc_cfg = tr32(GRC_MISC_CFG);
15949 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15950
15951 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15952 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15953 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15954 tg3_flag_set(tp, IS_5788);
15955
15956 if (!tg3_flag(tp, IS_5788) &&
15957 tg3_asic_rev(tp) != ASIC_REV_5700)
15958 tg3_flag_set(tp, TAGGED_STATUS);
15959 if (tg3_flag(tp, TAGGED_STATUS)) {
15960 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15961 HOSTCC_MODE_CLRTICK_TXBD);
15962
15963 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15964 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15965 tp->misc_host_ctrl);
15966 }
15967
15968 /* Preserve the APE MAC_MODE bits */
15969 if (tg3_flag(tp, ENABLE_APE))
15970 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15971 else
15972 tp->mac_mode = 0;
15973
15974 if (tg3_10_100_only_device(tp, ent))
15975 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15976
15977 err = tg3_phy_probe(tp);
15978 if (err) {
15979 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15980 /* ... but do not return immediately ... */
15981 tg3_mdio_fini(tp);
15982 }
15983
15984 tg3_read_vpd(tp);
15985 tg3_read_fw_ver(tp);
15986
15987 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15988 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15989 } else {
15990 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15991 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15992 else
15993 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15994 }
15995
15996 /* 5700 {AX,BX} chips have a broken status block link
15997 * change bit implementation, so we must use the
15998 * status register in those cases.
15999 */
16000 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16001 tg3_flag_set(tp, USE_LINKCHG_REG);
16002 else
16003 tg3_flag_clear(tp, USE_LINKCHG_REG);
16004
16005 /* The led_ctrl is set during tg3_phy_probe, here we might
16006 * have to force the link status polling mechanism based
16007 * upon subsystem IDs.
16008 */
16009 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16010 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16011 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16012 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16013 tg3_flag_set(tp, USE_LINKCHG_REG);
16014 }
16015
16016 /* For all SERDES we poll the MAC status register. */
16017 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16018 tg3_flag_set(tp, POLL_SERDES);
16019 else
16020 tg3_flag_clear(tp, POLL_SERDES);
16021
16022 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16023 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
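	/* The 5701 in PCI-X mode cannot DMA into buffers that start at
	 * the usual 2-byte NET_IP_ALIGN offset, so drop the offset and,
	 * on platforms without efficient unaligned access, copy every
	 * received packet so the IP header ends up aligned.
	 */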
16024 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16025 tg3_flag(tp, PCIX_MODE)) {
16026 tp->rx_offset = NET_SKB_PAD;
16027 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16028 tp->rx_copy_thresh = ~(u16)0;
16029 #endif
16030 }
16031
16032 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16033 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16034 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16035
16036 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16037
16038 /* Increment the rx prod index on the rx std ring by at most
16039 	 * 8 for these chips to work around hw errata.
16040 */
16041 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16042 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16043 tg3_asic_rev(tp) == ASIC_REV_5755)
16044 tp->rx_std_max_post = 8;
16045
16046 if (tg3_flag(tp, ASPM_WORKAROUND))
16047 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16048 PCIE_PWR_MGMT_L1_THRESH_MSK;
16049
16050 return err;
16051 }
16052
16053 #ifdef CONFIG_SPARC
16054 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16055 {
16056 struct net_device *dev = tp->dev;
16057 struct pci_dev *pdev = tp->pdev;
16058 struct device_node *dp = pci_device_to_OF_node(pdev);
16059 const unsigned char *addr;
16060 int len;
16061
16062 addr = of_get_property(dp, "local-mac-address", &len);
16063 if (addr && len == 6) {
16064 memcpy(dev->dev_addr, addr, 6);
16065 return 0;
16066 }
16067 return -ENODEV;
16068 }
16069
16070 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16071 {
16072 struct net_device *dev = tp->dev;
16073
16074 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16075 return 0;
16076 }
16077 #endif
16078
16079 static int tg3_get_device_address(struct tg3 *tp)
16080 {
16081 struct net_device *dev = tp->dev;
16082 u32 hi, lo, mac_offset;
16083 int addr_ok = 0;
16084 int err;
16085
16086 #ifdef CONFIG_SPARC
16087 if (!tg3_get_macaddr_sparc(tp))
16088 return 0;
16089 #endif
16090
16091 if (tg3_flag(tp, IS_SSB_CORE)) {
16092 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16093 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16094 return 0;
16095 }
16096
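	/* 0x7c is the default NVRAM offset of the MAC address; the
	 * second MAC on dual-port and multi-function devices lives at
	 * 0xcc, as the checks below select.
	 */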
16097 mac_offset = 0x7c;
16098 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16099 tg3_flag(tp, 5780_CLASS)) {
16100 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16101 mac_offset = 0xcc;
16102 if (tg3_nvram_lock(tp))
16103 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16104 else
16105 tg3_nvram_unlock(tp);
16106 } else if (tg3_flag(tp, 5717_PLUS)) {
16107 if (tp->pci_fn & 1)
16108 mac_offset = 0xcc;
16109 if (tp->pci_fn > 1)
16110 mac_offset += 0x18c;
16111 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16112 mac_offset = 0x10;
16113
16114 /* First try to get it from MAC address mailbox. */
16115 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
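	/* Bootcode marks a valid stored MAC address with the ASCII
	 * signature "HK" (0x484b) in the upper 16 bits of the high
	 * mailbox word.
	 */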
16116 if ((hi >> 16) == 0x484b) {
16117 dev->dev_addr[0] = (hi >> 8) & 0xff;
16118 dev->dev_addr[1] = (hi >> 0) & 0xff;
16119
16120 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16121 dev->dev_addr[2] = (lo >> 24) & 0xff;
16122 dev->dev_addr[3] = (lo >> 16) & 0xff;
16123 dev->dev_addr[4] = (lo >> 8) & 0xff;
16124 dev->dev_addr[5] = (lo >> 0) & 0xff;
16125
16126 /* Some old bootcode may report a 0 MAC address in SRAM */
16127 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16128 }
16129 if (!addr_ok) {
16130 /* Next, try NVRAM. */
16131 if (!tg3_flag(tp, NO_NVRAM) &&
16132 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16133 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16134 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16135 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16136 }
16137 /* Finally just fetch it out of the MAC control regs. */
16138 else {
16139 hi = tr32(MAC_ADDR_0_HIGH);
16140 lo = tr32(MAC_ADDR_0_LOW);
16141
16142 dev->dev_addr[5] = lo & 0xff;
16143 dev->dev_addr[4] = (lo >> 8) & 0xff;
16144 dev->dev_addr[3] = (lo >> 16) & 0xff;
16145 dev->dev_addr[2] = (lo >> 24) & 0xff;
16146 dev->dev_addr[1] = hi & 0xff;
16147 dev->dev_addr[0] = (hi >> 8) & 0xff;
16148 }
16149 }
16150
16151 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16152 #ifdef CONFIG_SPARC
16153 if (!tg3_get_default_macaddr_sparc(tp))
16154 return 0;
16155 #endif
16156 return -EINVAL;
16157 }
16158 return 0;
16159 }
16160
16161 #define BOUNDARY_SINGLE_CACHELINE 1
16162 #define BOUNDARY_MULTI_CACHELINE 2
16163
16164 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16165 {
16166 int cacheline_size;
16167 u8 byte;
16168 int goal;
16169
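	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; a value of
	 * zero means the size was never set, so assume the 1024-byte
	 * worst case.
	 */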
16170 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16171 if (byte == 0)
16172 cacheline_size = 1024;
16173 else
16174 cacheline_size = (int) byte * 4;
16175
16176 /* On 5703 and later chips, the boundary bits have no
16177 * effect.
16178 */
16179 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16180 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16181 !tg3_flag(tp, PCI_EXPRESS))
16182 goto out;
16183
16184 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16185 goal = BOUNDARY_MULTI_CACHELINE;
16186 #else
16187 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16188 goal = BOUNDARY_SINGLE_CACHELINE;
16189 #else
16190 goal = 0;
16191 #endif
16192 #endif
16193
16194 if (tg3_flag(tp, 57765_PLUS)) {
16195 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16196 goto out;
16197 }
16198
16199 if (!goal)
16200 goto out;
16201
16202 /* PCI controllers on most RISC systems tend to disconnect
16203 * when a device tries to burst across a cache-line boundary.
16204 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16205 *
16206 * Unfortunately, for PCI-E there are only limited
16207 * write-side controls for this, and thus for reads
16208 * we will still get the disconnects. We'll also waste
16209 * these PCI cycles for both read and write for chips
16210 * other than 5700 and 5701 which do not implement the
16211 * boundary bits.
16212 */
16213 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16214 switch (cacheline_size) {
16215 case 16:
16216 case 32:
16217 case 64:
16218 case 128:
16219 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16220 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16221 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16222 } else {
16223 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16224 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16225 }
16226 break;
16227
16228 case 256:
16229 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16230 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16231 break;
16232
16233 default:
16234 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16235 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16236 break;
16237 }
16238 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16239 switch (cacheline_size) {
16240 case 16:
16241 case 32:
16242 case 64:
16243 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16244 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16245 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16246 break;
16247 }
16248 /* fallthrough */
16249 case 128:
16250 default:
16251 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16252 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16253 break;
16254 }
16255 } else {
16256 switch (cacheline_size) {
16257 case 16:
16258 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16259 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16260 DMA_RWCTRL_WRITE_BNDRY_16);
16261 break;
16262 }
16263 /* fallthrough */
16264 case 32:
16265 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16266 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16267 DMA_RWCTRL_WRITE_BNDRY_32);
16268 break;
16269 }
16270 /* fallthrough */
16271 case 64:
16272 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16273 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16274 DMA_RWCTRL_WRITE_BNDRY_64);
16275 break;
16276 }
16277 /* fallthrough */
16278 case 128:
16279 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16280 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16281 DMA_RWCTRL_WRITE_BNDRY_128);
16282 break;
16283 }
16284 /* fallthrough */
16285 case 256:
16286 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16287 DMA_RWCTRL_WRITE_BNDRY_256);
16288 break;
16289 case 512:
16290 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16291 DMA_RWCTRL_WRITE_BNDRY_512);
16292 break;
16293 case 1024:
16294 default:
16295 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16296 DMA_RWCTRL_WRITE_BNDRY_1024);
16297 break;
16298 }
16299 }
16300
16301 out:
16302 return val;
16303 }
16304
16305 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16306 int size, int to_device)
16307 {
16308 struct tg3_internal_buffer_desc test_desc;
16309 u32 sram_dma_descs;
16310 int i, ret;
16311
16312 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16313
16314 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16315 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16316 tw32(RDMAC_STATUS, 0);
16317 tw32(WDMAC_STATUS, 0);
16318
16319 tw32(BUFMGR_MODE, 0);
16320 tw32(FTQ_RESET, 0);
16321
16322 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16323 test_desc.addr_lo = buf_dma & 0xffffffff;
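	/* nic_mbuf 0x2100 is the NIC-local SRAM address of the test
	 * buffer; the disabled readback check in tg3_test_dma() peeks
	 * at the same region.
	 */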
16324 test_desc.nic_mbuf = 0x00002100;
16325 test_desc.len = size;
16326
16327 /*
16328 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16329 * the *second* time the tg3 driver was getting loaded after an
16330 * initial scan.
16331 *
16332 * Broadcom tells me:
16333 * ...the DMA engine is connected to the GRC block and a DMA
16334 * reset may affect the GRC block in some unpredictable way...
16335 * The behavior of resets to individual blocks has not been tested.
16336 *
16337 * Broadcom noted the GRC reset will also reset all sub-components.
16338 */
16339 if (to_device) {
16340 test_desc.cqid_sqid = (13 << 8) | 2;
16341
16342 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16343 udelay(40);
16344 } else {
16345 test_desc.cqid_sqid = (16 << 8) | 7;
16346
16347 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16348 udelay(40);
16349 }
16350 test_desc.flags = 0x00000005;
16351
16352 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16353 u32 val;
16354
16355 val = *(((u32 *)&test_desc) + i);
16356 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16357 sram_dma_descs + (i * sizeof(u32)));
16358 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16359 }
16360 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16361
16362 if (to_device)
16363 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16364 else
16365 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16366
16367 ret = -ENODEV;
16368 for (i = 0; i < 40; i++) {
16369 u32 val;
16370
16371 if (to_device)
16372 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16373 else
16374 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16375 if ((val & 0xffff) == sram_dma_descs) {
16376 ret = 0;
16377 break;
16378 }
16379
16380 udelay(100);
16381 }
16382
16383 return ret;
16384 }
16385
16386 #define TEST_BUFFER_SIZE 0x2000
16387
16388 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16389 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16390 { },
16391 };
16392
16393 static int tg3_test_dma(struct tg3 *tp)
16394 {
16395 dma_addr_t buf_dma;
16396 u32 *buf, saved_dma_rwctrl;
16397 int ret = 0;
16398
16399 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16400 &buf_dma, GFP_KERNEL);
16401 if (!buf) {
16402 ret = -ENOMEM;
16403 goto out_nofree;
16404 }
16405
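	/* Issue DMA cycles with PCI bus commands 0x7 (Memory Write)
	 * and 0x6 (Memory Read).
	 */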
16406 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16407 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16408
16409 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16410
16411 if (tg3_flag(tp, 57765_PLUS))
16412 goto out;
16413
16414 if (tg3_flag(tp, PCI_EXPRESS)) {
16415 /* DMA read watermark not used on PCIE */
16416 tp->dma_rwctrl |= 0x00180000;
16417 } else if (!tg3_flag(tp, PCIX_MODE)) {
16418 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16419 tg3_asic_rev(tp) == ASIC_REV_5750)
16420 tp->dma_rwctrl |= 0x003f0000;
16421 else
16422 tp->dma_rwctrl |= 0x003f000f;
16423 } else {
16424 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16425 tg3_asic_rev(tp) == ASIC_REV_5704) {
16426 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16427 u32 read_water = 0x7;
16428
16429 /* If the 5704 is behind the EPB bridge, we can
16430 * do the less restrictive ONE_DMA workaround for
16431 * better performance.
16432 */
16433 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16434 tg3_asic_rev(tp) == ASIC_REV_5704)
16435 tp->dma_rwctrl |= 0x8000;
16436 else if (ccval == 0x6 || ccval == 0x7)
16437 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16438
16439 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16440 read_water = 4;
16441 /* Set bit 23 to enable PCIX hw bug fix */
16442 tp->dma_rwctrl |=
16443 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16444 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16445 (1 << 23);
16446 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16447 /* 5780 always in PCIX mode */
16448 tp->dma_rwctrl |= 0x00144000;
16449 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16450 /* 5714 always in PCIX mode */
16451 tp->dma_rwctrl |= 0x00148000;
16452 } else {
16453 tp->dma_rwctrl |= 0x001b000f;
16454 }
16455 }
16456 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16457 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16458
16459 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16460 tg3_asic_rev(tp) == ASIC_REV_5704)
16461 tp->dma_rwctrl &= 0xfffffff0;
16462
16463 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16464 tg3_asic_rev(tp) == ASIC_REV_5701) {
16465 /* Remove this if it causes problems for some boards. */
16466 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16467
16468 /* On 5700/5701 chips, we need to set this bit.
16469 * Otherwise the chip will issue cacheline transactions
16470 * to streamable DMA memory with not all the byte
16471 * enables turned on. This is an error on several
16472 * RISC PCI controllers, in particular sparc64.
16473 *
16474 * On 5703/5704 chips, this bit has been reassigned
16475 * a different meaning. In particular, it is used
16476 * on those chips to enable a PCI-X workaround.
16477 */
16478 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16479 }
16480
16481 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16482
16483 #if 0
16484 /* Unneeded, already done by tg3_get_invariants. */
16485 tg3_switch_clocks(tp);
16486 #endif
16487
16488 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16489 tg3_asic_rev(tp) != ASIC_REV_5701)
16490 goto out;
16491
16492 	/* It is best to perform the DMA test with the maximum write burst size
16493 * to expose the 5700/5701 write DMA bug.
16494 */
16495 saved_dma_rwctrl = tp->dma_rwctrl;
16496 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16497 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16498
16499 while (1) {
16500 u32 *p = buf, i;
16501
16502 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16503 p[i] = i;
16504
16505 /* Send the buffer to the chip. */
16506 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16507 if (ret) {
16508 dev_err(&tp->pdev->dev,
16509 "%s: Buffer write failed. err = %d\n",
16510 __func__, ret);
16511 break;
16512 }
16513
16514 #if 0
16515 /* validate data reached card RAM correctly. */
16516 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16517 u32 val;
16518 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16519 if (le32_to_cpu(val) != p[i]) {
16520 dev_err(&tp->pdev->dev,
16521 "%s: Buffer corrupted on device! "
16522 "(%d != %d)\n", __func__, val, i);
16523 /* ret = -ENODEV here? */
16524 }
16525 p[i] = 0;
16526 }
16527 #endif
16528 /* Now read it back. */
16529 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16530 if (ret) {
16531 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16532 "err = %d\n", __func__, ret);
16533 break;
16534 }
16535
16536 /* Verify it. */
16537 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16538 if (p[i] == i)
16539 continue;
16540
16541 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16542 DMA_RWCTRL_WRITE_BNDRY_16) {
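				/* First corruption: clamp the write
				 * boundary to 16 bytes and rerun the
				 * test before giving up.
				 */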
16543 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16544 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16545 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16546 break;
16547 } else {
16548 dev_err(&tp->pdev->dev,
16549 "%s: Buffer corrupted on read back! "
16550 "(%d != %d)\n", __func__, p[i], i);
16551 ret = -ENODEV;
16552 goto out;
16553 }
16554 }
16555
16556 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16557 /* Success. */
16558 ret = 0;
16559 break;
16560 }
16561 }
16562 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16563 DMA_RWCTRL_WRITE_BNDRY_16) {
16564 /* DMA test passed without adjusting DMA boundary,
16565 * now look for chipsets that are known to expose the
16566 * DMA bug without failing the test.
16567 */
16568 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16569 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16570 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16571 } else {
16572 /* Safe to use the calculated DMA boundary. */
16573 tp->dma_rwctrl = saved_dma_rwctrl;
16574 }
16575
16576 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16577 }
16578
16579 out:
16580 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16581 out_nofree:
16582 return ret;
16583 }
16584
16585 static void tg3_init_bufmgr_config(struct tg3 *tp)
16586 {
16587 if (tg3_flag(tp, 57765_PLUS)) {
16588 tp->bufmgr_config.mbuf_read_dma_low_water =
16589 DEFAULT_MB_RDMA_LOW_WATER_5705;
16590 tp->bufmgr_config.mbuf_mac_rx_low_water =
16591 DEFAULT_MB_MACRX_LOW_WATER_57765;
16592 tp->bufmgr_config.mbuf_high_water =
16593 DEFAULT_MB_HIGH_WATER_57765;
16594
16595 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16596 DEFAULT_MB_RDMA_LOW_WATER_5705;
16597 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16598 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16599 tp->bufmgr_config.mbuf_high_water_jumbo =
16600 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16601 } else if (tg3_flag(tp, 5705_PLUS)) {
16602 tp->bufmgr_config.mbuf_read_dma_low_water =
16603 DEFAULT_MB_RDMA_LOW_WATER_5705;
16604 tp->bufmgr_config.mbuf_mac_rx_low_water =
16605 DEFAULT_MB_MACRX_LOW_WATER_5705;
16606 tp->bufmgr_config.mbuf_high_water =
16607 DEFAULT_MB_HIGH_WATER_5705;
16608 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16609 tp->bufmgr_config.mbuf_mac_rx_low_water =
16610 DEFAULT_MB_MACRX_LOW_WATER_5906;
16611 tp->bufmgr_config.mbuf_high_water =
16612 DEFAULT_MB_HIGH_WATER_5906;
16613 }
16614
16615 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16616 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16617 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16618 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16619 tp->bufmgr_config.mbuf_high_water_jumbo =
16620 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16621 } else {
16622 tp->bufmgr_config.mbuf_read_dma_low_water =
16623 DEFAULT_MB_RDMA_LOW_WATER;
16624 tp->bufmgr_config.mbuf_mac_rx_low_water =
16625 DEFAULT_MB_MACRX_LOW_WATER;
16626 tp->bufmgr_config.mbuf_high_water =
16627 DEFAULT_MB_HIGH_WATER;
16628
16629 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16630 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16631 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16632 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16633 tp->bufmgr_config.mbuf_high_water_jumbo =
16634 DEFAULT_MB_HIGH_WATER_JUMBO;
16635 }
16636
16637 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16638 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16639 }
16640
16641 static char *tg3_phy_string(struct tg3 *tp)
16642 {
16643 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16644 case TG3_PHY_ID_BCM5400: return "5400";
16645 case TG3_PHY_ID_BCM5401: return "5401";
16646 case TG3_PHY_ID_BCM5411: return "5411";
16647 case TG3_PHY_ID_BCM5701: return "5701";
16648 case TG3_PHY_ID_BCM5703: return "5703";
16649 case TG3_PHY_ID_BCM5704: return "5704";
16650 case TG3_PHY_ID_BCM5705: return "5705";
16651 case TG3_PHY_ID_BCM5750: return "5750";
16652 case TG3_PHY_ID_BCM5752: return "5752";
16653 case TG3_PHY_ID_BCM5714: return "5714";
16654 case TG3_PHY_ID_BCM5780: return "5780";
16655 case TG3_PHY_ID_BCM5755: return "5755";
16656 case TG3_PHY_ID_BCM5787: return "5787";
16657 case TG3_PHY_ID_BCM5784: return "5784";
16658 case TG3_PHY_ID_BCM5756: return "5722/5756";
16659 case TG3_PHY_ID_BCM5906: return "5906";
16660 case TG3_PHY_ID_BCM5761: return "5761";
16661 case TG3_PHY_ID_BCM5718C: return "5718C";
16662 case TG3_PHY_ID_BCM5718S: return "5718S";
16663 case TG3_PHY_ID_BCM57765: return "57765";
16664 case TG3_PHY_ID_BCM5719C: return "5719C";
16665 case TG3_PHY_ID_BCM5720C: return "5720C";
16666 case TG3_PHY_ID_BCM5762: return "5762C";
16667 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16668 case 0: return "serdes";
16669 default: return "unknown";
16670 }
16671 }
16672
16673 static char *tg3_bus_string(struct tg3 *tp, char *str)
16674 {
16675 if (tg3_flag(tp, PCI_EXPRESS)) {
16676 strcpy(str, "PCI Express");
16677 return str;
16678 } else if (tg3_flag(tp, PCIX_MODE)) {
16679 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16680
16681 strcpy(str, "PCIX:");
16682
16683 if ((clock_ctrl == 7) ||
16684 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16685 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16686 strcat(str, "133MHz");
16687 else if (clock_ctrl == 0)
16688 strcat(str, "33MHz");
16689 else if (clock_ctrl == 2)
16690 strcat(str, "50MHz");
16691 else if (clock_ctrl == 4)
16692 strcat(str, "66MHz");
16693 else if (clock_ctrl == 6)
16694 strcat(str, "100MHz");
16695 } else {
16696 strcpy(str, "PCI:");
16697 if (tg3_flag(tp, PCI_HIGH_SPEED))
16698 strcat(str, "66MHz");
16699 else
16700 strcat(str, "33MHz");
16701 }
16702 if (tg3_flag(tp, PCI_32BIT))
16703 strcat(str, ":32-bit");
16704 else
16705 strcat(str, ":64-bit");
16706 return str;
16707 }
16708
16709 static void tg3_init_coal(struct tg3 *tp)
16710 {
16711 struct ethtool_coalesce *ec = &tp->coal;
16712
16713 memset(ec, 0, sizeof(*ec));
16714 ec->cmd = ETHTOOL_GCOALESCE;
16715 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16716 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16717 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16718 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16719 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16720 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16721 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16722 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16723 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16724
16725 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16726 HOSTCC_MODE_CLRTICK_TXBD)) {
16727 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16728 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16729 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16730 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16731 }
16732
16733 if (tg3_flag(tp, 5705_PLUS)) {
16734 ec->rx_coalesce_usecs_irq = 0;
16735 ec->tx_coalesce_usecs_irq = 0;
16736 ec->stats_block_coalesce_usecs = 0;
16737 }
16738 }
16739
16740 static int tg3_init_one(struct pci_dev *pdev,
16741 const struct pci_device_id *ent)
16742 {
16743 struct net_device *dev;
16744 struct tg3 *tp;
16745 int i, err, pm_cap;
16746 u32 sndmbx, rcvmbx, intmbx;
16747 char str[40];
16748 u64 dma_mask, persist_dma_mask;
16749 netdev_features_t features = 0;
16750
16751 printk_once(KERN_INFO "%s\n", version);
16752
16753 err = pci_enable_device(pdev);
16754 if (err) {
16755 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16756 return err;
16757 }
16758
16759 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16760 if (err) {
16761 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16762 goto err_out_disable_pdev;
16763 }
16764
16765 pci_set_master(pdev);
16766
16767 /* Find power-management capability. */
16768 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16769 if (pm_cap == 0) {
16770 dev_err(&pdev->dev,
16771 "Cannot find Power Management capability, aborting\n");
16772 err = -EIO;
16773 goto err_out_free_res;
16774 }
16775
16776 err = pci_set_power_state(pdev, PCI_D0);
16777 if (err) {
16778 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16779 goto err_out_free_res;
16780 }
16781
16782 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16783 if (!dev) {
16784 err = -ENOMEM;
16785 goto err_out_power_down;
16786 }
16787
16788 SET_NETDEV_DEV(dev, &pdev->dev);
16789
16790 tp = netdev_priv(dev);
16791 tp->pdev = pdev;
16792 tp->dev = dev;
16793 tp->pm_cap = pm_cap;
16794 tp->rx_mode = TG3_DEF_RX_MODE;
16795 tp->tx_mode = TG3_DEF_TX_MODE;
16796 tp->irq_sync = 1;
16797
16798 if (tg3_debug > 0)
16799 tp->msg_enable = tg3_debug;
16800 else
16801 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16802
16803 if (pdev_is_ssb_gige_core(pdev)) {
16804 tg3_flag_set(tp, IS_SSB_CORE);
16805 if (ssb_gige_must_flush_posted_writes(pdev))
16806 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16807 if (ssb_gige_one_dma_at_once(pdev))
16808 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16809 if (ssb_gige_have_roboswitch(pdev))
16810 tg3_flag_set(tp, ROBOSWITCH);
16811 if (ssb_gige_is_rgmii(pdev))
16812 tg3_flag_set(tp, RGMII_MODE);
16813 }
16814
16815 /* The word/byte swap controls here control register access byte
16816 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16817 * setting below.
16818 */
16819 tp->misc_host_ctrl =
16820 MISC_HOST_CTRL_MASK_PCI_INT |
16821 MISC_HOST_CTRL_WORD_SWAP |
16822 MISC_HOST_CTRL_INDIR_ACCESS |
16823 MISC_HOST_CTRL_PCISTATE_RW;
16824
16825 /* The NONFRM (non-frame) byte/word swap controls take effect
16826 * on descriptor entries, anything which isn't packet data.
16827 *
16828 * The StrongARM chips on the board (one for tx, one for rx)
16829 * are running in big-endian mode.
16830 */
16831 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16832 GRC_MODE_WSWAP_NONFRM_DATA);
16833 #ifdef __BIG_ENDIAN
16834 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16835 #endif
16836 spin_lock_init(&tp->lock);
16837 spin_lock_init(&tp->indirect_lock);
16838 INIT_WORK(&tp->reset_task, tg3_reset_task);
16839
16840 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16841 if (!tp->regs) {
16842 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16843 err = -ENOMEM;
16844 goto err_out_free_dev;
16845 }
16846
16847 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16848 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16849 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16850 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16851 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16852 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16853 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16854 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16855 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16856 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16857 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16858 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16859 tg3_flag_set(tp, ENABLE_APE);
16860 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16861 if (!tp->aperegs) {
16862 dev_err(&pdev->dev,
16863 "Cannot map APE registers, aborting\n");
16864 err = -ENOMEM;
16865 goto err_out_iounmap;
16866 }
16867 }
16868
16869 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16870 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16871
16872 dev->ethtool_ops = &tg3_ethtool_ops;
16873 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16874 dev->netdev_ops = &tg3_netdev_ops;
16875 dev->irq = pdev->irq;
16876
16877 err = tg3_get_invariants(tp, ent);
16878 if (err) {
16879 dev_err(&pdev->dev,
16880 "Problem fetching invariants of chip, aborting\n");
16881 goto err_out_apeunmap;
16882 }
16883
16884 /* The EPB bridge inside 5714, 5715, and 5780 and any
16885 * device behind the EPB cannot support DMA addresses > 40-bit.
16886 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16887 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16888 * do DMA address check in tg3_start_xmit().
16889 */
16890 if (tg3_flag(tp, IS_5788))
16891 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16892 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16893 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16894 #ifdef CONFIG_HIGHMEM
16895 dma_mask = DMA_BIT_MASK(64);
16896 #endif
16897 } else
16898 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16899
16900 /* Configure DMA attributes. */
16901 if (dma_mask > DMA_BIT_MASK(32)) {
16902 err = pci_set_dma_mask(pdev, dma_mask);
16903 if (!err) {
16904 features |= NETIF_F_HIGHDMA;
16905 err = pci_set_consistent_dma_mask(pdev,
16906 persist_dma_mask);
16907 if (err < 0) {
16908 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16909 "DMA for consistent allocations\n");
16910 goto err_out_apeunmap;
16911 }
16912 }
16913 }
16914 if (err || dma_mask == DMA_BIT_MASK(32)) {
16915 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16916 if (err) {
16917 dev_err(&pdev->dev,
16918 "No usable DMA configuration, aborting\n");
16919 goto err_out_apeunmap;
16920 }
16921 }
16922
16923 tg3_init_bufmgr_config(tp);
16924
16925 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16926
16927 /* 5700 B0 chips do not support checksumming correctly due
16928 * to hardware bugs.
16929 */
16930 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16931 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16932
16933 if (tg3_flag(tp, 5755_PLUS))
16934 features |= NETIF_F_IPV6_CSUM;
16935 }
16936
16937 /* TSO is on by default on chips that support hardware TSO.
16938 * Firmware TSO on older chips gives lower performance, so it
16939 * is off by default, but can be enabled using ethtool.
16940 */
16941 if ((tg3_flag(tp, HW_TSO_1) ||
16942 tg3_flag(tp, HW_TSO_2) ||
16943 tg3_flag(tp, HW_TSO_3)) &&
16944 (features & NETIF_F_IP_CSUM))
16945 features |= NETIF_F_TSO;
16946 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16947 if (features & NETIF_F_IPV6_CSUM)
16948 features |= NETIF_F_TSO6;
16949 if (tg3_flag(tp, HW_TSO_3) ||
16950 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16951 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16952 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16953 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16954 tg3_asic_rev(tp) == ASIC_REV_57780)
16955 features |= NETIF_F_TSO_ECN;
16956 }
16957
16958 dev->features |= features;
16959 dev->vlan_features |= features;
16960
16961 /*
16962 * Add loopback capability only for a subset of devices that support
16963 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16964 * loopback for the remaining devices.
16965 */
16966 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16967 !tg3_flag(tp, CPMU_PRESENT))
16968 /* Add the loopback capability */
16969 features |= NETIF_F_LOOPBACK;
16970
16971 dev->hw_features |= features;
16972
16973 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16974 !tg3_flag(tp, TSO_CAPABLE) &&
16975 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16976 tg3_flag_set(tp, MAX_RXPEND_64);
16977 tp->rx_pending = 63;
16978 }
16979
16980 err = tg3_get_device_address(tp);
16981 if (err) {
16982 dev_err(&pdev->dev,
16983 "Could not obtain valid ethernet address, aborting\n");
16984 goto err_out_apeunmap;
16985 }
16986
16987 /*
16988 	 * Reset chip in case UNDI or EFI driver did not shut down DMA.
16989 	 * Otherwise the DMA self test will enable WDMAC and we'll see (spurious)
16990 * pending DMA on the PCI bus at that point.
16991 */
16992 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16993 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16994 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16995 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16996 }
16997
16998 err = tg3_test_dma(tp);
16999 if (err) {
17000 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17001 goto err_out_apeunmap;
17002 }
17003
17004 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17005 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17006 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17007 for (i = 0; i < tp->irq_max; i++) {
17008 struct tg3_napi *tnapi = &tp->napi[i];
17009
17010 tnapi->tp = tp;
17011 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17012
17013 tnapi->int_mbox = intmbx;
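		/* The first five interrupt mailboxes are spaced 8 bytes
		 * apart; later vectors are packed at 4-byte spacing.
		 */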
17014 if (i <= 4)
17015 intmbx += 0x8;
17016 else
17017 intmbx += 0x4;
17018
17019 tnapi->consmbox = rcvmbx;
17020 tnapi->prodmbox = sndmbx;
17021
17022 if (i)
17023 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17024 else
17025 tnapi->coal_now = HOSTCC_MODE_NOW;
17026
17027 if (!tg3_flag(tp, SUPPORT_MSIX))
17028 break;
17029
17030 /*
17031 * If we support MSIX, we'll be using RSS. If we're using
17032 * RSS, the first vector only handles link interrupts and the
17033 * remaining vectors handle rx and tx interrupts. Reuse the
17034 * mailbox values for the next iteration. The values we setup
17035 * above are still useful for the single vectored mode.
17036 */
17037 if (!i)
17038 continue;
17039
17040 rcvmbx += 0x8;
17041
17042 if (sndmbx & 0x4)
17043 sndmbx -= 0x4;
17044 else
17045 sndmbx += 0xc;
17046 }
17047
17048 tg3_init_coal(tp);
17049
17050 pci_set_drvdata(pdev, dev);
17051
17052 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17053 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17054 tg3_asic_rev(tp) == ASIC_REV_5762)
17055 tg3_flag_set(tp, PTP_CAPABLE);
17056
17057 if (tg3_flag(tp, 5717_PLUS)) {
17058 /* Resume a low-power mode */
17059 tg3_frob_aux_power(tp, false);
17060 }
17061
17062 tg3_timer_init(tp);
17063
17064 tg3_carrier_off(tp);
17065
17066 err = register_netdev(dev);
17067 if (err) {
17068 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17069 goto err_out_apeunmap;
17070 }
17071
17072 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17073 tp->board_part_number,
17074 tg3_chip_rev_id(tp),
17075 tg3_bus_string(tp, str),
17076 dev->dev_addr);
17077
17078 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17079 struct phy_device *phydev;
17080 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17081 netdev_info(dev,
17082 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17083 phydev->drv->name, dev_name(&phydev->dev));
17084 } else {
17085 char *ethtype;
17086
17087 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17088 ethtype = "10/100Base-TX";
17089 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17090 ethtype = "1000Base-SX";
17091 else
17092 ethtype = "10/100/1000Base-T";
17093
17094 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17095 "(WireSpeed[%d], EEE[%d])\n",
17096 tg3_phy_string(tp), ethtype,
17097 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17098 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17099 }
17100
17101 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17102 (dev->features & NETIF_F_RXCSUM) != 0,
17103 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17104 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17105 tg3_flag(tp, ENABLE_ASF) != 0,
17106 tg3_flag(tp, TSO_CAPABLE) != 0);
17107 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17108 tp->dma_rwctrl,
17109 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17110 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17111
17112 pci_save_state(pdev);
17113
17114 return 0;
17115
17116 err_out_apeunmap:
17117 if (tp->aperegs) {
17118 iounmap(tp->aperegs);
17119 tp->aperegs = NULL;
17120 }
17121
17122 err_out_iounmap:
17123 if (tp->regs) {
17124 iounmap(tp->regs);
17125 tp->regs = NULL;
17126 }
17127
17128 err_out_free_dev:
17129 free_netdev(dev);
17130
17131 err_out_power_down:
17132 pci_set_power_state(pdev, PCI_D3hot);
17133
17134 err_out_free_res:
17135 pci_release_regions(pdev);
17136
17137 err_out_disable_pdev:
17138 pci_disable_device(pdev);
17139 pci_set_drvdata(pdev, NULL);
17140 return err;
17141 }
17142
17143 static void tg3_remove_one(struct pci_dev *pdev)
17144 {
17145 struct net_device *dev = pci_get_drvdata(pdev);
17146
17147 if (dev) {
17148 struct tg3 *tp = netdev_priv(dev);
17149
17150 release_firmware(tp->fw);
17151
17152 tg3_reset_task_cancel(tp);
17153
17154 if (tg3_flag(tp, USE_PHYLIB)) {
17155 tg3_phy_fini(tp);
17156 tg3_mdio_fini(tp);
17157 }
17158
17159 unregister_netdev(dev);
17160 if (tp->aperegs) {
17161 iounmap(tp->aperegs);
17162 tp->aperegs = NULL;
17163 }
17164 if (tp->regs) {
17165 iounmap(tp->regs);
17166 tp->regs = NULL;
17167 }
17168 free_netdev(dev);
17169 pci_release_regions(pdev);
17170 pci_disable_device(pdev);
17171 pci_set_drvdata(pdev, NULL);
17172 }
17173 }
17174
17175 #ifdef CONFIG_PM_SLEEP
17176 static int tg3_suspend(struct device *device)
17177 {
17178 struct pci_dev *pdev = to_pci_dev(device);
17179 struct net_device *dev = pci_get_drvdata(pdev);
17180 struct tg3 *tp = netdev_priv(dev);
17181 int err;
17182
17183 if (!netif_running(dev))
17184 return 0;
17185
17186 tg3_reset_task_cancel(tp);
17187 tg3_phy_stop(tp);
17188 tg3_netif_stop(tp);
17189
17190 tg3_timer_stop(tp);
17191
17192 tg3_full_lock(tp, 1);
17193 tg3_disable_ints(tp);
17194 tg3_full_unlock(tp);
17195
17196 netif_device_detach(dev);
17197
17198 tg3_full_lock(tp, 0);
17199 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17200 tg3_flag_clear(tp, INIT_COMPLETE);
17201 tg3_full_unlock(tp);
17202
17203 err = tg3_power_down_prepare(tp);
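	/* If the transition to low power fails, restart the hardware
	 * and reattach the device so it is left in a usable state.
	 */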
17204 if (err) {
17205 int err2;
17206
17207 tg3_full_lock(tp, 0);
17208
17209 tg3_flag_set(tp, INIT_COMPLETE);
17210 err2 = tg3_restart_hw(tp, 1);
17211 if (err2)
17212 goto out;
17213
17214 tg3_timer_start(tp);
17215
17216 netif_device_attach(dev);
17217 tg3_netif_start(tp);
17218
17219 out:
17220 tg3_full_unlock(tp);
17221
17222 if (!err2)
17223 tg3_phy_start(tp);
17224 }
17225
17226 return err;
17227 }
17228
17229 static int tg3_resume(struct device *device)
17230 {
17231 struct pci_dev *pdev = to_pci_dev(device);
17232 struct net_device *dev = pci_get_drvdata(pdev);
17233 struct tg3 *tp = netdev_priv(dev);
17234 int err;
17235
17236 if (!netif_running(dev))
17237 return 0;
17238
17239 netif_device_attach(dev);
17240
17241 tg3_full_lock(tp, 0);
17242
17243 tg3_flag_set(tp, INIT_COMPLETE);
17244 err = tg3_restart_hw(tp, 1);
17245 if (err)
17246 goto out;
17247
17248 tg3_timer_start(tp);
17249
17250 tg3_netif_start(tp);
17251
17252 out:
17253 tg3_full_unlock(tp);
17254
17255 if (!err)
17256 tg3_phy_start(tp);
17257
17258 return err;
17259 }
17260
17261 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17262 #define TG3_PM_OPS (&tg3_pm_ops)
17263
17264 #else
17265
17266 #define TG3_PM_OPS NULL
17267
17268 #endif /* CONFIG_PM_SLEEP */
17269
17270 /**
17271 * tg3_io_error_detected - called when PCI error is detected
17272 * @pdev: Pointer to PCI device
17273 * @state: The current pci connection state
17274 *
17275 * This function is called after a PCI bus error affecting
17276 * this device has been detected.
17277 */
17278 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17279 pci_channel_state_t state)
17280 {
17281 struct net_device *netdev = pci_get_drvdata(pdev);
17282 struct tg3 *tp = netdev_priv(netdev);
17283 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17284
17285 netdev_info(netdev, "PCI I/O error detected\n");
17286
17287 rtnl_lock();
17288
17289 if (!netif_running(netdev))
17290 goto done;
17291
17292 tg3_phy_stop(tp);
17293
17294 tg3_netif_stop(tp);
17295
17296 tg3_timer_stop(tp);
17297
17298 /* Want to make sure that the reset task doesn't run */
17299 tg3_reset_task_cancel(tp);
17300
17301 netif_device_detach(netdev);
17302
17303 /* Clean up software state, even if MMIO is blocked */
17304 tg3_full_lock(tp, 0);
17305 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17306 tg3_full_unlock(tp);
17307
17308 done:
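	/* A permanent failure means the device is not coming back;
	 * otherwise disable it and keep the default NEED_RESET result
	 * so the core requests a slot reset.
	 */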
17309 if (state == pci_channel_io_perm_failure)
17310 err = PCI_ERS_RESULT_DISCONNECT;
17311 else
17312 pci_disable_device(pdev);
17313
17314 rtnl_unlock();
17315
17316 return err;
17317 }
17318
17319 /**
17320 * tg3_io_slot_reset - called after the pci bus has been reset.
17321 * @pdev: Pointer to PCI device
17322 *
17323 * Restart the card from scratch, as if from a cold-boot.
17324 	 * At this point, the card has experienced a hard reset,
17325 * followed by fixups by BIOS, and has its config space
17326 * set up identically to what it was at cold boot.
17327 */
17328 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17329 {
17330 struct net_device *netdev = pci_get_drvdata(pdev);
17331 struct tg3 *tp = netdev_priv(netdev);
17332 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17333 int err;
17334
17335 rtnl_lock();
17336
17337 if (pci_enable_device(pdev)) {
17338 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17339 goto done;
17340 }
17341
17342 pci_set_master(pdev);
17343 pci_restore_state(pdev);
17344 pci_save_state(pdev);
17345
17346 if (!netif_running(netdev)) {
17347 rc = PCI_ERS_RESULT_RECOVERED;
17348 goto done;
17349 }
17350
17351 err = tg3_power_up(tp);
17352 if (err)
17353 goto done;
17354
17355 rc = PCI_ERS_RESULT_RECOVERED;
17356
17357 done:
17358 rtnl_unlock();
17359
17360 return rc;
17361 }
17362
17363 /**
17364 * tg3_io_resume - called when traffic can start flowing again.
17365 * @pdev: Pointer to PCI device
17366 *
17367 * This callback is called when the error recovery driver tells
17368 	 * us that it's OK to resume normal operation.
17369 */
17370 static void tg3_io_resume(struct pci_dev *pdev)
17371 {
17372 struct net_device *netdev = pci_get_drvdata(pdev);
17373 struct tg3 *tp = netdev_priv(netdev);
17374 int err;
17375
17376 rtnl_lock();
17377
17378 if (!netif_running(netdev))
17379 goto done;
17380
17381 tg3_full_lock(tp, 0);
17382 tg3_flag_set(tp, INIT_COMPLETE);
17383 err = tg3_restart_hw(tp, 1);
17384 if (err) {
17385 tg3_full_unlock(tp);
17386 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17387 goto done;
17388 }
17389
17390 netif_device_attach(netdev);
17391
17392 tg3_timer_start(tp);
17393
17394 tg3_netif_start(tp);
17395
17396 tg3_full_unlock(tp);
17397
17398 tg3_phy_start(tp);
17399
17400 done:
17401 rtnl_unlock();
17402 }
17403
17404 static const struct pci_error_handlers tg3_err_handler = {
17405 .error_detected = tg3_io_error_detected,
17406 .slot_reset = tg3_io_slot_reset,
17407 .resume = tg3_io_resume
17408 };
17409
17410 static struct pci_driver tg3_driver = {
17411 .name = DRV_MODULE_NAME,
17412 .id_table = tg3_pci_tbl,
17413 .probe = tg3_init_one,
17414 .remove = tg3_remove_one,
17415 .err_handler = &tg3_err_handler,
17416 .driver.pm = TG3_PM_OPS,
17417 };
17418
17419 static int __init tg3_init(void)
17420 {
17421 return pci_register_driver(&tg3_driver);
17422 }
17423
17424 static void __exit tg3_cleanup(void)
17425 {
17426 pci_unregister_driver(&tg3_driver);
17427 }
17428
17429 module_init(tg3_init);
17430 module_exit(tg3_cleanup);