/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
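
/* For illustration: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), so flag names are
 * checked against enum TG3_FLAGS at compile time instead of silently
 * testing an arbitrary bit.
 */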

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	\
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
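
/* Because TG3_TX_RING_SIZE is a power of two (512), the AND above is
 * exactly the modulo the comment describes: e.g. NEXT_TX(511) == 0,
 * wrapping the producer index without a hardware divide.
 */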

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

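/* Indirect register access: instead of MMIO, the target register
 * offset is written to the TG3PCI_REG_BASE_ADDR config-space window
 * and the data moves through TG3PCI_REG_DATA.  indirect_lock
 * serializes users of the shared window.
 */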
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
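
/* Accessor convention used throughout the driver: tw32()/tr32() go
 * through the function pointers selected at probe time, tw32_f()
 * forces a read-back to flush a posted write, and tw32_wait_f()
 * additionally honors the usec_wait delay described above
 * _tw32_flush().
 */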

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

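/* Lock protocol, as implemented below: write our request bit to the
 * REQ register, poll the GRANT register for up to 1 ms, and on
 * failure write the bit to GRANT to revoke the pending request.
 */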
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

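/* Scratchpad reads are a producer/consumer handshake with the APE
 * firmware: for each chunk the (offset, length) pair is written to
 * the shared message buffer, a SCRTCHPD_READ driver event is posted,
 * and the data is copied out of the message area once the APE clears
 * the event (bounded by the 30 ms wait below).
 */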
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

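/* MII access works by composing a frame in MAC_MI_COM (PHY address,
 * register, command) and polling MI_COM_BUSY until the transaction
 * completes; PHY_BUSY_LOOPS polls at 10 usec apiece bound the wait to
 * roughly 50 ms.  Autopolling is paused around the transaction so the
 * MAC does not own the MDIO bus at the same time.
 */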
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

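/* Clause 45 registers are reached through the clause 22 MMD access
 * registers: select the device in MII_TG3_MMD_CTRL, write the
 * register address to MII_TG3_MMD_ADDRESS, then switch the control
 * register to no-increment data mode and move the data through
 * MII_TG3_MMD_ADDRESS again.
 */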
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

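/* Driver-to-firmware signaling: a command is placed in the
 * NIC_SRAM_FW_CMD_* mailboxes and GRC_RX_CPU_DRIVER_EVENT is raised;
 * the RX CPU firmware clears that bit to acknowledge, which is what
 * tg3_wait_for_event_ack() polls for.
 */
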
1616 /* tp->lock is held. */
1617 static inline void tg3_generate_fw_event(struct tg3 *tp)
1618 {
1619 u32 val;
1620
1621 val = tr32(GRC_RX_CPU_EVENT);
1622 val |= GRC_RX_CPU_DRIVER_EVENT;
1623 tw32_f(GRC_RX_CPU_EVENT, val);
1624
1625 tp->last_event_jiffies = jiffies;
1626 }
1627
1628 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1629
1630 /* tp->lock is held. */
1631 static void tg3_wait_for_event_ack(struct tg3 *tp)
1632 {
1633 int i;
1634 unsigned int delay_cnt;
1635 long time_remain;
1636
1637 /* If enough time has passed, no wait is necessary. */
1638 time_remain = (long)(tp->last_event_jiffies + 1 +
1639 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1640 (long)jiffies;
1641 if (time_remain < 0)
1642 return;
1643
1644 /* Check if we can shorten the wait time. */
1645 delay_cnt = jiffies_to_usecs(time_remain);
1646 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1647 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1648 delay_cnt = (delay_cnt >> 3) + 1;
1649
1650 for (i = 0; i < delay_cnt; i++) {
1651 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1652 break;
1653 if (pci_channel_offline(tp->pdev))
1654 break;
1655
1656 udelay(8);
1657 }
1658 }
1659
1660 /* tp->lock is held. */
1661 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1662 {
1663 u32 reg, val;
1664
1665 val = 0;
1666 if (!tg3_readphy(tp, MII_BMCR, &reg))
1667 val = reg << 16;
1668 if (!tg3_readphy(tp, MII_BMSR, &reg))
1669 val |= (reg & 0xffff);
1670 *data++ = val;
1671
1672 val = 0;
1673 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1674 val = reg << 16;
1675 if (!tg3_readphy(tp, MII_LPA, &reg))
1676 val |= (reg & 0xffff);
1677 *data++ = val;
1678
1679 val = 0;
1680 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1681 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1682 val = reg << 16;
1683 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1684 val |= (reg & 0xffff);
1685 }
1686 *data++ = val;
1687
1688 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1689 val = reg << 16;
1690 else
1691 val = 0;
1692 *data++ = val;
1693 }
1694
1695 /* tp->lock is held. */
1696 static void tg3_ump_link_report(struct tg3 *tp)
1697 {
1698 u32 data[4];
1699
1700 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1701 return;
1702
1703 tg3_phy_gather_ump_data(tp, data);
1704
1705 tg3_wait_for_event_ack(tp);
1706
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1710 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1711 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1712 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1713
1714 tg3_generate_fw_event(tp);
1715 }
1716
1717 /* tp->lock is held. */
1718 static void tg3_stop_fw(struct tg3 *tp)
1719 {
1720 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1721 /* Wait for RX cpu to ACK the previous event. */
1722 tg3_wait_for_event_ack(tp);
1723
1724 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1725
1726 tg3_generate_fw_event(tp);
1727
1728 /* Wait for RX cpu to ACK this event. */
1729 tg3_wait_for_event_ack(tp);
1730 }
1731 }
1732
1733 /* tp->lock is held. */
1734 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1735 {
1736 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1737 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1738
1739 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1740 switch (kind) {
1741 case RESET_KIND_INIT:
1742 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743 DRV_STATE_START);
1744 break;
1745
1746 case RESET_KIND_SHUTDOWN:
1747 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1748 DRV_STATE_UNLOAD);
1749 break;
1750
1751 case RESET_KIND_SUSPEND:
1752 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753 DRV_STATE_SUSPEND);
1754 break;
1755
1756 default:
1757 break;
1758 }
1759 }
1760 }
1761
1762 /* tp->lock is held. */
1763 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1764 {
1765 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1766 switch (kind) {
1767 case RESET_KIND_INIT:
1768 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1769 DRV_STATE_START_DONE);
1770 break;
1771
1772 case RESET_KIND_SHUTDOWN:
1773 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1774 DRV_STATE_UNLOAD_DONE);
1775 break;
1776
1777 default:
1778 break;
1779 }
1780 }
1781 }
1782
1783 /* tp->lock is held. */
1784 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1785 {
1786 if (tg3_flag(tp, ENABLE_ASF)) {
1787 switch (kind) {
1788 case RESET_KIND_INIT:
1789 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1790 DRV_STATE_START);
1791 break;
1792
1793 case RESET_KIND_SHUTDOWN:
1794 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1795 DRV_STATE_UNLOAD);
1796 break;
1797
1798 case RESET_KIND_SUSPEND:
1799 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1800 DRV_STATE_SUSPEND);
1801 break;
1802
1803 default:
1804 break;
1805 }
1806 }
1807 }
1808
1809 static int tg3_poll_fw(struct tg3 *tp)
1810 {
1811 int i;
1812 u32 val;
1813
1814 if (tg3_flag(tp, NO_FWARE_REPORTED))
1815 return 0;
1816
1817 if (tg3_flag(tp, IS_SSB_CORE)) {
1818 /* We don't use firmware. */
1819 return 0;
1820 }
1821
1822 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1823 /* Wait up to 20ms for init done. */
1824 for (i = 0; i < 200; i++) {
1825 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1826 return 0;
1827 if (pci_channel_offline(tp->pdev))
1828 return -ENODEV;
1829
1830 udelay(100);
1831 }
1832 return -ENODEV;
1833 }
1834
1835 /* Wait for firmware initialization to complete. */
1836 for (i = 0; i < 100000; i++) {
1837 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1838 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1839 break;
1840 if (pci_channel_offline(tp->pdev)) {
1841 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1842 tg3_flag_set(tp, NO_FWARE_REPORTED);
1843 netdev_info(tp->dev, "No firmware running\n");
1844 }
1845
1846 break;
1847 }
1848
1849 udelay(10);
1850 }
1851
1852 /* Chip might not be fitted with firmware. Some Sun onboard
1853 * parts are configured like that. So don't signal the timeout
1854 * of the above loop as an error, but do report the lack of
1855 * running firmware once.
1856 */
1857 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1858 tg3_flag_set(tp, NO_FWARE_REPORTED);
1859
1860 netdev_info(tp->dev, "No firmware running\n");
1861 }
1862
1863 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1864 /* The 57765 A0 needs a little more
1865 * time to do some important work.
1866 */
1867 mdelay(10);
1868 }
1869
1870 return 0;
1871 }
1872
1873 static void tg3_link_report(struct tg3 *tp)
1874 {
1875 if (!netif_carrier_ok(tp->dev)) {
1876 netif_info(tp, link, tp->dev, "Link is down\n");
1877 tg3_ump_link_report(tp);
1878 } else if (netif_msg_link(tp)) {
1879 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1880 (tp->link_config.active_speed == SPEED_1000 ?
1881 1000 :
1882 (tp->link_config.active_speed == SPEED_100 ?
1883 100 : 10)),
1884 (tp->link_config.active_duplex == DUPLEX_FULL ?
1885 "full" : "half"));
1886
1887 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1888 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1889 "on" : "off",
1890 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1891 "on" : "off");
1892
1893 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1894 netdev_info(tp->dev, "EEE is %s\n",
1895 tp->setlpicnt ? "enabled" : "disabled");
1896
1897 tg3_ump_link_report(tp);
1898 }
1899
1900 tp->link_up = netif_carrier_ok(tp->dev);
1901 }
1902
1903 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1904 {
1905 u32 flowctrl = 0;
1906
1907 if (adv & ADVERTISE_PAUSE_CAP) {
1908 flowctrl |= FLOW_CTRL_RX;
1909 if (!(adv & ADVERTISE_PAUSE_ASYM))
1910 flowctrl |= FLOW_CTRL_TX;
1911 } else if (adv & ADVERTISE_PAUSE_ASYM)
1912 flowctrl |= FLOW_CTRL_TX;
1913
1914 return flowctrl;
1915 }
1916
1917 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1918 {
1919 u16 miireg;
1920
1921 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1922 miireg = ADVERTISE_1000XPAUSE;
1923 else if (flow_ctrl & FLOW_CTRL_TX)
1924 miireg = ADVERTISE_1000XPSE_ASYM;
1925 else if (flow_ctrl & FLOW_CTRL_RX)
1926 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1927 else
1928 miireg = 0;
1929
1930 return miireg;
1931 }
1932
1933 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1934 {
1935 u32 flowctrl = 0;
1936
1937 if (adv & ADVERTISE_1000XPAUSE) {
1938 flowctrl |= FLOW_CTRL_RX;
1939 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1940 flowctrl |= FLOW_CTRL_TX;
1941 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1942 flowctrl |= FLOW_CTRL_TX;
1943
1944 return flowctrl;
1945 }
1946
1947 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1948 {
1949 u8 cap = 0;
1950
1951 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1952 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1953 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1954 if (lcladv & ADVERTISE_1000XPAUSE)
1955 cap = FLOW_CTRL_RX;
1956 if (rmtadv & ADVERTISE_1000XPAUSE)
1957 cap = FLOW_CTRL_TX;
1958 }
1959
1960 return cap;
1961 }
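
/* Illustrative summary of the resolution above (cf. IEEE 802.3 Annex
 * 28B.3), derived directly from tg3_resolve_flowctrl_1000X(); this is
 * documentation, not driver logic:
 *
 *   lcladv        rmtadv        resolved cap
 *   PAUSE         PAUSE         FLOW_CTRL_TX | FLOW_CTRL_RX
 *   PAUSE|ASYM    ASYM          FLOW_CTRL_RX
 *   ASYM          PAUSE|ASYM    FLOW_CTRL_TX
 *   anything else               0
 */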
1962
1963 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1964 {
1965 u8 autoneg;
1966 u8 flowctrl = 0;
1967 u32 old_rx_mode = tp->rx_mode;
1968 u32 old_tx_mode = tp->tx_mode;
1969
1970 if (tg3_flag(tp, USE_PHYLIB))
1971 autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1972 else
1973 autoneg = tp->link_config.autoneg;
1974
1975 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1976 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1977 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1978 else
1979 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1980 } else
1981 flowctrl = tp->link_config.flowctrl;
1982
1983 tp->link_config.active_flowctrl = flowctrl;
1984
1985 if (flowctrl & FLOW_CTRL_RX)
1986 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1987 else
1988 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1989
1990 if (old_rx_mode != tp->rx_mode)
1991 tw32_f(MAC_RX_MODE, tp->rx_mode);
1992
1993 if (flowctrl & FLOW_CTRL_TX)
1994 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1995 else
1996 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1997
1998 if (old_tx_mode != tp->tx_mode)
1999 tw32_f(MAC_TX_MODE, tp->tx_mode);
2000 }
2001
2002 static void tg3_adjust_link(struct net_device *dev)
2003 {
2004 u8 oldflowctrl, linkmesg = 0;
2005 u32 mac_mode, lcl_adv, rmt_adv;
2006 struct tg3 *tp = netdev_priv(dev);
2007 struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2008
2009 spin_lock_bh(&tp->lock);
2010
2011 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2012 MAC_MODE_HALF_DUPLEX);
2013
2014 oldflowctrl = tp->link_config.active_flowctrl;
2015
2016 if (phydev->link) {
2017 lcl_adv = 0;
2018 rmt_adv = 0;
2019
2020 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2021 mac_mode |= MAC_MODE_PORT_MODE_MII;
2022 else if (phydev->speed == SPEED_1000 ||
2023 tg3_asic_rev(tp) != ASIC_REV_5785)
2024 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2025 else
2026 mac_mode |= MAC_MODE_PORT_MODE_MII;
2027
2028 if (phydev->duplex == DUPLEX_HALF)
2029 mac_mode |= MAC_MODE_HALF_DUPLEX;
2030 else {
2031 lcl_adv = mii_advertise_flowctrl(
2032 tp->link_config.flowctrl);
2033
2034 if (phydev->pause)
2035 rmt_adv = LPA_PAUSE_CAP;
2036 if (phydev->asym_pause)
2037 rmt_adv |= LPA_PAUSE_ASYM;
2038 }
2039
2040 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2041 } else
2042 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2043
2044 if (mac_mode != tp->mac_mode) {
2045 tp->mac_mode = mac_mode;
2046 tw32_f(MAC_MODE, tp->mac_mode);
2047 udelay(40);
2048 }
2049
2050 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2051 if (phydev->speed == SPEED_10)
2052 tw32(MAC_MI_STAT,
2053 MAC_MI_STAT_10MBPS_MODE |
2054 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 else
2056 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2057 }
2058
2059 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2060 tw32(MAC_TX_LENGTHS,
2061 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062 (6 << TX_LENGTHS_IPG_SHIFT) |
2063 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2064 else
2065 tw32(MAC_TX_LENGTHS,
2066 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2067 (6 << TX_LENGTHS_IPG_SHIFT) |
2068 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2069
2070 if (phydev->link != tp->old_link ||
2071 phydev->speed != tp->link_config.active_speed ||
2072 phydev->duplex != tp->link_config.active_duplex ||
2073 oldflowctrl != tp->link_config.active_flowctrl)
2074 linkmesg = 1;
2075
2076 tp->old_link = phydev->link;
2077 tp->link_config.active_speed = phydev->speed;
2078 tp->link_config.active_duplex = phydev->duplex;
2079
2080 spin_unlock_bh(&tp->lock);
2081
2082 if (linkmesg)
2083 tg3_link_report(tp);
2084 }
2085
2086 static int tg3_phy_init(struct tg3 *tp)
2087 {
2088 struct phy_device *phydev;
2089
2090 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2091 return 0;
2092
2093 /* Bring the PHY back to a known state. */
2094 tg3_bmcr_reset(tp);
2095
2096 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2097
2098 /* Attach the MAC to the PHY. */
2099 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2100 tg3_adjust_link, phydev->interface);
2101 if (IS_ERR(phydev)) {
2102 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2103 return PTR_ERR(phydev);
2104 }
2105
2106 /* Mask with MAC supported features. */
2107 switch (phydev->interface) {
2108 case PHY_INTERFACE_MODE_GMII:
2109 case PHY_INTERFACE_MODE_RGMII:
2110 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2111 phydev->supported &= (PHY_GBIT_FEATURES |
2112 SUPPORTED_Pause |
2113 SUPPORTED_Asym_Pause);
2114 break;
2115 }
2116 /* fall through */
2117 case PHY_INTERFACE_MODE_MII:
2118 phydev->supported &= (PHY_BASIC_FEATURES |
2119 SUPPORTED_Pause |
2120 SUPPORTED_Asym_Pause);
2121 break;
2122 default:
2123 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2124 return -EINVAL;
2125 }
2126
2127 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2128
2129 phydev->advertising = phydev->supported;
2130
2131 return 0;
2132 }
2133
2134 static void tg3_phy_start(struct tg3 *tp)
2135 {
2136 struct phy_device *phydev;
2137
2138 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2139 return;
2140
2141 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2142
2143 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2144 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2145 phydev->speed = tp->link_config.speed;
2146 phydev->duplex = tp->link_config.duplex;
2147 phydev->autoneg = tp->link_config.autoneg;
2148 phydev->advertising = tp->link_config.advertising;
2149 }
2150
2151 phy_start(phydev);
2152
2153 phy_start_aneg(phydev);
2154 }
2155
2156 static void tg3_phy_stop(struct tg3 *tp)
2157 {
2158 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2159 return;
2160
2161 phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2162 }
2163
2164 static void tg3_phy_fini(struct tg3 *tp)
2165 {
2166 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2167 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2168 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2169 }
2170 }
2171
2172 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2173 {
2174 int err;
2175 u32 val;
2176
2177 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2178 return 0;
2179
2180 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2181 /* Cannot do read-modify-write on 5401 */
2182 err = tg3_phy_auxctl_write(tp,
2183 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2184 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2185 0x4c20);
2186 goto done;
2187 }
2188
2189 err = tg3_phy_auxctl_read(tp,
2190 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2191 if (err)
2192 return err;
2193
2194 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2195 err = tg3_phy_auxctl_write(tp,
2196 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2197
2198 done:
2199 return err;
2200 }
2201
2202 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2203 {
2204 u32 phytest;
2205
2206 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2207 u32 phy;
2208
2209 tg3_writephy(tp, MII_TG3_FET_TEST,
2210 phytest | MII_TG3_FET_SHADOW_EN);
2211 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2212 if (enable)
2213 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2214 else
2215 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2216 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2217 }
2218 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2219 }
2220 }
2221
2222 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2223 {
2224 u32 reg;
2225
2226 if (!tg3_flag(tp, 5705_PLUS) ||
2227 (tg3_flag(tp, 5717_PLUS) &&
2228 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2229 return;
2230
2231 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2232 tg3_phy_fet_toggle_apd(tp, enable);
2233 return;
2234 }
2235
2236 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2237 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2238 MII_TG3_MISC_SHDW_SCR5_SDTL |
2239 MII_TG3_MISC_SHDW_SCR5_C125OE;
2240 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2241 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2242
2243 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2244
2246 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2247 if (enable)
2248 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2249
2250 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2251 }
2252
2253 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2254 {
2255 u32 phy;
2256
2257 if (!tg3_flag(tp, 5705_PLUS) ||
2258 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2259 return;
2260
2261 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2262 u32 ephy;
2263
2264 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2265 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2266
2267 tg3_writephy(tp, MII_TG3_FET_TEST,
2268 ephy | MII_TG3_FET_SHADOW_EN);
2269 if (!tg3_readphy(tp, reg, &phy)) {
2270 if (enable)
2271 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2272 else
2273 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2274 tg3_writephy(tp, reg, phy);
2275 }
2276 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2277 }
2278 } else {
2279 int ret;
2280
2281 ret = tg3_phy_auxctl_read(tp,
2282 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2283 if (!ret) {
2284 if (enable)
2285 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2286 else
2287 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2288 tg3_phy_auxctl_write(tp,
2289 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2290 }
2291 }
2292 }
2293
2294 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2295 {
2296 int ret;
2297 u32 val;
2298
2299 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2300 return;
2301
2302 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2303 if (!ret)
2304 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2305 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2306 }
2307
2308 static void tg3_phy_apply_otp(struct tg3 *tp)
2309 {
2310 u32 otp, phy;
2311
2312 if (!tp->phy_otp)
2313 return;
2314
2315 otp = tp->phy_otp;
2316
2317 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2318 return;
2319
2320 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2321 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2322 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2323
2324 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2325 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2327
2328 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2329 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2330 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2331
2332 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2333 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2334
2335 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2337
2338 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2339 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2340 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2341
2342 tg3_phy_toggle_auxctl_smdsp(tp, false);
2343 }
2344
2345 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2346 {
2347 u32 val;
2348 struct ethtool_eee *dest = &tp->eee;
2349
2350 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2351 return;
2352
2353 if (eee)
2354 dest = eee;
2355
2356 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2357 return;
2358
2359 /* Pull eee_active */
2360 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2361 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2362 dest->eee_active = 1;
2363 } else
2364 dest->eee_active = 0;
2365
2366 /* Pull lp advertised settings */
2367 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2368 return;
2369 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2370
2371 /* Pull advertised and eee_enabled settings */
2372 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2373 return;
2374 dest->eee_enabled = !!val;
2375 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2376
2377 /* Pull tx_lpi_enabled */
2378 val = tr32(TG3_CPMU_EEE_MODE);
2379 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2380
2381 /* Pull lpi timer value */
2382 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2383 }
2384
2385 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2386 {
2387 u32 val;
2388
2389 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2390 return;
2391
2392 tp->setlpicnt = 0;
2393
2394 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2395 current_link_up &&
2396 tp->link_config.active_duplex == DUPLEX_FULL &&
2397 (tp->link_config.active_speed == SPEED_100 ||
2398 tp->link_config.active_speed == SPEED_1000)) {
2399 u32 eeectl;
2400
2401 if (tp->link_config.active_speed == SPEED_1000)
2402 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2403 else
2404 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2405
2406 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2407
2408 tg3_eee_pull_config(tp, NULL);
2409 if (tp->eee.eee_active)
2410 tp->setlpicnt = 2;
2411 }
2412
2413 if (!tp->setlpicnt) {
2414 if (current_link_up &&
2415 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2416 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2417 tg3_phy_toggle_auxctl_smdsp(tp, false);
2418 }
2419
2420 val = tr32(TG3_CPMU_EEE_MODE);
2421 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2422 }
2423 }
2424
2425 static void tg3_phy_eee_enable(struct tg3 *tp)
2426 {
2427 u32 val;
2428
2429 if (tp->link_config.active_speed == SPEED_1000 &&
2430 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2431 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2432 tg3_flag(tp, 57765_CLASS)) &&
2433 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2434 val = MII_TG3_DSP_TAP26_ALNOKO |
2435 MII_TG3_DSP_TAP26_RMRXSTO;
2436 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2437 tg3_phy_toggle_auxctl_smdsp(tp, false);
2438 }
2439
2440 val = tr32(TG3_CPMU_EEE_MODE);
2441 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2442 }
2443
2444 static int tg3_wait_macro_done(struct tg3 *tp)
2445 {
2446 int limit = 100;
2447
2448 while (limit--) {
2449 u32 tmp32;
2450
2451 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2452 if ((tmp32 & 0x1000) == 0)
2453 break;
2454 }
2455 }
2456 if (limit < 0)
2457 return -EBUSY;
2458
2459 return 0;
2460 }
2461
2462 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2463 {
2464 static const u32 test_pat[4][6] = {
2465 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2466 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2467 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2468 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2469 };
2470 int chan;
2471
2472 for (chan = 0; chan < 4; chan++) {
2473 int i;
2474
2475 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2476 (chan * 0x2000) | 0x0200);
2477 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2478
2479 for (i = 0; i < 6; i++)
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2481 test_pat[chan][i]);
2482
2483 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2484 if (tg3_wait_macro_done(tp)) {
2485 *resetp = 1;
2486 return -EBUSY;
2487 }
2488
2489 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2490 (chan * 0x2000) | 0x0200);
2491 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2492 if (tg3_wait_macro_done(tp)) {
2493 *resetp = 1;
2494 return -EBUSY;
2495 }
2496
2497 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2498 if (tg3_wait_macro_done(tp)) {
2499 *resetp = 1;
2500 return -EBUSY;
2501 }
2502
2503 for (i = 0; i < 6; i += 2) {
2504 u32 low, high;
2505
2506 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2507 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2508 tg3_wait_macro_done(tp)) {
2509 *resetp = 1;
2510 return -EBUSY;
2511 }
2512 low &= 0x7fff;
2513 high &= 0x000f;
2514 if (low != test_pat[chan][i] ||
2515 high != test_pat[chan][i+1]) {
2516 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2517 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2518 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2519
2520 return -EBUSY;
2521 }
2522 }
2523 }
2524
2525 return 0;
2526 }
2527
2528 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2529 {
2530 int chan;
2531
2532 for (chan = 0; chan < 4; chan++) {
2533 int i;
2534
2535 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2536 (chan * 0x2000) | 0x0200);
2537 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2538 for (i = 0; i < 6; i++)
2539 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2540 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2541 if (tg3_wait_macro_done(tp))
2542 return -EBUSY;
2543 }
2544
2545 return 0;
2546 }
2547
2548 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2549 {
2550 u32 reg32, phy9_orig;
2551 int retries, do_phy_reset, err;
2552
2553 retries = 10;
2554 do_phy_reset = 1;
2555 do {
2556 if (do_phy_reset) {
2557 err = tg3_bmcr_reset(tp);
2558 if (err)
2559 return err;
2560 do_phy_reset = 0;
2561 }
2562
2563 /* Disable transmitter and interrupt. */
2564 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2565 continue;
2566
2567 reg32 |= 0x3000;
2568 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2569
2570 /* Set full-duplex, 1000 mbps. */
2571 tg3_writephy(tp, MII_BMCR,
2572 BMCR_FULLDPLX | BMCR_SPEED1000);
2573
2574 /* Set to master mode. */
2575 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2576 continue;
2577
2578 tg3_writephy(tp, MII_CTRL1000,
2579 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2580
2581 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2582 if (err)
2583 return err;
2584
2585 /* Block the PHY control access. */
2586 tg3_phydsp_write(tp, 0x8005, 0x0800);
2587
2588 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2589 if (!err)
2590 break;
2591 } while (--retries);
2592
2593 err = tg3_phy_reset_chanpat(tp);
2594 if (err)
2595 return err;
2596
2597 tg3_phydsp_write(tp, 0x8005, 0x0000);
2598
2599 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2600 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2601
2602 tg3_phy_toggle_auxctl_smdsp(tp, false);
2603
2604 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2605
2606 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2607 if (err)
2608 return err;
2609
2610 reg32 &= ~0x3000;
2611 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612
2613 return 0;
2614 }
2615
2616 static void tg3_carrier_off(struct tg3 *tp)
2617 {
2618 netif_carrier_off(tp->dev);
2619 tp->link_up = false;
2620 }
2621
2622 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2623 {
2624 if (tg3_flag(tp, ENABLE_ASF))
2625 netdev_warn(tp->dev,
2626 "Management side-band traffic will be interrupted during phy settings change\n");
2627 }
2628
2629 /* Unconditionally reset the tigon3 PHY and reapply the
2630 * chip-specific PHY workarounds. Link is reported down
2631 * while the reset is in progress. */
2632 static int tg3_phy_reset(struct tg3 *tp)
2633 {
2634 u32 val, cpmuctrl;
2635 int err;
2636
2637 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2638 val = tr32(GRC_MISC_CFG);
2639 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2640 udelay(40);
2641 }
2642 err = tg3_readphy(tp, MII_BMSR, &val);
2643 err |= tg3_readphy(tp, MII_BMSR, &val);
2644 if (err != 0)
2645 return -EBUSY;
2646
2647 if (netif_running(tp->dev) && tp->link_up) {
2648 netif_carrier_off(tp->dev);
2649 tg3_link_report(tp);
2650 }
2651
2652 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2653 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2654 tg3_asic_rev(tp) == ASIC_REV_5705) {
2655 err = tg3_phy_reset_5703_4_5(tp);
2656 if (err)
2657 return err;
2658 goto out;
2659 }
2660
2661 cpmuctrl = 0;
2662 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2663 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2664 cpmuctrl = tr32(TG3_CPMU_CTRL);
2665 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2666 tw32(TG3_CPMU_CTRL,
2667 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2668 }
2669
2670 err = tg3_bmcr_reset(tp);
2671 if (err)
2672 return err;
2673
2674 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2675 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2676 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2677
2678 tw32(TG3_CPMU_CTRL, cpmuctrl);
2679 }
2680
2681 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2682 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2683 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2684 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2685 CPMU_LSPD_1000MB_MACCLK_12_5) {
2686 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2687 udelay(40);
2688 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2689 }
2690 }
2691
2692 if (tg3_flag(tp, 5717_PLUS) &&
2693 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2694 return 0;
2695
2696 tg3_phy_apply_otp(tp);
2697
2698 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2699 tg3_phy_toggle_apd(tp, true);
2700 else
2701 tg3_phy_toggle_apd(tp, false);
2702
2703 out:
2704 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2705 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2706 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2707 tg3_phydsp_write(tp, 0x000a, 0x0323);
2708 tg3_phy_toggle_auxctl_smdsp(tp, false);
2709 }
2710
2711 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2712 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2713 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714 }
2715
2716 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2717 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2718 tg3_phydsp_write(tp, 0x000a, 0x310b);
2719 tg3_phydsp_write(tp, 0x201f, 0x9506);
2720 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2721 tg3_phy_toggle_auxctl_smdsp(tp, false);
2722 }
2723 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2724 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2725 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2726 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2728 tg3_writephy(tp, MII_TG3_TEST1,
2729 MII_TG3_TEST1_TRIM_EN | 0x4);
2730 } else
2731 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2732
2733 tg3_phy_toggle_auxctl_smdsp(tp, false);
2734 }
2735 }
2736
2737 /* Set the extended packet length bit (bit 14) on all chips
2738 * that support jumbo frames. */
2739 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2740 /* Cannot do read-modify-write on 5401 */
2741 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2742 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2743 /* Set bit 14 with read-modify-write to preserve other bits */
2744 err = tg3_phy_auxctl_read(tp,
2745 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2746 if (!err)
2747 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2748 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2749 }
2750
2751 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2752 * jumbo frames transmission.
2753 */
2754 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2755 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2756 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2757 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2758 }
2759
2760 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2761 /* adjust output voltage */
2762 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2763 }
2764
2765 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2766 tg3_phydsp_write(tp, 0xffb, 0x4000);
2767
2768 tg3_phy_toggle_automdix(tp, true);
2769 tg3_phy_set_wirespeed(tp);
2770 return 0;
2771 }
2772
2773 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2774 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2775 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2776 TG3_GPIO_MSG_NEED_VAUX)
2777 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2778 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2779 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2780 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2781 (TG3_GPIO_MSG_DRVR_PRES << 12))
2782
2783 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2784 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2785 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2786 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2787 (TG3_GPIO_MSG_NEED_VAUX << 12))
2788
2789 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2790 {
2791 u32 status, shift;
2792
2793 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2794 tg3_asic_rev(tp) == ASIC_REV_5719)
2795 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2796 else
2797 status = tr32(TG3_CPMU_DRV_STATUS);
2798
2799 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2800 status &= ~(TG3_GPIO_MSG_MASK << shift);
2801 status |= (newstat << shift);
2802
2803 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2804 tg3_asic_rev(tp) == ASIC_REV_5719)
2805 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2806 else
2807 tw32(TG3_CPMU_DRV_STATUS, status);
2808
2809 return status >> TG3_APE_GPIO_MSG_SHIFT;
2810 }
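
/* Worked example (illustrative): each PCI function owns a 4-bit
 * nibble in the GPIO message word, starting at
 * TG3_APE_GPIO_MSG_SHIFT.  For pci_fn == 2 and
 * newstat == TG3_GPIO_MSG_NEED_VAUX, shift is
 * TG3_APE_GPIO_MSG_SHIFT + 8; the function clears that nibble and
 * writes 0x2 into it.  Callers test the returned value against the
 * TG3_GPIO_MSG_ALL_*_MASK macros above to see what every function
 * has requested.
 */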
2811
2812 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2813 {
2814 if (!tg3_flag(tp, IS_NIC))
2815 return 0;
2816
2817 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2818 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2819 tg3_asic_rev(tp) == ASIC_REV_5720) {
2820 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2821 return -EIO;
2822
2823 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2824
2825 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826 TG3_GRC_LCLCTL_PWRSW_DELAY);
2827
2828 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2829 } else {
2830 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2831 TG3_GRC_LCLCTL_PWRSW_DELAY);
2832 }
2833
2834 return 0;
2835 }
2836
2837 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2838 {
2839 u32 grc_local_ctrl;
2840
2841 if (!tg3_flag(tp, IS_NIC) ||
2842 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2843 tg3_asic_rev(tp) == ASIC_REV_5701)
2844 return;
2845
2846 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2847
2848 tw32_wait_f(GRC_LOCAL_CTRL,
2849 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852 tw32_wait_f(GRC_LOCAL_CTRL,
2853 grc_local_ctrl,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855
2856 tw32_wait_f(GRC_LOCAL_CTRL,
2857 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2858 TG3_GRC_LCLCTL_PWRSW_DELAY);
2859 }
2860
2861 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2862 {
2863 if (!tg3_flag(tp, IS_NIC))
2864 return;
2865
2866 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2867 tg3_asic_rev(tp) == ASIC_REV_5701) {
2868 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2869 (GRC_LCLCTRL_GPIO_OE0 |
2870 GRC_LCLCTRL_GPIO_OE1 |
2871 GRC_LCLCTRL_GPIO_OE2 |
2872 GRC_LCLCTRL_GPIO_OUTPUT0 |
2873 GRC_LCLCTRL_GPIO_OUTPUT1),
2874 TG3_GRC_LCLCTL_PWRSW_DELAY);
2875 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2876 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2877 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2878 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2879 GRC_LCLCTRL_GPIO_OE1 |
2880 GRC_LCLCTRL_GPIO_OE2 |
2881 GRC_LCLCTRL_GPIO_OUTPUT0 |
2882 GRC_LCLCTRL_GPIO_OUTPUT1 |
2883 tp->grc_local_ctrl;
2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 TG3_GRC_LCLCTL_PWRSW_DELAY);
2886
2887 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 TG3_GRC_LCLCTL_PWRSW_DELAY);
2890
2891 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2892 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2893 TG3_GRC_LCLCTL_PWRSW_DELAY);
2894 } else {
2895 u32 no_gpio2;
2896 u32 grc_local_ctrl = 0;
2897
2898 /* Workaround to prevent excess current draw. */
2899 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2900 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2901 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2902 grc_local_ctrl,
2903 TG3_GRC_LCLCTL_PWRSW_DELAY);
2904 }
2905
2906 /* On 5753 and variants, GPIO2 cannot be used. */
2907 no_gpio2 = tp->nic_sram_data_cfg &
2908 NIC_SRAM_DATA_CFG_NO_GPIO2;
2909
2910 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2911 GRC_LCLCTRL_GPIO_OE1 |
2912 GRC_LCLCTRL_GPIO_OE2 |
2913 GRC_LCLCTRL_GPIO_OUTPUT1 |
2914 GRC_LCLCTRL_GPIO_OUTPUT2;
2915 if (no_gpio2) {
2916 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2917 GRC_LCLCTRL_GPIO_OUTPUT2);
2918 }
2919 tw32_wait_f(GRC_LOCAL_CTRL,
2920 tp->grc_local_ctrl | grc_local_ctrl,
2921 TG3_GRC_LCLCTL_PWRSW_DELAY);
2922
2923 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2924
2925 tw32_wait_f(GRC_LOCAL_CTRL,
2926 tp->grc_local_ctrl | grc_local_ctrl,
2927 TG3_GRC_LCLCTL_PWRSW_DELAY);
2928
2929 if (!no_gpio2) {
2930 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2931 tw32_wait_f(GRC_LOCAL_CTRL,
2932 tp->grc_local_ctrl | grc_local_ctrl,
2933 TG3_GRC_LCLCTL_PWRSW_DELAY);
2934 }
2935 }
2936 }
2937
2938 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2939 {
2940 u32 msg = 0;
2941
2942 /* Serialize power state transitions */
2943 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2944 return;
2945
2946 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2947 msg = TG3_GPIO_MSG_NEED_VAUX;
2948
2949 msg = tg3_set_function_status(tp, msg);
2950
2951 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2952 goto done;
2953
2954 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2955 tg3_pwrsrc_switch_to_vaux(tp);
2956 else
2957 tg3_pwrsrc_die_with_vmain(tp);
2958
2959 done:
2960 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2961 }
2962
2963 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2964 {
2965 bool need_vaux = false;
2966
2967 /* The GPIOs do something completely different on 57765. */
2968 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2969 return;
2970
2971 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2972 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2973 tg3_asic_rev(tp) == ASIC_REV_5720) {
2974 tg3_frob_aux_power_5717(tp, include_wol ?
2975 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2976 return;
2977 }
2978
2979 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2980 struct net_device *dev_peer;
2981
2982 dev_peer = pci_get_drvdata(tp->pdev_peer);
2983
2984 /* remove_one() may have been run on the peer. */
2985 if (dev_peer) {
2986 struct tg3 *tp_peer = netdev_priv(dev_peer);
2987
2988 if (tg3_flag(tp_peer, INIT_COMPLETE))
2989 return;
2990
2991 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2992 tg3_flag(tp_peer, ENABLE_ASF))
2993 need_vaux = true;
2994 }
2995 }
2996
2997 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2998 tg3_flag(tp, ENABLE_ASF))
2999 need_vaux = true;
3000
3001 if (need_vaux)
3002 tg3_pwrsrc_switch_to_vaux(tp);
3003 else
3004 tg3_pwrsrc_die_with_vmain(tp);
3005 }
3006
3007 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3008 {
3009 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3010 return 1;
3011 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3012 if (speed != SPEED_10)
3013 return 1;
3014 } else if (speed == SPEED_10)
3015 return 1;
3016
3017 return 0;
3018 }
3019
3020 static bool tg3_phy_power_bug(struct tg3 *tp)
3021 {
3022 switch (tg3_asic_rev(tp)) {
3023 case ASIC_REV_5700:
3024 case ASIC_REV_5704:
3025 return true;
3026 case ASIC_REV_5780:
3027 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3028 return true;
3029 return false;
3030 case ASIC_REV_5717:
3031 if (!tp->pci_fn)
3032 return true;
3033 return false;
3034 case ASIC_REV_5719:
3035 case ASIC_REV_5720:
3036 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3037 !tp->pci_fn)
3038 return true;
3039 return false;
3040 }
3041
3042 return false;
3043 }
3044
3045 static bool tg3_phy_led_bug(struct tg3 *tp)
3046 {
3047 switch (tg3_asic_rev(tp)) {
3048 case ASIC_REV_5719:
3049 case ASIC_REV_5720:
3050 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3051 !tp->pci_fn)
3052 return true;
3053 return false;
3054 }
3055
3056 return false;
3057 }
3058
3059 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3060 {
3061 u32 val;
3062
3063 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3064 return;
3065
3066 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3067 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3068 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3069 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3070
3071 sg_dig_ctrl |=
3072 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3073 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3074 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3075 }
3076 return;
3077 }
3078
3079 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3080 tg3_bmcr_reset(tp);
3081 val = tr32(GRC_MISC_CFG);
3082 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3083 udelay(40);
3084 return;
3085 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3086 u32 phytest;
3087 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3088 u32 phy;
3089
3090 tg3_writephy(tp, MII_ADVERTISE, 0);
3091 tg3_writephy(tp, MII_BMCR,
3092 BMCR_ANENABLE | BMCR_ANRESTART);
3093
3094 tg3_writephy(tp, MII_TG3_FET_TEST,
3095 phytest | MII_TG3_FET_SHADOW_EN);
3096 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3097 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3098 tg3_writephy(tp,
3099 MII_TG3_FET_SHDW_AUXMODE4,
3100 phy);
3101 }
3102 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3103 }
3104 return;
3105 } else if (do_low_power) {
3106 if (!tg3_phy_led_bug(tp))
3107 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3108 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3109
3110 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3111 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3112 MII_TG3_AUXCTL_PCTL_VREG_11V;
3113 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3114 }
3115
3116 /* The PHY should not be powered down on some chips because
3117 * of bugs.
3118 */
3119 if (tg3_phy_power_bug(tp))
3120 return;
3121
3122 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3123 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3124 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3125 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3126 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3127 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3128 }
3129
3130 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3131 }
3132
3133 /* tp->lock is held. */
3134 static int tg3_nvram_lock(struct tg3 *tp)
3135 {
3136 if (tg3_flag(tp, NVRAM)) {
3137 int i;
3138
3139 if (tp->nvram_lock_cnt == 0) {
3140 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3141 for (i = 0; i < 8000; i++) {
3142 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3143 break;
3144 udelay(20);
3145 }
3146 if (i == 8000) {
3147 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3148 return -ENODEV;
3149 }
3150 }
3151 tp->nvram_lock_cnt++;
3152 }
3153 return 0;
3154 }
3155
3156 /* tp->lock is held. */
3157 static void tg3_nvram_unlock(struct tg3 *tp)
3158 {
3159 if (tg3_flag(tp, NVRAM)) {
3160 if (tp->nvram_lock_cnt > 0)
3161 tp->nvram_lock_cnt--;
3162 if (tp->nvram_lock_cnt == 0)
3163 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3164 }
3165 }
3166
3167 /* tp->lock is held. */
3168 static void tg3_enable_nvram_access(struct tg3 *tp)
3169 {
3170 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3171 u32 nvaccess = tr32(NVRAM_ACCESS);
3172
3173 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3174 }
3175 }
3176
3177 /* tp->lock is held. */
3178 static void tg3_disable_nvram_access(struct tg3 *tp)
3179 {
3180 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3181 u32 nvaccess = tr32(NVRAM_ACCESS);
3182
3183 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3184 }
3185 }
3186
3187 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3188 u32 offset, u32 *val)
3189 {
3190 u32 tmp;
3191 int i;
3192
3193 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3194 return -EINVAL;
3195
3196 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3197 EEPROM_ADDR_DEVID_MASK |
3198 EEPROM_ADDR_READ);
3199 tw32(GRC_EEPROM_ADDR,
3200 tmp |
3201 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3202 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3203 EEPROM_ADDR_ADDR_MASK) |
3204 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3205
3206 for (i = 0; i < 1000; i++) {
3207 tmp = tr32(GRC_EEPROM_ADDR);
3208
3209 if (tmp & EEPROM_ADDR_COMPLETE)
3210 break;
3211 msleep(1);
3212 }
3213 if (!(tmp & EEPROM_ADDR_COMPLETE))
3214 return -EBUSY;
3215
3216 tmp = tr32(GRC_EEPROM_DATA);
3217
3218 /*
3219 * The data will always be opposite the native endian
3220 * format. Perform a blind byteswap to compensate.
3221 */
3222 *val = swab32(tmp);
3223
3224 return 0;
3225 }
3226
3227 #define NVRAM_CMD_TIMEOUT 5000
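/* Worst case wait: NVRAM_CMD_TIMEOUT polls of 10-40us each, i.e. roughly 50-200ms. */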
3228
3229 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230 {
3231 int i;
3232
3233 tw32(NVRAM_CMD, nvram_cmd);
3234 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3235 usleep_range(10, 40);
3236 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3237 udelay(10);
3238 break;
3239 }
3240 }
3241
3242 if (i == NVRAM_CMD_TIMEOUT)
3243 return -EBUSY;
3244
3245 return 0;
3246 }
3247
3248 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3249 {
3250 if (tg3_flag(tp, NVRAM) &&
3251 tg3_flag(tp, NVRAM_BUFFERED) &&
3252 tg3_flag(tp, FLASH) &&
3253 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3254 (tp->nvram_jedecnum == JEDEC_ATMEL))
3255
3256 addr = ((addr / tp->nvram_pagesize) <<
3257 ATMEL_AT45DB0X1B_PAGE_POS) +
3258 (addr % tp->nvram_pagesize);
3259
3260 return addr;
3261 }
3262
3263 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3264 {
3265 if (tg3_flag(tp, NVRAM) &&
3266 tg3_flag(tp, NVRAM_BUFFERED) &&
3267 tg3_flag(tp, FLASH) &&
3268 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3269 (tp->nvram_jedecnum == JEDEC_ATMEL))
3270
3271 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3272 tp->nvram_pagesize) +
3273 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3274
3275 return addr;
3276 }
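
/* Worked example (illustrative, assuming the AT45DB0X1B's 264-byte
 * pages and ATMEL_AT45DB0X1B_PAGE_POS == 9): linear offset 1000 is
 * page 3 (3 * 264 = 792) plus byte 208, so tg3_nvram_phys_addr()
 * yields (3 << 9) + 208 = 1744.  tg3_nvram_logical_addr() inverts
 * this: (1744 >> 9) * 264 + (1744 & 511) = 792 + 208 = 1000.
 */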
3277
3278 /* NOTE: Data read in from NVRAM is byteswapped according to
3279 * the byteswapping settings for all other register accesses.
3280 * tg3 devices are BE devices, so on a BE machine, the data
3281 * returned will be exactly as it is seen in NVRAM. On a LE
3282 * machine, the 32-bit value will be byteswapped.
3283 */
3284 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3285 {
3286 int ret;
3287
3288 if (!tg3_flag(tp, NVRAM))
3289 return tg3_nvram_read_using_eeprom(tp, offset, val);
3290
3291 offset = tg3_nvram_phys_addr(tp, offset);
3292
3293 if (offset > NVRAM_ADDR_MSK)
3294 return -EINVAL;
3295
3296 ret = tg3_nvram_lock(tp);
3297 if (ret)
3298 return ret;
3299
3300 tg3_enable_nvram_access(tp);
3301
3302 tw32(NVRAM_ADDR, offset);
3303 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3304 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3305
3306 if (ret == 0)
3307 *val = tr32(NVRAM_RDDATA);
3308
3309 tg3_disable_nvram_access(tp);
3310
3311 tg3_nvram_unlock(tp);
3312
3313 return ret;
3314 }
3315
3316 /* Ensures NVRAM data is in bytestream format. */
3317 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3318 {
3319 u32 v;
3320 int res = tg3_nvram_read(tp, offset, &v);
3321 if (!res)
3322 *val = cpu_to_be32(v);
3323 return res;
3324 }
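
/* Usage sketch (illustrative only, never called): read a 16-byte
 * blob out of NVRAM in bytestream order with tg3_nvram_read_be32().
 * The 0x100 offset is hypothetical; real callers take offsets from
 * the NVRAM map in tg3.h.
 */
static int __maybe_unused tg3_nvram_read_sketch(struct tg3 *tp, u8 *buf)
{
	int i, err;

	for (i = 0; i < 16; i += 4) {
		err = tg3_nvram_read_be32(tp, 0x100 + i,
					  (__be32 *)(buf + i));
		if (err)
			return err;
	}
	return 0;
}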
3325
3326 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3327 u32 offset, u32 len, u8 *buf)
3328 {
3329 int i, j, rc = 0;
3330 u32 val;
3331
3332 for (i = 0; i < len; i += 4) {
3333 u32 addr;
3334 __be32 data;
3335
3336 addr = offset + i;
3337
3338 memcpy(&data, buf + i, 4);
3339
3340 /*
3341 * The SEEPROM interface expects the data to always be opposite
3342 * the native endian format. We accomplish this by reversing
3343 * all the operations that would have been performed on the
3344 * data from a call to tg3_nvram_read_be32().
3345 */
3346 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3347
3348 val = tr32(GRC_EEPROM_ADDR);
3349 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3350
3351 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3352 EEPROM_ADDR_READ);
3353 tw32(GRC_EEPROM_ADDR, val |
3354 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3355 (addr & EEPROM_ADDR_ADDR_MASK) |
3356 EEPROM_ADDR_START |
3357 EEPROM_ADDR_WRITE);
3358
3359 for (j = 0; j < 1000; j++) {
3360 val = tr32(GRC_EEPROM_ADDR);
3361
3362 if (val & EEPROM_ADDR_COMPLETE)
3363 break;
3364 msleep(1);
3365 }
3366 if (!(val & EEPROM_ADDR_COMPLETE)) {
3367 rc = -EBUSY;
3368 break;
3369 }
3370 }
3371
3372 return rc;
3373 }
3374
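/* Unbuffered flash parts can only be programmed a whole page at a
 * time, so the unbuffered write below is a read-modify-write cycle:
 * read the page into a bounce buffer, merge in the caller's data,
 * issue a write-enable plus page erase, then write-enable again and
 * program the page back one word at a time with FIRST/LAST framing.
 */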
3375 /* offset and length are dword aligned */
3376 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3377 u8 *buf)
3378 {
3379 int ret = 0;
3380 u32 pagesize = tp->nvram_pagesize;
3381 u32 pagemask = pagesize - 1;
3382 u32 nvram_cmd;
3383 u8 *tmp;
3384
3385 tmp = kmalloc(pagesize, GFP_KERNEL);
3386 if (tmp == NULL)
3387 return -ENOMEM;
3388
3389 while (len) {
3390 int j;
3391 u32 phy_addr, page_off, size;
3392
3393 phy_addr = offset & ~pagemask;
3394
3395 for (j = 0; j < pagesize; j += 4) {
3396 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3397 (__be32 *) (tmp + j));
3398 if (ret)
3399 break;
3400 }
3401 if (ret)
3402 break;
3403
3404 page_off = offset & pagemask;
3405 size = pagesize;
3406 if (len < size)
3407 size = len;
3408
3409 len -= size;
3410
3411 memcpy(tmp + page_off, buf, size);
3412
3413 offset = offset + (pagesize - page_off);
3414
3415 tg3_enable_nvram_access(tp);
3416
3417 /*
3418 * Before we can erase the flash page, we need
3419 * to issue a special "write enable" command.
3420 */
3421 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3422
3423 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3424 break;
3425
3426 /* Erase the target page */
3427 tw32(NVRAM_ADDR, phy_addr);
3428
3429 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3430 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3431
3432 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3433 break;
3434
3435 /* Issue another write enable to start the write. */
3436 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3437
3438 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3439 break;
3440
3441 for (j = 0; j < pagesize; j += 4) {
3442 __be32 data;
3443
3444 data = *((__be32 *) (tmp + j));
3445
3446 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3447
3448 tw32(NVRAM_ADDR, phy_addr + j);
3449
3450 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3451 NVRAM_CMD_WR;
3452
3453 if (j == 0)
3454 nvram_cmd |= NVRAM_CMD_FIRST;
3455 else if (j == (pagesize - 4))
3456 nvram_cmd |= NVRAM_CMD_LAST;
3457
3458 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3459 if (ret)
3460 break;
3461 }
3462 if (ret)
3463 break;
3464 }
3465
3466 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3467 tg3_nvram_exec_cmd(tp, nvram_cmd);
3468
3469 kfree(tmp);
3470
3471 return ret;
3472 }
3473
3474 /* offset and length are dword aligned */
3475 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3476 u8 *buf)
3477 {
3478 int i, ret = 0;
3479
3480 for (i = 0; i < len; i += 4, offset += 4) {
3481 u32 page_off, phy_addr, nvram_cmd;
3482 __be32 data;
3483
3484 memcpy(&data, buf + i, 4);
3485 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3486
3487 page_off = offset % tp->nvram_pagesize;
3488
3489 phy_addr = tg3_nvram_phys_addr(tp, offset);
3490
3491 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3492
3493 if (page_off == 0 || i == 0)
3494 nvram_cmd |= NVRAM_CMD_FIRST;
3495 if (page_off == (tp->nvram_pagesize - 4))
3496 nvram_cmd |= NVRAM_CMD_LAST;
3497
3498 if (i == (len - 4))
3499 nvram_cmd |= NVRAM_CMD_LAST;
3500
3501 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3502 !tg3_flag(tp, FLASH) ||
3503 !tg3_flag(tp, 57765_PLUS))
3504 tw32(NVRAM_ADDR, phy_addr);
3505
3506 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3507 !tg3_flag(tp, 5755_PLUS) &&
3508 (tp->nvram_jedecnum == JEDEC_ST) &&
3509 (nvram_cmd & NVRAM_CMD_FIRST)) {
3510 u32 cmd;
3511
3512 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3513 ret = tg3_nvram_exec_cmd(tp, cmd);
3514 if (ret)
3515 break;
3516 }
3517 if (!tg3_flag(tp, FLASH)) {
3518 /* We always do complete word writes to eeprom. */
3519 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3520 }
3521
3522 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3523 if (ret)
3524 break;
3525 }
3526 return ret;
3527 }
3528
3529 /* offset and length are dword aligned */
3530 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3531 {
3532 int ret;
3533
3534 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3535 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3536 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3537 udelay(40);
3538 }
3539
3540 if (!tg3_flag(tp, NVRAM)) {
3541 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3542 } else {
3543 u32 grc_mode;
3544
3545 ret = tg3_nvram_lock(tp);
3546 if (ret)
3547 return ret;
3548
3549 tg3_enable_nvram_access(tp);
3550 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3551 tw32(NVRAM_WRITE1, 0x406);
3552
3553 grc_mode = tr32(GRC_MODE);
3554 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3555
3556 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3557 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3558 buf);
3559 } else {
3560 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3561 buf);
3562 }
3563
3564 grc_mode = tr32(GRC_MODE);
3565 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3566
3567 tg3_disable_nvram_access(tp);
3568 tg3_nvram_unlock(tp);
3569 }
3570
3571 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3572 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3573 udelay(40);
3574 }
3575
3576 return ret;
3577 }
3578
3579 #define RX_CPU_SCRATCH_BASE 0x30000
3580 #define RX_CPU_SCRATCH_SIZE 0x04000
3581 #define TX_CPU_SCRATCH_BASE 0x34000
3582 #define TX_CPU_SCRATCH_SIZE 0x04000
3583
3584 /* tp->lock is held. */
3585 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3586 {
3587 int i;
3588 const int iters = 10000;
3589
3590 for (i = 0; i < iters; i++) {
3591 tw32(cpu_base + CPU_STATE, 0xffffffff);
3592 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3593 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3594 break;
3595 if (pci_channel_offline(tp->pdev))
3596 return -EBUSY;
3597 }
3598
3599 return (i == iters) ? -EBUSY : 0;
3600 }
3601
3602 /* tp->lock is held. */
3603 static int tg3_rxcpu_pause(struct tg3 *tp)
3604 {
3605 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3606
3607 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3608 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3609 udelay(10);
3610
3611 return rc;
3612 }
3613
3614 /* tp->lock is held. */
3615 static int tg3_txcpu_pause(struct tg3 *tp)
3616 {
3617 return tg3_pause_cpu(tp, TX_CPU_BASE);
3618 }
3619
3620 /* tp->lock is held. */
3621 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3622 {
3623 tw32(cpu_base + CPU_STATE, 0xffffffff);
3624 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3625 }
3626
3627 /* tp->lock is held. */
3628 static void tg3_rxcpu_resume(struct tg3 *tp)
3629 {
3630 tg3_resume_cpu(tp, RX_CPU_BASE);
3631 }
3632
3633 /* tp->lock is held. */
3634 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3635 {
3636 int rc;
3637
3638 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3639
3640 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3641 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3642
3643 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3644 return 0;
3645 }
3646 if (cpu_base == RX_CPU_BASE) {
3647 rc = tg3_rxcpu_pause(tp);
3648 } else {
3649 /*
3650 * There is only an Rx CPU for the 5750 derivative in the
3651 * BCM4785.
3652 */
3653 if (tg3_flag(tp, IS_SSB_CORE))
3654 return 0;
3655
3656 rc = tg3_txcpu_pause(tp);
3657 }
3658
3659 if (rc) {
3660 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3661 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3662 return -ENODEV;
3663 }
3664
3665 /* Clear firmware's nvram arbitration. */
3666 if (tg3_flag(tp, NVRAM))
3667 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3668 return 0;
3669 }
3670
3671 static int tg3_fw_data_len(struct tg3 *tp,
3672 const struct tg3_firmware_hdr *fw_hdr)
3673 {
3674 int fw_len;
3675
3676 /* Non-fragmented firmware has one firmware header followed by a
3677 * contiguous chunk of data to be written. The length field in that
3678 * header is not the length of the data to be written but the complete
3679 * length of the bss. The data length is derived from tp->fw->size
3680 * minus the headers.
3681 *
3682 * Fragmented firmware has a main header followed by multiple
3683 * fragments. Each fragment is identical to non-fragmented firmware,
3684 * with a firmware header followed by a contiguous chunk of data. In
3685 * the main header, the length field is unused and set to 0xffffffff.
3686 * In each fragment header the length is the entire size of that
3687 * fragment, i.e. fragment data + header length. The data length is
3688 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3689 */
3690 if (tp->fw_len == 0xffffffff)
3691 fw_len = be32_to_cpu(fw_hdr->len);
3692 else
3693 fw_len = tp->fw->size;
3694
3695 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3696 }
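
/* Illustrative layout, restating the comment above:
 *
 *   non-fragmented:  [hdr: ver | base | len = bss end]  [data ......]
 *
 *   fragmented:      [main hdr: ver | base | len = 0xffffffff]
 *                    [frag hdr: len = hdr + data][data]
 *                    [frag hdr: len = hdr + data][data] ...
 */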
3697
3698 /* tp->lock is held. */
3699 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3700 u32 cpu_scratch_base, int cpu_scratch_size,
3701 const struct tg3_firmware_hdr *fw_hdr)
3702 {
3703 int err, i;
3704 void (*write_op)(struct tg3 *, u32, u32);
3705 int total_len = tp->fw->size;
3706
3707 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3708 netdev_err(tp->dev,
3709 "%s: Trying to load TX cpu firmware which is 5705\n",
3710 __func__);
3711 return -EINVAL;
3712 }
3713
3714 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3715 write_op = tg3_write_mem;
3716 else
3717 write_op = tg3_write_indirect_reg32;
3718
3719 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3720 /* It is possible that bootcode is still loading at this point.
3721 * Get the nvram lock first before halting the cpu.
3722 */
3723 int lock_err = tg3_nvram_lock(tp);
3724 err = tg3_halt_cpu(tp, cpu_base);
3725 if (!lock_err)
3726 tg3_nvram_unlock(tp);
3727 if (err)
3728 goto out;
3729
3730 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3731 write_op(tp, cpu_scratch_base + i, 0);
3732 tw32(cpu_base + CPU_STATE, 0xffffffff);
3733 tw32(cpu_base + CPU_MODE,
3734 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3735 } else {
3736 /* Subtract additional main header for fragmented firmware and
3737 * advance to the first fragment
3738 */
3739 total_len -= TG3_FW_HDR_LEN;
3740 fw_hdr++;
3741 }
3742
3743 do {
3744 u32 *fw_data = (u32 *)(fw_hdr + 1);
3745 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3746 write_op(tp, cpu_scratch_base +
3747 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3748 (i * sizeof(u32)),
3749 be32_to_cpu(fw_data[i]));
3750
3751 total_len -= be32_to_cpu(fw_hdr->len);
3752
3753 /* Advance to next fragment */
3754 fw_hdr = (struct tg3_firmware_hdr *)
3755 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3756 } while (total_len > 0);
3757
3758 err = 0;
3759
3760 out:
3761 return err;
3762 }
3763
3764 /* tp->lock is held. */
3765 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3766 {
3767 int i;
3768 const int iters = 5;
3769
3770 tw32(cpu_base + CPU_STATE, 0xffffffff);
3771 tw32_f(cpu_base + CPU_PC, pc);
3772
3773 for (i = 0; i < iters; i++) {
3774 if (tr32(cpu_base + CPU_PC) == pc)
3775 break;
3776 tw32(cpu_base + CPU_STATE, 0xffffffff);
3777 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3778 tw32_f(cpu_base + CPU_PC, pc);
3779 udelay(1000);
3780 }
3781
3782 return (i == iters) ? -EBUSY : 0;
3783 }
3784
3785 /* tp->lock is held. */
3786 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3787 {
3788 const struct tg3_firmware_hdr *fw_hdr;
3789 int err;
3790
3791 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3792
3793 /* The firmware blob starts with version numbers, followed by the
3794 * start address and length.  The length field holds the complete
3795 * length: end_address_of_bss - start_address_of_text.  The
3796 * remainder is the blob to be loaded contiguously from the start
3797 * address. */
3798
3799 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3800 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3801 fw_hdr);
3802 if (err)
3803 return err;
3804
3805 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3806 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3807 fw_hdr);
3808 if (err)
3809 return err;
3810
3811 /* Now startup only the RX cpu. */
3812 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3813 be32_to_cpu(fw_hdr->base_addr));
3814 if (err) {
3815 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3816 "should be %08x\n", __func__,
3817 tr32(RX_CPU_BASE + CPU_PC),
3818 be32_to_cpu(fw_hdr->base_addr));
3819 return -ENODEV;
3820 }
3821
3822 tg3_rxcpu_resume(tp);
3823
3824 return 0;
3825 }
3826
3827 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3828 {
3829 const int iters = 1000;
3830 int i;
3831 u32 val;
3832
3833 /* Wait for boot code to complete initialization and enter service
3834 * loop. It is then safe to download service patches
3835 */
3836 for (i = 0; i < iters; i++) {
3837 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3838 break;
3839
3840 udelay(10);
3841 }
3842
3843 if (i == iters) {
3844 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3845 return -EBUSY;
3846 }
3847
3848 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3849 if (val & 0xff) {
3850 netdev_warn(tp->dev,
3851 "Other patches exist. Not downloading EEE patch\n");
3852 return -EEXIST;
3853 }
3854
3855 return 0;
3856 }
3857
3858 /* tp->lock is held. */
3859 static void tg3_load_57766_firmware(struct tg3 *tp)
3860 {
3861 struct tg3_firmware_hdr *fw_hdr;
3862
3863 if (!tg3_flag(tp, NO_NVRAM))
3864 return;
3865
3866 if (tg3_validate_rxcpu_state(tp))
3867 return;
3868
3869 if (!tp->fw)
3870 return;
3871
3872 /* This firmware blob has a different format from older firmware
3873 * releases, as described below. The main difference is fragmented
3874 * data to be written to non-contiguous locations.
3875 *
3876 * In the beginning we have a firmware header identical to other
3877 * firmware which consists of version, base addr and length. The length
3878 * here is unused and set to 0xffffffff.
3879 *
3880 * This is followed by a series of firmware fragments which are
3881 * individually identical to the older format, i.e. each has a
3882 * firmware header followed by the data for that fragment. The version
3883 * field of the individual fragment header is unused.
3884 */
3885
3886 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3887 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3888 return;
3889
3890 if (tg3_rxcpu_pause(tp))
3891 return;
3892
3893 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3894 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3895
3896 tg3_rxcpu_resume(tp);
3897 }
3898
3899 /* tp->lock is held. */
3900 static int tg3_load_tso_firmware(struct tg3 *tp)
3901 {
3902 const struct tg3_firmware_hdr *fw_hdr;
3903 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3904 int err;
3905
3906 if (!tg3_flag(tp, FW_TSO))
3907 return 0;
3908
3909 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3910
3911 /* The firmware blob starts with version numbers, followed by the
3912 * start address and length.  The length field holds the complete
3913 * length: end_address_of_bss - start_address_of_text.  The
3914 * remainder is the blob to be loaded contiguously from the start
3915 * address. */
3916
3917 cpu_scratch_size = tp->fw_len;
3918
3919 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3920 cpu_base = RX_CPU_BASE;
3921 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3922 } else {
3923 cpu_base = TX_CPU_BASE;
3924 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3925 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3926 }
3927
3928 err = tg3_load_firmware_cpu(tp, cpu_base,
3929 cpu_scratch_base, cpu_scratch_size,
3930 fw_hdr);
3931 if (err)
3932 return err;
3933
3934 /* Now startup the cpu. */
3935 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3936 be32_to_cpu(fw_hdr->base_addr));
3937 if (err) {
3938 netdev_err(tp->dev,
3939 "%s fails to set CPU PC, is %08x should be %08x\n",
3940 __func__, tr32(cpu_base + CPU_PC),
3941 be32_to_cpu(fw_hdr->base_addr));
3942 return -ENODEV;
3943 }
3944
3945 tg3_resume_cpu(tp, cpu_base);
3946 return 0;
3947 }
3948
3949 /* tp->lock is held. */
3950 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3951 {
3952 u32 addr_high, addr_low;
3953
3954 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3955 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3956 (mac_addr[4] << 8) | mac_addr[5]);
3957
3958 if (index < 4) {
3959 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3960 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3961 } else {
3962 index -= 4;
3963 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3964 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3965 }
3966 }
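
/* Worked example (illustrative): for the station address
 * 00:10:18:aa:bb:cc, addr_high = 0x00000010 and addr_low = 0x18aabbcc;
 * index 0 writes them to MAC_ADDR_0_HIGH/LOW, while indices 4-15 land
 * in the MAC_EXTADDR_* register pairs instead.
 */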
3967
3968 /* tp->lock is held. */
3969 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3970 {
3971 u32 addr_high;
3972 int i;
3973
3974 for (i = 0; i < 4; i++) {
3975 if (i == 1 && skip_mac_1)
3976 continue;
3977 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3978 }
3979
3980 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3981 tg3_asic_rev(tp) == ASIC_REV_5704) {
3982 for (i = 4; i < 16; i++)
3983 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3984 }
3985
3986 addr_high = (tp->dev->dev_addr[0] +
3987 tp->dev->dev_addr[1] +
3988 tp->dev->dev_addr[2] +
3989 tp->dev->dev_addr[3] +
3990 tp->dev->dev_addr[4] +
3991 tp->dev->dev_addr[5]) &
3992 TX_BACKOFF_SEED_MASK;
3993 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3994 }
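
/* Note: the TX backoff seed above is simply the byte sum of the
 * station address masked by TX_BACKOFF_SEED_MASK - presumably so that
 * NICs with different addresses pick different collision backoff
 * slots.
 */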
3995
3996 static void tg3_enable_register_access(struct tg3 *tp)
3997 {
3998 /*
3999 * Make sure register accesses (indirect or otherwise) will function
4000 * correctly.
4001 */
4002 pci_write_config_dword(tp->pdev,
4003 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4004 }
4005
4006 static int tg3_power_up(struct tg3 *tp)
4007 {
4008 int err;
4009
4010 tg3_enable_register_access(tp);
4011
4012 err = pci_set_power_state(tp->pdev, PCI_D0);
4013 if (!err) {
4014 /* Switch out of Vaux if it is a NIC */
4015 tg3_pwrsrc_switch_to_vmain(tp);
4016 } else {
4017 netdev_err(tp->dev, "Transition to D0 failed\n");
4018 }
4019
4020 return err;
4021 }
4022
4023 static int tg3_setup_phy(struct tg3 *, bool);
4024
4025 static int tg3_power_down_prepare(struct tg3 *tp)
4026 {
4027 u32 misc_host_ctrl;
4028 bool device_should_wake, do_low_power;
4029
4030 tg3_enable_register_access(tp);
4031
4032 /* Restore the CLKREQ setting. */
4033 if (tg3_flag(tp, CLKREQ_BUG))
4034 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4035 PCI_EXP_LNKCTL_CLKREQ_EN);
4036
4037 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4038 tw32(TG3PCI_MISC_HOST_CTRL,
4039 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4040
4041 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4042 tg3_flag(tp, WOL_ENABLE);
4043
4044 if (tg3_flag(tp, USE_PHYLIB)) {
4045 do_low_power = false;
4046 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4047 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4048 struct phy_device *phydev;
4049 u32 phyid, advertising;
4050
4051 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4052
4053 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4054
4055 tp->link_config.speed = phydev->speed;
4056 tp->link_config.duplex = phydev->duplex;
4057 tp->link_config.autoneg = phydev->autoneg;
4058 tp->link_config.advertising = phydev->advertising;
4059
4060 advertising = ADVERTISED_TP |
4061 ADVERTISED_Pause |
4062 ADVERTISED_Autoneg |
4063 ADVERTISED_10baseT_Half;
4064
4065 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4066 if (tg3_flag(tp, WOL_SPEED_100MB))
4067 advertising |=
4068 ADVERTISED_100baseT_Half |
4069 ADVERTISED_100baseT_Full |
4070 ADVERTISED_10baseT_Full;
4071 else
4072 advertising |= ADVERTISED_10baseT_Full;
4073 }
4074
4075 phydev->advertising = advertising;
4076
4077 phy_start_aneg(phydev);
4078
4079 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4080 if (phyid != PHY_ID_BCMAC131) {
4081 phyid &= PHY_BCM_OUI_MASK;
4082 if (phyid == PHY_BCM_OUI_1 ||
4083 phyid == PHY_BCM_OUI_2 ||
4084 phyid == PHY_BCM_OUI_3)
4085 do_low_power = true;
4086 }
4087 }
4088 } else {
4089 do_low_power = true;
4090
4091 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4092 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4093
4094 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4095 tg3_setup_phy(tp, false);
4096 }
4097
4098 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4099 u32 val;
4100
4101 val = tr32(GRC_VCPU_EXT_CTRL);
4102 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4103 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4104 int i;
4105 u32 val;
4106
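/* Poll for up to ~200 ms (200 x 1 ms) for the firmware status
 * mailbox to report the expected magic value before continuing
 * with the power-down sequence.
 */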
4107 for (i = 0; i < 200; i++) {
4108 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4109 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4110 break;
4111 msleep(1);
4112 }
4113 }
4114 if (tg3_flag(tp, WOL_CAP))
4115 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4116 WOL_DRV_STATE_SHUTDOWN |
4117 WOL_DRV_WOL |
4118 WOL_SET_MAGIC_PKT);
4119
4120 if (device_should_wake) {
4121 u32 mac_mode;
4122
4123 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4124 if (do_low_power &&
4125 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4126 tg3_phy_auxctl_write(tp,
4127 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4128 MII_TG3_AUXCTL_PCTL_WOL_EN |
4129 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4130 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4131 udelay(40);
4132 }
4133
4134 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4135 mac_mode = MAC_MODE_PORT_MODE_GMII;
4136 else if (tp->phy_flags &
4137 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4138 if (tp->link_config.active_speed == SPEED_1000)
4139 mac_mode = MAC_MODE_PORT_MODE_GMII;
4140 else
4141 mac_mode = MAC_MODE_PORT_MODE_MII;
4142 } else
4143 mac_mode = MAC_MODE_PORT_MODE_MII;
4144
4145 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4146 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4147 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4148 SPEED_100 : SPEED_10;
4149 if (tg3_5700_link_polarity(tp, speed))
4150 mac_mode |= MAC_MODE_LINK_POLARITY;
4151 else
4152 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4153 }
4154 } else {
4155 mac_mode = MAC_MODE_PORT_MODE_TBI;
4156 }
4157
4158 if (!tg3_flag(tp, 5750_PLUS))
4159 tw32(MAC_LED_CTRL, tp->led_ctrl);
4160
4161 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4162 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4163 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4164 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4165
4166 if (tg3_flag(tp, ENABLE_APE))
4167 mac_mode |= MAC_MODE_APE_TX_EN |
4168 MAC_MODE_APE_RX_EN |
4169 MAC_MODE_TDE_ENABLE;
4170
4171 tw32_f(MAC_MODE, mac_mode);
4172 udelay(100);
4173
4174 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4175 udelay(10);
4176 }
4177
4178 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4179 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4180 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4181 u32 base_val;
4182
4183 base_val = tp->pci_clock_ctrl;
4184 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4185 CLOCK_CTRL_TXCLK_DISABLE);
4186
4187 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4188 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4189 } else if (tg3_flag(tp, 5780_CLASS) ||
4190 tg3_flag(tp, CPMU_PRESENT) ||
4191 tg3_asic_rev(tp) == ASIC_REV_5906) {
4192 /* do nothing */
4193 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4194 u32 newbits1, newbits2;
4195
4196 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4197 tg3_asic_rev(tp) == ASIC_REV_5701) {
4198 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4199 CLOCK_CTRL_TXCLK_DISABLE |
4200 CLOCK_CTRL_ALTCLK);
4201 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4202 } else if (tg3_flag(tp, 5705_PLUS)) {
4203 newbits1 = CLOCK_CTRL_625_CORE;
4204 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4205 } else {
4206 newbits1 = CLOCK_CTRL_ALTCLK;
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 }
4209
4210 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4211 40);
4212
4213 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4214 40);
4215
4216 if (!tg3_flag(tp, 5705_PLUS)) {
4217 u32 newbits3;
4218
4219 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4220 tg3_asic_rev(tp) == ASIC_REV_5701) {
4221 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4222 CLOCK_CTRL_TXCLK_DISABLE |
4223 CLOCK_CTRL_44MHZ_CORE);
4224 } else {
4225 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4226 }
4227
4228 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4229 tp->pci_clock_ctrl | newbits3, 40);
4230 }
4231 }
4232
4233 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4234 tg3_power_down_phy(tp, do_low_power);
4235
4236 tg3_frob_aux_power(tp, true);
4237
4238 /* Workaround for unstable PLL clock */
4239 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4240 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4241 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4242 u32 val = tr32(0x7d00);
4243
4244 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4245 tw32(0x7d00, val);
4246 if (!tg3_flag(tp, ENABLE_ASF)) {
4247 int err;
4248
4249 err = tg3_nvram_lock(tp);
4250 tg3_halt_cpu(tp, RX_CPU_BASE);
4251 if (!err)
4252 tg3_nvram_unlock(tp);
4253 }
4254 }
4255
4256 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4257
4258 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4259
4260 return 0;
4261 }
4262
4263 static void tg3_power_down(struct tg3 *tp)
4264 {
4265 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4266 pci_set_power_state(tp->pdev, PCI_D3hot);
4267 }
4268
4269 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4270 {
4271 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4272 case MII_TG3_AUX_STAT_10HALF:
4273 *speed = SPEED_10;
4274 *duplex = DUPLEX_HALF;
4275 break;
4276
4277 case MII_TG3_AUX_STAT_10FULL:
4278 *speed = SPEED_10;
4279 *duplex = DUPLEX_FULL;
4280 break;
4281
4282 case MII_TG3_AUX_STAT_100HALF:
4283 *speed = SPEED_100;
4284 *duplex = DUPLEX_HALF;
4285 break;
4286
4287 case MII_TG3_AUX_STAT_100FULL:
4288 *speed = SPEED_100;
4289 *duplex = DUPLEX_FULL;
4290 break;
4291
4292 case MII_TG3_AUX_STAT_1000HALF:
4293 *speed = SPEED_1000;
4294 *duplex = DUPLEX_HALF;
4295 break;
4296
4297 case MII_TG3_AUX_STAT_1000FULL:
4298 *speed = SPEED_1000;
4299 *duplex = DUPLEX_FULL;
4300 break;
4301
4302 default:
4303 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4304 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4305 SPEED_10;
4306 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4307 DUPLEX_HALF;
4308 break;
4309 }
4310 *speed = SPEED_UNKNOWN;
4311 *duplex = DUPLEX_UNKNOWN;
4312 break;
4313 }
4314 }
4315
4316 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4317 {
4318 int err = 0;
4319 u32 val, new_adv;
4320
4321 new_adv = ADVERTISE_CSMA;
4322 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4323 new_adv |= mii_advertise_flowctrl(flowctrl);
4324
4325 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4326 if (err)
4327 goto done;
4328
4329 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4330 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4331
4332 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4333 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4334 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4335
4336 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4337 if (err)
4338 goto done;
4339 }
4340
4341 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4342 goto done;
4343
4344 tw32(TG3_CPMU_EEE_MODE,
4345 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4346
4347 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4348 if (!err) {
4349 u32 err2;
4350
4351 val = 0;
4352 /* Advertise 100BASE-TX EEE ability */
4353 if (advertise & ADVERTISED_100baseT_Full)
4354 val |= MDIO_AN_EEE_ADV_100TX;
4355 /* Advertise 1000BASE-T EEE ability */
4356 if (advertise & ADVERTISED_1000baseT_Full)
4357 val |= MDIO_AN_EEE_ADV_1000T;
4358
4359 if (!tp->eee.eee_enabled) {
4360 val = 0;
4361 tp->eee.advertised = 0;
4362 } else {
4363 tp->eee.advertised = advertise &
4364 (ADVERTISED_100baseT_Full |
4365 ADVERTISED_1000baseT_Full);
4366 }
4367
4368 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4369 if (err)
4370 val = 0;
4371
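/* Note: val doubles as a status flag here - if nothing was
 * advertised, or the advertisement write above failed, the TAP26
 * write below clears the workaround bits instead of setting them.
 */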
4372 switch (tg3_asic_rev(tp)) {
4373 case ASIC_REV_5717:
4374 case ASIC_REV_57765:
4375 case ASIC_REV_57766:
4376 case ASIC_REV_5719:
4377 /* If we advertised any EEE abilities above... */
4378 if (val)
4379 val = MII_TG3_DSP_TAP26_ALNOKO |
4380 MII_TG3_DSP_TAP26_RMRXSTO |
4381 MII_TG3_DSP_TAP26_OPCSINPT;
4382 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4383 /* Fall through */
4384 case ASIC_REV_5720:
4385 case ASIC_REV_5762:
4386 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4387 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4388 MII_TG3_DSP_CH34TP2_HIBW01);
4389 }
4390
4391 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4392 if (!err)
4393 err = err2;
4394 }
4395
4396 done:
4397 return err;
4398 }
4399
4400 static void tg3_phy_copper_begin(struct tg3 *tp)
4401 {
4402 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4403 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4404 u32 adv, fc;
4405
4406 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4407 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4408 adv = ADVERTISED_10baseT_Half |
4409 ADVERTISED_10baseT_Full;
4410 if (tg3_flag(tp, WOL_SPEED_100MB))
4411 adv |= ADVERTISED_100baseT_Half |
4412 ADVERTISED_100baseT_Full;
4413 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4414 if (!(tp->phy_flags &
4415 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4416 adv |= ADVERTISED_1000baseT_Half;
4417 adv |= ADVERTISED_1000baseT_Full;
4418 }
4419
4420 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4421 } else {
4422 adv = tp->link_config.advertising;
4423 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4424 adv &= ~(ADVERTISED_1000baseT_Half |
4425 ADVERTISED_1000baseT_Full);
4426
4427 fc = tp->link_config.flowctrl;
4428 }
4429
4430 tg3_phy_autoneg_cfg(tp, adv, fc);
4431
4432 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4433 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4434 /* Normally during power down we want to autonegotiate
4435 * the lowest possible speed for WOL. However, to avoid
4436 * link flap, we leave it untouched.
4437 */
4438 return;
4439 }
4440
4441 tg3_writephy(tp, MII_BMCR,
4442 BMCR_ANENABLE | BMCR_ANRESTART);
4443 } else {
4444 int i;
4445 u32 bmcr, orig_bmcr;
4446
4447 tp->link_config.active_speed = tp->link_config.speed;
4448 tp->link_config.active_duplex = tp->link_config.duplex;
4449
4450 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4451 /* With autoneg disabled, 5715 only links up when the
4452 * advertisement register has the configured speed
4453 * enabled.
4454 */
4455 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4456 }
4457
4458 bmcr = 0;
4459 switch (tp->link_config.speed) {
4460 default:
4461 case SPEED_10:
4462 break;
4463
4464 case SPEED_100:
4465 bmcr |= BMCR_SPEED100;
4466 break;
4467
4468 case SPEED_1000:
4469 bmcr |= BMCR_SPEED1000;
4470 break;
4471 }
4472
4473 if (tp->link_config.duplex == DUPLEX_FULL)
4474 bmcr |= BMCR_FULLDPLX;
4475
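/* Briefly force the PHY into loopback so the link drops before the
 * new forced speed/duplex is applied, waiting up to ~15 ms
 * (1500 x 10 us) for BMSR to report link down.
 */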
4476 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4477 (bmcr != orig_bmcr)) {
4478 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4479 for (i = 0; i < 1500; i++) {
4480 u32 tmp;
4481
4482 udelay(10);
4483 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4484 tg3_readphy(tp, MII_BMSR, &tmp))
4485 continue;
4486 if (!(tmp & BMSR_LSTATUS)) {
4487 udelay(40);
4488 break;
4489 }
4490 }
4491 tg3_writephy(tp, MII_BMCR, bmcr);
4492 udelay(40);
4493 }
4494 }
4495 }
4496
4497 static int tg3_phy_pull_config(struct tg3 *tp)
4498 {
4499 int err;
4500 u32 val;
4501
4502 err = tg3_readphy(tp, MII_BMCR, &val);
4503 if (err)
4504 goto done;
4505
4506 if (!(val & BMCR_ANENABLE)) {
4507 tp->link_config.autoneg = AUTONEG_DISABLE;
4508 tp->link_config.advertising = 0;
4509 tg3_flag_clear(tp, PAUSE_AUTONEG);
4510
4511 err = -EIO;
4512
4513 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4514 case 0:
4515 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4516 goto done;
4517
4518 tp->link_config.speed = SPEED_10;
4519 break;
4520 case BMCR_SPEED100:
4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522 goto done;
4523
4524 tp->link_config.speed = SPEED_100;
4525 break;
4526 case BMCR_SPEED1000:
4527 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4528 tp->link_config.speed = SPEED_1000;
4529 break;
4530 }
4531 /* Fall through */
4532 default:
4533 goto done;
4534 }
4535
4536 if (val & BMCR_FULLDPLX)
4537 tp->link_config.duplex = DUPLEX_FULL;
4538 else
4539 tp->link_config.duplex = DUPLEX_HALF;
4540
4541 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4542
4543 err = 0;
4544 goto done;
4545 }
4546
4547 tp->link_config.autoneg = AUTONEG_ENABLE;
4548 tp->link_config.advertising = ADVERTISED_Autoneg;
4549 tg3_flag_set(tp, PAUSE_AUTONEG);
4550
4551 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4552 u32 adv;
4553
4554 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4555 if (err)
4556 goto done;
4557
4558 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4559 tp->link_config.advertising |= adv | ADVERTISED_TP;
4560
4561 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4562 } else {
4563 tp->link_config.advertising |= ADVERTISED_FIBRE;
4564 }
4565
4566 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4567 u32 adv;
4568
4569 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4570 err = tg3_readphy(tp, MII_CTRL1000, &val);
4571 if (err)
4572 goto done;
4573
4574 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4575 } else {
4576 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4577 if (err)
4578 goto done;
4579
4580 adv = tg3_decode_flowctrl_1000X(val);
4581 tp->link_config.flowctrl = adv;
4582
4583 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4584 adv = mii_adv_to_ethtool_adv_x(val);
4585 }
4586
4587 tp->link_config.advertising |= adv;
4588 }
4589
4590 done:
4591 return err;
4592 }
4593
4594 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4595 {
4596 int err;
4597
4598 /* Turn off tap power management and set the
4599 * extended packet length bit. */
4600 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4601
4602 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4603 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4604 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4605 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4606 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4607
4608 udelay(40);
4609
4610 return err;
4611 }
4612
4613 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4614 {
4615 struct ethtool_eee eee;
4616
4617 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4618 return true;
4619
4620 tg3_eee_pull_config(tp, &eee);
4621
4622 if (tp->eee.eee_enabled) {
4623 if (tp->eee.advertised != eee.advertised ||
4624 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4625 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4626 return false;
4627 } else {
4628 /* EEE is disabled but we're advertising */
4629 if (eee.advertised)
4630 return false;
4631 }
4632
4633 return true;
4634 }
4635
4636 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4637 {
4638 u32 advmsk, tgtadv, advertising;
4639
4640 advertising = tp->link_config.advertising;
4641 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4642
4643 advmsk = ADVERTISE_ALL;
4644 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4645 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4646 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4647 }
4648
4649 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4650 return false;
4651
4652 if ((*lcladv & advmsk) != tgtadv)
4653 return false;
4654
4655 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4656 u32 tg3_ctrl;
4657
4658 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4659
4660 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4661 return false;
4662
4663 if (tgtadv &&
4664 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4665 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4666 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4667 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4668 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4669 } else {
4670 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4671 }
4672
4673 if (tg3_ctrl != tgtadv)
4674 return false;
4675 }
4676
4677 return true;
4678 }
4679
4680 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4681 {
4682 u32 lpeth = 0;
4683
4684 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4685 u32 val;
4686
4687 if (tg3_readphy(tp, MII_STAT1000, &val))
4688 return false;
4689
4690 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4691 }
4692
4693 if (tg3_readphy(tp, MII_LPA, rmtadv))
4694 return false;
4695
4696 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4697 tp->link_config.rmt_adv = lpeth;
4698
4699 return true;
4700 }
4701
4702 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4703 {
4704 if (curr_link_up != tp->link_up) {
4705 if (curr_link_up) {
4706 netif_carrier_on(tp->dev);
4707 } else {
4708 netif_carrier_off(tp->dev);
4709 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4710 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4711 }
4712
4713 tg3_link_report(tp);
4714 return true;
4715 }
4716
4717 return false;
4718 }
4719
4720 static void tg3_clear_mac_status(struct tg3 *tp)
4721 {
4722 tw32(MAC_EVENT, 0);
4723
4724 tw32_f(MAC_STATUS,
4725 MAC_STATUS_SYNC_CHANGED |
4726 MAC_STATUS_CFG_CHANGED |
4727 MAC_STATUS_MI_COMPLETION |
4728 MAC_STATUS_LNKSTATE_CHANGED);
4729 udelay(40);
4730 }
4731
4732 static void tg3_setup_eee(struct tg3 *tp)
4733 {
4734 u32 val;
4735
4736 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4737 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4738 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4739 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4740
4741 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4742
4743 tw32_f(TG3_CPMU_EEE_CTRL,
4744 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4745
4746 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4747 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4748 TG3_CPMU_EEEMD_LPI_IN_RX |
4749 TG3_CPMU_EEEMD_EEE_ENABLE;
4750
4751 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4752 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4753
4754 if (tg3_flag(tp, ENABLE_APE))
4755 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4756
4757 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4758
4759 tw32_f(TG3_CPMU_EEE_DBTMR1,
4760 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4761 (tp->eee.tx_lpi_timer & 0xffff));
4762
4763 tw32_f(TG3_CPMU_EEE_DBTMR2,
4764 TG3_CPMU_DBTMR2_APE_TX_2047US |
4765 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4766 }
4767
4768 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4769 {
4770 bool current_link_up;
4771 u32 bmsr, val;
4772 u32 lcl_adv, rmt_adv;
4773 u16 current_speed;
4774 u8 current_duplex;
4775 int i, err;
4776
4777 tg3_clear_mac_status(tp);
4778
4779 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4780 tw32_f(MAC_MI_MODE,
4781 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4782 udelay(80);
4783 }
4784
4785 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4786
4787 /* Some third-party PHYs need to be reset on link going
4788 * down.
4789 */
4790 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4791 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4792 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4793 tp->link_up) {
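/* The link status bit in MII_BMSR latches link-down events, so
 * read it twice: the second read returns the current state.
 */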
4794 tg3_readphy(tp, MII_BMSR, &bmsr);
4795 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4796 !(bmsr & BMSR_LSTATUS))
4797 force_reset = true;
4798 }
4799 if (force_reset)
4800 tg3_phy_reset(tp);
4801
4802 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4803 tg3_readphy(tp, MII_BMSR, &bmsr);
4804 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4805 !tg3_flag(tp, INIT_COMPLETE))
4806 bmsr = 0;
4807
4808 if (!(bmsr & BMSR_LSTATUS)) {
4809 err = tg3_init_5401phy_dsp(tp);
4810 if (err)
4811 return err;
4812
4813 tg3_readphy(tp, MII_BMSR, &bmsr);
4814 for (i = 0; i < 1000; i++) {
4815 udelay(10);
4816 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4817 (bmsr & BMSR_LSTATUS)) {
4818 udelay(40);
4819 break;
4820 }
4821 }
4822
4823 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4824 TG3_PHY_REV_BCM5401_B0 &&
4825 !(bmsr & BMSR_LSTATUS) &&
4826 tp->link_config.active_speed == SPEED_1000) {
4827 err = tg3_phy_reset(tp);
4828 if (!err)
4829 err = tg3_init_5401phy_dsp(tp);
4830 if (err)
4831 return err;
4832 }
4833 }
4834 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4835 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4836 /* 5701 {A0,B0} CRC bug workaround */
4837 tg3_writephy(tp, 0x15, 0x0a75);
4838 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4839 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4840 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4841 }
4842
4843 /* Clear pending interrupts... */
4844 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4845 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4846
4847 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4848 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4849 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4850 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4851
4852 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4853 tg3_asic_rev(tp) == ASIC_REV_5701) {
4854 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4855 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4856 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4857 else
4858 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4859 }
4860
4861 current_link_up = false;
4862 current_speed = SPEED_UNKNOWN;
4863 current_duplex = DUPLEX_UNKNOWN;
4864 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4865 tp->link_config.rmt_adv = 0;
4866
4867 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4868 err = tg3_phy_auxctl_read(tp,
4869 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4870 &val);
4871 if (!err && !(val & (1 << 10))) {
4872 tg3_phy_auxctl_write(tp,
4873 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4874 val | (1 << 10));
4875 goto relink;
4876 }
4877 }
4878
4879 bmsr = 0;
4880 for (i = 0; i < 100; i++) {
4881 tg3_readphy(tp, MII_BMSR, &bmsr);
4882 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4883 (bmsr & BMSR_LSTATUS))
4884 break;
4885 udelay(40);
4886 }
4887
4888 if (bmsr & BMSR_LSTATUS) {
4889 u32 aux_stat, bmcr;
4890
4891 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4892 for (i = 0; i < 2000; i++) {
4893 udelay(10);
4894 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4895 aux_stat)
4896 break;
4897 }
4898
4899 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4900 &current_speed,
4901 &current_duplex);
4902
4903 bmcr = 0;
4904 for (i = 0; i < 200; i++) {
4905 tg3_readphy(tp, MII_BMCR, &bmcr);
4906 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4907 continue;
4908 if (bmcr && bmcr != 0x7fff)
4909 break;
4910 udelay(10);
4911 }
4912
4913 lcl_adv = 0;
4914 rmt_adv = 0;
4915
4916 tp->link_config.active_speed = current_speed;
4917 tp->link_config.active_duplex = current_duplex;
4918
4919 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4920 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4921
4922 if ((bmcr & BMCR_ANENABLE) &&
4923 eee_config_ok &&
4924 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4925 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4926 current_link_up = true;
4927
4928 /* Changes to EEE settings take effect only after a PHY
4929 * reset. If we skipped a reset because Link Flap
4930 * Avoidance is enabled, do it now.
4931 */
4932 if (!eee_config_ok &&
4933 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4934 !force_reset) {
4935 tg3_setup_eee(tp);
4936 tg3_phy_reset(tp);
4937 }
4938 } else {
4939 if (!(bmcr & BMCR_ANENABLE) &&
4940 tp->link_config.speed == current_speed &&
4941 tp->link_config.duplex == current_duplex) {
4942 current_link_up = true;
4943 }
4944 }
4945
4946 if (current_link_up &&
4947 tp->link_config.active_duplex == DUPLEX_FULL) {
4948 u32 reg, bit;
4949
4950 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4951 reg = MII_TG3_FET_GEN_STAT;
4952 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4953 } else {
4954 reg = MII_TG3_EXT_STAT;
4955 bit = MII_TG3_EXT_STAT_MDIX;
4956 }
4957
4958 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4959 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4960
4961 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4962 }
4963 }
4964
4965 relink:
4966 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4967 tg3_phy_copper_begin(tp);
4968
4969 if (tg3_flag(tp, ROBOSWITCH)) {
4970 current_link_up = true;
4971 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4972 current_speed = SPEED_1000;
4973 current_duplex = DUPLEX_FULL;
4974 tp->link_config.active_speed = current_speed;
4975 tp->link_config.active_duplex = current_duplex;
4976 }
4977
4978 tg3_readphy(tp, MII_BMSR, &bmsr);
4979 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4980 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4981 current_link_up = true;
4982 }
4983
4984 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4985 if (current_link_up) {
4986 if (tp->link_config.active_speed == SPEED_100 ||
4987 tp->link_config.active_speed == SPEED_10)
4988 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4989 else
4990 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4991 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4992 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4993 else
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4995
4996 /* For the 5750 core in the BCM4785 chip to work properly
4997 * in RGMII mode, the LED Control Register must be set up.
4998 */
4999 if (tg3_flag(tp, RGMII_MODE)) {
5000 u32 led_ctrl = tr32(MAC_LED_CTRL);
5001 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5002
5003 if (tp->link_config.active_speed == SPEED_10)
5004 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5005 else if (tp->link_config.active_speed == SPEED_100)
5006 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5007 LED_CTRL_100MBPS_ON);
5008 else if (tp->link_config.active_speed == SPEED_1000)
5009 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5010 LED_CTRL_1000MBPS_ON);
5011
5012 tw32(MAC_LED_CTRL, led_ctrl);
5013 udelay(40);
5014 }
5015
5016 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5017 if (tp->link_config.active_duplex == DUPLEX_HALF)
5018 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5019
5020 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5021 if (current_link_up &&
5022 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5023 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5024 else
5025 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5026 }
5027
5028 /* ??? Without this setting Netgear GA302T PHY does not
5029 * ??? send/receive packets...
5030 */
5031 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5032 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5033 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5034 tw32_f(MAC_MI_MODE, tp->mi_mode);
5035 udelay(80);
5036 }
5037
5038 tw32_f(MAC_MODE, tp->mac_mode);
5039 udelay(40);
5040
5041 tg3_phy_eee_adjust(tp, current_link_up);
5042
5043 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5044 /* Polled via timer. */
5045 tw32_f(MAC_EVENT, 0);
5046 } else {
5047 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5048 }
5049 udelay(40);
5050
5051 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5052 current_link_up &&
5053 tp->link_config.active_speed == SPEED_1000 &&
5054 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5055 udelay(120);
5056 tw32_f(MAC_STATUS,
5057 (MAC_STATUS_SYNC_CHANGED |
5058 MAC_STATUS_CFG_CHANGED));
5059 udelay(40);
5060 tg3_write_mem(tp,
5061 NIC_SRAM_FIRMWARE_MBOX,
5062 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5063 }
5064
5065 /* Prevent send BD corruption: on chips with the CLKREQ bug, CLKREQ must be disabled at 10/100 link speeds. */
5066 if (tg3_flag(tp, CLKREQ_BUG)) {
5067 if (tp->link_config.active_speed == SPEED_100 ||
5068 tp->link_config.active_speed == SPEED_10)
5069 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5070 PCI_EXP_LNKCTL_CLKREQ_EN);
5071 else
5072 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5073 PCI_EXP_LNKCTL_CLKREQ_EN);
5074 }
5075
5076 tg3_test_and_report_link_chg(tp, current_link_up);
5077
5078 return 0;
5079 }
5080
5081 struct tg3_fiber_aneginfo {
5082 int state;
5083 #define ANEG_STATE_UNKNOWN 0
5084 #define ANEG_STATE_AN_ENABLE 1
5085 #define ANEG_STATE_RESTART_INIT 2
5086 #define ANEG_STATE_RESTART 3
5087 #define ANEG_STATE_DISABLE_LINK_OK 4
5088 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5089 #define ANEG_STATE_ABILITY_DETECT 6
5090 #define ANEG_STATE_ACK_DETECT_INIT 7
5091 #define ANEG_STATE_ACK_DETECT 8
5092 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5093 #define ANEG_STATE_COMPLETE_ACK 10
5094 #define ANEG_STATE_IDLE_DETECT_INIT 11
5095 #define ANEG_STATE_IDLE_DETECT 12
5096 #define ANEG_STATE_LINK_OK 13
5097 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5098 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5099
5100 u32 flags;
5101 #define MR_AN_ENABLE 0x00000001
5102 #define MR_RESTART_AN 0x00000002
5103 #define MR_AN_COMPLETE 0x00000004
5104 #define MR_PAGE_RX 0x00000008
5105 #define MR_NP_LOADED 0x00000010
5106 #define MR_TOGGLE_TX 0x00000020
5107 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5108 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5109 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5110 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5111 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5112 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5113 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5114 #define MR_TOGGLE_RX 0x00002000
5115 #define MR_NP_RX 0x00004000
5116
5117 #define MR_LINK_OK 0x80000000
5118
5119 unsigned long link_time, cur_time;
5120
5121 u32 ability_match_cfg;
5122 int ability_match_count;
5123
5124 char ability_match, idle_match, ack_match;
5125
5126 u32 txconfig, rxconfig;
5127 #define ANEG_CFG_NP 0x00000080
5128 #define ANEG_CFG_ACK 0x00000040
5129 #define ANEG_CFG_RF2 0x00000020
5130 #define ANEG_CFG_RF1 0x00000010
5131 #define ANEG_CFG_PS2 0x00000001
5132 #define ANEG_CFG_PS1 0x00008000
5133 #define ANEG_CFG_HD 0x00004000
5134 #define ANEG_CFG_FD 0x00002000
5135 #define ANEG_CFG_INVAL 0x00001f06
5136
5137 };
5138 #define ANEG_OK 0
5139 #define ANEG_DONE 1
5140 #define ANEG_TIMER_ENAB 2
5141 #define ANEG_FAILED -1
5142
5143 #define ANEG_STATE_SETTLE_TIME 10000
5144
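/* The software state machine below follows the 1000BASE-X
 * auto-negotiation arbitration flow (IEEE 802.3z Clause 37): the
 * MR_* flags mirror the spec's mr_* management variables, and the
 * ANEG_CFG_* bits decode the received configuration code words.
 */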
5145 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5146 struct tg3_fiber_aneginfo *ap)
5147 {
5148 u16 flowctrl;
5149 unsigned long delta;
5150 u32 rx_cfg_reg;
5151 int ret;
5152
5153 if (ap->state == ANEG_STATE_UNKNOWN) {
5154 ap->rxconfig = 0;
5155 ap->link_time = 0;
5156 ap->cur_time = 0;
5157 ap->ability_match_cfg = 0;
5158 ap->ability_match_count = 0;
5159 ap->ability_match = 0;
5160 ap->idle_match = 0;
5161 ap->ack_match = 0;
5162 }
5163 ap->cur_time++;
5164
5165 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5166 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5167
5168 if (rx_cfg_reg != ap->ability_match_cfg) {
5169 ap->ability_match_cfg = rx_cfg_reg;
5170 ap->ability_match = 0;
5171 ap->ability_match_count = 0;
5172 } else {
5173 if (++ap->ability_match_count > 1) {
5174 ap->ability_match = 1;
5175 ap->ability_match_cfg = rx_cfg_reg;
5176 }
5177 }
5178 if (rx_cfg_reg & ANEG_CFG_ACK)
5179 ap->ack_match = 1;
5180 else
5181 ap->ack_match = 0;
5182
5183 ap->idle_match = 0;
5184 } else {
5185 ap->idle_match = 1;
5186 ap->ability_match_cfg = 0;
5187 ap->ability_match_count = 0;
5188 ap->ability_match = 0;
5189 ap->ack_match = 0;
5190
5191 rx_cfg_reg = 0;
5192 }
5193
5194 ap->rxconfig = rx_cfg_reg;
5195 ret = ANEG_OK;
5196
5197 switch (ap->state) {
5198 case ANEG_STATE_UNKNOWN:
5199 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5200 ap->state = ANEG_STATE_AN_ENABLE;
5201
5202 /* fallthru */
5203 case ANEG_STATE_AN_ENABLE:
5204 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5205 if (ap->flags & MR_AN_ENABLE) {
5206 ap->link_time = 0;
5207 ap->cur_time = 0;
5208 ap->ability_match_cfg = 0;
5209 ap->ability_match_count = 0;
5210 ap->ability_match = 0;
5211 ap->idle_match = 0;
5212 ap->ack_match = 0;
5213
5214 ap->state = ANEG_STATE_RESTART_INIT;
5215 } else {
5216 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5217 }
5218 break;
5219
5220 case ANEG_STATE_RESTART_INIT:
5221 ap->link_time = ap->cur_time;
5222 ap->flags &= ~(MR_NP_LOADED);
5223 ap->txconfig = 0;
5224 tw32(MAC_TX_AUTO_NEG, 0);
5225 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5226 tw32_f(MAC_MODE, tp->mac_mode);
5227 udelay(40);
5228
5229 ret = ANEG_TIMER_ENAB;
5230 ap->state = ANEG_STATE_RESTART;
5231
5232 /* fallthru */
5233 case ANEG_STATE_RESTART:
5234 delta = ap->cur_time - ap->link_time;
5235 if (delta > ANEG_STATE_SETTLE_TIME)
5236 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5237 else
5238 ret = ANEG_TIMER_ENAB;
5239 break;
5240
5241 case ANEG_STATE_DISABLE_LINK_OK:
5242 ret = ANEG_DONE;
5243 break;
5244
5245 case ANEG_STATE_ABILITY_DETECT_INIT:
5246 ap->flags &= ~(MR_TOGGLE_TX);
5247 ap->txconfig = ANEG_CFG_FD;
5248 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5249 if (flowctrl & ADVERTISE_1000XPAUSE)
5250 ap->txconfig |= ANEG_CFG_PS1;
5251 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5252 ap->txconfig |= ANEG_CFG_PS2;
5253 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5254 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5255 tw32_f(MAC_MODE, tp->mac_mode);
5256 udelay(40);
5257
5258 ap->state = ANEG_STATE_ABILITY_DETECT;
5259 break;
5260
5261 case ANEG_STATE_ABILITY_DETECT:
5262 if (ap->ability_match != 0 && ap->rxconfig != 0)
5263 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5264 break;
5265
5266 case ANEG_STATE_ACK_DETECT_INIT:
5267 ap->txconfig |= ANEG_CFG_ACK;
5268 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5269 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5270 tw32_f(MAC_MODE, tp->mac_mode);
5271 udelay(40);
5272
5273 ap->state = ANEG_STATE_ACK_DETECT;
5274
5275 /* fallthru */
5276 case ANEG_STATE_ACK_DETECT:
5277 if (ap->ack_match != 0) {
5278 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5279 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5280 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5281 } else {
5282 ap->state = ANEG_STATE_AN_ENABLE;
5283 }
5284 } else if (ap->ability_match != 0 &&
5285 ap->rxconfig == 0) {
5286 ap->state = ANEG_STATE_AN_ENABLE;
5287 }
5288 break;
5289
5290 case ANEG_STATE_COMPLETE_ACK_INIT:
5291 if (ap->rxconfig & ANEG_CFG_INVAL) {
5292 ret = ANEG_FAILED;
5293 break;
5294 }
5295 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5296 MR_LP_ADV_HALF_DUPLEX |
5297 MR_LP_ADV_SYM_PAUSE |
5298 MR_LP_ADV_ASYM_PAUSE |
5299 MR_LP_ADV_REMOTE_FAULT1 |
5300 MR_LP_ADV_REMOTE_FAULT2 |
5301 MR_LP_ADV_NEXT_PAGE |
5302 MR_TOGGLE_RX |
5303 MR_NP_RX);
5304 if (ap->rxconfig & ANEG_CFG_FD)
5305 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5306 if (ap->rxconfig & ANEG_CFG_HD)
5307 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5308 if (ap->rxconfig & ANEG_CFG_PS1)
5309 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5310 if (ap->rxconfig & ANEG_CFG_PS2)
5311 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5312 if (ap->rxconfig & ANEG_CFG_RF1)
5313 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5314 if (ap->rxconfig & ANEG_CFG_RF2)
5315 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5316 if (ap->rxconfig & ANEG_CFG_NP)
5317 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5318
5319 ap->link_time = ap->cur_time;
5320
5321 ap->flags ^= (MR_TOGGLE_TX);
5322 if (ap->rxconfig & 0x0008)
5323 ap->flags |= MR_TOGGLE_RX;
5324 if (ap->rxconfig & ANEG_CFG_NP)
5325 ap->flags |= MR_NP_RX;
5326 ap->flags |= MR_PAGE_RX;
5327
5328 ap->state = ANEG_STATE_COMPLETE_ACK;
5329 ret = ANEG_TIMER_ENAB;
5330 break;
5331
5332 case ANEG_STATE_COMPLETE_ACK:
5333 if (ap->ability_match != 0 &&
5334 ap->rxconfig == 0) {
5335 ap->state = ANEG_STATE_AN_ENABLE;
5336 break;
5337 }
5338 delta = ap->cur_time - ap->link_time;
5339 if (delta > ANEG_STATE_SETTLE_TIME) {
5340 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5341 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5342 } else {
5343 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5344 !(ap->flags & MR_NP_RX)) {
5345 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5346 } else {
5347 ret = ANEG_FAILED;
5348 }
5349 }
5350 }
5351 break;
5352
5353 case ANEG_STATE_IDLE_DETECT_INIT:
5354 ap->link_time = ap->cur_time;
5355 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5356 tw32_f(MAC_MODE, tp->mac_mode);
5357 udelay(40);
5358
5359 ap->state = ANEG_STATE_IDLE_DETECT;
5360 ret = ANEG_TIMER_ENAB;
5361 break;
5362
5363 case ANEG_STATE_IDLE_DETECT:
5364 if (ap->ability_match != 0 &&
5365 ap->rxconfig == 0) {
5366 ap->state = ANEG_STATE_AN_ENABLE;
5367 break;
5368 }
5369 delta = ap->cur_time - ap->link_time;
5370 if (delta > ANEG_STATE_SETTLE_TIME) {
5371 /* XXX another gem from the Broadcom driver :( */
5372 ap->state = ANEG_STATE_LINK_OK;
5373 }
5374 break;
5375
5376 case ANEG_STATE_LINK_OK:
5377 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5378 ret = ANEG_DONE;
5379 break;
5380
5381 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5382 /* ??? unimplemented */
5383 break;
5384
5385 case ANEG_STATE_NEXT_PAGE_WAIT:
5386 /* ??? unimplemented */
5387 break;
5388
5389 default:
5390 ret = ANEG_FAILED;
5391 break;
5392 }
5393
5394 return ret;
5395 }
5396
5397 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5398 {
5399 int res = 0;
5400 struct tg3_fiber_aneginfo aninfo;
5401 int status = ANEG_FAILED;
5402 unsigned int tick;
5403 u32 tmp;
5404
5405 tw32_f(MAC_TX_AUTO_NEG, 0);
5406
5407 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5408 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5409 udelay(40);
5410
5411 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5412 udelay(40);
5413
5414 memset(&aninfo, 0, sizeof(aninfo));
5415 aninfo.flags |= MR_AN_ENABLE;
5416 aninfo.state = ANEG_STATE_UNKNOWN;
5417 aninfo.cur_time = 0;
5418 tick = 0;
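/* Step the state machine roughly once per microsecond, giving
 * auto-negotiation at most ~195 ms to complete or fail.
 */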
5419 while (++tick < 195000) {
5420 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5421 if (status == ANEG_DONE || status == ANEG_FAILED)
5422 break;
5423
5424 udelay(1);
5425 }
5426
5427 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5428 tw32_f(MAC_MODE, tp->mac_mode);
5429 udelay(40);
5430
5431 *txflags = aninfo.txconfig;
5432 *rxflags = aninfo.flags;
5433
5434 if (status == ANEG_DONE &&
5435 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5436 MR_LP_ADV_FULL_DUPLEX)))
5437 res = 1;
5438
5439 return res;
5440 }
5441
5442 static void tg3_init_bcm8002(struct tg3 *tp)
5443 {
5444 u32 mac_status = tr32(MAC_STATUS);
5445 int i;
5446
5447 /* Reset when initializing for the first time or when we have a link. */
5448 if (tg3_flag(tp, INIT_COMPLETE) &&
5449 !(mac_status & MAC_STATUS_PCS_SYNCED))
5450 return;
5451
5452 /* Set PLL lock range. */
5453 tg3_writephy(tp, 0x16, 0x8007);
5454
5455 /* SW reset */
5456 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5457
5458 /* Wait for reset to complete. */
5459 /* XXX schedule_timeout() ... */
5460 for (i = 0; i < 500; i++)
5461 udelay(10);
5462
5463 /* Config mode; select PMA/Ch 1 regs. */
5464 tg3_writephy(tp, 0x10, 0x8411);
5465
5466 /* Enable auto-lock and comdet, select txclk for tx. */
5467 tg3_writephy(tp, 0x11, 0x0a10);
5468
5469 tg3_writephy(tp, 0x18, 0x00a0);
5470 tg3_writephy(tp, 0x16, 0x41ff);
5471
5472 /* Assert and deassert POR. */
5473 tg3_writephy(tp, 0x13, 0x0400);
5474 udelay(40);
5475 tg3_writephy(tp, 0x13, 0x0000);
5476
5477 tg3_writephy(tp, 0x11, 0x0a50);
5478 udelay(40);
5479 tg3_writephy(tp, 0x11, 0x0a10);
5480
5481 /* Wait for signal to stabilize */
5482 /* XXX schedule_timeout() ... */
5483 for (i = 0; i < 15000; i++)
5484 udelay(10);
5485
5486 /* Deselect the channel register so we can read the PHYID
5487 * later.
5488 */
5489 tg3_writephy(tp, 0x10, 0x8011);
5490 }
5491
5492 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5493 {
5494 u16 flowctrl;
5495 bool current_link_up;
5496 u32 sg_dig_ctrl, sg_dig_status;
5497 u32 serdes_cfg, expected_sg_dig_ctrl;
5498 int workaround, port_a;
5499
5500 serdes_cfg = 0;
5501 expected_sg_dig_ctrl = 0;
5502 workaround = 0;
5503 port_a = 1;
5504 current_link_up = false;
5505
5506 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5507 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5508 workaround = 1;
5509 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5510 port_a = 0;
5511
5512 /* Preserve bits 0-11, 13 and 14 for signal pre-emphasis,
5513 * and bits 20-23 for the voltage regulator. */
5514 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5515 }
5516
5517 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5518
5519 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5520 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5521 if (workaround) {
5522 u32 val = serdes_cfg;
5523
5524 if (port_a)
5525 val |= 0xc010000;
5526 else
5527 val |= 0x4010000;
5528 tw32_f(MAC_SERDES_CFG, val);
5529 }
5530
5531 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5532 }
5533 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5534 tg3_setup_flow_control(tp, 0, 0);
5535 current_link_up = true;
5536 }
5537 goto out;
5538 }
5539
5540 /* Want auto-negotiation. */
5541 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5542
5543 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5544 if (flowctrl & ADVERTISE_1000XPAUSE)
5545 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5546 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5547 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5548
5549 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5550 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5551 tp->serdes_counter &&
5552 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5553 MAC_STATUS_RCVD_CFG)) ==
5554 MAC_STATUS_PCS_SYNCED)) {
5555 tp->serdes_counter--;
5556 current_link_up = true;
5557 goto out;
5558 }
5559 restart_autoneg:
5560 if (workaround)
5561 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5562 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5563 udelay(5);
5564 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5565
5566 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5567 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5568 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5569 MAC_STATUS_SIGNAL_DET)) {
5570 sg_dig_status = tr32(SG_DIG_STATUS);
5571 mac_status = tr32(MAC_STATUS);
5572
5573 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5574 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5575 u32 local_adv = 0, remote_adv = 0;
5576
5577 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5578 local_adv |= ADVERTISE_1000XPAUSE;
5579 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5580 local_adv |= ADVERTISE_1000XPSE_ASYM;
5581
5582 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5583 remote_adv |= LPA_1000XPAUSE;
5584 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5585 remote_adv |= LPA_1000XPAUSE_ASYM;
5586
5587 tp->link_config.rmt_adv =
5588 mii_adv_to_ethtool_adv_x(remote_adv);
5589
5590 tg3_setup_flow_control(tp, local_adv, remote_adv);
5591 current_link_up = true;
5592 tp->serdes_counter = 0;
5593 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5594 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5595 if (tp->serdes_counter)
5596 tp->serdes_counter--;
5597 else {
5598 if (workaround) {
5599 u32 val = serdes_cfg;
5600
5601 if (port_a)
5602 val |= 0xc010000;
5603 else
5604 val |= 0x4010000;
5605
5606 tw32_f(MAC_SERDES_CFG, val);
5607 }
5608
5609 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5610 udelay(40);
5611
5612 /* Link parallel detection - the link is up only
5613 * if we have PCS_SYNC and are not receiving
5614 * config code words. */
5615 mac_status = tr32(MAC_STATUS);
5616 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5617 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5618 tg3_setup_flow_control(tp, 0, 0);
5619 current_link_up = true;
5620 tp->phy_flags |=
5621 TG3_PHYFLG_PARALLEL_DETECT;
5622 tp->serdes_counter =
5623 SERDES_PARALLEL_DET_TIMEOUT;
5624 } else
5625 goto restart_autoneg;
5626 }
5627 }
5628 } else {
5629 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5630 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5631 }
5632
5633 out:
5634 return current_link_up;
5635 }
5636
5637 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5638 {
5639 bool current_link_up = false;
5640
5641 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5642 goto out;
5643
5644 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5645 u32 txflags, rxflags;
5646 int i;
5647
5648 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5649 u32 local_adv = 0, remote_adv = 0;
5650
5651 if (txflags & ANEG_CFG_PS1)
5652 local_adv |= ADVERTISE_1000XPAUSE;
5653 if (txflags & ANEG_CFG_PS2)
5654 local_adv |= ADVERTISE_1000XPSE_ASYM;
5655
5656 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5657 remote_adv |= LPA_1000XPAUSE;
5658 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5659 remote_adv |= LPA_1000XPAUSE_ASYM;
5660
5661 tp->link_config.rmt_adv =
5662 mii_adv_to_ethtool_adv_x(remote_adv);
5663
5664 tg3_setup_flow_control(tp, local_adv, remote_adv);
5665
5666 current_link_up = true;
5667 }
5668 for (i = 0; i < 30; i++) {
5669 udelay(20);
5670 tw32_f(MAC_STATUS,
5671 (MAC_STATUS_SYNC_CHANGED |
5672 MAC_STATUS_CFG_CHANGED));
5673 udelay(40);
5674 if ((tr32(MAC_STATUS) &
5675 (MAC_STATUS_SYNC_CHANGED |
5676 MAC_STATUS_CFG_CHANGED)) == 0)
5677 break;
5678 }
5679
5680 mac_status = tr32(MAC_STATUS);
5681 if (!current_link_up &&
5682 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5683 !(mac_status & MAC_STATUS_RCVD_CFG))
5684 current_link_up = true;
5685 } else {
5686 tg3_setup_flow_control(tp, 0, 0);
5687
5688 /* Forcing 1000FD link up. */
5689 current_link_up = true;
5690
5691 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5692 udelay(40);
5693
5694 tw32_f(MAC_MODE, tp->mac_mode);
5695 udelay(40);
5696 }
5697
5698 out:
5699 return current_link_up;
5700 }
5701
5702 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5703 {
5704 u32 orig_pause_cfg;
5705 u16 orig_active_speed;
5706 u8 orig_active_duplex;
5707 u32 mac_status;
5708 bool current_link_up;
5709 int i;
5710
5711 orig_pause_cfg = tp->link_config.active_flowctrl;
5712 orig_active_speed = tp->link_config.active_speed;
5713 orig_active_duplex = tp->link_config.active_duplex;
5714
5715 if (!tg3_flag(tp, HW_AUTONEG) &&
5716 tp->link_up &&
5717 tg3_flag(tp, INIT_COMPLETE)) {
5718 mac_status = tr32(MAC_STATUS);
5719 mac_status &= (MAC_STATUS_PCS_SYNCED |
5720 MAC_STATUS_SIGNAL_DET |
5721 MAC_STATUS_CFG_CHANGED |
5722 MAC_STATUS_RCVD_CFG);
5723 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5724 MAC_STATUS_SIGNAL_DET)) {
5725 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5726 MAC_STATUS_CFG_CHANGED));
5727 return 0;
5728 }
5729 }
5730
5731 tw32_f(MAC_TX_AUTO_NEG, 0);
5732
5733 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5734 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5735 tw32_f(MAC_MODE, tp->mac_mode);
5736 udelay(40);
5737
5738 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5739 tg3_init_bcm8002(tp);
5740
5741 /* Enable link change events even when the serdes is being polled. */
5742 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5743 udelay(40);
5744
5745 current_link_up = false;
5746 tp->link_config.rmt_adv = 0;
5747 mac_status = tr32(MAC_STATUS);
5748
5749 if (tg3_flag(tp, HW_AUTONEG))
5750 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5751 else
5752 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5753
5754 tp->napi[0].hw_status->status =
5755 (SD_STATUS_UPDATED |
5756 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5757
5758 for (i = 0; i < 100; i++) {
5759 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5760 MAC_STATUS_CFG_CHANGED));
5761 udelay(5);
5762 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5763 MAC_STATUS_CFG_CHANGED |
5764 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5765 break;
5766 }
5767
5768 mac_status = tr32(MAC_STATUS);
5769 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5770 current_link_up = false;
5771 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5772 tp->serdes_counter == 0) {
5773 tw32_f(MAC_MODE, (tp->mac_mode |
5774 MAC_MODE_SEND_CONFIGS));
5775 udelay(1);
5776 tw32_f(MAC_MODE, tp->mac_mode);
5777 }
5778 }
5779
5780 if (current_link_up) {
5781 tp->link_config.active_speed = SPEED_1000;
5782 tp->link_config.active_duplex = DUPLEX_FULL;
5783 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5784 LED_CTRL_LNKLED_OVERRIDE |
5785 LED_CTRL_1000MBPS_ON));
5786 } else {
5787 tp->link_config.active_speed = SPEED_UNKNOWN;
5788 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5789 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5790 LED_CTRL_LNKLED_OVERRIDE |
5791 LED_CTRL_TRAFFIC_OVERRIDE));
5792 }
5793
5794 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5795 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5796 if (orig_pause_cfg != now_pause_cfg ||
5797 orig_active_speed != tp->link_config.active_speed ||
5798 orig_active_duplex != tp->link_config.active_duplex)
5799 tg3_link_report(tp);
5800 }
5801
5802 return 0;
5803 }
5804
5805 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5806 {
5807 int err = 0;
5808 u32 bmsr, bmcr;
5809 u16 current_speed = SPEED_UNKNOWN;
5810 u8 current_duplex = DUPLEX_UNKNOWN;
5811 bool current_link_up = false;
5812 u32 local_adv, remote_adv, sgsr;
5813
5814 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5815 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5816 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5817 (sgsr & SERDES_TG3_SGMII_MODE)) {
5818
5819 if (force_reset)
5820 tg3_phy_reset(tp);
5821
5822 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5823
5824 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5825 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5826 } else {
5827 current_link_up = true;
5828 if (sgsr & SERDES_TG3_SPEED_1000) {
5829 current_speed = SPEED_1000;
5830 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831 } else if (sgsr & SERDES_TG3_SPEED_100) {
5832 current_speed = SPEED_100;
5833 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5834 } else {
5835 current_speed = SPEED_10;
5836 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837 }
5838
5839 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5840 current_duplex = DUPLEX_FULL;
5841 else
5842 current_duplex = DUPLEX_HALF;
5843 }
5844
5845 tw32_f(MAC_MODE, tp->mac_mode);
5846 udelay(40);
5847
5848 tg3_clear_mac_status(tp);
5849
5850 goto fiber_setup_done;
5851 }
5852
5853 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5854 tw32_f(MAC_MODE, tp->mac_mode);
5855 udelay(40);
5856
5857 tg3_clear_mac_status(tp);
5858
5859 if (force_reset)
5860 tg3_phy_reset(tp);
5861
5862 tp->link_config.rmt_adv = 0;
5863
5864 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5865 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5866 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5867 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5868 bmsr |= BMSR_LSTATUS;
5869 else
5870 bmsr &= ~BMSR_LSTATUS;
5871 }
5872
5873 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5874
5875 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5876 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5877 /* do nothing, just check for link up at the end */
5878 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5879 u32 adv, newadv;
5880
5881 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5882 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5883 ADVERTISE_1000XPAUSE |
5884 ADVERTISE_1000XPSE_ASYM |
5885 ADVERTISE_SLCT);
5886
5887 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5888 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5889
5890 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5891 tg3_writephy(tp, MII_ADVERTISE, newadv);
5892 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5893 tg3_writephy(tp, MII_BMCR, bmcr);
5894
5895 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5896 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5897 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5898
5899 return err;
5900 }
5901 } else {
5902 u32 new_bmcr;
5903
5904 bmcr &= ~BMCR_SPEED1000;
5905 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5906
5907 if (tp->link_config.duplex == DUPLEX_FULL)
5908 new_bmcr |= BMCR_FULLDPLX;
5909
5910 if (new_bmcr != bmcr) {
5911 /* BMCR_SPEED1000 is a reserved bit that needs
5912 * to be set on write.
5913 */
5914 new_bmcr |= BMCR_SPEED1000;
5915
5916 /* Force a linkdown */
5917 if (tp->link_up) {
5918 u32 adv;
5919
5920 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5921 adv &= ~(ADVERTISE_1000XFULL |
5922 ADVERTISE_1000XHALF |
5923 ADVERTISE_SLCT);
5924 tg3_writephy(tp, MII_ADVERTISE, adv);
5925 tg3_writephy(tp, MII_BMCR, bmcr |
5926 BMCR_ANRESTART |
5927 BMCR_ANENABLE);
5928 udelay(10);
5929 tg3_carrier_off(tp);
5930 }
5931 tg3_writephy(tp, MII_BMCR, new_bmcr);
5932 bmcr = new_bmcr;
5933 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5934 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5935 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5936 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5937 bmsr |= BMSR_LSTATUS;
5938 else
5939 bmsr &= ~BMSR_LSTATUS;
5940 }
5941 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5942 }
5943 }
5944
5945 if (bmsr & BMSR_LSTATUS) {
5946 current_speed = SPEED_1000;
5947 current_link_up = true;
5948 if (bmcr & BMCR_FULLDPLX)
5949 current_duplex = DUPLEX_FULL;
5950 else
5951 current_duplex = DUPLEX_HALF;
5952
5953 local_adv = 0;
5954 remote_adv = 0;
5955
5956 if (bmcr & BMCR_ANENABLE) {
5957 u32 common;
5958
5959 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5960 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5961 common = local_adv & remote_adv;
5962 if (common & (ADVERTISE_1000XHALF |
5963 ADVERTISE_1000XFULL)) {
5964 if (common & ADVERTISE_1000XFULL)
5965 current_duplex = DUPLEX_FULL;
5966 else
5967 current_duplex = DUPLEX_HALF;
5968
5969 tp->link_config.rmt_adv =
5970 mii_adv_to_ethtool_adv_x(remote_adv);
5971 } else if (!tg3_flag(tp, 5780_CLASS)) {
5972 /* Link is up via parallel detect */
5973 } else {
5974 current_link_up = false;
5975 }
5976 }
5977 }
5978
5979 fiber_setup_done:
5980 if (current_link_up && current_duplex == DUPLEX_FULL)
5981 tg3_setup_flow_control(tp, local_adv, remote_adv);
5982
5983 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5984 if (tp->link_config.active_duplex == DUPLEX_HALF)
5985 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5986
5987 tw32_f(MAC_MODE, tp->mac_mode);
5988 udelay(40);
5989
5990 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5991
5992 tp->link_config.active_speed = current_speed;
5993 tp->link_config.active_duplex = current_duplex;
5994
5995 tg3_test_and_report_link_chg(tp, current_link_up);
5996 return err;
5997 }
5998
5999 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6000 {
6001 if (tp->serdes_counter) {
6002 /* Give autoneg time to complete. */
6003 tp->serdes_counter--;
6004 return;
6005 }
6006
6007 if (!tp->link_up &&
6008 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6009 u32 bmcr;
6010
6011 tg3_readphy(tp, MII_BMCR, &bmcr);
6012 if (bmcr & BMCR_ANENABLE) {
6013 u32 phy1, phy2;
6014
6015 /* Select shadow register 0x1f */
6016 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6017 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6018
6019 /* Select expansion interrupt status register */
6020 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6021 MII_TG3_DSP_EXP1_INT_STAT);
6022 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6023 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6024
6025 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6026 /* We have signal detect and are not receiving
6027 * config code words, so the link is up by
6028 * parallel detection.
6029 */
6030
6031 bmcr &= ~BMCR_ANENABLE;
6032 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6033 tg3_writephy(tp, MII_BMCR, bmcr);
6034 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6035 }
6036 }
6037 } else if (tp->link_up &&
6038 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6039 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6040 u32 phy2;
6041
6042 /* Select expansion interrupt status register */
6043 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6044 MII_TG3_DSP_EXP1_INT_STAT);
6045 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6046 if (phy2 & 0x20) {
6047 u32 bmcr;
6048
6049 /* Config code words received, turn on autoneg. */
6050 tg3_readphy(tp, MII_BMCR, &bmcr);
6051 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6052
6053 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6054
6055 }
6056 }
6057 }
6058
6059 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6060 {
6061 u32 val;
6062 int err;
6063
6064 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6065 err = tg3_setup_fiber_phy(tp, force_reset);
6066 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6067 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6068 else
6069 err = tg3_setup_copper_phy(tp, force_reset);
6070
6071 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6072 u32 scale;
6073
6074 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6075 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6076 scale = 65;
6077 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6078 scale = 6;
6079 else
6080 scale = 12;
6081
6082 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6083 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6084 tw32(GRC_MISC_CFG, val);
6085 }
6086
6087 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6088 (6 << TX_LENGTHS_IPG_SHIFT);
6089 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6090 tg3_asic_rev(tp) == ASIC_REV_5762)
6091 val |= tr32(MAC_TX_LENGTHS) &
6092 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6093 TX_LENGTHS_CNT_DWN_VAL_MSK);
6094
6095 if (tp->link_config.active_speed == SPEED_1000 &&
6096 tp->link_config.active_duplex == DUPLEX_HALF)
6097 tw32(MAC_TX_LENGTHS, val |
6098 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6099 else
6100 tw32(MAC_TX_LENGTHS, val |
6101 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6102
6103 if (!tg3_flag(tp, 5705_PLUS)) {
6104 if (tp->link_up) {
6105 tw32(HOSTCC_STAT_COAL_TICKS,
6106 tp->coal.stats_block_coalesce_usecs);
6107 } else {
6108 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6109 }
6110 }
6111
6112 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6113 val = tr32(PCIE_PWR_MGMT_THRESH);
6114 if (!tp->link_up)
6115 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6116 tp->pwrmgmt_thresh;
6117 else
6118 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6119 tw32(PCIE_PWR_MGMT_THRESH, val);
6120 }
6121
6122 return err;
6123 }
6124
6125 /* tp->lock must be held */
6126 static u64 tg3_refclk_read(struct tg3 *tp)
6127 {
6128 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6129 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6130 }
6131
6132 /* tp->lock must be held */
6133 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6134 {
6135 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6136
6137 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6138 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6139 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6140 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6141 }
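/* The STOP/RESUME bracket above presumably keeps the free-running
 * counter from advancing between the two 32-bit halves of the
 * write, so the new 64-bit value takes effect atomically as seen
 * by the chip.
 */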
6142
6143 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6144 static inline void tg3_full_unlock(struct tg3 *tp);
6145 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6146 {
6147 struct tg3 *tp = netdev_priv(dev);
6148
6149 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6150 SOF_TIMESTAMPING_RX_SOFTWARE |
6151 SOF_TIMESTAMPING_SOFTWARE;
6152
6153 if (tg3_flag(tp, PTP_CAPABLE)) {
6154 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6155 SOF_TIMESTAMPING_RX_HARDWARE |
6156 SOF_TIMESTAMPING_RAW_HARDWARE;
6157 }
6158
6159 if (tp->ptp_clock)
6160 info->phc_index = ptp_clock_index(tp->ptp_clock);
6161 else
6162 info->phc_index = -1;
6163
6164 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6165
6166 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6167 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6168 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6169 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6170 return 0;
6171 }
6172
6173 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6174 {
6175 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6176 bool neg_adj = false;
6177 u32 correction = 0;
6178
6179 if (ppb < 0) {
6180 neg_adj = true;
6181 ppb = -ppb;
6182 }
6183
6184 /* Frequency adjustment is performed using hardware with a 24-bit
6185 * accumulator and a programmable correction value. On each clock
6186 * cycle, the correction value gets added to the accumulator and
6187 * when it overflows, the time counter is incremented/decremented.
6188 *
6189 * So conversion from ppb to correction value is
6190 * ppb * (1 << 24) / 1000000000
6191 */
6192 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6193 TG3_EAV_REF_CLK_CORRECT_MASK;
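/* Worked example (illustrative values, not from any datasheet):
 * ppb = 1000000 (+1000 ppm) gives
 * correction = 1000000 * (1 << 24) / 1000000000 ~= 16777,
 * so the 24-bit accumulator overflows roughly once every
 * 2^24 / 16777 ~= 1000 clock cycles.
 */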
6194
6195 tg3_full_lock(tp, 0);
6196
6197 if (correction)
6198 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6199 TG3_EAV_REF_CLK_CORRECT_EN |
6200 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6201 else
6202 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6203
6204 tg3_full_unlock(tp);
6205
6206 return 0;
6207 }
6208
6209 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6210 {
6211 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6212
6213 tg3_full_lock(tp, 0);
6214 tp->ptp_adjust += delta;
6215 tg3_full_unlock(tp);
6216
6217 return 0;
6218 }
6219
6220 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6221 {
6222 u64 ns;
6223 u32 remainder;
6224 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6225
6226 tg3_full_lock(tp, 0);
6227 ns = tg3_refclk_read(tp);
6228 ns += tp->ptp_adjust;
6229 tg3_full_unlock(tp);
6230
6231 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6232 ts->tv_nsec = remainder;
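/* E.g. ns = 1500000123 splits into tv_sec = 1 and
 * tv_nsec = 500000123 (illustrative values).
 */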
6233
6234 return 0;
6235 }
6236
6237 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6238 const struct timespec *ts)
6239 {
6240 u64 ns;
6241 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6242
6243 ns = timespec_to_ns(ts);
6244
6245 tg3_full_lock(tp, 0);
6246 tg3_refclk_write(tp, ns);
6247 tp->ptp_adjust = 0;
6248 tg3_full_unlock(tp);
6249
6250 return 0;
6251 }
6252
6253 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6254 struct ptp_clock_request *rq, int on)
6255 {
6256 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6257 u32 clock_ctl;
6258 int rval = 0;
6259
6260 switch (rq->type) {
6261 case PTP_CLK_REQ_PEROUT:
6262 if (rq->perout.index != 0)
6263 return -EINVAL;
6264
6265 tg3_full_lock(tp, 0);
6266 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6267 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6268
6269 if (on) {
6270 u64 nsec;
6271
6272 nsec = rq->perout.start.sec * 1000000000ULL +
6273 rq->perout.start.nsec;
6274
6275 if (rq->perout.period.sec || rq->perout.period.nsec) {
6276 netdev_warn(tp->dev,
6277 "Device supports only a one-shot timesync output, period must be 0\n");
6278 rval = -EINVAL;
6279 goto err_out;
6280 }
6281
6282 if (nsec & (1ULL << 63)) {
6283 netdev_warn(tp->dev,
6284 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6285 rval = -EINVAL;
6286 goto err_out;
6287 }
6288
6289 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6290 tw32(TG3_EAV_WATCHDOG0_MSB,
6291 TG3_EAV_WATCHDOG0_EN |
6292 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6293
6294 tw32(TG3_EAV_REF_CLCK_CTL,
6295 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6296 } else {
6297 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6298 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6299 }
6300
6301 err_out:
6302 tg3_full_unlock(tp);
6303 return rval;
6304
6305 default:
6306 break;
6307 }
6308
6309 return -EOPNOTSUPP;
6310 }
6311
6312 static const struct ptp_clock_info tg3_ptp_caps = {
6313 .owner = THIS_MODULE,
6314 .name = "tg3 clock",
6315 .max_adj = 250000000,
6316 .n_alarm = 0,
6317 .n_ext_ts = 0,
6318 .n_per_out = 1,
6319 .n_pins = 0,
6320 .pps = 0,
6321 .adjfreq = tg3_ptp_adjfreq,
6322 .adjtime = tg3_ptp_adjtime,
6323 .gettime = tg3_ptp_gettime,
6324 .settime = tg3_ptp_settime,
6325 .enable = tg3_ptp_enable,
6326 };
6327
6328 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6329 struct skb_shared_hwtstamps *timestamp)
6330 {
6331 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6332 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6333 tp->ptp_adjust);
6334 }
6335
6336 /* tp->lock must be held */
6337 static void tg3_ptp_init(struct tg3 *tp)
6338 {
6339 if (!tg3_flag(tp, PTP_CAPABLE))
6340 return;
6341
6342 /* Initialize the hardware clock to the system time. */
6343 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6344 tp->ptp_adjust = 0;
6345 tp->ptp_info = tg3_ptp_caps;
6346 }
6347
6348 /* tp->lock must be held */
6349 static void tg3_ptp_resume(struct tg3 *tp)
6350 {
6351 if (!tg3_flag(tp, PTP_CAPABLE))
6352 return;
6353
6354 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6355 tp->ptp_adjust = 0;
6356 }
6357
6358 static void tg3_ptp_fini(struct tg3 *tp)
6359 {
6360 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6361 return;
6362
6363 ptp_clock_unregister(tp->ptp_clock);
6364 tp->ptp_clock = NULL;
6365 tp->ptp_adjust = 0;
6366 }
6367
6368 static inline int tg3_irq_sync(struct tg3 *tp)
6369 {
6370 return tp->irq_sync;
6371 }
6372
6373 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6374 {
6375 int i;
6376
6377 dst = (u32 *)((u8 *)dst + off);
6378 for (i = 0; i < len; i += sizeof(u32))
6379 *dst++ = tr32(off + i);
6380 }
6381
6382 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6383 {
6384 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6385 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6386 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6387 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6388 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6389 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6390 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6391 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6392 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6393 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6394 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6395 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6396 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6397 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6398 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6399 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6400 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6401 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6402 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6403
6404 if (tg3_flag(tp, SUPPORT_MSIX))
6405 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6406
6407 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6408 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6409 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6410 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6411 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6412 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6413 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6414 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6415
6416 if (!tg3_flag(tp, 5705_PLUS)) {
6417 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6418 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6419 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6420 }
6421
6422 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6423 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6424 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6425 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6426 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6427
6428 if (tg3_flag(tp, NVRAM))
6429 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6430 }
6431
6432 static void tg3_dump_state(struct tg3 *tp)
6433 {
6434 int i;
6435 u32 *regs;
6436
6437 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6438 if (!regs)
6439 return;
6440
6441 if (tg3_flag(tp, PCI_EXPRESS)) {
6442 /* Read up to but not including private PCI registers */
6443 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6444 regs[i / sizeof(u32)] = tr32(i);
6445 } else
6446 tg3_dump_legacy_regs(tp, regs);
6447
6448 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6449 if (!regs[i + 0] && !regs[i + 1] &&
6450 !regs[i + 2] && !regs[i + 3])
6451 continue;
6452
6453 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6454 i * 4,
6455 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6456 }
6457
6458 kfree(regs);
6459
6460 for (i = 0; i < tp->irq_cnt; i++) {
6461 struct tg3_napi *tnapi = &tp->napi[i];
6462
6463 /* SW status block */
6464 netdev_err(tp->dev,
6465 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6466 i,
6467 tnapi->hw_status->status,
6468 tnapi->hw_status->status_tag,
6469 tnapi->hw_status->rx_jumbo_consumer,
6470 tnapi->hw_status->rx_consumer,
6471 tnapi->hw_status->rx_mini_consumer,
6472 tnapi->hw_status->idx[0].rx_producer,
6473 tnapi->hw_status->idx[0].tx_consumer);
6474
6475 netdev_err(tp->dev,
6476 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6477 i,
6478 tnapi->last_tag, tnapi->last_irq_tag,
6479 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6480 tnapi->rx_rcb_ptr,
6481 tnapi->prodring.rx_std_prod_idx,
6482 tnapi->prodring.rx_std_cons_idx,
6483 tnapi->prodring.rx_jmb_prod_idx,
6484 tnapi->prodring.rx_jmb_cons_idx);
6485 }
6486 }
6487
6488 /* This is called whenever we suspect that the system chipset is re-
6489 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6490 * is bogus tx completions. We try to recover by setting the
6491 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6492 * in the workqueue.
6493 */
6494 static void tg3_tx_recover(struct tg3 *tp)
6495 {
6496 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6497 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6498
6499 netdev_warn(tp->dev,
6500 "The system may be re-ordering memory-mapped I/O "
6501 "cycles to the network device, attempting to recover. "
6502 "Please report the problem to the driver maintainer "
6503 "and include system chipset information.\n");
6504
6505 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6506 }
6507
6508 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6509 {
6510 /* Tell compiler to fetch tx indices from memory. */
6511 barrier();
6512 return tnapi->tx_pending -
6513 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6514 }
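/* Illustration with hypothetical ring state: for a 512-entry ring
 * with tx_pending = 511, tx_prod = 5 and tx_cons = 510 (producer
 * has wrapped), (5 - 510) & 511 = 7 descriptors are in flight, so
 * 511 - 7 = 504 slots remain. The mask makes the subtraction
 * wrap-safe without any branching.
 */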
6515
6516 /* Tigon3 never reports partial packet sends. So we do not
6517 * need special logic to handle SKBs that have not had all
6518 * of their frags sent yet, like SunGEM does.
6519 */
6520 static void tg3_tx(struct tg3_napi *tnapi)
6521 {
6522 struct tg3 *tp = tnapi->tp;
6523 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6524 u32 sw_idx = tnapi->tx_cons;
6525 struct netdev_queue *txq;
6526 int index = tnapi - tp->napi;
6527 unsigned int pkts_compl = 0, bytes_compl = 0;
6528
6529 if (tg3_flag(tp, ENABLE_TSS))
6530 index--;
6531
6532 txq = netdev_get_tx_queue(tp->dev, index);
6533
6534 while (sw_idx != hw_idx) {
6535 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6536 struct sk_buff *skb = ri->skb;
6537 int i, tx_bug = 0;
6538
6539 if (unlikely(skb == NULL)) {
6540 tg3_tx_recover(tp);
6541 return;
6542 }
6543
6544 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6545 struct skb_shared_hwtstamps timestamp;
6546 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6547 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6548
6549 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6550
6551 skb_tstamp_tx(skb, &timestamp);
6552 }
6553
6554 pci_unmap_single(tp->pdev,
6555 dma_unmap_addr(ri, mapping),
6556 skb_headlen(skb),
6557 PCI_DMA_TODEVICE);
6558
6559 ri->skb = NULL;
6560
6561 while (ri->fragmented) {
6562 ri->fragmented = false;
6563 sw_idx = NEXT_TX(sw_idx);
6564 ri = &tnapi->tx_buffers[sw_idx];
6565 }
6566
6567 sw_idx = NEXT_TX(sw_idx);
6568
6569 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6570 ri = &tnapi->tx_buffers[sw_idx];
6571 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6572 tx_bug = 1;
6573
6574 pci_unmap_page(tp->pdev,
6575 dma_unmap_addr(ri, mapping),
6576 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6577 PCI_DMA_TODEVICE);
6578
6579 while (ri->fragmented) {
6580 ri->fragmented = false;
6581 sw_idx = NEXT_TX(sw_idx);
6582 ri = &tnapi->tx_buffers[sw_idx];
6583 }
6584
6585 sw_idx = NEXT_TX(sw_idx);
6586 }
6587
6588 pkts_compl++;
6589 bytes_compl += skb->len;
6590
6591 dev_kfree_skb_any(skb);
6592
6593 if (unlikely(tx_bug)) {
6594 tg3_tx_recover(tp);
6595 return;
6596 }
6597 }
6598
6599 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6600
6601 tnapi->tx_cons = sw_idx;
6602
6603 /* Need to make the tx_cons update visible to tg3_start_xmit()
6604 * before checking for netif_queue_stopped(). Without the
6605 * memory barrier, there is a small possibility that tg3_start_xmit()
6606 * will miss it and cause the queue to be stopped forever.
6607 */
6608 smp_mb();
6609
6610 if (unlikely(netif_tx_queue_stopped(txq) &&
6611 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6612 __netif_tx_lock(txq, smp_processor_id());
6613 if (netif_tx_queue_stopped(txq) &&
6614 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6615 netif_tx_wake_queue(txq);
6616 __netif_tx_unlock(txq);
6617 }
6618 }
6619
6620 static void tg3_frag_free(bool is_frag, void *data)
6621 {
6622 if (is_frag)
6623 put_page(virt_to_head_page(data));
6624 else
6625 kfree(data);
6626 }
6627
6628 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6629 {
6630 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6631 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6632
6633 if (!ri->data)
6634 return;
6635
6636 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6637 map_sz, PCI_DMA_FROMDEVICE);
6638 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6639 ri->data = NULL;
6640 }
6641
6642
6643 /* Returns size of skb allocated or < 0 on error.
6644 *
6645 * We only need to fill in the address because the other members
6646 * of the RX descriptor are invariant, see tg3_init_rings.
6647 *
6648 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6649 * posting buffers we only dirty the first cache line of the RX
6650 * descriptor (containing the address). Whereas for the RX status
6651 * buffers the cpu only reads the last cacheline of the RX descriptor
6652 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6653 */
6654 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6655 u32 opaque_key, u32 dest_idx_unmasked,
6656 unsigned int *frag_size)
6657 {
6658 struct tg3_rx_buffer_desc *desc;
6659 struct ring_info *map;
6660 u8 *data;
6661 dma_addr_t mapping;
6662 int skb_size, data_size, dest_idx;
6663
6664 switch (opaque_key) {
6665 case RXD_OPAQUE_RING_STD:
6666 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6667 desc = &tpr->rx_std[dest_idx];
6668 map = &tpr->rx_std_buffers[dest_idx];
6669 data_size = tp->rx_pkt_map_sz;
6670 break;
6671
6672 case RXD_OPAQUE_RING_JUMBO:
6673 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6674 desc = &tpr->rx_jmb[dest_idx].std;
6675 map = &tpr->rx_jmb_buffers[dest_idx];
6676 data_size = TG3_RX_JMB_MAP_SZ;
6677 break;
6678
6679 default:
6680 return -EINVAL;
6681 }
6682
6683 /* Do not overwrite any of the map or rp information
6684 * until we are sure we can commit to a new buffer.
6685 *
6686 * Callers depend upon this behavior and assume that
6687 * we leave everything unchanged if we fail.
6688 */
6689 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6690 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6691 if (skb_size <= PAGE_SIZE) {
6692 data = netdev_alloc_frag(skb_size);
6693 *frag_size = skb_size;
6694 } else {
6695 data = kmalloc(skb_size, GFP_ATOMIC);
6696 *frag_size = 0;
6697 }
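/* Illustration: with a standard 1500-byte MTU the aligned size
 * fits in one page, so the page-fragment path is taken and
 * *frag_size is recorded for a later build_skb(); jumbo buffers
 * exceed PAGE_SIZE on 4K-page systems and fall back to kmalloc().
 */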
6698 if (!data)
6699 return -ENOMEM;
6700
6701 mapping = pci_map_single(tp->pdev,
6702 data + TG3_RX_OFFSET(tp),
6703 data_size,
6704 PCI_DMA_FROMDEVICE);
6705 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6706 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6707 return -EIO;
6708 }
6709
6710 map->data = data;
6711 dma_unmap_addr_set(map, mapping, mapping);
6712
6713 desc->addr_hi = ((u64)mapping >> 32);
6714 desc->addr_lo = ((u64)mapping & 0xffffffff);
6715
6716 return data_size;
6717 }
6718
6719 /* We only need to copy over the address because the other
6720 * members of the RX descriptor are invariant. See notes above
6721 * tg3_alloc_rx_data for full details.
6722 */
6723 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6724 struct tg3_rx_prodring_set *dpr,
6725 u32 opaque_key, int src_idx,
6726 u32 dest_idx_unmasked)
6727 {
6728 struct tg3 *tp = tnapi->tp;
6729 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6730 struct ring_info *src_map, *dest_map;
6731 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6732 int dest_idx;
6733
6734 switch (opaque_key) {
6735 case RXD_OPAQUE_RING_STD:
6736 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6737 dest_desc = &dpr->rx_std[dest_idx];
6738 dest_map = &dpr->rx_std_buffers[dest_idx];
6739 src_desc = &spr->rx_std[src_idx];
6740 src_map = &spr->rx_std_buffers[src_idx];
6741 break;
6742
6743 case RXD_OPAQUE_RING_JUMBO:
6744 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6745 dest_desc = &dpr->rx_jmb[dest_idx].std;
6746 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6747 src_desc = &spr->rx_jmb[src_idx].std;
6748 src_map = &spr->rx_jmb_buffers[src_idx];
6749 break;
6750
6751 default:
6752 return;
6753 }
6754
6755 dest_map->data = src_map->data;
6756 dma_unmap_addr_set(dest_map, mapping,
6757 dma_unmap_addr(src_map, mapping));
6758 dest_desc->addr_hi = src_desc->addr_hi;
6759 dest_desc->addr_lo = src_desc->addr_lo;
6760
6761 /* Ensure that the update to the skb happens after the physical
6762 * addresses have been transferred to the new BD location.
6763 */
6764 smp_wmb();
6765
6766 src_map->data = NULL;
6767 }
6768
6769 /* The RX ring scheme is composed of multiple rings which post fresh
6770 * buffers to the chip, and one special ring the chip uses to report
6771 * status back to the host.
6772 *
6773 * The special ring reports the status of received packets to the
6774 * host. The chip does not write into the original descriptor the
6775 * RX buffer was obtained from. The chip simply takes the original
6776 * descriptor as provided by the host, updates the status and length
6777 * field, then writes this into the next status ring entry.
6778 *
6779 * Each ring the host uses to post buffers to the chip is described
6780 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6781 * it is first placed into on-chip RAM. When the packet's length
6782 * is known, the chip walks down the TG3_BDINFO entries to select the
6783 * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
6784 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6785 *
6786 * The "separate ring for rx status" scheme may sound queer, but it makes
6787 * sense from a cache coherency perspective. If only the host writes
6788 * to the buffer post rings, and only the chip writes to the rx status
6789 * rings, then cache lines never move beyond shared-modified state.
6790 * If both the host and chip were to write into the same ring, cache line
6791 * eviction could occur since both entities want it in an exclusive state.
6792 */
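/* A minimal sketch of the flow described above:
 *
 *   host --posts buffers--> std/jumbo producer rings --> chip
 *   chip --writes status--> rx return (status) ring  --> host
 *
 * Only the host writes the producer rings and only the chip writes
 * the return ring, so each cache line has a single writer.
 */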
6793 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6794 {
6795 struct tg3 *tp = tnapi->tp;
6796 u32 work_mask, rx_std_posted = 0;
6797 u32 std_prod_idx, jmb_prod_idx;
6798 u32 sw_idx = tnapi->rx_rcb_ptr;
6799 u16 hw_idx;
6800 int received;
6801 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6802
6803 hw_idx = *(tnapi->rx_rcb_prod_idx);
6804 /*
6805 * We need to order the read of hw_idx and the read of
6806 * the opaque cookie.
6807 */
6808 rmb();
6809 work_mask = 0;
6810 received = 0;
6811 std_prod_idx = tpr->rx_std_prod_idx;
6812 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6813 while (sw_idx != hw_idx && budget > 0) {
6814 struct ring_info *ri;
6815 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6816 unsigned int len;
6817 struct sk_buff *skb;
6818 dma_addr_t dma_addr;
6819 u32 opaque_key, desc_idx, *post_ptr;
6820 u8 *data;
6821 u64 tstamp = 0;
6822
6823 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6824 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6825 if (opaque_key == RXD_OPAQUE_RING_STD) {
6826 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6827 dma_addr = dma_unmap_addr(ri, mapping);
6828 data = ri->data;
6829 post_ptr = &std_prod_idx;
6830 rx_std_posted++;
6831 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6832 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6833 dma_addr = dma_unmap_addr(ri, mapping);
6834 data = ri->data;
6835 post_ptr = &jmb_prod_idx;
6836 } else
6837 goto next_pkt_nopost;
6838
6839 work_mask |= opaque_key;
6840
6841 if (desc->err_vlan & RXD_ERR_MASK) {
6842 drop_it:
6843 tg3_recycle_rx(tnapi, tpr, opaque_key,
6844 desc_idx, *post_ptr);
6845 drop_it_no_recycle:
6846 /* Other statistics are kept by the card itself. */
6847 tp->rx_dropped++;
6848 goto next_pkt;
6849 }
6850
6851 prefetch(data + TG3_RX_OFFSET(tp));
6852 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6853 ETH_FCS_LEN;
6854
6855 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6856 RXD_FLAG_PTPSTAT_PTPV1 ||
6857 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858 RXD_FLAG_PTPSTAT_PTPV2) {
6859 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6860 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6861 }
6862
6863 if (len > TG3_RX_COPY_THRESH(tp)) {
6864 int skb_size;
6865 unsigned int frag_size;
6866
6867 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6868 *post_ptr, &frag_size);
6869 if (skb_size < 0)
6870 goto drop_it;
6871
6872 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6873 PCI_DMA_FROMDEVICE);
6874
6875 /* Ensure that the update to the data happens
6876 * after the usage of the old DMA mapping.
6877 */
6878 smp_wmb();
6879
6880 ri->data = NULL;
6881
6882 skb = build_skb(data, frag_size);
6883 if (!skb) {
6884 tg3_frag_free(frag_size != 0, data);
6885 goto drop_it_no_recycle;
6886 }
6887 skb_reserve(skb, TG3_RX_OFFSET(tp));
6888 } else {
6889 tg3_recycle_rx(tnapi, tpr, opaque_key,
6890 desc_idx, *post_ptr);
6891
6892 skb = netdev_alloc_skb(tp->dev,
6893 len + TG3_RAW_IP_ALIGN);
6894 if (skb == NULL)
6895 goto drop_it_no_recycle;
6896
6897 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6898 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6899 memcpy(skb->data,
6900 data + TG3_RX_OFFSET(tp),
6901 len);
6902 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6903 }
6904
6905 skb_put(skb, len);
6906 if (tstamp)
6907 tg3_hwclock_to_timestamp(tp, tstamp,
6908 skb_hwtstamps(skb));
6909
6910 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6911 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6912 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6913 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6914 skb->ip_summed = CHECKSUM_UNNECESSARY;
6915 else
6916 skb_checksum_none_assert(skb);
6917
6918 skb->protocol = eth_type_trans(skb, tp->dev);
6919
6920 if (len > (tp->dev->mtu + ETH_HLEN) &&
6921 skb->protocol != htons(ETH_P_8021Q)) {
6922 dev_kfree_skb_any(skb);
6923 goto drop_it_no_recycle;
6924 }
6925
6926 if (desc->type_flags & RXD_FLAG_VLAN &&
6927 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6928 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6929 desc->err_vlan & RXD_VLAN_MASK);
6930
6931 napi_gro_receive(&tnapi->napi, skb);
6932
6933 received++;
6934 budget--;
6935
6936 next_pkt:
6937 (*post_ptr)++;
6938
6939 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6940 tpr->rx_std_prod_idx = std_prod_idx &
6941 tp->rx_std_ring_mask;
6942 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6943 tpr->rx_std_prod_idx);
6944 work_mask &= ~RXD_OPAQUE_RING_STD;
6945 rx_std_posted = 0;
6946 }
6947 next_pkt_nopost:
6948 sw_idx++;
6949 sw_idx &= tp->rx_ret_ring_mask;
6950
6951 /* Refresh hw_idx to see if there is new work */
6952 if (sw_idx == hw_idx) {
6953 hw_idx = *(tnapi->rx_rcb_prod_idx);
6954 rmb();
6955 }
6956 }
6957
6958 /* ACK the status ring. */
6959 tnapi->rx_rcb_ptr = sw_idx;
6960 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6961
6962 /* Refill RX ring(s). */
6963 if (!tg3_flag(tp, ENABLE_RSS)) {
6964 /* Sync BD data before updating mailbox */
6965 wmb();
6966
6967 if (work_mask & RXD_OPAQUE_RING_STD) {
6968 tpr->rx_std_prod_idx = std_prod_idx &
6969 tp->rx_std_ring_mask;
6970 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6971 tpr->rx_std_prod_idx);
6972 }
6973 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6974 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6975 tp->rx_jmb_ring_mask;
6976 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6977 tpr->rx_jmb_prod_idx);
6978 }
6979 mmiowb();
6980 } else if (work_mask) {
6981 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6982 * updated before the producer indices can be updated.
6983 */
6984 smp_wmb();
6985
6986 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6987 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6988
6989 if (tnapi != &tp->napi[1]) {
6990 tp->rx_refill = true;
6991 napi_schedule(&tp->napi[1].napi);
6992 }
6993 }
6994
6995 return received;
6996 }
6997
6998 static void tg3_poll_link(struct tg3 *tp)
6999 {
7000 /* handle link change and other phy events */
7001 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7002 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7003
7004 if (sblk->status & SD_STATUS_LINK_CHG) {
7005 sblk->status = SD_STATUS_UPDATED |
7006 (sblk->status & ~SD_STATUS_LINK_CHG);
7007 spin_lock(&tp->lock);
7008 if (tg3_flag(tp, USE_PHYLIB)) {
7009 tw32_f(MAC_STATUS,
7010 (MAC_STATUS_SYNC_CHANGED |
7011 MAC_STATUS_CFG_CHANGED |
7012 MAC_STATUS_MI_COMPLETION |
7013 MAC_STATUS_LNKSTATE_CHANGED));
7014 udelay(40);
7015 } else
7016 tg3_setup_phy(tp, false);
7017 spin_unlock(&tp->lock);
7018 }
7019 }
7020 }
7021
7022 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7023 struct tg3_rx_prodring_set *dpr,
7024 struct tg3_rx_prodring_set *spr)
7025 {
7026 u32 si, di, cpycnt, src_prod_idx;
7027 int i, err = 0;
7028
7029 while (1) {
7030 src_prod_idx = spr->rx_std_prod_idx;
7031
7032 /* Make sure updates to the rx_std_buffers[] entries and the
7033 * standard producer index are seen in the correct order.
7034 */
7035 smp_rmb();
7036
7037 if (spr->rx_std_cons_idx == src_prod_idx)
7038 break;
7039
7040 if (spr->rx_std_cons_idx < src_prod_idx)
7041 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7042 else
7043 cpycnt = tp->rx_std_ring_mask + 1 -
7044 spr->rx_std_cons_idx;
7045
7046 cpycnt = min(cpycnt,
7047 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7048
7049 si = spr->rx_std_cons_idx;
7050 di = dpr->rx_std_prod_idx;
7051
7052 for (i = di; i < di + cpycnt; i++) {
7053 if (dpr->rx_std_buffers[i].data) {
7054 cpycnt = i - di;
7055 err = -ENOSPC;
7056 break;
7057 }
7058 }
7059
7060 if (!cpycnt)
7061 break;
7062
7063 /* Ensure that updates to the rx_std_buffers ring and the
7064 * shadowed hardware producer ring from tg3_recycle_skb() are
7065 * ordered correctly WRT the skb check above.
7066 */
7067 smp_rmb();
7068
7069 memcpy(&dpr->rx_std_buffers[di],
7070 &spr->rx_std_buffers[si],
7071 cpycnt * sizeof(struct ring_info));
7072
7073 for (i = 0; i < cpycnt; i++, di++, si++) {
7074 struct tg3_rx_buffer_desc *sbd, *dbd;
7075 sbd = &spr->rx_std[si];
7076 dbd = &dpr->rx_std[di];
7077 dbd->addr_hi = sbd->addr_hi;
7078 dbd->addr_lo = sbd->addr_lo;
7079 }
7080
7081 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7082 tp->rx_std_ring_mask;
7083 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7084 tp->rx_std_ring_mask;
7085 }
7086
7087 while (1) {
7088 src_prod_idx = spr->rx_jmb_prod_idx;
7089
7090 /* Make sure updates to the rx_jmb_buffers[] entries and
7091 * the jumbo producer index are seen in the correct order.
7092 */
7093 smp_rmb();
7094
7095 if (spr->rx_jmb_cons_idx == src_prod_idx)
7096 break;
7097
7098 if (spr->rx_jmb_cons_idx < src_prod_idx)
7099 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7100 else
7101 cpycnt = tp->rx_jmb_ring_mask + 1 -
7102 spr->rx_jmb_cons_idx;
7103
7104 cpycnt = min(cpycnt,
7105 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7106
7107 si = spr->rx_jmb_cons_idx;
7108 di = dpr->rx_jmb_prod_idx;
7109
7110 for (i = di; i < di + cpycnt; i++) {
7111 if (dpr->rx_jmb_buffers[i].data) {
7112 cpycnt = i - di;
7113 err = -ENOSPC;
7114 break;
7115 }
7116 }
7117
7118 if (!cpycnt)
7119 break;
7120
7121 /* Ensure that updates to the rx_jmb_buffers ring and the
7122 * shadowed hardware producer ring from tg3_recycle_skb() are
7123 * ordered correctly WRT the skb check above.
7124 */
7125 smp_rmb();
7126
7127 memcpy(&dpr->rx_jmb_buffers[di],
7128 &spr->rx_jmb_buffers[si],
7129 cpycnt * sizeof(struct ring_info));
7130
7131 for (i = 0; i < cpycnt; i++, di++, si++) {
7132 struct tg3_rx_buffer_desc *sbd, *dbd;
7133 sbd = &spr->rx_jmb[si].std;
7134 dbd = &dpr->rx_jmb[di].std;
7135 dbd->addr_hi = sbd->addr_hi;
7136 dbd->addr_lo = sbd->addr_lo;
7137 }
7138
7139 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7140 tp->rx_jmb_ring_mask;
7141 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7142 tp->rx_jmb_ring_mask;
7143 }
7144
7145 return err;
7146 }
7147
7148 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7149 {
7150 struct tg3 *tp = tnapi->tp;
7151
7152 /* run TX completion thread */
7153 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7154 tg3_tx(tnapi);
7155 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7156 return work_done;
7157 }
7158
7159 if (!tnapi->rx_rcb_prod_idx)
7160 return work_done;
7161
7162 /* run RX thread, within the bounds set by NAPI.
7163 * All RX "locking" is done by ensuring outside
7164 * code synchronizes with tg3->napi.poll()
7165 */
7166 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7167 work_done += tg3_rx(tnapi, budget - work_done);
7168
7169 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7170 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7171 int i, err = 0;
7172 u32 std_prod_idx = dpr->rx_std_prod_idx;
7173 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7174
7175 tp->rx_refill = false;
7176 for (i = 1; i <= tp->rxq_cnt; i++)
7177 err |= tg3_rx_prodring_xfer(tp, dpr,
7178 &tp->napi[i].prodring);
7179
7180 wmb();
7181
7182 if (std_prod_idx != dpr->rx_std_prod_idx)
7183 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7184 dpr->rx_std_prod_idx);
7185
7186 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7187 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7188 dpr->rx_jmb_prod_idx);
7189
7190 mmiowb();
7191
7192 if (err)
7193 tw32_f(HOSTCC_MODE, tp->coal_now);
7194 }
7195
7196 return work_done;
7197 }
7198
7199 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7200 {
7201 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7202 schedule_work(&tp->reset_task);
7203 }
7204
7205 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7206 {
7207 cancel_work_sync(&tp->reset_task);
7208 tg3_flag_clear(tp, RESET_TASK_PENDING);
7209 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7210 }
7211
7212 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7213 {
7214 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7215 struct tg3 *tp = tnapi->tp;
7216 int work_done = 0;
7217 struct tg3_hw_status *sblk = tnapi->hw_status;
7218
7219 while (1) {
7220 work_done = tg3_poll_work(tnapi, work_done, budget);
7221
7222 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7223 goto tx_recovery;
7224
7225 if (unlikely(work_done >= budget))
7226 break;
7227
7228 /* tp->last_tag is used in tg3_int_reenable() below
7229 * to tell the hw how much work has been processed,
7230 * so we must read it before checking for more work.
7231 */
7232 tnapi->last_tag = sblk->status_tag;
7233 tnapi->last_irq_tag = tnapi->last_tag;
7234 rmb();
7235
7236 /* check for RX/TX work to do */
7237 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7238 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7239
7240 /* This test is not race-free, but looping again
7241 * reduces the number of interrupts.
7242 */
7243 if (tnapi == &tp->napi[1] && tp->rx_refill)
7244 continue;
7245
7246 napi_complete(napi);
7247 /* Reenable interrupts. */
7248 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7249
7250 /* This test here is synchronized by napi_schedule()
7251 * and napi_complete() to close the race condition.
7252 */
7253 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7254 tw32(HOSTCC_MODE, tp->coalesce_mode |
7255 HOSTCC_MODE_ENABLE |
7256 tnapi->coal_now);
7257 }
7258 mmiowb();
7259 break;
7260 }
7261 }
7262
7263 return work_done;
7264
7265 tx_recovery:
7266 /* work_done is guaranteed to be less than budget. */
7267 napi_complete(napi);
7268 tg3_reset_task_schedule(tp);
7269 return work_done;
7270 }
7271
7272 static void tg3_process_error(struct tg3 *tp)
7273 {
7274 u32 val;
7275 bool real_error = false;
7276
7277 if (tg3_flag(tp, ERROR_PROCESSED))
7278 return;
7279
7280 /* Check Flow Attention register */
7281 val = tr32(HOSTCC_FLOW_ATTN);
7282 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7283 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7284 real_error = true;
7285 }
7286
7287 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7288 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7289 real_error = true;
7290 }
7291
7292 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7293 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7294 real_error = true;
7295 }
7296
7297 if (!real_error)
7298 return;
7299
7300 tg3_dump_state(tp);
7301
7302 tg3_flag_set(tp, ERROR_PROCESSED);
7303 tg3_reset_task_schedule(tp);
7304 }
7305
7306 static int tg3_poll(struct napi_struct *napi, int budget)
7307 {
7308 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7309 struct tg3 *tp = tnapi->tp;
7310 int work_done = 0;
7311 struct tg3_hw_status *sblk = tnapi->hw_status;
7312
7313 while (1) {
7314 if (sblk->status & SD_STATUS_ERROR)
7315 tg3_process_error(tp);
7316
7317 tg3_poll_link(tp);
7318
7319 work_done = tg3_poll_work(tnapi, work_done, budget);
7320
7321 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7322 goto tx_recovery;
7323
7324 if (unlikely(work_done >= budget))
7325 break;
7326
7327 if (tg3_flag(tp, TAGGED_STATUS)) {
7328 /* tp->last_tag is used in tg3_int_reenable() below
7329 * to tell the hw how much work has been processed,
7330 * so we must read it before checking for more work.
7331 */
7332 tnapi->last_tag = sblk->status_tag;
7333 tnapi->last_irq_tag = tnapi->last_tag;
7334 rmb();
7335 } else
7336 sblk->status &= ~SD_STATUS_UPDATED;
7337
7338 if (likely(!tg3_has_work(tnapi))) {
7339 napi_complete(napi);
7340 tg3_int_reenable(tnapi);
7341 break;
7342 }
7343 }
7344
7345 return work_done;
7346
7347 tx_recovery:
7348 /* work_done is guaranteed to be less than budget. */
7349 napi_complete(napi);
7350 tg3_reset_task_schedule(tp);
7351 return work_done;
7352 }
7353
7354 static void tg3_napi_disable(struct tg3 *tp)
7355 {
7356 int i;
7357
7358 for (i = tp->irq_cnt - 1; i >= 0; i--)
7359 napi_disable(&tp->napi[i].napi);
7360 }
7361
7362 static void tg3_napi_enable(struct tg3 *tp)
7363 {
7364 int i;
7365
7366 for (i = 0; i < tp->irq_cnt; i++)
7367 napi_enable(&tp->napi[i].napi);
7368 }
7369
7370 static void tg3_napi_init(struct tg3 *tp)
7371 {
7372 int i;
7373
7374 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7375 for (i = 1; i < tp->irq_cnt; i++)
7376 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7377 }
7378
7379 static void tg3_napi_fini(struct tg3 *tp)
7380 {
7381 int i;
7382
7383 for (i = 0; i < tp->irq_cnt; i++)
7384 netif_napi_del(&tp->napi[i].napi);
7385 }
7386
7387 static inline void tg3_netif_stop(struct tg3 *tp)
7388 {
7389 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7390 tg3_napi_disable(tp);
7391 netif_carrier_off(tp->dev);
7392 netif_tx_disable(tp->dev);
7393 }
7394
7395 /* tp->lock must be held */
7396 static inline void tg3_netif_start(struct tg3 *tp)
7397 {
7398 tg3_ptp_resume(tp);
7399
7400 /* NOTE: unconditional netif_tx_wake_all_queues is only
7401 * appropriate so long as all callers are assured to
7402 * have free tx slots (such as after tg3_init_hw)
7403 */
7404 netif_tx_wake_all_queues(tp->dev);
7405
7406 if (tp->link_up)
7407 netif_carrier_on(tp->dev);
7408
7409 tg3_napi_enable(tp);
7410 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7411 tg3_enable_ints(tp);
7412 }
7413
7414 static void tg3_irq_quiesce(struct tg3 *tp)
7415 {
7416 int i;
7417
7418 BUG_ON(tp->irq_sync);
7419
7420 tp->irq_sync = 1;
7421 smp_mb();
7422
7423 for (i = 0; i < tp->irq_cnt; i++)
7424 synchronize_irq(tp->napi[i].irq_vec);
7425 }
7426
7427 /* Fully shut down all tg3 driver activity elsewhere in the system.
7428 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
7429 * Most of the time this is not necessary, except when
7430 * shutting down the device.
7431 */
7432 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7433 {
7434 spin_lock_bh(&tp->lock);
7435 if (irq_sync)
7436 tg3_irq_quiesce(tp);
7437 }
7438
7439 static inline void tg3_full_unlock(struct tg3 *tp)
7440 {
7441 spin_unlock_bh(&tp->lock);
7442 }
7443
7444 /* One-shot MSI handler - Chip automatically disables interrupt
7445 * after sending MSI so driver doesn't have to do it.
7446 */
7447 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7448 {
7449 struct tg3_napi *tnapi = dev_id;
7450 struct tg3 *tp = tnapi->tp;
7451
7452 prefetch(tnapi->hw_status);
7453 if (tnapi->rx_rcb)
7454 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7455
7456 if (likely(!tg3_irq_sync(tp)))
7457 napi_schedule(&tnapi->napi);
7458
7459 return IRQ_HANDLED;
7460 }
7461
7462 /* MSI ISR - No need to check for interrupt sharing and no need to
7463 * flush status block and interrupt mailbox. PCI ordering rules
7464 * guarantee that MSI will arrive after the status block.
7465 */
7466 static irqreturn_t tg3_msi(int irq, void *dev_id)
7467 {
7468 struct tg3_napi *tnapi = dev_id;
7469 struct tg3 *tp = tnapi->tp;
7470
7471 prefetch(tnapi->hw_status);
7472 if (tnapi->rx_rcb)
7473 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7474 /*
7475 * Writing any value to intr-mbox-0 clears PCI INTA# and
7476 * chip-internal interrupt pending events.
7477 * Writing non-zero to intr-mbox-0 additionally tells the
7478 * NIC to stop sending us irqs, engaging "in-intr-handler"
7479 * event coalescing.
7480 */
7481 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7482 if (likely(!tg3_irq_sync(tp)))
7483 napi_schedule(&tnapi->napi);
7484
7485 return IRQ_RETVAL(1);
7486 }
7487
7488 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7489 {
7490 struct tg3_napi *tnapi = dev_id;
7491 struct tg3 *tp = tnapi->tp;
7492 struct tg3_hw_status *sblk = tnapi->hw_status;
7493 unsigned int handled = 1;
7494
7495 /* In INTx mode, it is possible for the interrupt to arrive at
7496 * the CPU before the status block posted prior to the interrupt does.
7497 * Reading the PCI State register will confirm whether the
7498 * interrupt is ours and will flush the status block.
7499 */
7500 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7501 if (tg3_flag(tp, CHIP_RESETTING) ||
7502 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7503 handled = 0;
7504 goto out;
7505 }
7506 }
7507
7508 /*
7509 * Writing any value to intr-mbox-0 clears PCI INTA# and
7510 * chip-internal interrupt pending events.
7511 * Writing non-zero to intr-mbox-0 additionally tells the
7512 * NIC to stop sending us irqs, engaging "in-intr-handler"
7513 * event coalescing.
7514 *
7515 * Flush the mailbox to de-assert the IRQ immediately to prevent
7516 * spurious interrupts. The flush impacts performance but
7517 * excessive spurious interrupts can be worse in some cases.
7518 */
7519 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7520 if (tg3_irq_sync(tp))
7521 goto out;
7522 sblk->status &= ~SD_STATUS_UPDATED;
7523 if (likely(tg3_has_work(tnapi))) {
7524 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7525 napi_schedule(&tnapi->napi);
7526 } else {
7527 /* No work, shared interrupt perhaps? re-enable
7528 * interrupts, and flush that PCI write
7529 */
7530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7531 0x00000000);
7532 }
7533 out:
7534 return IRQ_RETVAL(handled);
7535 }
7536
7537 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7538 {
7539 struct tg3_napi *tnapi = dev_id;
7540 struct tg3 *tp = tnapi->tp;
7541 struct tg3_hw_status *sblk = tnapi->hw_status;
7542 unsigned int handled = 1;
7543
7544 /* In INTx mode, it is possible for the interrupt to arrive at
7545 * the CPU before the status block posted prior to the interrupt does.
7546 * Reading the PCI State register will confirm whether the
7547 * interrupt is ours and will flush the status block.
7548 */
7549 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7550 if (tg3_flag(tp, CHIP_RESETTING) ||
7551 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7552 handled = 0;
7553 goto out;
7554 }
7555 }
7556
7557 /*
7558 * Writing any value to intr-mbox-0 clears PCI INTA# and
7559 * chip-internal interrupt pending events.
7560 * Writing non-zero to intr-mbox-0 additionally tells the
7561 * NIC to stop sending us irqs, engaging "in-intr-handler"
7562 * event coalescing.
7563 *
7564 * Flush the mailbox to de-assert the IRQ immediately to prevent
7565 * spurious interrupts. The flush impacts performance but
7566 * excessive spurious interrupts can be worse in some cases.
7567 */
7568 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7569
7570 /*
7571 * In a shared interrupt configuration, sometimes other devices'
7572 * interrupts will scream. We record the current status tag here
7573 * so that the above check can report that the screaming interrupts
7574 * are unhandled. Eventually they will be silenced.
7575 */
7576 tnapi->last_irq_tag = sblk->status_tag;
7577
7578 if (tg3_irq_sync(tp))
7579 goto out;
7580
7581 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7582
7583 napi_schedule(&tnapi->napi);
7584
7585 out:
7586 return IRQ_RETVAL(handled);
7587 }
7588
7589 /* ISR for interrupt test */
7590 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7591 {
7592 struct tg3_napi *tnapi = dev_id;
7593 struct tg3 *tp = tnapi->tp;
7594 struct tg3_hw_status *sblk = tnapi->hw_status;
7595
7596 if ((sblk->status & SD_STATUS_UPDATED) ||
7597 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7598 tg3_disable_ints(tp);
7599 return IRQ_RETVAL(1);
7600 }
7601 return IRQ_RETVAL(0);
7602 }
7603
7604 #ifdef CONFIG_NET_POLL_CONTROLLER
7605 static void tg3_poll_controller(struct net_device *dev)
7606 {
7607 int i;
7608 struct tg3 *tp = netdev_priv(dev);
7609
7610 if (tg3_irq_sync(tp))
7611 return;
7612
7613 for (i = 0; i < tp->irq_cnt; i++)
7614 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7615 }
7616 #endif
7617
7618 static void tg3_tx_timeout(struct net_device *dev)
7619 {
7620 struct tg3 *tp = netdev_priv(dev);
7621
7622 if (netif_msg_tx_err(tp)) {
7623 netdev_err(dev, "transmit timed out, resetting\n");
7624 tg3_dump_state(tp);
7625 }
7626
7627 tg3_reset_task_schedule(tp);
7628 }
7629
7630 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7631 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7632 {
7633 u32 base = (u32) mapping & 0xffffffff;
7634
7635 return base + len + 8 < base;
7636 }
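/* Example with illustrative values: base = 0xfffffff0, len = 0x10
 * gives base + len + 8 = 0x100000008, which truncates to 0x8 in
 * 32-bit arithmetic; 0x8 < base, so the buffer is flagged as
 * crossing a 4GB boundary. The "+ 8" presumably adds a small
 * safety margin for chip-side reads past the buffer end.
 */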
7637
7638 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7639 * of any 4GB boundaries: 4G, 8G, etc
7640 */
7641 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7642 u32 len, u32 mss)
7643 {
7644 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7645 u32 base = (u32) mapping & 0xffffffff;
7646
7647 return ((base + len + (mss & 0x3fff)) < base);
7648 }
7649 return 0;
7650 }
7651
7652 /* Test for DMA addresses > 40-bit */
7653 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7654 int len)
7655 {
7656 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7657 if (tg3_flag(tp, 40BIT_DMA_BUG))
7658 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7659 return 0;
7660 #else
7661 return 0;
7662 #endif
7663 }
7664
7665 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7666 dma_addr_t mapping, u32 len, u32 flags,
7667 u32 mss, u32 vlan)
7668 {
7669 txbd->addr_hi = ((u64) mapping >> 32);
7670 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7671 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7672 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7673 }
7674
7675 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7676 dma_addr_t map, u32 len, u32 flags,
7677 u32 mss, u32 vlan)
7678 {
7679 struct tg3 *tp = tnapi->tp;
7680 bool hwbug = false;
7681
7682 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7683 hwbug = true;
7684
7685 if (tg3_4g_overflow_test(map, len))
7686 hwbug = true;
7687
7688 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7689 hwbug = true;
7690
7691 if (tg3_40bit_overflow_test(tp, map, len))
7692 hwbug = true;
7693
7694 if (tp->dma_limit) {
7695 u32 prvidx = *entry;
7696 u32 tmp_flag = flags & ~TXD_FLAG_END;
7697 while (len > tp->dma_limit && *budget) {
7698 u32 frag_len = tp->dma_limit;
7699 len -= tp->dma_limit;
7700
7701 /* Avoid the 8-byte DMA problem */
7702 if (len <= 8) {
7703 len += tp->dma_limit / 2;
7704 frag_len = tp->dma_limit / 2;
7705 }
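/* Split example (hypothetical limit): with dma_limit = 4096 and
 * len = 4100, a full 4096-byte fragment would leave a 4-byte
 * remainder and re-trigger the 8-byte bug, so a 2048-byte
 * fragment is emitted instead and 2052 bytes carry forward.
 */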
7706
7707 tnapi->tx_buffers[*entry].fragmented = true;
7708
7709 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7710 frag_len, tmp_flag, mss, vlan);
7711 *budget -= 1;
7712 prvidx = *entry;
7713 *entry = NEXT_TX(*entry);
7714
7715 map += frag_len;
7716 }
7717
7718 if (len) {
7719 if (*budget) {
7720 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7721 len, flags, mss, vlan);
7722 *budget -= 1;
7723 *entry = NEXT_TX(*entry);
7724 } else {
7725 hwbug = true;
7726 tnapi->tx_buffers[prvidx].fragmented = false;
7727 }
7728 }
7729 } else {
7730 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7731 len, flags, mss, vlan);
7732 *entry = NEXT_TX(*entry);
7733 }
7734
7735 return hwbug;
7736 }
7737
7738 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7739 {
7740 int i;
7741 struct sk_buff *skb;
7742 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7743
7744 skb = txb->skb;
7745 txb->skb = NULL;
7746
7747 pci_unmap_single(tnapi->tp->pdev,
7748 dma_unmap_addr(txb, mapping),
7749 skb_headlen(skb),
7750 PCI_DMA_TODEVICE);
7751
7752 while (txb->fragmented) {
7753 txb->fragmented = false;
7754 entry = NEXT_TX(entry);
7755 txb = &tnapi->tx_buffers[entry];
7756 }
7757
7758 for (i = 0; i <= last; i++) {
7759 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7760
7761 entry = NEXT_TX(entry);
7762 txb = &tnapi->tx_buffers[entry];
7763
7764 pci_unmap_page(tnapi->tp->pdev,
7765 dma_unmap_addr(txb, mapping),
7766 skb_frag_size(frag), PCI_DMA_TODEVICE);
7767
7768 while (txb->fragmented) {
7769 txb->fragmented = false;
7770 entry = NEXT_TX(entry);
7771 txb = &tnapi->tx_buffers[entry];
7772 }
7773 }
7774 }
7775
7776 /* Work around 4GB and 40-bit hardware DMA bugs. */
7777 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7778 struct sk_buff **pskb,
7779 u32 *entry, u32 *budget,
7780 u32 base_flags, u32 mss, u32 vlan)
7781 {
7782 struct tg3 *tp = tnapi->tp;
7783 struct sk_buff *new_skb, *skb = *pskb;
7784 dma_addr_t new_addr = 0;
7785 int ret = 0;
7786
7787 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7788 new_skb = skb_copy(skb, GFP_ATOMIC);
7789 else {
7790 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7791
7792 new_skb = skb_copy_expand(skb,
7793 skb_headroom(skb) + more_headroom,
7794 skb_tailroom(skb), GFP_ATOMIC);
7795 }
7796
7797 if (!new_skb) {
7798 ret = -1;
7799 } else {
7800 /* New SKB is guaranteed to be linear. */
7801 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7802 PCI_DMA_TODEVICE);
7803 /* Make sure the mapping succeeded */
7804 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7805 dev_kfree_skb_any(new_skb);
7806 ret = -1;
7807 } else {
7808 u32 save_entry = *entry;
7809
7810 base_flags |= TXD_FLAG_END;
7811
7812 tnapi->tx_buffers[*entry].skb = new_skb;
7813 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7814 mapping, new_addr);
7815
7816 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7817 new_skb->len, base_flags,
7818 mss, vlan)) {
7819 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7820 dev_kfree_skb_any(new_skb);
7821 ret = -1;
7822 }
7823 }
7824 }
7825
7826 dev_kfree_skb_any(skb);
7827 *pskb = new_skb;
7828 return ret;
7829 }
7830
7831 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7832
7833 /* Use GSO to work around a rare TSO bug that may be triggered when the
7834 * TSO header is greater than 80 bytes.
7835 */
7836 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7837 {
7838 struct sk_buff *segs, *nskb;
7839 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7840
7841 /* Estimate the number of fragments in the worst case */
7842 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7843 netif_stop_queue(tp->dev);
7844
7845 /* netif_tx_stop_queue() must be done before checking
7846 * the tx index in tg3_tx_avail() below, because in
7847 * tg3_tx(), we update tx index before checking for
7848 * netif_tx_queue_stopped().
7849 */
7850 smp_mb();
7851 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7852 return NETDEV_TX_BUSY;
7853
7854 netif_wake_queue(tp->dev);
7855 }
7856
7857 segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
7858 if (IS_ERR(segs) || !segs)
7859 goto tg3_tso_bug_end;
7860
7861 do {
7862 nskb = segs;
7863 segs = segs->next;
7864 nskb->next = NULL;
7865 tg3_start_xmit(nskb, tp->dev);
7866 } while (segs);
7867
7868 tg3_tso_bug_end:
7869 dev_kfree_skb_any(skb);
7870
7871 return NETDEV_TX_OK;
7872 }
7873
7874 /* hard_start_xmit for all devices */
7875 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7876 {
7877 struct tg3 *tp = netdev_priv(dev);
7878 u32 len, entry, base_flags, mss, vlan = 0;
7879 u32 budget;
7880 int i = -1, would_hit_hwbug;
7881 dma_addr_t mapping;
7882 struct tg3_napi *tnapi;
7883 struct netdev_queue *txq;
7884 unsigned int last;
7885 struct iphdr *iph = NULL;
7886 struct tcphdr *tcph = NULL;
7887 __sum16 tcp_csum = 0, ip_csum = 0;
7888 __be16 ip_tot_len = 0;
7889
7890 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7891 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7892 if (tg3_flag(tp, ENABLE_TSS))
7893 tnapi++;
7894
7895 budget = tg3_tx_avail(tnapi);
7896
7897 /* We are running in BH disabled context with netif_tx_lock
7898 * and TX reclaim runs via tp->napi.poll inside of a software
7899 * interrupt. Furthermore, IRQ processing runs lockless so we have
7900 * no IRQ context deadlocks to worry about either. Rejoice!
7901 */
7902 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7903 if (!netif_tx_queue_stopped(txq)) {
7904 netif_tx_stop_queue(txq);
7905
7906 /* This is a hard error, log it. */
7907 netdev_err(dev,
7908 "BUG! Tx Ring full when queue awake!\n");
7909 }
7910 return NETDEV_TX_BUSY;
7911 }
7912
7913 entry = tnapi->tx_prod;
7914 base_flags = 0;
7915 if (skb->ip_summed == CHECKSUM_PARTIAL)
7916 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7917
7918 mss = skb_shinfo(skb)->gso_size;
7919 if (mss) {
7920 u32 tcp_opt_len, hdr_len;
7921
7922 if (skb_cow_head(skb, 0))
7923 goto drop;
7924
7925 iph = ip_hdr(skb);
7926 tcp_opt_len = tcp_optlen(skb);
7927
7928 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7929
7930 if (!skb_is_gso_v6(skb)) {
7931 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7932 tg3_flag(tp, TSO_BUG))
7933 return tg3_tso_bug(tp, skb);
7934
7935 ip_csum = iph->check;
7936 ip_tot_len = iph->tot_len;
7937 iph->check = 0;
7938 iph->tot_len = htons(mss + hdr_len);
7939 }
7940
7941 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7942 TXD_FLAG_CPU_POST_DMA);
7943
7944 tcph = tcp_hdr(skb);
7945 tcp_csum = tcph->check;
7946
7947 if (tg3_flag(tp, HW_TSO_1) ||
7948 tg3_flag(tp, HW_TSO_2) ||
7949 tg3_flag(tp, HW_TSO_3)) {
7950 tcph->check = 0;
7951 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7952 } else {
7953 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7954 0, IPPROTO_TCP, 0);
7955 }
7956
7957 if (tg3_flag(tp, HW_TSO_3)) {
7958 mss |= (hdr_len & 0xc) << 12;
7959 if (hdr_len & 0x10)
7960 base_flags |= 0x00000010;
7961 base_flags |= (hdr_len & 0x3e0) << 5;
7962 } else if (tg3_flag(tp, HW_TSO_2))
7963 mss |= hdr_len << 9;
7964 else if (tg3_flag(tp, HW_TSO_1) ||
7965 tg3_asic_rev(tp) == ASIC_REV_5705) {
7966 if (tcp_opt_len || iph->ihl > 5) {
7967 int tsflags;
7968
7969 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7970 mss |= (tsflags << 11);
7971 }
7972 } else {
7973 if (tcp_opt_len || iph->ihl > 5) {
7974 int tsflags;
7975
7976 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7977 base_flags |= tsflags << 12;
7978 }
7979 }
7980 }
7981
7982 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7983 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7984 base_flags |= TXD_FLAG_JMB_PKT;
7985
7986 if (vlan_tx_tag_present(skb)) {
7987 base_flags |= TXD_FLAG_VLAN;
7988 vlan = vlan_tx_tag_get(skb);
7989 }
7990
7991 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7992 tg3_flag(tp, TX_TSTAMP_EN)) {
7993 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7994 base_flags |= TXD_FLAG_HWTSTAMP;
7995 }
7996
7997 len = skb_headlen(skb);
7998
7999 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8000 if (pci_dma_mapping_error(tp->pdev, mapping))
8001 goto drop;
8002
8003
8004 tnapi->tx_buffers[entry].skb = skb;
8005 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8006
8007 would_hit_hwbug = 0;
8008
8009 if (tg3_flag(tp, 5701_DMA_BUG))
8010 would_hit_hwbug = 1;
8011
8012 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8013 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8014 mss, vlan)) {
8015 would_hit_hwbug = 1;
8016 } else if (skb_shinfo(skb)->nr_frags > 0) {
8017 u32 tmp_mss = mss;
8018
8019 if (!tg3_flag(tp, HW_TSO_1) &&
8020 !tg3_flag(tp, HW_TSO_2) &&
8021 !tg3_flag(tp, HW_TSO_3))
8022 tmp_mss = 0;
8023
8024 /* Now loop through additional data
8025 * fragments, and queue them.
8026 */
8027 last = skb_shinfo(skb)->nr_frags - 1;
8028 for (i = 0; i <= last; i++) {
8029 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8030
8031 len = skb_frag_size(frag);
8032 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8033 len, DMA_TO_DEVICE);
8034
8035 tnapi->tx_buffers[entry].skb = NULL;
8036 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8037 mapping);
8038 if (dma_mapping_error(&tp->pdev->dev, mapping))
8039 goto dma_error;
8040
8041 if (!budget ||
8042 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8043 len, base_flags |
8044 ((i == last) ? TXD_FLAG_END : 0),
8045 tmp_mss, vlan)) {
8046 would_hit_hwbug = 1;
8047 break;
8048 }
8049 }
8050 }
8051
8052 if (would_hit_hwbug) {
8053 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8054
8055 if (mss) {
8056 /* If it's a TSO packet, do GSO instead of
8057 * allocating and copying to a large linear SKB
8058 */
8059 if (ip_tot_len) {
8060 iph->check = ip_csum;
8061 iph->tot_len = ip_tot_len;
8062 }
8063 tcph->check = tcp_csum;
8064 return tg3_tso_bug(tp, skb);
8065 }
8066
8067 /* If the workaround fails due to memory/mapping
8068 * failure, silently drop this packet.
8069 */
8070 entry = tnapi->tx_prod;
8071 budget = tg3_tx_avail(tnapi);
8072 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8073 base_flags, mss, vlan))
8074 goto drop_nofree;
8075 }
8076
8077 skb_tx_timestamp(skb);
8078 netdev_tx_sent_queue(txq, skb->len);
8079
8080 /* Sync BD data before updating mailbox */
8081 wmb();
8082
8083 /* Packets are ready; update the Tx producer idx locally and on the card. */
8084 tw32_tx_mbox(tnapi->prodmbox, entry);
8085
8086 tnapi->tx_prod = entry;
8087 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8088 netif_tx_stop_queue(txq);
8089
8090 /* netif_tx_stop_queue() must be done before checking
8091 * tx index in tg3_tx_avail() below, because in
8092 * tg3_tx(), we update tx index before checking for
8093 * netif_tx_queue_stopped().
8094 */
8095 smp_mb();
8096 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8097 netif_tx_wake_queue(txq);
8098 }
8099
8100 mmiowb();
8101 return NETDEV_TX_OK;
8102
8103 dma_error:
8104 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8105 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8106 drop:
8107 dev_kfree_skb_any(skb);
8108 drop_nofree:
8109 tp->tx_dropped++;
8110 return NETDEV_TX_OK;
8111 }
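/* Note on the return codes above: a DMA mapping failure frees the skb
 * and still returns NETDEV_TX_OK -- the packet is counted in tx_dropped
 * and never retried.  NETDEV_TX_BUSY is reserved for the ring-full
 * race, where the core requeues the skb once the queue is woken.
 */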
8112
8113 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8114 {
8115 if (enable) {
8116 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8117 MAC_MODE_PORT_MODE_MASK);
8118
8119 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8120
8121 if (!tg3_flag(tp, 5705_PLUS))
8122 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8123
8124 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8125 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8126 else
8127 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8128 } else {
8129 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8130
8131 if (tg3_flag(tp, 5705_PLUS) ||
8132 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8133 tg3_asic_rev(tp) == ASIC_REV_5700)
8134 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8135 }
8136
8137 tw32(MAC_MODE, tp->mac_mode);
8138 udelay(40);
8139 }
8140
8141 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8142 {
8143 u32 val, bmcr, mac_mode, ptest = 0;
8144
8145 tg3_phy_toggle_apd(tp, false);
8146 tg3_phy_toggle_automdix(tp, false);
8147
8148 if (extlpbk && tg3_phy_set_extloopbk(tp))
8149 return -EIO;
8150
8151 bmcr = BMCR_FULLDPLX;
8152 switch (speed) {
8153 case SPEED_10:
8154 break;
8155 case SPEED_100:
8156 bmcr |= BMCR_SPEED100;
8157 break;
8158 case SPEED_1000:
8159 default:
8160 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8161 speed = SPEED_100;
8162 bmcr |= BMCR_SPEED100;
8163 } else {
8164 speed = SPEED_1000;
8165 bmcr |= BMCR_SPEED1000;
8166 }
8167 }
8168
8169 if (extlpbk) {
8170 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8171 tg3_readphy(tp, MII_CTRL1000, &val);
8172 val |= CTL1000_AS_MASTER |
8173 CTL1000_ENABLE_MASTER;
8174 tg3_writephy(tp, MII_CTRL1000, val);
8175 } else {
8176 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8177 MII_TG3_FET_PTEST_TRIM_2;
8178 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8179 }
8180 } else
8181 bmcr |= BMCR_LOOPBACK;
8182
8183 tg3_writephy(tp, MII_BMCR, bmcr);
8184
8185 /* The write needs to be flushed for the FETs */
8186 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8187 tg3_readphy(tp, MII_BMCR, &bmcr);
8188
8189 udelay(40);
8190
8191 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8192 tg3_asic_rev(tp) == ASIC_REV_5785) {
8193 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8194 MII_TG3_FET_PTEST_FRC_TX_LINK |
8195 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8196
8197 /* The write needs to be flushed for the AC131 */
8198 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8199 }
8200
8201 /* Reset to prevent losing 1st rx packet intermittently */
8202 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8203 tg3_flag(tp, 5780_CLASS)) {
8204 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8205 udelay(10);
8206 tw32_f(MAC_RX_MODE, tp->rx_mode);
8207 }
8208
8209 mac_mode = tp->mac_mode &
8210 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8211 if (speed == SPEED_1000)
8212 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8213 else
8214 mac_mode |= MAC_MODE_PORT_MODE_MII;
8215
8216 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8217 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8218
8219 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8220 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8221 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8222 mac_mode |= MAC_MODE_LINK_POLARITY;
8223
8224 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8225 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8226 }
8227
8228 tw32(MAC_MODE, mac_mode);
8229 udelay(40);
8230
8231 return 0;
8232 }
8233
8234 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8235 {
8236 struct tg3 *tp = netdev_priv(dev);
8237
8238 if (features & NETIF_F_LOOPBACK) {
8239 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8240 return;
8241
8242 spin_lock_bh(&tp->lock);
8243 tg3_mac_loopback(tp, true);
8244 netif_carrier_on(tp->dev);
8245 spin_unlock_bh(&tp->lock);
8246 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8247 } else {
8248 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8249 return;
8250
8251 spin_lock_bh(&tp->lock);
8252 tg3_mac_loopback(tp, false);
8253 /* Force link status check */
8254 tg3_setup_phy(tp, true);
8255 spin_unlock_bh(&tp->lock);
8256 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8257 }
8258 }
8259
8260 static netdev_features_t tg3_fix_features(struct net_device *dev,
8261 netdev_features_t features)
8262 {
8263 struct tg3 *tp = netdev_priv(dev);
8264
8265 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8266 features &= ~NETIF_F_ALL_TSO;
8267
8268 return features;
8269 }
8270
8271 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8272 {
8273 netdev_features_t changed = dev->features ^ features;
8274
8275 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8276 tg3_set_loopback(dev, features);
8277
8278 return 0;
8279 }
8280
8281 static void tg3_rx_prodring_free(struct tg3 *tp,
8282 struct tg3_rx_prodring_set *tpr)
8283 {
8284 int i;
8285
8286 if (tpr != &tp->napi[0].prodring) {
8287 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8288 i = (i + 1) & tp->rx_std_ring_mask)
8289 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8290 tp->rx_pkt_map_sz);
8291
8292 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8293 for (i = tpr->rx_jmb_cons_idx;
8294 i != tpr->rx_jmb_prod_idx;
8295 i = (i + 1) & tp->rx_jmb_ring_mask) {
8296 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8297 TG3_RX_JMB_MAP_SZ);
8298 }
8299 }
8300
8301 return;
8302 }
8303
8304 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8305 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8306 tp->rx_pkt_map_sz);
8307
8308 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8309 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8310 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8311 TG3_RX_JMB_MAP_SZ);
8312 }
8313 }
8314
8315 /* Initialize rx rings for packet processing.
8316 *
8317 * The chip has been shut down and the driver detached from
8318 * the networking stack, so no interrupts or new tx packets will
8319 * end up in the driver. tp->{tx,}lock are held and thus
8320 * we may not sleep.
8321 */
8322 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8323 struct tg3_rx_prodring_set *tpr)
8324 {
8325 u32 i, rx_pkt_dma_sz;
8326
8327 tpr->rx_std_cons_idx = 0;
8328 tpr->rx_std_prod_idx = 0;
8329 tpr->rx_jmb_cons_idx = 0;
8330 tpr->rx_jmb_prod_idx = 0;
8331
8332 if (tpr != &tp->napi[0].prodring) {
8333 memset(&tpr->rx_std_buffers[0], 0,
8334 TG3_RX_STD_BUFF_RING_SIZE(tp));
8335 if (tpr->rx_jmb_buffers)
8336 memset(&tpr->rx_jmb_buffers[0], 0,
8337 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8338 goto done;
8339 }
8340
8341 /* Zero out all descriptors. */
8342 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8343
8344 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8345 if (tg3_flag(tp, 5780_CLASS) &&
8346 tp->dev->mtu > ETH_DATA_LEN)
8347 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8348 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8349
8350 /* Initialize invariants of the rings; we only set this
8351 * stuff once. This works because the card does not
8352 * write into the rx buffer posting rings.
8353 */
8354 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8355 struct tg3_rx_buffer_desc *rxd;
8356
8357 rxd = &tpr->rx_std[i];
8358 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8359 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8360 rxd->opaque = (RXD_OPAQUE_RING_STD |
8361 (i << RXD_OPAQUE_INDEX_SHIFT));
8362 }
8363
8364 /* Now allocate fresh SKBs for each rx ring. */
8365 for (i = 0; i < tp->rx_pending; i++) {
8366 unsigned int frag_size;
8367
8368 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8369 &frag_size) < 0) {
8370 netdev_warn(tp->dev,
8371 "Using a smaller RX standard ring. Only "
8372 "%d out of %d buffers were allocated "
8373 "successfully\n", i, tp->rx_pending);
8374 if (i == 0)
8375 goto initfail;
8376 tp->rx_pending = i;
8377 break;
8378 }
8379 }
8380
8381 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8382 goto done;
8383
8384 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8385
8386 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8387 goto done;
8388
8389 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8390 struct tg3_rx_buffer_desc *rxd;
8391
8392 rxd = &tpr->rx_jmb[i].std;
8393 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8394 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8395 RXD_FLAG_JUMBO;
8396 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8397 (i << RXD_OPAQUE_INDEX_SHIFT));
8398 }
8399
8400 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8401 unsigned int frag_size;
8402
8403 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8404 &frag_size) < 0) {
8405 netdev_warn(tp->dev,
8406 "Using a smaller RX jumbo ring. Only %d "
8407 "out of %d buffers were allocated "
8408 "successfully\n", i, tp->rx_jumbo_pending);
8409 if (i == 0)
8410 goto initfail;
8411 tp->rx_jumbo_pending = i;
8412 break;
8413 }
8414 }
8415
8416 done:
8417 return 0;
8418
8419 initfail:
8420 tg3_rx_prodring_free(tp, tpr);
8421 return -ENOMEM;
8422 }
8423
8424 static void tg3_rx_prodring_fini(struct tg3 *tp,
8425 struct tg3_rx_prodring_set *tpr)
8426 {
8427 kfree(tpr->rx_std_buffers);
8428 tpr->rx_std_buffers = NULL;
8429 kfree(tpr->rx_jmb_buffers);
8430 tpr->rx_jmb_buffers = NULL;
8431 if (tpr->rx_std) {
8432 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8433 tpr->rx_std, tpr->rx_std_mapping);
8434 tpr->rx_std = NULL;
8435 }
8436 if (tpr->rx_jmb) {
8437 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8438 tpr->rx_jmb, tpr->rx_jmb_mapping);
8439 tpr->rx_jmb = NULL;
8440 }
8441 }
8442
8443 static int tg3_rx_prodring_init(struct tg3 *tp,
8444 struct tg3_rx_prodring_set *tpr)
8445 {
8446 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8447 GFP_KERNEL);
8448 if (!tpr->rx_std_buffers)
8449 return -ENOMEM;
8450
8451 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8452 TG3_RX_STD_RING_BYTES(tp),
8453 &tpr->rx_std_mapping,
8454 GFP_KERNEL);
8455 if (!tpr->rx_std)
8456 goto err_out;
8457
8458 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8459 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8460 GFP_KERNEL);
8461 if (!tpr->rx_jmb_buffers)
8462 goto err_out;
8463
8464 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8465 TG3_RX_JMB_RING_BYTES(tp),
8466 &tpr->rx_jmb_mapping,
8467 GFP_KERNEL);
8468 if (!tpr->rx_jmb)
8469 goto err_out;
8470 }
8471
8472 return 0;
8473
8474 err_out:
8475 tg3_rx_prodring_fini(tp, tpr);
8476 return -ENOMEM;
8477 }
8478
8479 /* Free up pending packets in all rx/tx rings.
8480 *
8481 * The chip has been shut down and the driver detached from
8482 * the networking stack, so no interrupts or new tx packets will
8483 * end up in the driver. tp->{tx,}lock is not held and we are not
8484 * in an interrupt context and thus may sleep.
8485 */
8486 static void tg3_free_rings(struct tg3 *tp)
8487 {
8488 int i, j;
8489
8490 for (j = 0; j < tp->irq_cnt; j++) {
8491 struct tg3_napi *tnapi = &tp->napi[j];
8492
8493 tg3_rx_prodring_free(tp, &tnapi->prodring);
8494
8495 if (!tnapi->tx_buffers)
8496 continue;
8497
8498 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8499 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8500
8501 if (!skb)
8502 continue;
8503
8504 tg3_tx_skb_unmap(tnapi, i,
8505 skb_shinfo(skb)->nr_frags - 1);
8506
8507 dev_kfree_skb_any(skb);
8508 }
8509 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8510 }
8511 }
8512
8513 /* Initialize tx/rx rings for packet processing.
8514 *
8515 * The chip has been shut down and the driver detached from
8516 * the networking stack, so no interrupts or new tx packets will
8517 * end up in the driver. tp->{tx,}lock are held and thus
8518 * we may not sleep.
8519 */
8520 static int tg3_init_rings(struct tg3 *tp)
8521 {
8522 int i;
8523
8524 /* Free up all the SKBs. */
8525 tg3_free_rings(tp);
8526
8527 for (i = 0; i < tp->irq_cnt; i++) {
8528 struct tg3_napi *tnapi = &tp->napi[i];
8529
8530 tnapi->last_tag = 0;
8531 tnapi->last_irq_tag = 0;
8532 tnapi->hw_status->status = 0;
8533 tnapi->hw_status->status_tag = 0;
8534 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8535
8536 tnapi->tx_prod = 0;
8537 tnapi->tx_cons = 0;
8538 if (tnapi->tx_ring)
8539 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8540
8541 tnapi->rx_rcb_ptr = 0;
8542 if (tnapi->rx_rcb)
8543 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8544
8545 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8546 tg3_free_rings(tp);
8547 return -ENOMEM;
8548 }
8549 }
8550
8551 return 0;
8552 }
8553
8554 static void tg3_mem_tx_release(struct tg3 *tp)
8555 {
8556 int i;
8557
8558 for (i = 0; i < tp->irq_max; i++) {
8559 struct tg3_napi *tnapi = &tp->napi[i];
8560
8561 if (tnapi->tx_ring) {
8562 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8563 tnapi->tx_ring, tnapi->tx_desc_mapping);
8564 tnapi->tx_ring = NULL;
8565 }
8566
8567 kfree(tnapi->tx_buffers);
8568 tnapi->tx_buffers = NULL;
8569 }
8570 }
8571
8572 static int tg3_mem_tx_acquire(struct tg3 *tp)
8573 {
8574 int i;
8575 struct tg3_napi *tnapi = &tp->napi[0];
8576
8577 /* If multivector TSS is enabled, vector 0 does not handle
8578 * tx interrupts. Don't allocate any resources for it.
8579 */
8580 if (tg3_flag(tp, ENABLE_TSS))
8581 tnapi++;
8582
8583 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8584 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8585 TG3_TX_RING_SIZE, GFP_KERNEL);
8586 if (!tnapi->tx_buffers)
8587 goto err_out;
8588
8589 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8590 TG3_TX_RING_BYTES,
8591 &tnapi->tx_desc_mapping,
8592 GFP_KERNEL);
8593 if (!tnapi->tx_ring)
8594 goto err_out;
8595 }
8596
8597 return 0;
8598
8599 err_out:
8600 tg3_mem_tx_release(tp);
8601 return -ENOMEM;
8602 }
8603
8604 static void tg3_mem_rx_release(struct tg3 *tp)
8605 {
8606 int i;
8607
8608 for (i = 0; i < tp->irq_max; i++) {
8609 struct tg3_napi *tnapi = &tp->napi[i];
8610
8611 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8612
8613 if (!tnapi->rx_rcb)
8614 continue;
8615
8616 dma_free_coherent(&tp->pdev->dev,
8617 TG3_RX_RCB_RING_BYTES(tp),
8618 tnapi->rx_rcb,
8619 tnapi->rx_rcb_mapping);
8620 tnapi->rx_rcb = NULL;
8621 }
8622 }
8623
8624 static int tg3_mem_rx_acquire(struct tg3 *tp)
8625 {
8626 unsigned int i, limit;
8627
8628 limit = tp->rxq_cnt;
8629
8630 /* If RSS is enabled, we need a (dummy) producer ring
8631 * set on vector zero. This is the true hw prodring.
8632 */
8633 if (tg3_flag(tp, ENABLE_RSS))
8634 limit++;
8635
8636 for (i = 0; i < limit; i++) {
8637 struct tg3_napi *tnapi = &tp->napi[i];
8638
8639 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8640 goto err_out;
8641
8642 /* If multivector RSS is enabled, vector 0
8643 * does not handle rx or tx interrupts.
8644 * Don't allocate any resources for it.
8645 */
8646 if (!i && tg3_flag(tp, ENABLE_RSS))
8647 continue;
8648
8649 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8650 TG3_RX_RCB_RING_BYTES(tp),
8651 &tnapi->rx_rcb_mapping,
8652 GFP_KERNEL);
8653 if (!tnapi->rx_rcb)
8654 goto err_out;
8655 }
8656
8657 return 0;
8658
8659 err_out:
8660 tg3_mem_rx_release(tp);
8661 return -ENOMEM;
8662 }
8663
8664 /*
8665 * Must not be invoked with interrupt sources disabled and
8666 * the hardware shut down.
8667 */
8668 static void tg3_free_consistent(struct tg3 *tp)
8669 {
8670 int i;
8671
8672 for (i = 0; i < tp->irq_cnt; i++) {
8673 struct tg3_napi *tnapi = &tp->napi[i];
8674
8675 if (tnapi->hw_status) {
8676 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8677 tnapi->hw_status,
8678 tnapi->status_mapping);
8679 tnapi->hw_status = NULL;
8680 }
8681 }
8682
8683 tg3_mem_rx_release(tp);
8684 tg3_mem_tx_release(tp);
8685
8686 if (tp->hw_stats) {
8687 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8688 tp->hw_stats, tp->stats_mapping);
8689 tp->hw_stats = NULL;
8690 }
8691 }
8692
8693 /*
8694 * Must not be invoked with interrupt sources disabled and
8695 * the hardware shut down. Can sleep.
8696 */
8697 static int tg3_alloc_consistent(struct tg3 *tp)
8698 {
8699 int i;
8700
8701 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8702 sizeof(struct tg3_hw_stats),
8703 &tp->stats_mapping, GFP_KERNEL);
8704 if (!tp->hw_stats)
8705 goto err_out;
8706
8707 for (i = 0; i < tp->irq_cnt; i++) {
8708 struct tg3_napi *tnapi = &tp->napi[i];
8709 struct tg3_hw_status *sblk;
8710
8711 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8712 TG3_HW_STATUS_SIZE,
8713 &tnapi->status_mapping,
8714 GFP_KERNEL);
8715 if (!tnapi->hw_status)
8716 goto err_out;
8717
8718 sblk = tnapi->hw_status;
8719
8720 if (tg3_flag(tp, ENABLE_RSS)) {
8721 u16 *prodptr = NULL;
8722
8723 /*
8724 * When RSS is enabled, the status block format changes
8725 * slightly. The "rx_jumbo_consumer", "reserved",
8726 * and "rx_mini_consumer" members get mapped to the
8727 * other three rx return ring producer indexes.
8728 */
8729 switch (i) {
8730 case 1:
8731 prodptr = &sblk->idx[0].rx_producer;
8732 break;
8733 case 2:
8734 prodptr = &sblk->rx_jumbo_consumer;
8735 break;
8736 case 3:
8737 prodptr = &sblk->reserved;
8738 break;
8739 case 4:
8740 prodptr = &sblk->rx_mini_consumer;
8741 break;
8742 }
8743 tnapi->rx_rcb_prod_idx = prodptr;
8744 } else {
8745 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8746 }
8747 }
8748
8749 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8750 goto err_out;
8751
8752 return 0;
8753
8754 err_out:
8755 tg3_free_consistent(tp);
8756 return -ENOMEM;
8757 }
8758
8759 #define MAX_WAIT_CNT 1000
8760
8761 /* To stop a block, clear the enable bit and poll till it
8762 * clears. tp->lock is held.
8763 */
8764 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8765 {
8766 unsigned int i;
8767 u32 val;
8768
8769 if (tg3_flag(tp, 5705_PLUS)) {
8770 switch (ofs) {
8771 case RCVLSC_MODE:
8772 case DMAC_MODE:
8773 case MBFREE_MODE:
8774 case BUFMGR_MODE:
8775 case MEMARB_MODE:
8776 /* We can't enable/disable these bits of the
8777 * 5705/5750, so just say success.
8778 */
8779 return 0;
8780
8781 default:
8782 break;
8783 }
8784 }
8785
8786 val = tr32(ofs);
8787 val &= ~enable_bit;
8788 tw32_f(ofs, val);
8789
8790 for (i = 0; i < MAX_WAIT_CNT; i++) {
8791 if (pci_channel_offline(tp->pdev)) {
8792 dev_err(&tp->pdev->dev,
8793 "tg3_stop_block device offline, "
8794 "ofs=%lx enable_bit=%x\n",
8795 ofs, enable_bit);
8796 return -ENODEV;
8797 }
8798
8799 udelay(100);
8800 val = tr32(ofs);
8801 if ((val & enable_bit) == 0)
8802 break;
8803 }
8804
8805 if (i == MAX_WAIT_CNT && !silent) {
8806 dev_err(&tp->pdev->dev,
8807 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8808 ofs, enable_bit);
8809 return -ENODEV;
8810 }
8811
8812 return 0;
8813 }
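/* Worst case, the poll above spins for MAX_WAIT_CNT * 100us = 100ms
 * before declaring the block stuck (illustrative arithmetic only).
 */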
8814
8815 /* tp->lock is held. */
8816 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8817 {
8818 int i, err;
8819
8820 tg3_disable_ints(tp);
8821
8822 if (pci_channel_offline(tp->pdev)) {
8823 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8824 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8825 err = -ENODEV;
8826 goto err_no_dev;
8827 }
8828
8829 tp->rx_mode &= ~RX_MODE_ENABLE;
8830 tw32_f(MAC_RX_MODE, tp->rx_mode);
8831 udelay(10);
8832
8833 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8834 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8835 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8836 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8837 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8838 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8839
8840 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8841 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8842 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8843 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8844 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8845 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8846 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8847
8848 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8849 tw32_f(MAC_MODE, tp->mac_mode);
8850 udelay(40);
8851
8852 tp->tx_mode &= ~TX_MODE_ENABLE;
8853 tw32_f(MAC_TX_MODE, tp->tx_mode);
8854
8855 for (i = 0; i < MAX_WAIT_CNT; i++) {
8856 udelay(100);
8857 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8858 break;
8859 }
8860 if (i >= MAX_WAIT_CNT) {
8861 dev_err(&tp->pdev->dev,
8862 "%s timed out, TX_MODE_ENABLE will not clear "
8863 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8864 err |= -ENODEV;
8865 }
8866
8867 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8868 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8869 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8870
8871 tw32(FTQ_RESET, 0xffffffff);
8872 tw32(FTQ_RESET, 0x00000000);
8873
8874 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8875 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8876
8877 err_no_dev:
8878 for (i = 0; i < tp->irq_cnt; i++) {
8879 struct tg3_napi *tnapi = &tp->napi[i];
8880 if (tnapi->hw_status)
8881 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8882 }
8883
8884 return err;
8885 }
8886
8887 /* Save PCI command register before chip reset */
8888 static void tg3_save_pci_state(struct tg3 *tp)
8889 {
8890 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8891 }
8892
8893 /* Restore PCI state after chip reset */
8894 static void tg3_restore_pci_state(struct tg3 *tp)
8895 {
8896 u32 val;
8897
8898 /* Re-enable indirect register accesses. */
8899 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8900 tp->misc_host_ctrl);
8901
8902 /* Set MAX PCI retry to zero. */
8903 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8904 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8905 tg3_flag(tp, PCIX_MODE))
8906 val |= PCISTATE_RETRY_SAME_DMA;
8907 /* Allow reads and writes to the APE register and memory space. */
8908 if (tg3_flag(tp, ENABLE_APE))
8909 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8910 PCISTATE_ALLOW_APE_SHMEM_WR |
8911 PCISTATE_ALLOW_APE_PSPACE_WR;
8912 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8913
8914 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8915
8916 if (!tg3_flag(tp, PCI_EXPRESS)) {
8917 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8918 tp->pci_cacheline_sz);
8919 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8920 tp->pci_lat_timer);
8921 }
8922
8923 /* Make sure PCI-X relaxed ordering bit is clear. */
8924 if (tg3_flag(tp, PCIX_MODE)) {
8925 u16 pcix_cmd;
8926
8927 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8928 &pcix_cmd);
8929 pcix_cmd &= ~PCI_X_CMD_ERO;
8930 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8931 pcix_cmd);
8932 }
8933
8934 if (tg3_flag(tp, 5780_CLASS)) {
8935
8936 /* Chip reset on 5780 will reset MSI enable bit,
8937 * so we need to restore it.
8938 */
8939 if (tg3_flag(tp, USING_MSI)) {
8940 u16 ctrl;
8941
8942 pci_read_config_word(tp->pdev,
8943 tp->msi_cap + PCI_MSI_FLAGS,
8944 &ctrl);
8945 pci_write_config_word(tp->pdev,
8946 tp->msi_cap + PCI_MSI_FLAGS,
8947 ctrl | PCI_MSI_FLAGS_ENABLE);
8948 val = tr32(MSGINT_MODE);
8949 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8950 }
8951 }
8952 }
8953
8954 static void tg3_override_clk(struct tg3 *tp)
8955 {
8956 u32 val;
8957
8958 switch (tg3_asic_rev(tp)) {
8959 case ASIC_REV_5717:
8960 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8961 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8962 TG3_CPMU_MAC_ORIDE_ENABLE);
8963 break;
8964
8965 case ASIC_REV_5719:
8966 case ASIC_REV_5720:
8967 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8968 break;
8969
8970 default:
8971 return;
8972 }
8973 }
8974
8975 static void tg3_restore_clk(struct tg3 *tp)
8976 {
8977 u32 val;
8978
8979 switch (tg3_asic_rev(tp)) {
8980 case ASIC_REV_5717:
8981 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8982 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
8983 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
8984 break;
8985
8986 case ASIC_REV_5719:
8987 case ASIC_REV_5720:
8988 val = tr32(TG3_CPMU_CLCK_ORIDE);
8989 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8990 break;
8991
8992 default:
8993 return;
8994 }
8995 }
8996
8997 /* tp->lock is held. */
8998 static int tg3_chip_reset(struct tg3 *tp)
8999 {
9000 u32 val;
9001 void (*write_op)(struct tg3 *, u32, u32);
9002 int i, err;
9003
9004 if (!pci_device_is_present(tp->pdev))
9005 return -ENODEV;
9006
9007 tg3_nvram_lock(tp);
9008
9009 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9010
9011 /* No matching tg3_nvram_unlock() after this because
9012 * chip reset below will undo the nvram lock.
9013 */
9014 tp->nvram_lock_cnt = 0;
9015
9016 /* GRC_MISC_CFG core clock reset will clear the memory
9017 * enable bit in PCI register 4 and the MSI enable bit
9018 * on some chips, so we save relevant registers here.
9019 */
9020 tg3_save_pci_state(tp);
9021
9022 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9023 tg3_flag(tp, 5755_PLUS))
9024 tw32(GRC_FASTBOOT_PC, 0);
9025
9026 /*
9027 * We must avoid the readl() that normally takes place.
9028 * It locks up machines, causes machine checks, and does other
9029 * fun things. So, temporarily disable the 5701
9030 * hardware workaround, while we do the reset.
9031 */
9032 write_op = tp->write32;
9033 if (write_op == tg3_write_flush_reg32)
9034 tp->write32 = tg3_write32;
9035
9036 /* Prevent the irq handler from reading or writing PCI registers
9037 * during chip reset when the memory enable bit in the PCI command
9038 * register may be cleared. The chip does not generate interrupts
9039 * at this time, but the irq handler may still be called due to irq
9040 * sharing or irqpoll.
9041 */
9042 tg3_flag_set(tp, CHIP_RESETTING);
9043 for (i = 0; i < tp->irq_cnt; i++) {
9044 struct tg3_napi *tnapi = &tp->napi[i];
9045 if (tnapi->hw_status) {
9046 tnapi->hw_status->status = 0;
9047 tnapi->hw_status->status_tag = 0;
9048 }
9049 tnapi->last_tag = 0;
9050 tnapi->last_irq_tag = 0;
9051 }
9052 smp_mb();
9053
9054 for (i = 0; i < tp->irq_cnt; i++)
9055 synchronize_irq(tp->napi[i].irq_vec);
9056
9057 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9058 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9059 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9060 }
9061
9062 /* do the reset */
9063 val = GRC_MISC_CFG_CORECLK_RESET;
9064
9065 if (tg3_flag(tp, PCI_EXPRESS)) {
9066 /* Force PCIe 1.0a mode */
9067 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9068 !tg3_flag(tp, 57765_PLUS) &&
9069 tr32(TG3_PCIE_PHY_TSTCTL) ==
9070 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9071 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9072
9073 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9074 tw32(GRC_MISC_CFG, (1 << 29));
9075 val |= (1 << 29);
9076 }
9077 }
9078
9079 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9080 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9081 tw32(GRC_VCPU_EXT_CTRL,
9082 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9083 }
9084
9085 /* Set the clock to the highest frequency to avoid timeouts. With link
9086 * aware mode, the clock speed could be slow and bootcode does not
9087 * complete within the expected time. Override the clock to allow the
9088 * bootcode to finish sooner and then restore it.
9089 */
9090 tg3_override_clk(tp);
9091
9092 /* Manage gphy power for all CPMU-absent PCIe devices. */
9093 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9094 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9095
9096 tw32(GRC_MISC_CFG, val);
9097
9098 /* restore 5701 hardware bug workaround write method */
9099 tp->write32 = write_op;
9100
9101 /* Unfortunately, we have to delay before the PCI read back.
9102 * Some 575X chips even will not respond to a PCI cfg access
9103 * when the reset command is given to the chip.
9104 *
9105 * How do these hardware designers expect things to work
9106 * properly if the PCI write is posted for a long period
9107 * of time? It is always necessary to have some method by
9108 * which a register read back can occur to push the write
9109 * out which does the reset.
9110 *
9111 * For most tg3 variants the trick below was working.
9112 * Ho hum...
9113 */
9114 udelay(120);
9115
9116 /* Flush PCI posted writes. The normal MMIO registers
9117 * are inaccessible at this time so this is the only
9118 * way to do this reliably (actually, this is no longer
9119 * the case, see above). I tried to use indirect
9120 * register read/write but this upset some 5701 variants.
9121 */
9122 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9123
9124 udelay(120);
9125
9126 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9127 u16 val16;
9128
9129 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9130 int j;
9131 u32 cfg_val;
9132
9133 /* Wait for link training to complete. */
9134 for (j = 0; j < 5000; j++)
9135 udelay(100);
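			/* Illustrative arithmetic: 5000 * 100us is a
			 * fixed ~500ms wait for link training to settle.
			 */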
9136
9137 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9138 pci_write_config_dword(tp->pdev, 0xc4,
9139 cfg_val | (1 << 15));
9140 }
9141
9142 /* Clear the "no snoop" and "relaxed ordering" bits. */
9143 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9144 /*
9145 * Older PCIe devices only support the 128 byte
9146 * MPS setting. Enforce the restriction.
9147 */
9148 if (!tg3_flag(tp, CPMU_PRESENT))
9149 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9150 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9151
9152 /* Clear error status */
9153 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9154 PCI_EXP_DEVSTA_CED |
9155 PCI_EXP_DEVSTA_NFED |
9156 PCI_EXP_DEVSTA_FED |
9157 PCI_EXP_DEVSTA_URD);
9158 }
9159
9160 tg3_restore_pci_state(tp);
9161
9162 tg3_flag_clear(tp, CHIP_RESETTING);
9163 tg3_flag_clear(tp, ERROR_PROCESSED);
9164
9165 val = 0;
9166 if (tg3_flag(tp, 5780_CLASS))
9167 val = tr32(MEMARB_MODE);
9168 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9169
9170 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9171 tg3_stop_fw(tp);
9172 tw32(0x5000, 0x400);
9173 }
9174
9175 if (tg3_flag(tp, IS_SSB_CORE)) {
9176 /*
9177 * BCM4785: In order to avoid repercussions from using
9178 * potentially defective internal ROM, stop the Rx RISC CPU,
9179 * which is not required.
9180 */
9181 tg3_stop_fw(tp);
9182 tg3_halt_cpu(tp, RX_CPU_BASE);
9183 }
9184
9185 err = tg3_poll_fw(tp);
9186 if (err)
9187 return err;
9188
9189 tw32(GRC_MODE, tp->grc_mode);
9190
9191 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9192 val = tr32(0xc4);
9193
9194 tw32(0xc4, val | (1 << 15));
9195 }
9196
9197 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9198 tg3_asic_rev(tp) == ASIC_REV_5705) {
9199 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9200 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9201 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9202 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9203 }
9204
9205 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9206 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9207 val = tp->mac_mode;
9208 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9209 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9210 val = tp->mac_mode;
9211 } else
9212 val = 0;
9213
9214 tw32_f(MAC_MODE, val);
9215 udelay(40);
9216
9217 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9218
9219 tg3_mdio_start(tp);
9220
9221 if (tg3_flag(tp, PCI_EXPRESS) &&
9222 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9223 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9224 !tg3_flag(tp, 57765_PLUS)) {
9225 val = tr32(0x7c00);
9226
9227 tw32(0x7c00, val | (1 << 25));
9228 }
9229
9230 tg3_restore_clk(tp);
9231
9232 /* Reprobe ASF enable state. */
9233 tg3_flag_clear(tp, ENABLE_ASF);
9234 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9235 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9236
9237 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9238 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9239 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9240 u32 nic_cfg;
9241
9242 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9243 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9244 tg3_flag_set(tp, ENABLE_ASF);
9245 tp->last_event_jiffies = jiffies;
9246 if (tg3_flag(tp, 5750_PLUS))
9247 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9248
9249 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9250 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9251 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9252 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9253 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9254 }
9255 }
9256
9257 return 0;
9258 }
9259
9260 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9261 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9262 static void __tg3_set_rx_mode(struct net_device *);
9263
9264 /* tp->lock is held. */
9265 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9266 {
9267 int err;
9268
9269 tg3_stop_fw(tp);
9270
9271 tg3_write_sig_pre_reset(tp, kind);
9272
9273 tg3_abort_hw(tp, silent);
9274 err = tg3_chip_reset(tp);
9275
9276 __tg3_set_mac_addr(tp, false);
9277
9278 tg3_write_sig_legacy(tp, kind);
9279 tg3_write_sig_post_reset(tp, kind);
9280
9281 if (tp->hw_stats) {
9282 /* Save the stats across chip resets... */
9283 tg3_get_nstats(tp, &tp->net_stats_prev);
9284 tg3_get_estats(tp, &tp->estats_prev);
9285
9286 /* And make sure the next sample is new data */
9287 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9288 }
9289
9290 return err;
9291 }
9292
9293 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9294 {
9295 struct tg3 *tp = netdev_priv(dev);
9296 struct sockaddr *addr = p;
9297 int err = 0;
9298 bool skip_mac_1 = false;
9299
9300 if (!is_valid_ether_addr(addr->sa_data))
9301 return -EADDRNOTAVAIL;
9302
9303 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9304
9305 if (!netif_running(dev))
9306 return 0;
9307
9308 if (tg3_flag(tp, ENABLE_ASF)) {
9309 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9310
9311 addr0_high = tr32(MAC_ADDR_0_HIGH);
9312 addr0_low = tr32(MAC_ADDR_0_LOW);
9313 addr1_high = tr32(MAC_ADDR_1_HIGH);
9314 addr1_low = tr32(MAC_ADDR_1_LOW);
9315
9316 /* Skip MAC addr 1 if ASF is using it. */
9317 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9318 !(addr1_high == 0 && addr1_low == 0))
9319 skip_mac_1 = true;
9320 }
9321 spin_lock_bh(&tp->lock);
9322 __tg3_set_mac_addr(tp, skip_mac_1);
9323 __tg3_set_rx_mode(dev);
9324 spin_unlock_bh(&tp->lock);
9325
9326 return err;
9327 }
9328
9329 /* tp->lock is held. */
9330 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9331 dma_addr_t mapping, u32 maxlen_flags,
9332 u32 nic_addr)
9333 {
9334 tg3_write_mem(tp,
9335 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9336 ((u64) mapping >> 32));
9337 tg3_write_mem(tp,
9338 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9339 ((u64) mapping & 0xffffffff));
9340 tg3_write_mem(tp,
9341 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9342 maxlen_flags);
9343
9344 if (!tg3_flag(tp, 5705_PLUS))
9345 tg3_write_mem(tp,
9346 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9347 nic_addr);
9348 }
9349
9350
9351 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9352 {
9353 int i = 0;
9354
9355 if (!tg3_flag(tp, ENABLE_TSS)) {
9356 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9357 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9358 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9359 } else {
9360 tw32(HOSTCC_TXCOL_TICKS, 0);
9361 tw32(HOSTCC_TXMAX_FRAMES, 0);
9362 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9363
9364 for (; i < tp->txq_cnt; i++) {
9365 u32 reg;
9366
9367 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9368 tw32(reg, ec->tx_coalesce_usecs);
9369 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9370 tw32(reg, ec->tx_max_coalesced_frames);
9371 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9372 tw32(reg, ec->tx_max_coalesced_frames_irq);
9373 }
9374 }
9375
9376 for (; i < tp->irq_max - 1; i++) {
9377 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9378 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9379 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9380 }
9381 }
9382
9383 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9384 {
9385 int i = 0;
9386 u32 limit = tp->rxq_cnt;
9387
9388 if (!tg3_flag(tp, ENABLE_RSS)) {
9389 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9390 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9391 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9392 limit--;
9393 } else {
9394 tw32(HOSTCC_RXCOL_TICKS, 0);
9395 tw32(HOSTCC_RXMAX_FRAMES, 0);
9396 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9397 }
9398
9399 for (; i < limit; i++) {
9400 u32 reg;
9401
9402 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9403 tw32(reg, ec->rx_coalesce_usecs);
9404 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9405 tw32(reg, ec->rx_max_coalesced_frames);
9406 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9407 tw32(reg, ec->rx_max_coalesced_frames_irq);
9408 }
9409
9410 for (; i < tp->irq_max - 1; i++) {
9411 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9412 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9413 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9414 }
9415 }
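/* Illustrative: each additional host-coalescing vector owns a bank of
 * registers 0x18 bytes apart, hence the "+ i * 0x18" indexing in both
 * the tx and rx init helpers above.
 */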
9416
9417 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9418 {
9419 tg3_coal_tx_init(tp, ec);
9420 tg3_coal_rx_init(tp, ec);
9421
9422 if (!tg3_flag(tp, 5705_PLUS)) {
9423 u32 val = ec->stats_block_coalesce_usecs;
9424
9425 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9426 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9427
9428 if (!tp->link_up)
9429 val = 0;
9430
9431 tw32(HOSTCC_STAT_COAL_TICKS, val);
9432 }
9433 }
9434
9435 /* tp->lock is held. */
9436 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9437 {
9438 u32 txrcb, limit;
9439
9440 /* Disable all transmit rings but the first. */
9441 if (!tg3_flag(tp, 5705_PLUS))
9442 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9443 else if (tg3_flag(tp, 5717_PLUS))
9444 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9445 else if (tg3_flag(tp, 57765_CLASS) ||
9446 tg3_asic_rev(tp) == ASIC_REV_5762)
9447 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9448 else
9449 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9450
9451 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9452 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9453 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9454 BDINFO_FLAGS_DISABLED);
9455 }
9456
9457 /* tp->lock is held. */
9458 static void tg3_tx_rcbs_init(struct tg3 *tp)
9459 {
9460 int i = 0;
9461 u32 txrcb = NIC_SRAM_SEND_RCB;
9462
9463 if (tg3_flag(tp, ENABLE_TSS))
9464 i++;
9465
9466 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9467 struct tg3_napi *tnapi = &tp->napi[i];
9468
9469 if (!tnapi->tx_ring)
9470 continue;
9471
9472 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9473 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9474 NIC_SRAM_TX_BUFFER_DESC);
9475 }
9476 }
9477
9478 /* tp->lock is held. */
9479 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9480 {
9481 u32 rxrcb, limit;
9482
9483 /* Disable all receive return rings but the first. */
9484 if (tg3_flag(tp, 5717_PLUS))
9485 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9486 else if (!tg3_flag(tp, 5705_PLUS))
9487 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9488 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9489 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9490 tg3_flag(tp, 57765_CLASS))
9491 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9492 else
9493 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9494
9495 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9496 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9497 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9498 BDINFO_FLAGS_DISABLED);
9499 }
9500
9501 /* tp->lock is held. */
9502 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9503 {
9504 int i = 0;
9505 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9506
9507 if (tg3_flag(tp, ENABLE_RSS))
9508 i++;
9509
9510 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9511 struct tg3_napi *tnapi = &tp->napi[i];
9512
9513 if (!tnapi->rx_rcb)
9514 continue;
9515
9516 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9517 (tp->rx_ret_ring_mask + 1) <<
9518 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9519 }
9520 }
9521
9522 /* tp->lock is held. */
9523 static void tg3_rings_reset(struct tg3 *tp)
9524 {
9525 int i;
9526 u32 stblk;
9527 struct tg3_napi *tnapi = &tp->napi[0];
9528
9529 tg3_tx_rcbs_disable(tp);
9530
9531 tg3_rx_ret_rcbs_disable(tp);
9532
9533 /* Disable interrupts */
9534 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9535 tp->napi[0].chk_msi_cnt = 0;
9536 tp->napi[0].last_rx_cons = 0;
9537 tp->napi[0].last_tx_cons = 0;
9538
9539 /* Zero mailbox registers. */
9540 if (tg3_flag(tp, SUPPORT_MSIX)) {
9541 for (i = 1; i < tp->irq_max; i++) {
9542 tp->napi[i].tx_prod = 0;
9543 tp->napi[i].tx_cons = 0;
9544 if (tg3_flag(tp, ENABLE_TSS))
9545 tw32_mailbox(tp->napi[i].prodmbox, 0);
9546 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9547 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9548 tp->napi[i].chk_msi_cnt = 0;
9549 tp->napi[i].last_rx_cons = 0;
9550 tp->napi[i].last_tx_cons = 0;
9551 }
9552 if (!tg3_flag(tp, ENABLE_TSS))
9553 tw32_mailbox(tp->napi[0].prodmbox, 0);
9554 } else {
9555 tp->napi[0].tx_prod = 0;
9556 tp->napi[0].tx_cons = 0;
9557 tw32_mailbox(tp->napi[0].prodmbox, 0);
9558 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9559 }
9560
9561 /* Make sure the NIC-based send BD rings are disabled. */
9562 if (!tg3_flag(tp, 5705_PLUS)) {
9563 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9564 for (i = 0; i < 16; i++)
9565 tw32_tx_mbox(mbox + i * 8, 0);
9566 }
9567
9568 /* Clear status block in ram. */
9569 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9570
9571 /* Set status block DMA address */
9572 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9573 ((u64) tnapi->status_mapping >> 32));
9574 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9575 ((u64) tnapi->status_mapping & 0xffffffff));
9576
9577 stblk = HOSTCC_STATBLCK_RING1;
9578
9579 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9580 u64 mapping = (u64)tnapi->status_mapping;
9581 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9582 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9583 stblk += 8;
9584
9585 /* Clear status block in ram. */
9586 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9587 }
9588
9589 tg3_tx_rcbs_init(tp);
9590 tg3_rx_ret_rcbs_init(tp);
9591 }
9592
9593 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9594 {
9595 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9596
9597 if (!tg3_flag(tp, 5750_PLUS) ||
9598 tg3_flag(tp, 5780_CLASS) ||
9599 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9600 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9601 tg3_flag(tp, 57765_PLUS))
9602 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9603 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9604 tg3_asic_rev(tp) == ASIC_REV_5787)
9605 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9606 else
9607 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9608
9609 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9610 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9611
9612 val = min(nic_rep_thresh, host_rep_thresh);
9613 tw32(RCVBDI_STD_THRESH, val);
9614
9615 if (tg3_flag(tp, 57765_PLUS))
9616 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9617
9618 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9619 return;
9620
9621 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9622
9623 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9624
9625 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9626 tw32(RCVBDI_JUMBO_THRESH, val);
9627
9628 if (tg3_flag(tp, 57765_PLUS))
9629 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9630 }
9631
9632 static inline u32 calc_crc(unsigned char *buf, int len)
9633 {
9634 u32 reg;
9635 u32 tmp;
9636 int j, k;
9637
9638 reg = 0xffffffff;
9639
9640 for (j = 0; j < len; j++) {
9641 reg ^= buf[j];
9642
9643 for (k = 0; k < 8; k++) {
9644 tmp = reg & 0x01;
9645
9646 reg >>= 1;
9647
9648 if (tmp)
9649 reg ^= 0xedb88320;
9650 }
9651 }
9652
9653 return ~reg;
9654 }
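/* calc_crc() above is the standard bit-reflected CRC-32 over the
 * Ethernet polynomial 0xedb88320.  Illustrative helper (not used by
 * the driver) showing how __tg3_set_rx_mode() below turns that CRC
 * into a position in the 128-bit multicast hash spread across the
 * four MAC_HASH_REG_* registers:
 */
static inline void tg3_mc_hash_pos(u32 crc, u32 *regidx, u32 *bit)
{
	u32 b = ~crc & 0x7f;		/* 7-bit hash index: 0..127 */

	*regidx = (b & 0x60) >> 5;	/* which of MAC_HASH_REG_0..3 */
	*bit = b & 0x1f;		/* which bit within that register */
}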
9655
9656 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9657 {
9658 /* accept or reject all multicast frames */
9659 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9660 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9661 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9662 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9663 }
9664
9665 static void __tg3_set_rx_mode(struct net_device *dev)
9666 {
9667 struct tg3 *tp = netdev_priv(dev);
9668 u32 rx_mode;
9669
9670 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9671 RX_MODE_KEEP_VLAN_TAG);
9672
9673 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9674 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9675 * flag clear.
9676 */
9677 if (!tg3_flag(tp, ENABLE_ASF))
9678 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9679 #endif
9680
9681 if (dev->flags & IFF_PROMISC) {
9682 /* Promiscuous mode. */
9683 rx_mode |= RX_MODE_PROMISC;
9684 } else if (dev->flags & IFF_ALLMULTI) {
9685 /* Accept all multicast. */
9686 tg3_set_multi(tp, 1);
9687 } else if (netdev_mc_empty(dev)) {
9688 /* Reject all multicast. */
9689 tg3_set_multi(tp, 0);
9690 } else {
9691 /* Accept one or more multicast(s). */
9692 struct netdev_hw_addr *ha;
9693 u32 mc_filter[4] = { 0, };
9694 u32 regidx;
9695 u32 bit;
9696 u32 crc;
9697
9698 netdev_for_each_mc_addr(ha, dev) {
9699 crc = calc_crc(ha->addr, ETH_ALEN);
9700 bit = ~crc & 0x7f;
9701 regidx = (bit & 0x60) >> 5;
9702 bit &= 0x1f;
9703 mc_filter[regidx] |= (1 << bit);
9704 }
9705
9706 tw32(MAC_HASH_REG_0, mc_filter[0]);
9707 tw32(MAC_HASH_REG_1, mc_filter[1]);
9708 tw32(MAC_HASH_REG_2, mc_filter[2]);
9709 tw32(MAC_HASH_REG_3, mc_filter[3]);
9710 }
9711
9712 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9713 rx_mode |= RX_MODE_PROMISC;
9714 } else if (!(dev->flags & IFF_PROMISC)) {
9715 /* Add all entries to the MAC addr filter list */
9716 int i = 0;
9717 struct netdev_hw_addr *ha;
9718
9719 netdev_for_each_uc_addr(ha, dev) {
9720 __tg3_set_one_mac_addr(tp, ha->addr,
9721 i + TG3_UCAST_ADDR_IDX(tp));
9722 i++;
9723 }
9724 }
9725
9726 if (rx_mode != tp->rx_mode) {
9727 tp->rx_mode = rx_mode;
9728 tw32_f(MAC_RX_MODE, rx_mode);
9729 udelay(10);
9730 }
9731 }
9732
9733 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9734 {
9735 int i;
9736
9737 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9738 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9739 }
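/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so with e.g.
 * qcnt = 4 the default table reads 0, 1, 2, 3, 0, 1, 2, 3, ... and
 * spreads flows round-robin across the rx queues.
 */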
9740
9741 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9742 {
9743 int i;
9744
9745 if (!tg3_flag(tp, SUPPORT_MSIX))
9746 return;
9747
9748 if (tp->rxq_cnt == 1) {
9749 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9750 return;
9751 }
9752
9753 /* Validate table against current IRQ count */
9754 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9755 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9756 break;
9757 }
9758
9759 if (i != TG3_RSS_INDIR_TBL_SIZE)
9760 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9761 }
9762
9763 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9764 {
9765 int i = 0;
9766 u32 reg = MAC_RSS_INDIR_TBL_0;
9767
9768 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9769 u32 val = tp->rss_ind_tbl[i];
9770 i++;
9771 for (; i % 8; i++) {
9772 val <<= 4;
9773 val |= tp->rss_ind_tbl[i];
9774 }
9775 tw32(reg, val);
9776 reg += 4;
9777 }
9778 }
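/* Illustrative helper (not used): the nibble packing performed inline
 * above.  Entries are 4 bits wide, eight per 32-bit register, with the
 * first entry landing in the most-significant nibble; e.g. the table
 * {1, 2, 3, 0, 1, 2, 3, 0} packs to 0x12301230.
 */
static inline u32 tg3_rss_pack_indir8(const u8 *tbl)
{
	u32 val = 0;
	int i;

	for (i = 0; i < 8; i++)
		val = (val << 4) | (tbl[i] & 0xf);

	return val;
}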
9779
9780 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9781 {
9782 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9783 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9784 else
9785 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9786 }
9787
9788 /* tp->lock is held. */
9789 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9790 {
9791 u32 val, rdmac_mode;
9792 int i, err, limit;
9793 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9794
9795 tg3_disable_ints(tp);
9796
9797 tg3_stop_fw(tp);
9798
9799 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9800
9801 if (tg3_flag(tp, INIT_COMPLETE))
9802 tg3_abort_hw(tp, 1);
9803
9804 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9805 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9806 tg3_phy_pull_config(tp);
9807 tg3_eee_pull_config(tp, NULL);
9808 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9809 }
9810
9811 /* Enable MAC control of LPI */
9812 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9813 tg3_setup_eee(tp);
9814
9815 if (reset_phy)
9816 tg3_phy_reset(tp);
9817
9818 err = tg3_chip_reset(tp);
9819 if (err)
9820 return err;
9821
9822 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9823
9824 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9825 val = tr32(TG3_CPMU_CTRL);
9826 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9827 tw32(TG3_CPMU_CTRL, val);
9828
9829 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9830 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9831 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9832 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9833
9834 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9835 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9836 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9837 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9838
9839 val = tr32(TG3_CPMU_HST_ACC);
9840 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9841 val |= CPMU_HST_ACC_MACCLK_6_25;
9842 tw32(TG3_CPMU_HST_ACC, val);
9843 }
9844
9845 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9846 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9847 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9848 PCIE_PWR_MGMT_L1_THRESH_4MS;
9849 tw32(PCIE_PWR_MGMT_THRESH, val);
9850
9851 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9852 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9853
9854 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9855
9856 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9857 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9858 }
9859
9860 if (tg3_flag(tp, L1PLLPD_EN)) {
9861 u32 grc_mode = tr32(GRC_MODE);
9862
9863 /* Access the lower 1K of PL PCIE block registers. */
9864 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9865 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9866
9867 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9868 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9869 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9870
9871 tw32(GRC_MODE, grc_mode);
9872 }
9873
9874 if (tg3_flag(tp, 57765_CLASS)) {
9875 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9876 u32 grc_mode = tr32(GRC_MODE);
9877
9878 /* Access the lower 1K of PL PCIE block registers. */
9879 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9880 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9881
9882 val = tr32(TG3_PCIE_TLDLPL_PORT +
9883 TG3_PCIE_PL_LO_PHYCTL5);
9884 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9885 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9886
9887 tw32(GRC_MODE, grc_mode);
9888 }
9889
9890 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9891 u32 grc_mode;
9892
9893 /* Fix transmit hangs */
9894 val = tr32(TG3_CPMU_PADRNG_CTL);
9895 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9896 tw32(TG3_CPMU_PADRNG_CTL, val);
9897
9898 grc_mode = tr32(GRC_MODE);
9899
9900 /* Access the lower 1K of DL PCIE block registers. */
9901 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9902 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9903
9904 val = tr32(TG3_PCIE_TLDLPL_PORT +
9905 TG3_PCIE_DL_LO_FTSMAX);
9906 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9907 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9908 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9909
9910 tw32(GRC_MODE, grc_mode);
9911 }
9912
9913 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9914 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9915 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9916 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9917 }
9918
9919 /* This works around an issue with Athlon chipsets on
9920 * B3 tigon3 silicon. This bit has no effect on any
9921 * other revision. But do not set this on PCI Express
9922 * chips and don't even touch the clocks if the CPMU is present.
9923 */
9924 if (!tg3_flag(tp, CPMU_PRESENT)) {
9925 if (!tg3_flag(tp, PCI_EXPRESS))
9926 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9927 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9928 }
9929
9930 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9931 tg3_flag(tp, PCIX_MODE)) {
9932 val = tr32(TG3PCI_PCISTATE);
9933 val |= PCISTATE_RETRY_SAME_DMA;
9934 tw32(TG3PCI_PCISTATE, val);
9935 }
9936
9937 if (tg3_flag(tp, ENABLE_APE)) {
9938 /* Allow reads and writes to the
9939 * APE register and memory space.
9940 */
9941 val = tr32(TG3PCI_PCISTATE);
9942 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9943 PCISTATE_ALLOW_APE_SHMEM_WR |
9944 PCISTATE_ALLOW_APE_PSPACE_WR;
9945 tw32(TG3PCI_PCISTATE, val);
9946 }
9947
9948 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9949 /* Enable some hw fixes. */
9950 val = tr32(TG3PCI_MSI_DATA);
9951 val |= (1 << 26) | (1 << 28) | (1 << 29);
9952 tw32(TG3PCI_MSI_DATA, val);
9953 }
9954
9955 /* Descriptor ring init may make accesses to the
9956 * NIC SRAM area to setup the TX descriptors, so we
9957 * can only do this after the hardware has been
9958 * successfully reset.
9959 */
9960 err = tg3_init_rings(tp);
9961 if (err)
9962 return err;
9963
9964 if (tg3_flag(tp, 57765_PLUS)) {
9965 val = tr32(TG3PCI_DMA_RW_CTRL) &
9966 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9967 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9968 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9969 if (!tg3_flag(tp, 57765_CLASS) &&
9970 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9971 tg3_asic_rev(tp) != ASIC_REV_5762)
9972 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9973 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9974 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9975 tg3_asic_rev(tp) != ASIC_REV_5761) {
9976 /* This value is determined during the probe time DMA
9977 * engine test, tg3_test_dma.
9978 */
9979 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9980 }
9981
9982 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9983 GRC_MODE_4X_NIC_SEND_RINGS |
9984 GRC_MODE_NO_TX_PHDR_CSUM |
9985 GRC_MODE_NO_RX_PHDR_CSUM);
9986 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9987
9988 /* Pseudo-header checksum is done by hardware logic and not
9989 	 * the offload processors, so make the chip do the pseudo-
9990 * header checksums on receive. For transmit it is more
9991 * convenient to do the pseudo-header checksum in software
9992 * as Linux does that on transmit for us in all cases.
9993 */
9994 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9995
9996 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9997 if (tp->rxptpctl)
9998 tw32(TG3_RX_PTP_CTL,
9999 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10000
10001 if (tg3_flag(tp, PTP_CAPABLE))
10002 val |= GRC_MODE_TIME_SYNC_ENABLE;
10003
10004 tw32(GRC_MODE, tp->grc_mode | val);
10005
10006 	/* Set up the timer prescaler register. The clock is always 66 MHz. */
10007 val = tr32(GRC_MISC_CFG);
10008 val &= ~0xff;
10009 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10010 tw32(GRC_MISC_CFG, val);
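	/* With a prescaler value of 65, and assuming the hardware divides
	 * by (value + 1), the timer ticks at 66 MHz / 66 = 1 MHz, i.e.
	 * 1 usec resolution for the coalescing timers.
	 */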
10011
10012 /* Initialize MBUF/DESC pool. */
10013 if (tg3_flag(tp, 5750_PLUS)) {
10014 /* Do nothing. */
10015 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10016 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10017 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10018 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10019 else
10020 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10021 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10022 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10023 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10024 int fw_len;
10025
10026 fw_len = tp->fw_len;
10027 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10028 tw32(BUFMGR_MB_POOL_ADDR,
10029 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10030 tw32(BUFMGR_MB_POOL_SIZE,
10031 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10032 }
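	/* Note: (fw_len + 0x7f) & ~0x7f rounds the firmware length up to a
	 * 128-byte boundary before carving it out of the 5705 MBUF pool,
	 * e.g. a 0x1234-byte image reserves 0x1280 bytes.
	 */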
10033
10034 if (tp->dev->mtu <= ETH_DATA_LEN) {
10035 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10036 tp->bufmgr_config.mbuf_read_dma_low_water);
10037 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10038 tp->bufmgr_config.mbuf_mac_rx_low_water);
10039 tw32(BUFMGR_MB_HIGH_WATER,
10040 tp->bufmgr_config.mbuf_high_water);
10041 } else {
10042 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10043 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10044 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10045 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10046 tw32(BUFMGR_MB_HIGH_WATER,
10047 tp->bufmgr_config.mbuf_high_water_jumbo);
10048 }
10049 tw32(BUFMGR_DMA_LOW_WATER,
10050 tp->bufmgr_config.dma_low_water);
10051 tw32(BUFMGR_DMA_HIGH_WATER,
10052 tp->bufmgr_config.dma_high_water);
10053
10054 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10055 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10056 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10057 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10058 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10059 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10060 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10061 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10062 tw32(BUFMGR_MODE, val);
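	/* Wait up to 2000 * 10 us = 20 ms for the enable bit to read back. */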
10063 for (i = 0; i < 2000; i++) {
10064 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10065 break;
10066 udelay(10);
10067 }
10068 if (i >= 2000) {
10069 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10070 return -ENODEV;
10071 }
10072
10073 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10074 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10075
10076 tg3_setup_rxbd_thresholds(tp);
10077
10078 	/* Initialize TG3_BDINFOs at:
10079 * RCVDBDI_STD_BD: standard eth size rx ring
10080 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10081 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10082 *
10083 * like so:
10084 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10085 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10086 * ring attribute flags
10087 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10088 *
10089 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10090 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10091 *
10092 * The size of each ring is fixed in the firmware, but the location is
10093 * configurable.
10094 */
10095 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10096 ((u64) tpr->rx_std_mapping >> 32));
10097 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10098 ((u64) tpr->rx_std_mapping & 0xffffffff));
10099 if (!tg3_flag(tp, 5717_PLUS))
10100 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10101 NIC_SRAM_RX_BUFFER_DESC);
10102
10103 /* Disable the mini ring */
10104 if (!tg3_flag(tp, 5705_PLUS))
10105 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10106 BDINFO_FLAGS_DISABLED);
10107
10108 /* Program the jumbo buffer descriptor ring control
10109 * blocks on those devices that have them.
10110 */
10111 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10112 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10113
10114 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10115 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10116 ((u64) tpr->rx_jmb_mapping >> 32));
10117 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10118 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10119 val = TG3_RX_JMB_RING_SIZE(tp) <<
10120 BDINFO_FLAGS_MAXLEN_SHIFT;
10121 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10122 val | BDINFO_FLAGS_USE_EXT_RECV);
10123 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10124 tg3_flag(tp, 57765_CLASS) ||
10125 tg3_asic_rev(tp) == ASIC_REV_5762)
10126 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10127 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10128 } else {
10129 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10130 BDINFO_FLAGS_DISABLED);
10131 }
10132
10133 if (tg3_flag(tp, 57765_PLUS)) {
10134 val = TG3_RX_STD_RING_SIZE(tp);
10135 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10136 val |= (TG3_RX_STD_DMA_SZ << 2);
10137 } else
10138 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10139 } else
10140 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10141
10142 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10143
10144 tpr->rx_std_prod_idx = tp->rx_pending;
10145 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10146
10147 tpr->rx_jmb_prod_idx =
10148 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10149 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10150
10151 tg3_rings_reset(tp);
10152
10153 /* Initialize MAC address and backoff seed. */
10154 __tg3_set_mac_addr(tp, false);
10155
10156 /* MTU + ethernet header + FCS + optional VLAN tag */
10157 tw32(MAC_RX_MTU_SIZE,
10158 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10159
10160 /* The slot time is changed by tg3_setup_phy if we
10161 * run at gigabit with half duplex.
10162 */
10163 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10164 (6 << TX_LENGTHS_IPG_SHIFT) |
10165 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10166
10167 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10168 tg3_asic_rev(tp) == ASIC_REV_5762)
10169 val |= tr32(MAC_TX_LENGTHS) &
10170 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10171 TX_LENGTHS_CNT_DWN_VAL_MSK);
10172
10173 tw32(MAC_TX_LENGTHS, val);
10174
10175 /* Receive rules. */
10176 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10177 tw32(RCVLPC_CONFIG, 0x0181);
10178
10179 /* Calculate RDMAC_MODE setting early, we need it to determine
10180 * the RCVLPC_STATE_ENABLE mask.
10181 */
10182 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10183 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10184 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10185 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10186 RDMAC_MODE_LNGREAD_ENAB);
10187
10188 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10189 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10190
10191 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10192 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10193 tg3_asic_rev(tp) == ASIC_REV_57780)
10194 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10195 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10196 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10197
10198 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10199 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10200 if (tg3_flag(tp, TSO_CAPABLE) &&
10201 tg3_asic_rev(tp) == ASIC_REV_5705) {
10202 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10203 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10204 !tg3_flag(tp, IS_5788)) {
10205 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10206 }
10207 }
10208
10209 if (tg3_flag(tp, PCI_EXPRESS))
10210 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10211
10212 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10213 tp->dma_limit = 0;
10214 if (tp->dev->mtu <= ETH_DATA_LEN) {
10215 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10216 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10217 }
10218 }
10219
10220 if (tg3_flag(tp, HW_TSO_1) ||
10221 tg3_flag(tp, HW_TSO_2) ||
10222 tg3_flag(tp, HW_TSO_3))
10223 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10224
10225 if (tg3_flag(tp, 57765_PLUS) ||
10226 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10227 tg3_asic_rev(tp) == ASIC_REV_57780)
10228 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10229
10230 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10231 tg3_asic_rev(tp) == ASIC_REV_5762)
10232 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10233
10234 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10235 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10236 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10237 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10238 tg3_flag(tp, 57765_PLUS)) {
10239 u32 tgtreg;
10240
10241 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10242 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10243 else
10244 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10245
10246 val = tr32(tgtreg);
10247 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10248 tg3_asic_rev(tp) == ASIC_REV_5762) {
10249 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10250 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10251 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10252 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10253 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10254 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10255 }
10256 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10257 }
10258
10259 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10260 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10261 tg3_asic_rev(tp) == ASIC_REV_5762) {
10262 u32 tgtreg;
10263
10264 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10265 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10266 else
10267 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10268
10269 val = tr32(tgtreg);
10270 tw32(tgtreg, val |
10271 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10272 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10273 }
10274
10275 /* Receive/send statistics. */
10276 if (tg3_flag(tp, 5750_PLUS)) {
10277 val = tr32(RCVLPC_STATS_ENABLE);
10278 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10279 tw32(RCVLPC_STATS_ENABLE, val);
10280 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10281 tg3_flag(tp, TSO_CAPABLE)) {
10282 val = tr32(RCVLPC_STATS_ENABLE);
10283 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10284 tw32(RCVLPC_STATS_ENABLE, val);
10285 } else {
10286 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10287 }
10288 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10289 tw32(SNDDATAI_STATSENAB, 0xffffff);
10290 tw32(SNDDATAI_STATSCTRL,
10291 (SNDDATAI_SCTRL_ENABLE |
10292 SNDDATAI_SCTRL_FASTUPD));
10293
10294 /* Setup host coalescing engine. */
10295 tw32(HOSTCC_MODE, 0);
10296 for (i = 0; i < 2000; i++) {
10297 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10298 break;
10299 udelay(10);
10300 }
10301
10302 __tg3_set_coalesce(tp, &tp->coal);
10303
10304 if (!tg3_flag(tp, 5705_PLUS)) {
10305 /* Status/statistics block address. See tg3_timer,
10306 * the tg3_periodic_fetch_stats call there, and
10307 * tg3_get_stats to see how this works for 5705/5750 chips.
10308 */
10309 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10310 ((u64) tp->stats_mapping >> 32));
10311 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10312 ((u64) tp->stats_mapping & 0xffffffff));
10313 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10314
10315 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10316
10317 /* Clear statistics and status block memory areas */
10318 for (i = NIC_SRAM_STATS_BLK;
10319 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10320 i += sizeof(u32)) {
10321 tg3_write_mem(tp, i, 0);
10322 udelay(40);
10323 }
10324 }
10325
10326 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10327
10328 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10329 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10330 if (!tg3_flag(tp, 5705_PLUS))
10331 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10332
10333 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10334 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10335 /* reset to prevent losing 1st rx packet intermittently */
10336 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10337 udelay(10);
10338 }
10339
10340 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10341 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10342 MAC_MODE_FHDE_ENABLE;
10343 if (tg3_flag(tp, ENABLE_APE))
10344 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10345 if (!tg3_flag(tp, 5705_PLUS) &&
10346 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10347 tg3_asic_rev(tp) != ASIC_REV_5700)
10348 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10349 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10350 udelay(40);
10351
10352 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10353 * If TG3_FLAG_IS_NIC is zero, we should read the
10354 * register to preserve the GPIO settings for LOMs. The GPIOs,
10355 * whether used as inputs or outputs, are set by boot code after
10356 * reset.
10357 */
10358 if (!tg3_flag(tp, IS_NIC)) {
10359 u32 gpio_mask;
10360
10361 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10362 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10363 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10364
10365 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10366 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10367 GRC_LCLCTRL_GPIO_OUTPUT3;
10368
10369 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10370 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10371
10372 tp->grc_local_ctrl &= ~gpio_mask;
10373 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10374
10375 /* GPIO1 must be driven high for eeprom write protect */
10376 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10377 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10378 GRC_LCLCTRL_GPIO_OUTPUT1);
10379 }
10380 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10381 udelay(100);
10382
10383 if (tg3_flag(tp, USING_MSIX)) {
10384 val = tr32(MSGINT_MODE);
10385 val |= MSGINT_MODE_ENABLE;
10386 if (tp->irq_cnt > 1)
10387 val |= MSGINT_MODE_MULTIVEC_EN;
10388 if (!tg3_flag(tp, 1SHOT_MSI))
10389 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10390 tw32(MSGINT_MODE, val);
10391 }
10392
10393 if (!tg3_flag(tp, 5705_PLUS)) {
10394 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10395 udelay(40);
10396 }
10397
10398 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10399 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10400 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10401 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10402 WDMAC_MODE_LNGREAD_ENAB);
10403
10404 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10405 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10406 if (tg3_flag(tp, TSO_CAPABLE) &&
10407 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10408 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10409 /* nothing */
10410 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10411 !tg3_flag(tp, IS_5788)) {
10412 val |= WDMAC_MODE_RX_ACCEL;
10413 }
10414 }
10415
10416 /* Enable host coalescing bug fix */
10417 if (tg3_flag(tp, 5755_PLUS))
10418 val |= WDMAC_MODE_STATUS_TAG_FIX;
10419
10420 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10421 val |= WDMAC_MODE_BURST_ALL_DATA;
10422
10423 tw32_f(WDMAC_MODE, val);
10424 udelay(40);
10425
10426 if (tg3_flag(tp, PCIX_MODE)) {
10427 u16 pcix_cmd;
10428
10429 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10430 &pcix_cmd);
10431 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10432 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10433 pcix_cmd |= PCI_X_CMD_READ_2K;
10434 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10435 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10436 pcix_cmd |= PCI_X_CMD_READ_2K;
10437 }
10438 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10439 pcix_cmd);
10440 }
10441
10442 tw32_f(RDMAC_MODE, rdmac_mode);
10443 udelay(40);
10444
10445 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10446 tg3_asic_rev(tp) == ASIC_REV_5720) {
10447 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10448 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10449 break;
10450 }
10451 if (i < TG3_NUM_RDMA_CHANNELS) {
10452 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10453 val |= tg3_lso_rd_dma_workaround_bit(tp);
10454 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10455 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10456 }
10457 }
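	/* If any read-DMA channel's length register is programmed beyond
	 * the current MTU, arm the LSO read-DMA workaround bit and record
	 * it in the 5719_5720_RDMA_BUG flag; tg3_periodic_fetch_stats()
	 * clears the workaround again once enough packets have gone out.
	 */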
10458
10459 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10460 if (!tg3_flag(tp, 5705_PLUS))
10461 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10462
10463 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10464 tw32(SNDDATAC_MODE,
10465 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10466 else
10467 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10468
10469 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10470 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10471 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10472 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10473 val |= RCVDBDI_MODE_LRG_RING_SZ;
10474 tw32(RCVDBDI_MODE, val);
10475 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10476 if (tg3_flag(tp, HW_TSO_1) ||
10477 tg3_flag(tp, HW_TSO_2) ||
10478 tg3_flag(tp, HW_TSO_3))
10479 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10480 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10481 if (tg3_flag(tp, ENABLE_TSS))
10482 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10483 tw32(SNDBDI_MODE, val);
10484 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10485
10486 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10487 err = tg3_load_5701_a0_firmware_fix(tp);
10488 if (err)
10489 return err;
10490 }
10491
10492 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10493 /* Ignore any errors for the firmware download. If download
10494 		 * fails, the device will operate with EEE disabled.
10495 */
10496 tg3_load_57766_firmware(tp);
10497 }
10498
10499 if (tg3_flag(tp, TSO_CAPABLE)) {
10500 err = tg3_load_tso_firmware(tp);
10501 if (err)
10502 return err;
10503 }
10504
10505 tp->tx_mode = TX_MODE_ENABLE;
10506
10507 if (tg3_flag(tp, 5755_PLUS) ||
10508 tg3_asic_rev(tp) == ASIC_REV_5906)
10509 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10510
10511 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10512 tg3_asic_rev(tp) == ASIC_REV_5762) {
10513 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10514 tp->tx_mode &= ~val;
10515 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10516 }
10517
10518 tw32_f(MAC_TX_MODE, tp->tx_mode);
10519 udelay(100);
10520
10521 if (tg3_flag(tp, ENABLE_RSS)) {
10522 tg3_rss_write_indir_tbl(tp);
10523
10524 		/* Set up the fixed 40-byte (ten 32-bit words) "secret" RSS hash key. */
10525 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10526 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10527 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10528 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10529 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10530 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10531 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10532 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10533 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10534 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10535 }
10536
10537 tp->rx_mode = RX_MODE_ENABLE;
10538 if (tg3_flag(tp, 5755_PLUS))
10539 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10540
10541 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10542 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10543
10544 if (tg3_flag(tp, ENABLE_RSS))
10545 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10546 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10547 RX_MODE_RSS_IPV6_HASH_EN |
10548 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10549 RX_MODE_RSS_IPV4_HASH_EN |
10550 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10551
10552 tw32_f(MAC_RX_MODE, tp->rx_mode);
10553 udelay(10);
10554
10555 tw32(MAC_LED_CTRL, tp->led_ctrl);
10556
10557 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10558 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10559 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10560 udelay(10);
10561 }
10562 tw32_f(MAC_RX_MODE, tp->rx_mode);
10563 udelay(10);
10564
10565 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10566 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10567 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10568 /* Set drive transmission level to 1.2V */
10569 /* only if the signal pre-emphasis bit is not set */
10570 val = tr32(MAC_SERDES_CFG);
10571 val &= 0xfffff000;
10572 val |= 0x880;
10573 tw32(MAC_SERDES_CFG, val);
10574 }
10575 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10576 tw32(MAC_SERDES_CFG, 0x616000);
10577 }
10578
10579 /* Prevent chip from dropping frames when flow control
10580 * is enabled.
10581 */
10582 if (tg3_flag(tp, 57765_CLASS))
10583 val = 1;
10584 else
10585 val = 2;
10586 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10587
10588 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10589 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10590 /* Use hardware link auto-negotiation */
10591 tg3_flag_set(tp, HW_AUTONEG);
10592 }
10593
10594 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10595 tg3_asic_rev(tp) == ASIC_REV_5714) {
10596 u32 tmp;
10597
10598 tmp = tr32(SERDES_RX_CTRL);
10599 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10600 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10601 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10602 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10603 }
10604
10605 if (!tg3_flag(tp, USE_PHYLIB)) {
10606 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10607 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10608
10609 err = tg3_setup_phy(tp, false);
10610 if (err)
10611 return err;
10612
10613 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10614 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10615 u32 tmp;
10616
10617 /* Clear CRC stats. */
10618 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10619 tg3_writephy(tp, MII_TG3_TEST1,
10620 tmp | MII_TG3_TEST1_CRC_EN);
10621 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10622 }
10623 }
10624 }
10625
10626 __tg3_set_rx_mode(tp->dev);
10627
10628 /* Initialize receive rules. */
10629 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10630 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10631 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10632 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10633
10634 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10635 limit = 8;
10636 else
10637 limit = 16;
10638 if (tg3_flag(tp, ENABLE_ASF))
10639 limit -= 4;
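	/* Deliberate fall-through: case N clears receive rule N - 1, so
	 * starting at 'limit' every unused rule down to rule 4 is disabled.
	 * Rules 0 and 1 were programmed above; rules 2 and 3 are left alone
	 * (their clears are intentionally commented out below).
	 */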
10640 switch (limit) {
10641 case 16:
10642 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10643 case 15:
10644 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10645 case 14:
10646 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10647 case 13:
10648 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10649 case 12:
10650 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10651 case 11:
10652 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10653 case 10:
10654 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10655 case 9:
10656 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10657 case 8:
10658 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10659 case 7:
10660 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10661 case 6:
10662 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10663 case 5:
10664 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10665 case 4:
10666 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10667 case 3:
10668 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10669 case 2:
10670 case 1:
10671
10672 default:
10673 break;
10674 }
10675
10676 if (tg3_flag(tp, ENABLE_APE))
10677 /* Write our heartbeat update interval to APE. */
10678 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10679 APE_HOST_HEARTBEAT_INT_DISABLE);
10680
10681 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10682
10683 return 0;
10684 }
10685
10686 /* Called at device open time to get the chip ready for
10687 * packet processing. Invoked with tp->lock held.
10688 */
10689 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10690 {
10691 /* Chip may have been just powered on. If so, the boot code may still
10692 * be running initialization. Wait for it to finish to avoid races in
10693 * accessing the hardware.
10694 */
10695 tg3_enable_register_access(tp);
10696 tg3_poll_fw(tp);
10697
10698 tg3_switch_clocks(tp);
10699
10700 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10701
10702 return tg3_reset_hw(tp, reset_phy);
10703 }
10704
10705 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10706 {
10707 int i;
10708
10709 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10710 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10711
10712 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10713 off += len;
10714
10715 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10716 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10717 memset(ocir, 0, TG3_OCIR_LEN);
10718 }
10719 }
10720
10721 /* sysfs attributes for hwmon */
10722 static ssize_t tg3_show_temp(struct device *dev,
10723 struct device_attribute *devattr, char *buf)
10724 {
10725 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10726 struct tg3 *tp = dev_get_drvdata(dev);
10727 u32 temperature;
10728
10729 spin_lock_bh(&tp->lock);
10730 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10731 sizeof(temperature));
10732 spin_unlock_bh(&tp->lock);
10733 return sprintf(buf, "%u\n", temperature);
10734 }
10735
10737 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10738 TG3_TEMP_SENSOR_OFFSET);
10739 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10740 TG3_TEMP_CAUTION_OFFSET);
10741 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10742 TG3_TEMP_MAX_OFFSET);
10743
10744 static struct attribute *tg3_attrs[] = {
10745 &sensor_dev_attr_temp1_input.dev_attr.attr,
10746 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10747 &sensor_dev_attr_temp1_max.dev_attr.attr,
10748 NULL
10749 };
10750 ATTRIBUTE_GROUPS(tg3);
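/* These map the standard hwmon names temp1_input/temp1_crit/temp1_max onto
 * APE scratchpad offsets; tg3_show_temp() reads the raw 32-bit value under
 * tp->lock and reports it unscaled (the firmware presumably already stores
 * it in the millidegree-Celsius units hwmon expects).
 */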
10751
10752 static void tg3_hwmon_close(struct tg3 *tp)
10753 {
10754 if (tp->hwmon_dev) {
10755 hwmon_device_unregister(tp->hwmon_dev);
10756 tp->hwmon_dev = NULL;
10757 }
10758 }
10759
10760 static void tg3_hwmon_open(struct tg3 *tp)
10761 {
10762 int i;
10763 u32 size = 0;
10764 struct pci_dev *pdev = tp->pdev;
10765 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10766
10767 tg3_sd_scan_scratchpad(tp, ocirs);
10768
10769 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10770 if (!ocirs[i].src_data_length)
10771 continue;
10772
10773 size += ocirs[i].src_hdr_length;
10774 size += ocirs[i].src_data_length;
10775 }
10776
10777 if (!size)
10778 return;
10779
10780 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10781 tp, tg3_groups);
10782 if (IS_ERR(tp->hwmon_dev)) {
10783 tp->hwmon_dev = NULL;
10784 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10785 }
10786 }
10787
10789 #define TG3_STAT_ADD32(PSTAT, REG) \
10790 do { u32 __val = tr32(REG); \
10791 (PSTAT)->low += __val; \
10792 if ((PSTAT)->low < __val) \
10793 (PSTAT)->high += 1; \
10794 } while (0)
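/* The low word is accumulated with plain 32-bit wraparound; if the new low
 * value is smaller than what was just added, the addition must have wrapped,
 * so carry one into the high word.  Example: low = 0xfffffff0 plus a register
 * reading of 0x20 leaves low = 0x10 (< 0x20), so high is incremented.
 */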
10795
10796 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10797 {
10798 struct tg3_hw_stats *sp = tp->hw_stats;
10799
10800 if (!tp->link_up)
10801 return;
10802
10803 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10804 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10805 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10806 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10807 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10808 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10809 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10810 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10811 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10812 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10813 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10814 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10815 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10816 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10817 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10818 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10819 u32 val;
10820
10821 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10822 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10823 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10824 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10825 }
10826
10827 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10828 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10829 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10830 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10831 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10832 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10833 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10834 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10835 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10836 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10837 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10838 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10839 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10840 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10841
10842 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10843 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10844 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10845 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10846 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10847 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10848 } else {
10849 u32 val = tr32(HOSTCC_FLOW_ATTN);
10850 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10851 if (val) {
10852 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10853 sp->rx_discards.low += val;
10854 if (sp->rx_discards.low < val)
10855 sp->rx_discards.high += 1;
10856 }
10857 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10858 }
10859 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10860 }
10861
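/* Workaround for lost MSIs: if a NAPI context reports pending work but its
 * rx/tx consumer indices have not moved since the previous timer tick,
 * assume the one-shot MSI was missed and re-run the handler by hand.  The
 * chk_msi_cnt counter gives the interrupt one extra tick to arrive first.
 */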
10862 static void tg3_chk_missed_msi(struct tg3 *tp)
10863 {
10864 u32 i;
10865
10866 for (i = 0; i < tp->irq_cnt; i++) {
10867 struct tg3_napi *tnapi = &tp->napi[i];
10868
10869 if (tg3_has_work(tnapi)) {
10870 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10871 tnapi->last_tx_cons == tnapi->tx_cons) {
10872 if (tnapi->chk_msi_cnt < 1) {
10873 tnapi->chk_msi_cnt++;
10874 return;
10875 }
10876 tg3_msi(0, tnapi);
10877 }
10878 }
10879 tnapi->chk_msi_cnt = 0;
10880 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10881 tnapi->last_tx_cons = tnapi->tx_cons;
10882 }
10883 }
10884
10885 static void tg3_timer(unsigned long __opaque)
10886 {
10887 struct tg3 *tp = (struct tg3 *) __opaque;
10888
10889 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10890 goto restart_timer;
10891
10892 spin_lock(&tp->lock);
10893
10894 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10895 tg3_flag(tp, 57765_CLASS))
10896 tg3_chk_missed_msi(tp);
10897
10898 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10899 /* BCM4785: Flush posted writes from GbE to host memory. */
10900 tr32(HOSTCC_MODE);
10901 }
10902
10903 if (!tg3_flag(tp, TAGGED_STATUS)) {
10904 		/* All of this garbage is necessary because, when using
10905 		 * non-tagged IRQ status, the mailbox/status_block protocol
10906 		 * the chip uses with the CPU is race prone.
10907 */
10908 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10909 tw32(GRC_LOCAL_CTRL,
10910 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10911 } else {
10912 tw32(HOSTCC_MODE, tp->coalesce_mode |
10913 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10914 }
10915
10916 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10917 spin_unlock(&tp->lock);
10918 tg3_reset_task_schedule(tp);
10919 goto restart_timer;
10920 }
10921 }
10922
10923 /* This part only runs once per second. */
10924 if (!--tp->timer_counter) {
10925 if (tg3_flag(tp, 5705_PLUS))
10926 tg3_periodic_fetch_stats(tp);
10927
10928 if (tp->setlpicnt && !--tp->setlpicnt)
10929 tg3_phy_eee_enable(tp);
10930
10931 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10932 u32 mac_stat;
10933 int phy_event;
10934
10935 mac_stat = tr32(MAC_STATUS);
10936
10937 phy_event = 0;
10938 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10939 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10940 phy_event = 1;
10941 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10942 phy_event = 1;
10943
10944 if (phy_event)
10945 tg3_setup_phy(tp, false);
10946 } else if (tg3_flag(tp, POLL_SERDES)) {
10947 u32 mac_stat = tr32(MAC_STATUS);
10948 int need_setup = 0;
10949
10950 if (tp->link_up &&
10951 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10952 need_setup = 1;
10953 }
10954 if (!tp->link_up &&
10955 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10956 MAC_STATUS_SIGNAL_DET))) {
10957 need_setup = 1;
10958 }
10959 if (need_setup) {
10960 if (!tp->serdes_counter) {
10961 tw32_f(MAC_MODE,
10962 (tp->mac_mode &
10963 ~MAC_MODE_PORT_MODE_MASK));
10964 udelay(40);
10965 tw32_f(MAC_MODE, tp->mac_mode);
10966 udelay(40);
10967 }
10968 tg3_setup_phy(tp, false);
10969 }
10970 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10971 tg3_flag(tp, 5780_CLASS)) {
10972 tg3_serdes_parallel_detect(tp);
10973 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
10974 u32 cpmu = tr32(TG3_CPMU_STATUS);
10975 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
10976 TG3_CPMU_STATUS_LINK_MASK);
10977
10978 if (link_up != tp->link_up)
10979 tg3_setup_phy(tp, false);
10980 }
10981
10982 tp->timer_counter = tp->timer_multiplier;
10983 }
10984
10985 /* Heartbeat is only sent once every 2 seconds.
10986 *
10987 * The heartbeat is to tell the ASF firmware that the host
10988 * driver is still alive. In the event that the OS crashes,
10989 * ASF needs to reset the hardware to free up the FIFO space
10990 * that may be filled with rx packets destined for the host.
10991 * If the FIFO is full, ASF will no longer function properly.
10992 *
10993 * Unintended resets have been reported on real time kernels
10994 * where the timer doesn't run on time. Netpoll will also have
10995 	 * the same problem.
10996 *
10997 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10998 * to check the ring condition when the heartbeat is expiring
10999 * before doing the reset. This will prevent most unintended
11000 * resets.
11001 */
11002 if (!--tp->asf_counter) {
11003 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11004 tg3_wait_for_event_ack(tp);
11005
11006 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11007 FWCMD_NICDRV_ALIVE3);
11008 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11009 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11010 TG3_FW_UPDATE_TIMEOUT_SEC);
11011
11012 tg3_generate_fw_event(tp);
11013 }
11014 tp->asf_counter = tp->asf_multiplier;
11015 }
11016
11017 spin_unlock(&tp->lock);
11018
11019 restart_timer:
11020 tp->timer.expires = jiffies + tp->timer_offset;
11021 add_timer(&tp->timer);
11022 }
11023
11024 static void tg3_timer_init(struct tg3 *tp)
11025 {
11026 if (tg3_flag(tp, TAGGED_STATUS) &&
11027 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11028 !tg3_flag(tp, 57765_CLASS))
11029 tp->timer_offset = HZ;
11030 else
11031 tp->timer_offset = HZ / 10;
11032
11033 BUG_ON(tp->timer_offset > HZ);
11034
11035 tp->timer_multiplier = (HZ / tp->timer_offset);
11036 tp->asf_multiplier = (HZ / tp->timer_offset) *
11037 TG3_FW_UPDATE_FREQ_SEC;
11038
11039 init_timer(&tp->timer);
11040 tp->timer.data = (unsigned long) tp;
11041 tp->timer.function = tg3_timer;
11042 }
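/* Example cadence: with the HZ / 10 offset the timer fires ten times a
 * second, timer_multiplier = 10 makes the once-per-second block in
 * tg3_timer() run on every tenth expiry, and asf_multiplier stretches the
 * ASF heartbeat to every TG3_FW_UPDATE_FREQ_SEC seconds.
 */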
11043
11044 static void tg3_timer_start(struct tg3 *tp)
11045 {
11046 tp->asf_counter = tp->asf_multiplier;
11047 tp->timer_counter = tp->timer_multiplier;
11048
11049 tp->timer.expires = jiffies + tp->timer_offset;
11050 add_timer(&tp->timer);
11051 }
11052
11053 static void tg3_timer_stop(struct tg3 *tp)
11054 {
11055 del_timer_sync(&tp->timer);
11056 }
11057
11058 /* Restart hardware after configuration changes, self-test, etc.
11059 * Invoked with tp->lock held.
11060 */
11061 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11062 __releases(tp->lock)
11063 __acquires(tp->lock)
11064 {
11065 int err;
11066
11067 err = tg3_init_hw(tp, reset_phy);
11068 if (err) {
11069 netdev_err(tp->dev,
11070 "Failed to re-initialize device, aborting\n");
11071 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11072 tg3_full_unlock(tp);
11073 tg3_timer_stop(tp);
11074 tp->irq_sync = 0;
11075 tg3_napi_enable(tp);
11076 dev_close(tp->dev);
11077 tg3_full_lock(tp, 0);
11078 }
11079 return err;
11080 }
11081
11082 static void tg3_reset_task(struct work_struct *work)
11083 {
11084 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11085 int err;
11086
11087 tg3_full_lock(tp, 0);
11088
11089 if (!netif_running(tp->dev)) {
11090 tg3_flag_clear(tp, RESET_TASK_PENDING);
11091 tg3_full_unlock(tp);
11092 return;
11093 }
11094
11095 tg3_full_unlock(tp);
11096
11097 tg3_phy_stop(tp);
11098
11099 tg3_netif_stop(tp);
11100
11101 tg3_full_lock(tp, 1);
11102
11103 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11104 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11105 tp->write32_rx_mbox = tg3_write_flush_reg32;
11106 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11107 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11108 }
11109
11110 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11111 err = tg3_init_hw(tp, true);
11112 if (err)
11113 goto out;
11114
11115 tg3_netif_start(tp);
11116
11117 out:
11118 tg3_full_unlock(tp);
11119
11120 if (!err)
11121 tg3_phy_start(tp);
11122
11123 tg3_flag_clear(tp, RESET_TASK_PENDING);
11124 }
11125
11126 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11127 {
11128 irq_handler_t fn;
11129 unsigned long flags;
11130 char *name;
11131 struct tg3_napi *tnapi = &tp->napi[irq_num];
11132
11133 if (tp->irq_cnt == 1)
11134 name = tp->dev->name;
11135 else {
11136 name = &tnapi->irq_lbl[0];
11137 if (tnapi->tx_buffers && tnapi->rx_rcb)
11138 snprintf(name, IFNAMSIZ,
11139 "%s-txrx-%d", tp->dev->name, irq_num);
11140 else if (tnapi->tx_buffers)
11141 snprintf(name, IFNAMSIZ,
11142 "%s-tx-%d", tp->dev->name, irq_num);
11143 else if (tnapi->rx_rcb)
11144 snprintf(name, IFNAMSIZ,
11145 "%s-rx-%d", tp->dev->name, irq_num);
11146 else
11147 snprintf(name, IFNAMSIZ,
11148 "%s-%d", tp->dev->name, irq_num);
11149 name[IFNAMSIZ-1] = 0;
11150 }
11151
11152 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11153 fn = tg3_msi;
11154 if (tg3_flag(tp, 1SHOT_MSI))
11155 fn = tg3_msi_1shot;
11156 flags = 0;
11157 } else {
11158 fn = tg3_interrupt;
11159 if (tg3_flag(tp, TAGGED_STATUS))
11160 fn = tg3_interrupt_tagged;
11161 flags = IRQF_SHARED;
11162 }
11163
11164 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11165 }
11166
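/* Verify that interrupt delivery works at all: swap in the minimal
 * tg3_test_isr() handler, kick the host coalescing engine with
 * HOSTCC_MODE_NOW so the chip raises an interrupt, then poll the interrupt
 * mailbox for up to ~50 ms (5 x 10 ms) to see whether the ISR ever ran.
 */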
11167 static int tg3_test_interrupt(struct tg3 *tp)
11168 {
11169 struct tg3_napi *tnapi = &tp->napi[0];
11170 struct net_device *dev = tp->dev;
11171 int err, i, intr_ok = 0;
11172 u32 val;
11173
11174 if (!netif_running(dev))
11175 return -ENODEV;
11176
11177 tg3_disable_ints(tp);
11178
11179 free_irq(tnapi->irq_vec, tnapi);
11180
11181 /*
11182 	 * Turn off MSI one shot mode. Otherwise this test has no way
11183 	 * to observe whether the interrupt was delivered.
11184 */
11185 if (tg3_flag(tp, 57765_PLUS)) {
11186 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11187 tw32(MSGINT_MODE, val);
11188 }
11189
11190 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11191 IRQF_SHARED, dev->name, tnapi);
11192 if (err)
11193 return err;
11194
11195 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11196 tg3_enable_ints(tp);
11197
11198 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11199 tnapi->coal_now);
11200
11201 for (i = 0; i < 5; i++) {
11202 u32 int_mbox, misc_host_ctrl;
11203
11204 int_mbox = tr32_mailbox(tnapi->int_mbox);
11205 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11206
11207 if ((int_mbox != 0) ||
11208 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11209 intr_ok = 1;
11210 break;
11211 }
11212
11213 if (tg3_flag(tp, 57765_PLUS) &&
11214 tnapi->hw_status->status_tag != tnapi->last_tag)
11215 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11216
11217 msleep(10);
11218 }
11219
11220 tg3_disable_ints(tp);
11221
11222 free_irq(tnapi->irq_vec, tnapi);
11223
11224 err = tg3_request_irq(tp, 0);
11225
11226 if (err)
11227 return err;
11228
11229 if (intr_ok) {
11230 /* Reenable MSI one shot mode. */
11231 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11232 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11233 tw32(MSGINT_MODE, val);
11234 }
11235 return 0;
11236 }
11237
11238 return -EIO;
11239 }
11240
11241 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
11242  * successfully restored.
11243 */
11244 static int tg3_test_msi(struct tg3 *tp)
11245 {
11246 int err;
11247 u16 pci_cmd;
11248
11249 if (!tg3_flag(tp, USING_MSI))
11250 return 0;
11251
11252 /* Turn off SERR reporting in case MSI terminates with Master
11253 * Abort.
11254 */
11255 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11256 pci_write_config_word(tp->pdev, PCI_COMMAND,
11257 pci_cmd & ~PCI_COMMAND_SERR);
11258
11259 err = tg3_test_interrupt(tp);
11260
11261 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11262
11263 if (!err)
11264 return 0;
11265
11266 /* other failures */
11267 if (err != -EIO)
11268 return err;
11269
11270 /* MSI test failed, go back to INTx mode */
11271 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11272 "to INTx mode. Please report this failure to the PCI "
11273 "maintainer and include system chipset information\n");
11274
11275 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11276
11277 pci_disable_msi(tp->pdev);
11278
11279 tg3_flag_clear(tp, USING_MSI);
11280 tp->napi[0].irq_vec = tp->pdev->irq;
11281
11282 err = tg3_request_irq(tp, 0);
11283 if (err)
11284 return err;
11285
11286 /* Need to reset the chip because the MSI cycle may have terminated
11287 * with Master Abort.
11288 */
11289 tg3_full_lock(tp, 1);
11290
11291 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11292 err = tg3_init_hw(tp, true);
11293
11294 tg3_full_unlock(tp);
11295
11296 if (err)
11297 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11298
11299 return err;
11300 }
11301
11302 static int tg3_request_firmware(struct tg3 *tp)
11303 {
11304 const struct tg3_firmware_hdr *fw_hdr;
11305
11306 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11307 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11308 tp->fw_needed);
11309 return -ENOENT;
11310 }
11311
11312 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11313
11314 /* Firmware blob starts with version numbers, followed by
11315 * start address and _full_ length including BSS sections
11316 	 * (which must be longer than the actual data, of course).
11317 */
11318
11319 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11320 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11321 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11322 tp->fw_len, tp->fw_needed);
11323 release_firmware(tp->fw);
11324 tp->fw = NULL;
11325 return -EINVAL;
11326 }
11327
11328 /* We no longer need firmware; we have it. */
11329 tp->fw_needed = NULL;
11330 return 0;
11331 }
11332
11333 static u32 tg3_irq_count(struct tg3 *tp)
11334 {
11335 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11336
11337 if (irq_cnt > 1) {
11338 /* We want as many rx rings enabled as there are cpus.
11339 * In multiqueue MSI-X mode, the first MSI-X vector
11340 * only deals with link interrupts, etc, so we add
11341 * one to the number of vectors we are requesting.
11342 */
11343 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11344 }
11345
11346 return irq_cnt;
11347 }
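/* Illustrative sizing: on a four-CPU box with the default RSS queue count,
 * rxq_cnt = 4 and irq_cnt = min(4 + 1, tp->irq_max) -- vector 0 carries the
 * link and error interrupts while vectors 1-4 service the rx rings.
 */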
11348
11349 static bool tg3_enable_msix(struct tg3 *tp)
11350 {
11351 int i, rc;
11352 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11353
11354 tp->txq_cnt = tp->txq_req;
11355 tp->rxq_cnt = tp->rxq_req;
11356 if (!tp->rxq_cnt)
11357 tp->rxq_cnt = netif_get_num_default_rss_queues();
11358 if (tp->rxq_cnt > tp->rxq_max)
11359 tp->rxq_cnt = tp->rxq_max;
11360
11361 /* Disable multiple TX rings by default. Simple round-robin hardware
11362 * scheduling of the TX rings can cause starvation of rings with
11363 * small packets when other rings have TSO or jumbo packets.
11364 */
11365 if (!tp->txq_req)
11366 tp->txq_cnt = 1;
11367
11368 tp->irq_cnt = tg3_irq_count(tp);
11369
11370 for (i = 0; i < tp->irq_max; i++) {
11371 msix_ent[i].entry = i;
11372 msix_ent[i].vector = 0;
11373 }
11374
11375 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11376 if (rc < 0) {
11377 return false;
11378 } else if (rc < tp->irq_cnt) {
11379 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11380 tp->irq_cnt, rc);
11381 tp->irq_cnt = rc;
11382 tp->rxq_cnt = max(rc - 1, 1);
11383 if (tp->txq_cnt)
11384 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11385 }
11386
11387 for (i = 0; i < tp->irq_max; i++)
11388 tp->napi[i].irq_vec = msix_ent[i].vector;
11389
11390 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11391 pci_disable_msix(tp->pdev);
11392 return false;
11393 }
11394
11395 if (tp->irq_cnt == 1)
11396 return true;
11397
11398 tg3_flag_set(tp, ENABLE_RSS);
11399
11400 if (tp->txq_cnt > 1)
11401 tg3_flag_set(tp, ENABLE_TSS);
11402
11403 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11404
11405 return true;
11406 }
11407
11408 static void tg3_ints_init(struct tg3 *tp)
11409 {
11410 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11411 !tg3_flag(tp, TAGGED_STATUS)) {
11412 /* All MSI supporting chips should support tagged
11413 		 * status. Warn and fall back to legacy INTx if that is not the case.
11414 */
11415 netdev_warn(tp->dev,
11416 "MSI without TAGGED_STATUS? Not using MSI\n");
11417 goto defcfg;
11418 }
11419
11420 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11421 tg3_flag_set(tp, USING_MSIX);
11422 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11423 tg3_flag_set(tp, USING_MSI);
11424
11425 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11426 u32 msi_mode = tr32(MSGINT_MODE);
11427 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11428 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11429 if (!tg3_flag(tp, 1SHOT_MSI))
11430 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11431 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11432 }
11433 defcfg:
11434 if (!tg3_flag(tp, USING_MSIX)) {
11435 tp->irq_cnt = 1;
11436 tp->napi[0].irq_vec = tp->pdev->irq;
11437 }
11438
11439 if (tp->irq_cnt == 1) {
11440 tp->txq_cnt = 1;
11441 tp->rxq_cnt = 1;
11442 netif_set_real_num_tx_queues(tp->dev, 1);
11443 netif_set_real_num_rx_queues(tp->dev, 1);
11444 }
11445 }
11446
11447 static void tg3_ints_fini(struct tg3 *tp)
11448 {
11449 if (tg3_flag(tp, USING_MSIX))
11450 pci_disable_msix(tp->pdev);
11451 else if (tg3_flag(tp, USING_MSI))
11452 pci_disable_msi(tp->pdev);
11453 tg3_flag_clear(tp, USING_MSI);
11454 tg3_flag_clear(tp, USING_MSIX);
11455 tg3_flag_clear(tp, ENABLE_RSS);
11456 tg3_flag_clear(tp, ENABLE_TSS);
11457 }
11458
11459 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11460 bool init)
11461 {
11462 struct net_device *dev = tp->dev;
11463 int i, err;
11464
11465 /*
11466 	 * Set up interrupts first so we know how
11467 	 * many NAPI resources to allocate.
11468 */
11469 tg3_ints_init(tp);
11470
11471 tg3_rss_check_indir_tbl(tp);
11472
11473 /* The placement of this call is tied
11474 * to the setup and use of Host TX descriptors.
11475 */
11476 err = tg3_alloc_consistent(tp);
11477 if (err)
11478 goto out_ints_fini;
11479
11480 tg3_napi_init(tp);
11481
11482 tg3_napi_enable(tp);
11483
11484 for (i = 0; i < tp->irq_cnt; i++) {
11485 struct tg3_napi *tnapi = &tp->napi[i];
11486 err = tg3_request_irq(tp, i);
11487 if (err) {
11488 for (i--; i >= 0; i--) {
11489 tnapi = &tp->napi[i];
11490 free_irq(tnapi->irq_vec, tnapi);
11491 }
11492 goto out_napi_fini;
11493 }
11494 }
11495
11496 tg3_full_lock(tp, 0);
11497
11498 if (init)
11499 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11500
11501 err = tg3_init_hw(tp, reset_phy);
11502 if (err) {
11503 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11504 tg3_free_rings(tp);
11505 }
11506
11507 tg3_full_unlock(tp);
11508
11509 if (err)
11510 goto out_free_irq;
11511
11512 if (test_irq && tg3_flag(tp, USING_MSI)) {
11513 err = tg3_test_msi(tp);
11514
11515 if (err) {
11516 tg3_full_lock(tp, 0);
11517 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11518 tg3_free_rings(tp);
11519 tg3_full_unlock(tp);
11520
11521 goto out_napi_fini;
11522 }
11523
11524 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11525 u32 val = tr32(PCIE_TRANSACTION_CFG);
11526
11527 tw32(PCIE_TRANSACTION_CFG,
11528 val | PCIE_TRANS_CFG_1SHOT_MSI);
11529 }
11530 }
11531
11532 tg3_phy_start(tp);
11533
11534 tg3_hwmon_open(tp);
11535
11536 tg3_full_lock(tp, 0);
11537
11538 tg3_timer_start(tp);
11539 tg3_flag_set(tp, INIT_COMPLETE);
11540 tg3_enable_ints(tp);
11541
11542 if (init)
11543 tg3_ptp_init(tp);
11544 else
11545 tg3_ptp_resume(tp);
11546
11548 tg3_full_unlock(tp);
11549
11550 netif_tx_start_all_queues(dev);
11551
11552 /*
11553 	 * Reset the loopback feature if it was turned on while the device was
11554 	 * down, to make sure that it's installed properly now.
11555 */
11556 if (dev->features & NETIF_F_LOOPBACK)
11557 tg3_set_loopback(dev, dev->features);
11558
11559 return 0;
11560
11561 out_free_irq:
11562 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11563 struct tg3_napi *tnapi = &tp->napi[i];
11564 free_irq(tnapi->irq_vec, tnapi);
11565 }
11566
11567 out_napi_fini:
11568 tg3_napi_disable(tp);
11569 tg3_napi_fini(tp);
11570 tg3_free_consistent(tp);
11571
11572 out_ints_fini:
11573 tg3_ints_fini(tp);
11574
11575 return err;
11576 }
11577
11578 static void tg3_stop(struct tg3 *tp)
11579 {
11580 int i;
11581
11582 tg3_reset_task_cancel(tp);
11583 tg3_netif_stop(tp);
11584
11585 tg3_timer_stop(tp);
11586
11587 tg3_hwmon_close(tp);
11588
11589 tg3_phy_stop(tp);
11590
11591 tg3_full_lock(tp, 1);
11592
11593 tg3_disable_ints(tp);
11594
11595 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11596 tg3_free_rings(tp);
11597 tg3_flag_clear(tp, INIT_COMPLETE);
11598
11599 tg3_full_unlock(tp);
11600
11601 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11602 struct tg3_napi *tnapi = &tp->napi[i];
11603 free_irq(tnapi->irq_vec, tnapi);
11604 }
11605
11606 tg3_ints_fini(tp);
11607
11608 tg3_napi_fini(tp);
11609
11610 tg3_free_consistent(tp);
11611 }
11612
11613 static int tg3_open(struct net_device *dev)
11614 {
11615 struct tg3 *tp = netdev_priv(dev);
11616 int err;
11617
11618 if (tp->fw_needed) {
11619 err = tg3_request_firmware(tp);
11620 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11621 if (err) {
11622 netdev_warn(tp->dev, "EEE capability disabled\n");
11623 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11624 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11625 netdev_warn(tp->dev, "EEE capability restored\n");
11626 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11627 }
11628 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11629 if (err)
11630 return err;
11631 } else if (err) {
11632 netdev_warn(tp->dev, "TSO capability disabled\n");
11633 tg3_flag_clear(tp, TSO_CAPABLE);
11634 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11635 netdev_notice(tp->dev, "TSO capability restored\n");
11636 tg3_flag_set(tp, TSO_CAPABLE);
11637 }
11638 }
11639
11640 tg3_carrier_off(tp);
11641
11642 err = tg3_power_up(tp);
11643 if (err)
11644 return err;
11645
11646 tg3_full_lock(tp, 0);
11647
11648 tg3_disable_ints(tp);
11649 tg3_flag_clear(tp, INIT_COMPLETE);
11650
11651 tg3_full_unlock(tp);
11652
11653 err = tg3_start(tp,
11654 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11655 true, true);
11656 if (err) {
11657 tg3_frob_aux_power(tp, false);
11658 pci_set_power_state(tp->pdev, PCI_D3hot);
11659 }
11660
11661 if (tg3_flag(tp, PTP_CAPABLE)) {
11662 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11663 &tp->pdev->dev);
11664 if (IS_ERR(tp->ptp_clock))
11665 tp->ptp_clock = NULL;
11666 }
11667
11668 return err;
11669 }
11670
11671 static int tg3_close(struct net_device *dev)
11672 {
11673 struct tg3 *tp = netdev_priv(dev);
11674
11675 tg3_ptp_fini(tp);
11676
11677 tg3_stop(tp);
11678
11679 /* Clear stats across close / open calls */
11680 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11681 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11682
11683 if (pci_device_is_present(tp->pdev)) {
11684 tg3_power_down_prepare(tp);
11685
11686 tg3_carrier_off(tp);
11687 }
11688 return 0;
11689 }
11690
11691 static inline u64 get_stat64(tg3_stat64_t *val)
11692 {
11693 return ((u64)val->high << 32) | ((u64)val->low);
11694 }
11695
11696 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11697 {
11698 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11699
11700 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11701 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11702 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11703 u32 val;
11704
11705 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11706 tg3_writephy(tp, MII_TG3_TEST1,
11707 val | MII_TG3_TEST1_CRC_EN);
11708 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11709 } else
11710 val = 0;
11711
11712 tp->phy_crc_errors += val;
11713
11714 return tp->phy_crc_errors;
11715 }
11716
11717 return get_stat64(&hw_stats->rx_fcs_errors);
11718 }
11719
11720 #define ESTAT_ADD(member) \
11721 estats->member = old_estats->member + \
11722 get_stat64(&hw_stats->member)
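/* The hardware statistics block is reset whenever the chip is, so each
 * ethtool counter is reconstructed as the pre-reset total saved in
 * estats_prev plus whatever the current hardware block has accumulated.
 */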
11723
11724 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11725 {
11726 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11727 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11728
11729 ESTAT_ADD(rx_octets);
11730 ESTAT_ADD(rx_fragments);
11731 ESTAT_ADD(rx_ucast_packets);
11732 ESTAT_ADD(rx_mcast_packets);
11733 ESTAT_ADD(rx_bcast_packets);
11734 ESTAT_ADD(rx_fcs_errors);
11735 ESTAT_ADD(rx_align_errors);
11736 ESTAT_ADD(rx_xon_pause_rcvd);
11737 ESTAT_ADD(rx_xoff_pause_rcvd);
11738 ESTAT_ADD(rx_mac_ctrl_rcvd);
11739 ESTAT_ADD(rx_xoff_entered);
11740 ESTAT_ADD(rx_frame_too_long_errors);
11741 ESTAT_ADD(rx_jabbers);
11742 ESTAT_ADD(rx_undersize_packets);
11743 ESTAT_ADD(rx_in_length_errors);
11744 ESTAT_ADD(rx_out_length_errors);
11745 ESTAT_ADD(rx_64_or_less_octet_packets);
11746 ESTAT_ADD(rx_65_to_127_octet_packets);
11747 ESTAT_ADD(rx_128_to_255_octet_packets);
11748 ESTAT_ADD(rx_256_to_511_octet_packets);
11749 ESTAT_ADD(rx_512_to_1023_octet_packets);
11750 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11751 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11752 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11753 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11754 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11755
11756 ESTAT_ADD(tx_octets);
11757 ESTAT_ADD(tx_collisions);
11758 ESTAT_ADD(tx_xon_sent);
11759 ESTAT_ADD(tx_xoff_sent);
11760 ESTAT_ADD(tx_flow_control);
11761 ESTAT_ADD(tx_mac_errors);
11762 ESTAT_ADD(tx_single_collisions);
11763 ESTAT_ADD(tx_mult_collisions);
11764 ESTAT_ADD(tx_deferred);
11765 ESTAT_ADD(tx_excessive_collisions);
11766 ESTAT_ADD(tx_late_collisions);
11767 ESTAT_ADD(tx_collide_2times);
11768 ESTAT_ADD(tx_collide_3times);
11769 ESTAT_ADD(tx_collide_4times);
11770 ESTAT_ADD(tx_collide_5times);
11771 ESTAT_ADD(tx_collide_6times);
11772 ESTAT_ADD(tx_collide_7times);
11773 ESTAT_ADD(tx_collide_8times);
11774 ESTAT_ADD(tx_collide_9times);
11775 ESTAT_ADD(tx_collide_10times);
11776 ESTAT_ADD(tx_collide_11times);
11777 ESTAT_ADD(tx_collide_12times);
11778 ESTAT_ADD(tx_collide_13times);
11779 ESTAT_ADD(tx_collide_14times);
11780 ESTAT_ADD(tx_collide_15times);
11781 ESTAT_ADD(tx_ucast_packets);
11782 ESTAT_ADD(tx_mcast_packets);
11783 ESTAT_ADD(tx_bcast_packets);
11784 ESTAT_ADD(tx_carrier_sense_errors);
11785 ESTAT_ADD(tx_discards);
11786 ESTAT_ADD(tx_errors);
11787
11788 ESTAT_ADD(dma_writeq_full);
11789 ESTAT_ADD(dma_write_prioq_full);
11790 ESTAT_ADD(rxbds_empty);
11791 ESTAT_ADD(rx_discards);
11792 ESTAT_ADD(rx_errors);
11793 ESTAT_ADD(rx_threshold_hit);
11794
11795 ESTAT_ADD(dma_readq_full);
11796 ESTAT_ADD(dma_read_prioq_full);
11797 ESTAT_ADD(tx_comp_queue_full);
11798
11799 ESTAT_ADD(ring_set_send_prod_index);
11800 ESTAT_ADD(ring_status_update);
11801 ESTAT_ADD(nic_irqs);
11802 ESTAT_ADD(nic_avoided_irqs);
11803 ESTAT_ADD(nic_tx_threshold_hit);
11804
11805 ESTAT_ADD(mbuf_lwm_thresh_hit);
11806 }
11807
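/* Fold the hardware statistics block into the generic
 * rtnl_link_stats64 layout, again adding in the counts saved across
 * the last reset.
 */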
11808 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11809 {
11810 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11811 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11812
11813 stats->rx_packets = old_stats->rx_packets +
11814 get_stat64(&hw_stats->rx_ucast_packets) +
11815 get_stat64(&hw_stats->rx_mcast_packets) +
11816 get_stat64(&hw_stats->rx_bcast_packets);
11817
11818 stats->tx_packets = old_stats->tx_packets +
11819 get_stat64(&hw_stats->tx_ucast_packets) +
11820 get_stat64(&hw_stats->tx_mcast_packets) +
11821 get_stat64(&hw_stats->tx_bcast_packets);
11822
11823 stats->rx_bytes = old_stats->rx_bytes +
11824 get_stat64(&hw_stats->rx_octets);
11825 stats->tx_bytes = old_stats->tx_bytes +
11826 get_stat64(&hw_stats->tx_octets);
11827
11828 stats->rx_errors = old_stats->rx_errors +
11829 get_stat64(&hw_stats->rx_errors);
11830 stats->tx_errors = old_stats->tx_errors +
11831 get_stat64(&hw_stats->tx_errors) +
11832 get_stat64(&hw_stats->tx_mac_errors) +
11833 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11834 get_stat64(&hw_stats->tx_discards);
11835
11836 stats->multicast = old_stats->multicast +
11837 get_stat64(&hw_stats->rx_mcast_packets);
11838 stats->collisions = old_stats->collisions +
11839 get_stat64(&hw_stats->tx_collisions);
11840
11841 stats->rx_length_errors = old_stats->rx_length_errors +
11842 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11843 get_stat64(&hw_stats->rx_undersize_packets);
11844
11845 stats->rx_frame_errors = old_stats->rx_frame_errors +
11846 get_stat64(&hw_stats->rx_align_errors);
11847 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11848 get_stat64(&hw_stats->tx_discards);
11849 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11850 get_stat64(&hw_stats->tx_carrier_sense_errors);
11851
11852 stats->rx_crc_errors = old_stats->rx_crc_errors +
11853 tg3_calc_crc_errors(tp);
11854
11855 stats->rx_missed_errors = old_stats->rx_missed_errors +
11856 get_stat64(&hw_stats->rx_discards);
11857
11858 stats->rx_dropped = tp->rx_dropped;
11859 stats->tx_dropped = tp->tx_dropped;
11860 }
11861
11862 static int tg3_get_regs_len(struct net_device *dev)
11863 {
11864 return TG3_REG_BLK_SIZE;
11865 }
11866
11867 static void tg3_get_regs(struct net_device *dev,
11868 struct ethtool_regs *regs, void *_p)
11869 {
11870 struct tg3 *tp = netdev_priv(dev);
11871
11872 regs->version = 0;
11873
11874 memset(_p, 0, TG3_REG_BLK_SIZE);
11875
11876 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11877 return;
11878
11879 tg3_full_lock(tp, 0);
11880
11881 tg3_dump_legacy_regs(tp, (u32 *)_p);
11882
11883 tg3_full_unlock(tp);
11884 }
11885
11886 static int tg3_get_eeprom_len(struct net_device *dev)
11887 {
11888 struct tg3 *tp = netdev_priv(dev);
11889
11890 return tp->nvram_size;
11891 }
11892
11893 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11894 {
11895 struct tg3 *tp = netdev_priv(dev);
11896 int ret, cpmu_restore = 0;
11897 u8 *pd;
11898 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11899 __be32 val;
11900
11901 if (tg3_flag(tp, NO_NVRAM))
11902 return -EINVAL;
11903
11904 offset = eeprom->offset;
11905 len = eeprom->len;
11906 eeprom->len = 0;
11907
11908 eeprom->magic = TG3_EEPROM_MAGIC;
11909
11910 /* Override clock, link aware and link idle modes */
11911 if (tg3_flag(tp, CPMU_PRESENT)) {
11912 cpmu_val = tr32(TG3_CPMU_CTRL);
11913 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11914 CPMU_CTRL_LINK_IDLE_MODE)) {
11915 tw32(TG3_CPMU_CTRL, cpmu_val &
11916 ~(CPMU_CTRL_LINK_AWARE_MODE |
11917 CPMU_CTRL_LINK_IDLE_MODE));
11918 cpmu_restore = 1;
11919 }
11920 }
11921 tg3_override_clk(tp);
11922
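	/* NVRAM is read in 32-bit words, so the request is split into an
	 * unaligned head, an aligned body, and an unaligned tail.
	 */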
11923 if (offset & 3) {
11924 /* adjustments to start on required 4 byte boundary */
11925 b_offset = offset & 3;
11926 b_count = 4 - b_offset;
11927 if (b_count > len) {
11928 /* e.g. offset=1, len=2: the request ends inside this word */
11929 b_count = len;
11930 }
11931 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11932 if (ret)
11933 goto eeprom_done;
11934 memcpy(data, ((char *)&val) + b_offset, b_count);
11935 len -= b_count;
11936 offset += b_count;
11937 eeprom->len += b_count;
11938 }
11939
11940 /* read bytes up to the last 4 byte boundary */
11941 pd = &data[eeprom->len];
11942 for (i = 0; i < (len - (len & 3)); i += 4) {
11943 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11944 if (ret) {
11945 if (i)
11946 i -= 4;
11947 eeprom->len += i;
11948 goto eeprom_done;
11949 }
11950 memcpy(pd + i, &val, 4);
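		/* NVRAM reads are slow; yield the CPU periodically and
		 * bail out early if the caller has a signal pending.
		 */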
11951 if (need_resched()) {
11952 if (signal_pending(current)) {
11953 eeprom->len += i;
11954 ret = -EINTR;
11955 goto eeprom_done;
11956 }
11957 cond_resched();
11958 }
11959 }
11960 eeprom->len += i;
11961
11962 if (len & 3) {
11963 /* read last bytes not ending on 4 byte boundary */
11964 pd = &data[eeprom->len];
11965 b_count = len & 3;
11966 b_offset = offset + len - b_count;
11967 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11968 if (ret)
11969 goto eeprom_done;
11970 memcpy(pd, &val, b_count);
11971 eeprom->len += b_count;
11972 }
11973 ret = 0;
11974
11975 eeprom_done:
11976 /* Restore clock, link aware and link idle modes */
11977 tg3_restore_clk(tp);
11978 if (cpmu_restore)
11979 tw32(TG3_CPMU_CTRL, cpmu_val);
11980
11981 return ret;
11982 }
11983
11984 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11985 {
11986 struct tg3 *tp = netdev_priv(dev);
11987 int ret;
11988 u32 offset, len, b_offset, odd_len;
11989 u8 *buf;
11990 __be32 start, end;
11991
11992 if (tg3_flag(tp, NO_NVRAM) ||
11993 eeprom->magic != TG3_EEPROM_MAGIC)
11994 return -EINVAL;
11995
11996 offset = eeprom->offset;
11997 len = eeprom->len;
11998
11999 if ((b_offset = (offset & 3))) {
12000 /* adjustments to start on required 4 byte boundary */
12001 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12002 if (ret)
12003 return ret;
12004 len += b_offset;
12005 offset &= ~3;
12006 if (len < 4)
12007 len = 4;
12008 }
12009
12010 odd_len = 0;
12011 if (len & 3) {
12012 /* adjustments to end on required 4 byte boundary */
12013 odd_len = 1;
12014 len = (len + 3) & ~3;
12015 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12016 if (ret)
12017 return ret;
12018 }
12019
12020 buf = data;
12021 if (b_offset || odd_len) {
12022 buf = kmalloc(len, GFP_KERNEL);
12023 if (!buf)
12024 return -ENOMEM;
12025 if (b_offset)
12026 memcpy(buf, &start, 4);
12027 if (odd_len)
12028 memcpy(buf+len-4, &end, 4);
12029 memcpy(buf + b_offset, data, eeprom->len);
12030 }
12031
12032 ret = tg3_nvram_write_block(tp, offset, len, buf);
12033
12034 if (buf != data)
12035 kfree(buf);
12036
12037 return ret;
12038 }
12039
12040 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12041 {
12042 struct tg3 *tp = netdev_priv(dev);
12043
12044 if (tg3_flag(tp, USE_PHYLIB)) {
12045 struct phy_device *phydev;
12046 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12047 return -EAGAIN;
12048 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12049 return phy_ethtool_gset(phydev, cmd);
12050 }
12051
12052 cmd->supported = (SUPPORTED_Autoneg);
12053
12054 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12055 cmd->supported |= (SUPPORTED_1000baseT_Half |
12056 SUPPORTED_1000baseT_Full);
12057
12058 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12059 cmd->supported |= (SUPPORTED_100baseT_Half |
12060 SUPPORTED_100baseT_Full |
12061 SUPPORTED_10baseT_Half |
12062 SUPPORTED_10baseT_Full |
12063 SUPPORTED_TP);
12064 cmd->port = PORT_TP;
12065 } else {
12066 cmd->supported |= SUPPORTED_FIBRE;
12067 cmd->port = PORT_FIBRE;
12068 }
12069
12070 cmd->advertising = tp->link_config.advertising;
12071 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12072 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12073 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12074 cmd->advertising |= ADVERTISED_Pause;
12075 } else {
12076 cmd->advertising |= ADVERTISED_Pause |
12077 ADVERTISED_Asym_Pause;
12078 }
12079 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12080 cmd->advertising |= ADVERTISED_Asym_Pause;
12081 }
12082 }
12083 if (netif_running(dev) && tp->link_up) {
12084 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
12085 cmd->duplex = tp->link_config.active_duplex;
12086 cmd->lp_advertising = tp->link_config.rmt_adv;
12087 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12088 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12089 cmd->eth_tp_mdix = ETH_TP_MDI_X;
12090 else
12091 cmd->eth_tp_mdix = ETH_TP_MDI;
12092 }
12093 } else {
12094 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
12095 cmd->duplex = DUPLEX_UNKNOWN;
12096 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
12097 }
12098 cmd->phy_address = tp->phy_addr;
12099 cmd->transceiver = XCVR_INTERNAL;
12100 cmd->autoneg = tp->link_config.autoneg;
12101 cmd->maxtxpkt = 0;
12102 cmd->maxrxpkt = 0;
12103 return 0;
12104 }
12105
12106 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12107 {
12108 struct tg3 *tp = netdev_priv(dev);
12109 u32 speed = ethtool_cmd_speed(cmd);
12110
12111 if (tg3_flag(tp, USE_PHYLIB)) {
12112 struct phy_device *phydev;
12113 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12114 return -EAGAIN;
12115 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12116 return phy_ethtool_sset(phydev, cmd);
12117 }
12118
12119 if (cmd->autoneg != AUTONEG_ENABLE &&
12120 cmd->autoneg != AUTONEG_DISABLE)
12121 return -EINVAL;
12122
12123 if (cmd->autoneg == AUTONEG_DISABLE &&
12124 cmd->duplex != DUPLEX_FULL &&
12125 cmd->duplex != DUPLEX_HALF)
12126 return -EINVAL;
12127
12128 if (cmd->autoneg == AUTONEG_ENABLE) {
12129 u32 mask = ADVERTISED_Autoneg |
12130 ADVERTISED_Pause |
12131 ADVERTISED_Asym_Pause;
12132
12133 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12134 mask |= ADVERTISED_1000baseT_Half |
12135 ADVERTISED_1000baseT_Full;
12136
12137 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12138 mask |= ADVERTISED_100baseT_Half |
12139 ADVERTISED_100baseT_Full |
12140 ADVERTISED_10baseT_Half |
12141 ADVERTISED_10baseT_Full |
12142 ADVERTISED_TP;
12143 else
12144 mask |= ADVERTISED_FIBRE;
12145
12146 if (cmd->advertising & ~mask)
12147 return -EINVAL;
12148
12149 mask &= (ADVERTISED_1000baseT_Half |
12150 ADVERTISED_1000baseT_Full |
12151 ADVERTISED_100baseT_Half |
12152 ADVERTISED_100baseT_Full |
12153 ADVERTISED_10baseT_Half |
12154 ADVERTISED_10baseT_Full);
12155
12156 cmd->advertising &= mask;
12157 } else {
12158 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12159 if (speed != SPEED_1000)
12160 return -EINVAL;
12161
12162 if (cmd->duplex != DUPLEX_FULL)
12163 return -EINVAL;
12164 } else {
12165 if (speed != SPEED_100 &&
12166 speed != SPEED_10)
12167 return -EINVAL;
12168 }
12169 }
12170
12171 tg3_full_lock(tp, 0);
12172
12173 tp->link_config.autoneg = cmd->autoneg;
12174 if (cmd->autoneg == AUTONEG_ENABLE) {
12175 tp->link_config.advertising = (cmd->advertising |
12176 ADVERTISED_Autoneg);
12177 tp->link_config.speed = SPEED_UNKNOWN;
12178 tp->link_config.duplex = DUPLEX_UNKNOWN;
12179 } else {
12180 tp->link_config.advertising = 0;
12181 tp->link_config.speed = speed;
12182 tp->link_config.duplex = cmd->duplex;
12183 }
12184
12185 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12186
12187 tg3_warn_mgmt_link_flap(tp);
12188
12189 if (netif_running(dev))
12190 tg3_setup_phy(tp, true);
12191
12192 tg3_full_unlock(tp);
12193
12194 return 0;
12195 }
12196
12197 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12198 {
12199 struct tg3 *tp = netdev_priv(dev);
12200
12201 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12202 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12203 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12204 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12205 }
12206
12207 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12208 {
12209 struct tg3 *tp = netdev_priv(dev);
12210
12211 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12212 wol->supported = WAKE_MAGIC;
12213 else
12214 wol->supported = 0;
12215 wol->wolopts = 0;
12216 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12217 wol->wolopts = WAKE_MAGIC;
12218 memset(&wol->sopass, 0, sizeof(wol->sopass));
12219 }
12220
12221 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12222 {
12223 struct tg3 *tp = netdev_priv(dev);
12224 struct device *dp = &tp->pdev->dev;
12225
12226 if (wol->wolopts & ~WAKE_MAGIC)
12227 return -EINVAL;
12228 if ((wol->wolopts & WAKE_MAGIC) &&
12229 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12230 return -EINVAL;
12231
12232 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12233
12234 if (device_may_wakeup(dp))
12235 tg3_flag_set(tp, WOL_ENABLE);
12236 else
12237 tg3_flag_clear(tp, WOL_ENABLE);
12238
12239 return 0;
12240 }
12241
12242 static u32 tg3_get_msglevel(struct net_device *dev)
12243 {
12244 struct tg3 *tp = netdev_priv(dev);
12245 return tp->msg_enable;
12246 }
12247
12248 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12249 {
12250 struct tg3 *tp = netdev_priv(dev);
12251 tp->msg_enable = value;
12252 }
12253
12254 static int tg3_nway_reset(struct net_device *dev)
12255 {
12256 struct tg3 *tp = netdev_priv(dev);
12257 int r;
12258
12259 if (!netif_running(dev))
12260 return -EAGAIN;
12261
12262 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12263 return -EINVAL;
12264
12265 tg3_warn_mgmt_link_flap(tp);
12266
12267 if (tg3_flag(tp, USE_PHYLIB)) {
12268 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12269 return -EAGAIN;
12270 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12271 } else {
12272 u32 bmcr;
12273
12274 spin_lock_bh(&tp->lock);
12275 r = -EINVAL;
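		/* Note: BMCR is read twice; the first read is presumably a
		 * dummy to flush stale data, and only the second result is
		 * checked.
		 */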
12276 tg3_readphy(tp, MII_BMCR, &bmcr);
12277 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12278 ((bmcr & BMCR_ANENABLE) ||
12279 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12280 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12281 BMCR_ANENABLE);
12282 r = 0;
12283 }
12284 spin_unlock_bh(&tp->lock);
12285 }
12286
12287 return r;
12288 }
12289
12290 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12291 {
12292 struct tg3 *tp = netdev_priv(dev);
12293
12294 ering->rx_max_pending = tp->rx_std_ring_mask;
12295 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12296 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12297 else
12298 ering->rx_jumbo_max_pending = 0;
12299
12300 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12301
12302 ering->rx_pending = tp->rx_pending;
12303 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12304 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12305 else
12306 ering->rx_jumbo_pending = 0;
12307
12308 ering->tx_pending = tp->napi[0].tx_pending;
12309 }
12310
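/* A tx ring must be able to hold at least one maximally-fragmented
 * skb (MAX_SKB_FRAGS), with extra headroom on devices needing the
 * TSO_BUG segmentation workaround; undersized requests are rejected.
 */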
12311 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12312 {
12313 struct tg3 *tp = netdev_priv(dev);
12314 int i, irq_sync = 0, err = 0;
12315
12316 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12317 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12318 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12319 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12320 (tg3_flag(tp, TSO_BUG) &&
12321 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12322 return -EINVAL;
12323
12324 if (netif_running(dev)) {
12325 tg3_phy_stop(tp);
12326 tg3_netif_stop(tp);
12327 irq_sync = 1;
12328 }
12329
12330 tg3_full_lock(tp, irq_sync);
12331
12332 tp->rx_pending = ering->rx_pending;
12333
12334 if (tg3_flag(tp, MAX_RXPEND_64) &&
12335 tp->rx_pending > 63)
12336 tp->rx_pending = 63;
12337
12338 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12339 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12340
12341 for (i = 0; i < tp->irq_max; i++)
12342 tp->napi[i].tx_pending = ering->tx_pending;
12343
12344 if (netif_running(dev)) {
12345 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12346 err = tg3_restart_hw(tp, false);
12347 if (!err)
12348 tg3_netif_start(tp);
12349 }
12350
12351 tg3_full_unlock(tp);
12352
12353 if (irq_sync && !err)
12354 tg3_phy_start(tp);
12355
12356 return err;
12357 }
12358
12359 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12360 {
12361 struct tg3 *tp = netdev_priv(dev);
12362
12363 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12364
12365 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12366 epause->rx_pause = 1;
12367 else
12368 epause->rx_pause = 0;
12369
12370 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12371 epause->tx_pause = 1;
12372 else
12373 epause->tx_pause = 0;
12374 }
12375
12376 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12377 {
12378 struct tg3 *tp = netdev_priv(dev);
12379 int err = 0;
12380
12381 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12382 tg3_warn_mgmt_link_flap(tp);
12383
12384 if (tg3_flag(tp, USE_PHYLIB)) {
12385 u32 newadv;
12386 struct phy_device *phydev;
12387
12388 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12389
12390 if (!(phydev->supported & SUPPORTED_Pause) ||
12391 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12392 (epause->rx_pause != epause->tx_pause)))
12393 return -EINVAL;
12394
12395 tp->link_config.flowctrl = 0;
12396 if (epause->rx_pause) {
12397 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12398
12399 if (epause->tx_pause) {
12400 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12401 newadv = ADVERTISED_Pause;
12402 } else
12403 newadv = ADVERTISED_Pause |
12404 ADVERTISED_Asym_Pause;
12405 } else if (epause->tx_pause) {
12406 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12407 newadv = ADVERTISED_Asym_Pause;
12408 } else
12409 newadv = 0;
12410
12411 if (epause->autoneg)
12412 tg3_flag_set(tp, PAUSE_AUTONEG);
12413 else
12414 tg3_flag_clear(tp, PAUSE_AUTONEG);
12415
12416 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12417 u32 oldadv = phydev->advertising &
12418 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12419 if (oldadv != newadv) {
12420 phydev->advertising &=
12421 ~(ADVERTISED_Pause |
12422 ADVERTISED_Asym_Pause);
12423 phydev->advertising |= newadv;
12424 if (phydev->autoneg) {
12425 /*
12426 * Always renegotiate the link to
12427 * inform our link partner of our
12428 * flow control settings, even if the
12429 * flow control is forced. Let
12430 * tg3_adjust_link() do the final
12431 * flow control setup.
12432 */
12433 return phy_start_aneg(phydev);
12434 }
12435 }
12436
12437 if (!epause->autoneg)
12438 tg3_setup_flow_control(tp, 0, 0);
12439 } else {
12440 tp->link_config.advertising &=
12441 ~(ADVERTISED_Pause |
12442 ADVERTISED_Asym_Pause);
12443 tp->link_config.advertising |= newadv;
12444 }
12445 } else {
12446 int irq_sync = 0;
12447
12448 if (netif_running(dev)) {
12449 tg3_netif_stop(tp);
12450 irq_sync = 1;
12451 }
12452
12453 tg3_full_lock(tp, irq_sync);
12454
12455 if (epause->autoneg)
12456 tg3_flag_set(tp, PAUSE_AUTONEG);
12457 else
12458 tg3_flag_clear(tp, PAUSE_AUTONEG);
12459 if (epause->rx_pause)
12460 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12461 else
12462 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12463 if (epause->tx_pause)
12464 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12465 else
12466 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12467
12468 if (netif_running(dev)) {
12469 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12470 err = tg3_restart_hw(tp, false);
12471 if (!err)
12472 tg3_netif_start(tp);
12473 }
12474
12475 tg3_full_unlock(tp);
12476 }
12477
12478 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12479
12480 return err;
12481 }
12482
12483 static int tg3_get_sset_count(struct net_device *dev, int sset)
12484 {
12485 switch (sset) {
12486 case ETH_SS_TEST:
12487 return TG3_NUM_TEST;
12488 case ETH_SS_STATS:
12489 return TG3_NUM_STATS;
12490 default:
12491 return -EOPNOTSUPP;
12492 }
12493 }
12494
12495 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12496 u32 *rules __always_unused)
12497 {
12498 struct tg3 *tp = netdev_priv(dev);
12499
12500 if (!tg3_flag(tp, SUPPORT_MSIX))
12501 return -EOPNOTSUPP;
12502
12503 switch (info->cmd) {
12504 case ETHTOOL_GRXRINGS:
12505 if (netif_running(tp->dev))
12506 info->data = tp->rxq_cnt;
12507 else {
12508 info->data = num_online_cpus();
12509 if (info->data > TG3_RSS_MAX_NUM_QS)
12510 info->data = TG3_RSS_MAX_NUM_QS;
12511 }
12512
12513 /* The first interrupt vector only
12514 * handles link interrupts.
12515 */
12516 info->data -= 1;
12517 return 0;
12518
12519 default:
12520 return -EOPNOTSUPP;
12521 }
12522 }
12523
12524 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12525 {
12526 u32 size = 0;
12527 struct tg3 *tp = netdev_priv(dev);
12528
12529 if (tg3_flag(tp, SUPPORT_MSIX))
12530 size = TG3_RSS_INDIR_TBL_SIZE;
12531
12532 return size;
12533 }
12534
12535 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
12536 {
12537 struct tg3 *tp = netdev_priv(dev);
12538 int i;
12539
12540 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12541 indir[i] = tp->rss_ind_tbl[i];
12542
12543 return 0;
12544 }
12545
12546 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
12547 {
12548 struct tg3 *tp = netdev_priv(dev);
12549 size_t i;
12550
12551 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12552 tp->rss_ind_tbl[i] = indir[i];
12553
12554 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12555 return 0;
12556
12557 /* It is legal to write the indirection
12558 * table while the device is running.
12559 */
12560 tg3_full_lock(tp, 0);
12561 tg3_rss_write_indir_tbl(tp);
12562 tg3_full_unlock(tp);
12563
12564 return 0;
12565 }
12566
12567 static void tg3_get_channels(struct net_device *dev,
12568 struct ethtool_channels *channel)
12569 {
12570 struct tg3 *tp = netdev_priv(dev);
12571 u32 deflt_qs = netif_get_num_default_rss_queues();
12572
12573 channel->max_rx = tp->rxq_max;
12574 channel->max_tx = tp->txq_max;
12575
12576 if (netif_running(dev)) {
12577 channel->rx_count = tp->rxq_cnt;
12578 channel->tx_count = tp->txq_cnt;
12579 } else {
12580 if (tp->rxq_req)
12581 channel->rx_count = tp->rxq_req;
12582 else
12583 channel->rx_count = min(deflt_qs, tp->rxq_max);
12584
12585 if (tp->txq_req)
12586 channel->tx_count = tp->txq_req;
12587 else
12588 channel->tx_count = min(deflt_qs, tp->txq_max);
12589 }
12590 }
12591
12592 static int tg3_set_channels(struct net_device *dev,
12593 struct ethtool_channels *channel)
12594 {
12595 struct tg3 *tp = netdev_priv(dev);
12596
12597 if (!tg3_flag(tp, SUPPORT_MSIX))
12598 return -EOPNOTSUPP;
12599
12600 if (channel->rx_count > tp->rxq_max ||
12601 channel->tx_count > tp->txq_max)
12602 return -EINVAL;
12603
12604 tp->rxq_req = channel->rx_count;
12605 tp->txq_req = channel->tx_count;
12606
12607 if (!netif_running(dev))
12608 return 0;
12609
12610 tg3_stop(tp);
12611
12612 tg3_carrier_off(tp);
12613
12614 tg3_start(tp, true, false, false);
12615
12616 return 0;
12617 }
12618
12619 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12620 {
12621 switch (stringset) {
12622 case ETH_SS_STATS:
12623 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12624 break;
12625 case ETH_SS_TEST:
12626 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12627 break;
12628 default:
12629 WARN_ON(1); /* unknown stringset; nothing to copy */
12630 break;
12631 }
12632 }
12633
12634 static int tg3_set_phys_id(struct net_device *dev,
12635 enum ethtool_phys_id_state state)
12636 {
12637 struct tg3 *tp = netdev_priv(dev);
12638
12639 if (!netif_running(tp->dev))
12640 return -EAGAIN;
12641
12642 switch (state) {
12643 case ETHTOOL_ID_ACTIVE:
12644 return 1; /* cycle on/off once per second */
12645
12646 case ETHTOOL_ID_ON:
12647 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12648 LED_CTRL_1000MBPS_ON |
12649 LED_CTRL_100MBPS_ON |
12650 LED_CTRL_10MBPS_ON |
12651 LED_CTRL_TRAFFIC_OVERRIDE |
12652 LED_CTRL_TRAFFIC_BLINK |
12653 LED_CTRL_TRAFFIC_LED);
12654 break;
12655
12656 case ETHTOOL_ID_OFF:
12657 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12658 LED_CTRL_TRAFFIC_OVERRIDE);
12659 break;
12660
12661 case ETHTOOL_ID_INACTIVE:
12662 tw32(MAC_LED_CTRL, tp->led_ctrl);
12663 break;
12664 }
12665
12666 return 0;
12667 }
12668
12669 static void tg3_get_ethtool_stats(struct net_device *dev,
12670 struct ethtool_stats *estats, u64 *tmp_stats)
12671 {
12672 struct tg3 *tp = netdev_priv(dev);
12673
12674 if (tp->hw_stats)
12675 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12676 else
12677 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12678 }
12679
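/* Locate and read the VPD block: prefer an extended-VPD directory
 * entry in NVRAM, fall back to the fixed NVRAM VPD offset, and on
 * devices whose NVRAM lacks the standard magic read it through the
 * PCI VPD capability.
 */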
12680 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12681 {
12682 int i;
12683 __be32 *buf;
12684 u32 offset = 0, len = 0;
12685 u32 magic, val;
12686
12687 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12688 return NULL;
12689
12690 if (magic == TG3_EEPROM_MAGIC) {
12691 for (offset = TG3_NVM_DIR_START;
12692 offset < TG3_NVM_DIR_END;
12693 offset += TG3_NVM_DIRENT_SIZE) {
12694 if (tg3_nvram_read(tp, offset, &val))
12695 return NULL;
12696
12697 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12698 TG3_NVM_DIRTYPE_EXTVPD)
12699 break;
12700 }
12701
12702 if (offset != TG3_NVM_DIR_END) {
12703 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12704 if (tg3_nvram_read(tp, offset + 4, &offset))
12705 return NULL;
12706
12707 offset = tg3_nvram_logical_addr(tp, offset);
12708 }
12709 }
12710
12711 if (!offset || !len) {
12712 offset = TG3_NVM_VPD_OFF;
12713 len = TG3_NVM_VPD_LEN;
12714 }
12715
12716 buf = kmalloc(len, GFP_KERNEL);
12717 if (buf == NULL)
12718 return NULL;
12719
12720 if (magic == TG3_EEPROM_MAGIC) {
12721 for (i = 0; i < len; i += 4) {
12722 /* VPD data is stored little-endian in NVRAM. Use the
12723 * big-endian read routine so the bytes land in memory in
12724 * NVRAM order rather than being byte-swapped.
12725 */
12726 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12727 goto error;
12728 }
12729 } else {
12730 u8 *ptr;
12731 ssize_t cnt;
12732 unsigned int pos = 0;
12733
12734 ptr = (u8 *)&buf[0];
12735 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12736 cnt = pci_read_vpd(tp->pdev, pos,
12737 len - pos, ptr);
12738 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12739 cnt = 0;
12740 else if (cnt < 0)
12741 goto error;
12742 }
12743 if (pos != len)
12744 goto error;
12745 }
12746
12747 *vpdlen = len;
12748
12749 return buf;
12750
12751 error:
12752 kfree(buf);
12753 return NULL;
12754 }
12755
12756 #define NVRAM_TEST_SIZE 0x100
12757 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12758 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12759 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12760 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12761 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12762 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12763 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12764 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12765
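/* NVRAM self test: determine the image format from its magic number,
 * then verify the matching integrity data: a simple byte checksum for
 * selfboot images, parity bits for the hardware selfboot format, or
 * CRCs plus the VPD checksum keyword for legacy images.
 */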
12766 static int tg3_test_nvram(struct tg3 *tp)
12767 {
12768 u32 csum, magic, len;
12769 __be32 *buf;
12770 int i, j, k, err = 0, size;
12771
12772 if (tg3_flag(tp, NO_NVRAM))
12773 return 0;
12774
12775 if (tg3_nvram_read(tp, 0, &magic) != 0)
12776 return -EIO;
12777
12778 if (magic == TG3_EEPROM_MAGIC)
12779 size = NVRAM_TEST_SIZE;
12780 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12781 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12782 TG3_EEPROM_SB_FORMAT_1) {
12783 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12784 case TG3_EEPROM_SB_REVISION_0:
12785 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12786 break;
12787 case TG3_EEPROM_SB_REVISION_2:
12788 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12789 break;
12790 case TG3_EEPROM_SB_REVISION_3:
12791 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12792 break;
12793 case TG3_EEPROM_SB_REVISION_4:
12794 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12795 break;
12796 case TG3_EEPROM_SB_REVISION_5:
12797 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12798 break;
12799 case TG3_EEPROM_SB_REVISION_6:
12800 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12801 break;
12802 default:
12803 return -EIO;
12804 }
12805 } else
12806 return 0;
12807 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12808 size = NVRAM_SELFBOOT_HW_SIZE;
12809 else
12810 return -EIO;
12811
12812 buf = kmalloc(size, GFP_KERNEL);
12813 if (buf == NULL)
12814 return -ENOMEM;
12815
12816 err = -EIO;
12817 for (i = 0, j = 0; i < size; i += 4, j++) {
12818 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12819 if (err)
12820 break;
12821 }
12822 if (i < size)
12823 goto out;
12824
12825 /* Selfboot format */
12826 magic = be32_to_cpu(buf[0]);
12827 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12828 TG3_EEPROM_MAGIC_FW) {
12829 u8 *buf8 = (u8 *) buf, csum8 = 0;
12830
12831 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12832 TG3_EEPROM_SB_REVISION_2) {
12833 /* For rev 2, the csum doesn't include the MBA (boot agent) word. */
12834 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12835 csum8 += buf8[i];
12836 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12837 csum8 += buf8[i];
12838 } else {
12839 for (i = 0; i < size; i++)
12840 csum8 += buf8[i];
12841 }
12842
12843 if (csum8 == 0) {
12844 err = 0;
12845 goto out;
12846 }
12847
12848 err = -EIO;
12849 goto out;
12850 }
12851
12852 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12853 TG3_EEPROM_MAGIC_HW) {
12854 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12855 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12856 u8 *buf8 = (u8 *) buf;
12857
12858 /* Separate the parity bits and the data bytes. */
12859 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12860 if ((i == 0) || (i == 8)) {
12861 int l;
12862 u8 msk;
12863
12864 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12865 parity[k++] = buf8[i] & msk;
12866 i++;
12867 } else if (i == 16) {
12868 int l;
12869 u8 msk;
12870
12871 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12872 parity[k++] = buf8[i] & msk;
12873 i++;
12874
12875 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12876 parity[k++] = buf8[i] & msk;
12877 i++;
12878 }
12879 data[j++] = buf8[i];
12880 }
12881
12882 err = -EIO;
12883 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12884 u8 hw8 = hweight8(data[i]);
12885
12886 if ((hw8 & 0x1) && parity[i])
12887 goto out;
12888 else if (!(hw8 & 0x1) && !parity[i])
12889 goto out;
12890 }
12891 err = 0;
12892 goto out;
12893 }
12894
12895 err = -EIO;
12896
12897 /* Bootstrap checksum at offset 0x10 */
12898 csum = calc_crc((unsigned char *) buf, 0x10);
12899 if (csum != le32_to_cpu(buf[0x10/4]))
12900 goto out;
12901
12902 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12903 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12904 if (csum != le32_to_cpu(buf[0xfc/4]))
12905 goto out;
12906
12907 kfree(buf);
12908
12909 buf = tg3_vpd_readblock(tp, &len);
12910 if (!buf)
12911 return -ENOMEM;
12912
12913 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12914 if (i > 0) {
12915 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12916 if (j < 0)
12917 goto out;
12918
12919 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12920 goto out;
12921
12922 i += PCI_VPD_LRDT_TAG_SIZE;
12923 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12924 PCI_VPD_RO_KEYWORD_CHKSUM);
12925 if (j > 0) {
12926 u8 csum8 = 0;
12927
12928 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12929
12930 for (i = 0; i <= j; i++)
12931 csum8 += ((u8 *)buf)[i];
12932
12933 if (csum8)
12934 goto out;
12935 }
12936 }
12937
12938 err = 0;
12939
12940 out:
12941 kfree(buf);
12942 return err;
12943 }
12944
12945 #define TG3_SERDES_TIMEOUT_SEC 2
12946 #define TG3_COPPER_TIMEOUT_SEC 6
12947
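/* Link test: poll for link-up once a second, allowing copper autoneg
 * more time (6s) than serdes (2s) before declaring failure.
 */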
12948 static int tg3_test_link(struct tg3 *tp)
12949 {
12950 int i, max;
12951
12952 if (!netif_running(tp->dev))
12953 return -ENODEV;
12954
12955 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12956 max = TG3_SERDES_TIMEOUT_SEC;
12957 else
12958 max = TG3_COPPER_TIMEOUT_SEC;
12959
12960 for (i = 0; i < max; i++) {
12961 if (tp->link_up)
12962 return 0;
12963
12964 if (msleep_interruptible(1000))
12965 break;
12966 }
12967
12968 return -EIO;
12969 }
12970
12971 /* Only test the commonly used registers */
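/* Each table entry is filtered by ASIC class via the TG3_FL_* flags,
 * then probed by writing 0 and the full writable mask while checking
 * that read-only bits hold their value and writable bits latch.
 */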
12972 static int tg3_test_registers(struct tg3 *tp)
12973 {
12974 int i, is_5705, is_5750;
12975 u32 offset, read_mask, write_mask, val, save_val, read_val;
12976 static struct {
12977 u16 offset;
12978 u16 flags;
12979 #define TG3_FL_5705 0x1
12980 #define TG3_FL_NOT_5705 0x2
12981 #define TG3_FL_NOT_5788 0x4
12982 #define TG3_FL_NOT_5750 0x8
12983 u32 read_mask;
12984 u32 write_mask;
12985 } reg_tbl[] = {
12986 /* MAC Control Registers */
12987 { MAC_MODE, TG3_FL_NOT_5705,
12988 0x00000000, 0x00ef6f8c },
12989 { MAC_MODE, TG3_FL_5705,
12990 0x00000000, 0x01ef6b8c },
12991 { MAC_STATUS, TG3_FL_NOT_5705,
12992 0x03800107, 0x00000000 },
12993 { MAC_STATUS, TG3_FL_5705,
12994 0x03800100, 0x00000000 },
12995 { MAC_ADDR_0_HIGH, 0x0000,
12996 0x00000000, 0x0000ffff },
12997 { MAC_ADDR_0_LOW, 0x0000,
12998 0x00000000, 0xffffffff },
12999 { MAC_RX_MTU_SIZE, 0x0000,
13000 0x00000000, 0x0000ffff },
13001 { MAC_TX_MODE, 0x0000,
13002 0x00000000, 0x00000070 },
13003 { MAC_TX_LENGTHS, 0x0000,
13004 0x00000000, 0x00003fff },
13005 { MAC_RX_MODE, TG3_FL_NOT_5705,
13006 0x00000000, 0x000007fc },
13007 { MAC_RX_MODE, TG3_FL_5705,
13008 0x00000000, 0x000007dc },
13009 { MAC_HASH_REG_0, 0x0000,
13010 0x00000000, 0xffffffff },
13011 { MAC_HASH_REG_1, 0x0000,
13012 0x00000000, 0xffffffff },
13013 { MAC_HASH_REG_2, 0x0000,
13014 0x00000000, 0xffffffff },
13015 { MAC_HASH_REG_3, 0x0000,
13016 0x00000000, 0xffffffff },
13017
13018 /* Receive Data and Receive BD Initiator Control Registers. */
13019 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13020 0x00000000, 0xffffffff },
13021 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13022 0x00000000, 0xffffffff },
13023 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13024 0x00000000, 0x00000003 },
13025 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13026 0x00000000, 0xffffffff },
13027 { RCVDBDI_STD_BD+0, 0x0000,
13028 0x00000000, 0xffffffff },
13029 { RCVDBDI_STD_BD+4, 0x0000,
13030 0x00000000, 0xffffffff },
13031 { RCVDBDI_STD_BD+8, 0x0000,
13032 0x00000000, 0xffff0002 },
13033 { RCVDBDI_STD_BD+0xc, 0x0000,
13034 0x00000000, 0xffffffff },
13035
13036 /* Receive BD Initiator Control Registers. */
13037 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13038 0x00000000, 0xffffffff },
13039 { RCVBDI_STD_THRESH, TG3_FL_5705,
13040 0x00000000, 0x000003ff },
13041 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13042 0x00000000, 0xffffffff },
13043
13044 /* Host Coalescing Control Registers. */
13045 { HOSTCC_MODE, TG3_FL_NOT_5705,
13046 0x00000000, 0x00000004 },
13047 { HOSTCC_MODE, TG3_FL_5705,
13048 0x00000000, 0x000000f6 },
13049 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13050 0x00000000, 0xffffffff },
13051 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13052 0x00000000, 0x000003ff },
13053 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13054 0x00000000, 0xffffffff },
13055 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13056 0x00000000, 0x000003ff },
13057 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13058 0x00000000, 0xffffffff },
13059 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13060 0x00000000, 0x000000ff },
13061 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13062 0x00000000, 0xffffffff },
13063 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13064 0x00000000, 0x000000ff },
13065 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13066 0x00000000, 0xffffffff },
13067 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13068 0x00000000, 0xffffffff },
13069 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13070 0x00000000, 0xffffffff },
13071 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13072 0x00000000, 0x000000ff },
13073 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13074 0x00000000, 0xffffffff },
13075 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13076 0x00000000, 0x000000ff },
13077 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13078 0x00000000, 0xffffffff },
13079 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13080 0x00000000, 0xffffffff },
13081 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13082 0x00000000, 0xffffffff },
13083 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13084 0x00000000, 0xffffffff },
13085 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13086 0x00000000, 0xffffffff },
13087 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13088 0xffffffff, 0x00000000 },
13089 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13090 0xffffffff, 0x00000000 },
13091
13092 /* Buffer Manager Control Registers. */
13093 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13094 0x00000000, 0x007fff80 },
13095 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13096 0x00000000, 0x007fffff },
13097 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13098 0x00000000, 0x0000003f },
13099 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13100 0x00000000, 0x000001ff },
13101 { BUFMGR_MB_HIGH_WATER, 0x0000,
13102 0x00000000, 0x000001ff },
13103 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13104 0xffffffff, 0x00000000 },
13105 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13106 0xffffffff, 0x00000000 },
13107
13108 /* Mailbox Registers */
13109 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13110 0x00000000, 0x000001ff },
13111 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13112 0x00000000, 0x000001ff },
13113 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13114 0x00000000, 0x000007ff },
13115 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13116 0x00000000, 0x000001ff },
13117
13118 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13119 };
13120
13121 is_5705 = is_5750 = 0;
13122 if (tg3_flag(tp, 5705_PLUS)) {
13123 is_5705 = 1;
13124 if (tg3_flag(tp, 5750_PLUS))
13125 is_5750 = 1;
13126 }
13127
13128 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13129 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13130 continue;
13131
13132 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13133 continue;
13134
13135 if (tg3_flag(tp, IS_5788) &&
13136 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13137 continue;
13138
13139 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13140 continue;
13141
13142 offset = (u32) reg_tbl[i].offset;
13143 read_mask = reg_tbl[i].read_mask;
13144 write_mask = reg_tbl[i].write_mask;
13145
13146 /* Save the original register content */
13147 save_val = tr32(offset);
13148
13149 /* Determine the read-only value. */
13150 read_val = save_val & read_mask;
13151
13152 /* Write zero to the register, then make sure the read-only bits
13153 * are not changed and the read/write bits are all zeros.
13154 */
13155 tw32(offset, 0);
13156
13157 val = tr32(offset);
13158
13159 /* Test the read-only and read/write bits. */
13160 if (((val & read_mask) != read_val) || (val & write_mask))
13161 goto out;
13162
13163 /* Write ones to all the bits defined by RdMask and WrMask, then
13164 * make sure the read-only bits are not changed and the
13165 * read/write bits are all ones.
13166 */
13167 tw32(offset, read_mask | write_mask);
13168
13169 val = tr32(offset);
13170
13171 /* Test the read-only bits. */
13172 if ((val & read_mask) != read_val)
13173 goto out;
13174
13175 /* Test the read/write bits. */
13176 if ((val & write_mask) != write_mask)
13177 goto out;
13178
13179 tw32(offset, save_val);
13180 }
13181
13182 return 0;
13183
13184 out:
13185 if (netif_msg_hw(tp))
13186 netdev_err(tp->dev,
13187 "Register test failed at offset %x\n", offset);
13188 tw32(offset, save_val);
13189 return -EIO;
13190 }
13191
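/* Walk the given region writing each test pattern and reading it
 * back one 32-bit word at a time.
 */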
13192 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13193 {
13194 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13195 int i;
13196 u32 j;
13197
13198 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13199 for (j = 0; j < len; j += 4) {
13200 u32 val;
13201
13202 tg3_write_mem(tp, offset + j, test_pattern[i]);
13203 tg3_read_mem(tp, offset + j, &val);
13204 if (val != test_pattern[i])
13205 return -EIO;
13206 }
13207 }
13208 return 0;
13209 }
13210
13211 static int tg3_test_memory(struct tg3 *tp)
13212 {
13213 static struct mem_entry {
13214 u32 offset;
13215 u32 len;
13216 } mem_tbl_570x[] = {
13217 { 0x00000000, 0x00b50},
13218 { 0x00002000, 0x1c000},
13219 { 0xffffffff, 0x00000}
13220 }, mem_tbl_5705[] = {
13221 { 0x00000100, 0x0000c},
13222 { 0x00000200, 0x00008},
13223 { 0x00004000, 0x00800},
13224 { 0x00006000, 0x01000},
13225 { 0x00008000, 0x02000},
13226 { 0x00010000, 0x0e000},
13227 { 0xffffffff, 0x00000}
13228 }, mem_tbl_5755[] = {
13229 { 0x00000200, 0x00008},
13230 { 0x00004000, 0x00800},
13231 { 0x00006000, 0x00800},
13232 { 0x00008000, 0x02000},
13233 { 0x00010000, 0x0c000},
13234 { 0xffffffff, 0x00000}
13235 }, mem_tbl_5906[] = {
13236 { 0x00000200, 0x00008},
13237 { 0x00004000, 0x00400},
13238 { 0x00006000, 0x00400},
13239 { 0x00008000, 0x01000},
13240 { 0x00010000, 0x01000},
13241 { 0xffffffff, 0x00000}
13242 }, mem_tbl_5717[] = {
13243 { 0x00000200, 0x00008},
13244 { 0x00010000, 0x0a000},
13245 { 0x00020000, 0x13c00},
13246 { 0xffffffff, 0x00000}
13247 }, mem_tbl_57765[] = {
13248 { 0x00000200, 0x00008},
13249 { 0x00004000, 0x00800},
13250 { 0x00006000, 0x09800},
13251 { 0x00010000, 0x0a000},
13252 { 0xffffffff, 0x00000}
13253 };
13254 struct mem_entry *mem_tbl;
13255 int err = 0;
13256 int i;
13257
13258 if (tg3_flag(tp, 5717_PLUS))
13259 mem_tbl = mem_tbl_5717;
13260 else if (tg3_flag(tp, 57765_CLASS) ||
13261 tg3_asic_rev(tp) == ASIC_REV_5762)
13262 mem_tbl = mem_tbl_57765;
13263 else if (tg3_flag(tp, 5755_PLUS))
13264 mem_tbl = mem_tbl_5755;
13265 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13266 mem_tbl = mem_tbl_5906;
13267 else if (tg3_flag(tp, 5705_PLUS))
13268 mem_tbl = mem_tbl_5705;
13269 else
13270 mem_tbl = mem_tbl_570x;
13271
13272 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13273 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13274 if (err)
13275 break;
13276 }
13277
13278 return err;
13279 }
13280
13281 #define TG3_TSO_MSS 500
13282
13283 #define TG3_TSO_IP_HDR_LEN 20
13284 #define TG3_TSO_TCP_HDR_LEN 20
13285 #define TG3_TSO_TCP_OPT_LEN 12
13286
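/* Canned EtherType + IPv4 + TCP header (including a timestamp option)
 * used to build the TSO loopback frame; the IP total-length field is
 * patched in at run time.
 */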
13287 static const u8 tg3_tso_header[] = {
13288 0x08, 0x00,
13289 0x45, 0x00, 0x00, 0x00,
13290 0x00, 0x00, 0x40, 0x00,
13291 0x40, 0x06, 0x00, 0x00,
13292 0x0a, 0x00, 0x00, 0x01,
13293 0x0a, 0x00, 0x00, 0x02,
13294 0x0d, 0x00, 0xe0, 0x00,
13295 0x00, 0x00, 0x01, 0x00,
13296 0x00, 0x00, 0x02, 0x00,
13297 0x80, 0x10, 0x10, 0x00,
13298 0x14, 0x09, 0x00, 0x00,
13299 0x01, 0x01, 0x08, 0x0a,
13300 0x11, 0x11, 0x11, 0x11,
13301 0x11, 0x11, 0x11, 0x11,
13302 };
13303
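/* Transmit a single test frame on the loopback path and verify that
 * it comes back intact: queue it on the tx ring, poll the status
 * block for tx completion and rx arrival, then check the receive
 * descriptor and payload byte-for-byte.
 */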
13304 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13305 {
13306 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13307 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13308 u32 budget;
13309 struct sk_buff *skb;
13310 u8 *tx_data, *rx_data;
13311 dma_addr_t map;
13312 int num_pkts, tx_len, rx_len, i, err;
13313 struct tg3_rx_buffer_desc *desc;
13314 struct tg3_napi *tnapi, *rnapi;
13315 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13316
13317 tnapi = &tp->napi[0];
13318 rnapi = &tp->napi[0];
13319 if (tp->irq_cnt > 1) {
13320 if (tg3_flag(tp, ENABLE_RSS))
13321 rnapi = &tp->napi[1];
13322 if (tg3_flag(tp, ENABLE_TSS))
13323 tnapi = &tp->napi[1];
13324 }
13325 coal_now = tnapi->coal_now | rnapi->coal_now;
13326
13327 err = -EIO;
13328
13329 tx_len = pktsz;
13330 skb = netdev_alloc_skb(tp->dev, tx_len);
13331 if (!skb)
13332 return -ENOMEM;
13333
13334 tx_data = skb_put(skb, tx_len);
13335 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13336 memset(tx_data + ETH_ALEN, 0x0, 8);
13337
13338 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13339
13340 if (tso_loopback) {
13341 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13342
13343 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13344 TG3_TSO_TCP_OPT_LEN;
13345
13346 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13347 sizeof(tg3_tso_header));
13348 mss = TG3_TSO_MSS;
13349
13350 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13351 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13352
13353 /* Set the total length field in the IP header */
13354 iph->tot_len = htons((u16)(mss + hdr_len));
13355
13356 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13357 TXD_FLAG_CPU_POST_DMA);
13358
13359 if (tg3_flag(tp, HW_TSO_1) ||
13360 tg3_flag(tp, HW_TSO_2) ||
13361 tg3_flag(tp, HW_TSO_3)) {
13362 struct tcphdr *th;
13363 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13364 th = (struct tcphdr *)&tx_data[val];
13365 th->check = 0;
13366 } else
13367 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13368
13369 if (tg3_flag(tp, HW_TSO_3)) {
13370 mss |= (hdr_len & 0xc) << 12;
13371 if (hdr_len & 0x10)
13372 base_flags |= 0x00000010;
13373 base_flags |= (hdr_len & 0x3e0) << 5;
13374 } else if (tg3_flag(tp, HW_TSO_2))
13375 mss |= hdr_len << 9;
13376 else if (tg3_flag(tp, HW_TSO_1) ||
13377 tg3_asic_rev(tp) == ASIC_REV_5705) {
13378 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13379 } else {
13380 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13381 }
13382
13383 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13384 } else {
13385 num_pkts = 1;
13386 data_off = ETH_HLEN;
13387
13388 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13389 tx_len > VLAN_ETH_FRAME_LEN)
13390 base_flags |= TXD_FLAG_JMB_PKT;
13391 }
13392
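	/* Fill the payload with an incrementing byte pattern so the
	 * receive side can verify the data.
	 */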
13393 for (i = data_off; i < tx_len; i++)
13394 tx_data[i] = (u8) (i & 0xff);
13395
13396 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13397 if (pci_dma_mapping_error(tp->pdev, map)) {
13398 dev_kfree_skb(skb);
13399 return -EIO;
13400 }
13401
13402 val = tnapi->tx_prod;
13403 tnapi->tx_buffers[val].skb = skb;
13404 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13405
13406 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13407 rnapi->coal_now);
13408
13409 udelay(10);
13410
13411 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13412
13413 budget = tg3_tx_avail(tnapi);
13414 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13415 base_flags | TXD_FLAG_END, mss, 0)) {
13416 tnapi->tx_buffers[val].skb = NULL;
13417 dev_kfree_skb(skb);
13418 return -EIO;
13419 }
13420
13421 tnapi->tx_prod++;
13422
13423 /* Sync BD data before updating mailbox */
13424 wmb();
13425
13426 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13427 tr32_mailbox(tnapi->prodmbox);
13428
13429 udelay(10);
13430
13431 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13432 for (i = 0; i < 35; i++) {
13433 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13434 coal_now);
13435
13436 udelay(10);
13437
13438 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13439 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13440 if ((tx_idx == tnapi->tx_prod) &&
13441 (rx_idx == (rx_start_idx + num_pkts)))
13442 break;
13443 }
13444
13445 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13446 dev_kfree_skb(skb);
13447
13448 if (tx_idx != tnapi->tx_prod)
13449 goto out;
13450
13451 if (rx_idx != rx_start_idx + num_pkts)
13452 goto out;
13453
13454 val = data_off;
13455 while (rx_idx != rx_start_idx) {
13456 desc = &rnapi->rx_rcb[rx_start_idx++];
13457 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13458 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13459
13460 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13461 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13462 goto out;
13463
13464 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13465 - ETH_FCS_LEN;
13466
13467 if (!tso_loopback) {
13468 if (rx_len != tx_len)
13469 goto out;
13470
13471 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13472 if (opaque_key != RXD_OPAQUE_RING_STD)
13473 goto out;
13474 } else {
13475 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13476 goto out;
13477 }
13478 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13479 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13480 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13481 goto out;
13482 }
13483
13484 if (opaque_key == RXD_OPAQUE_RING_STD) {
13485 rx_data = tpr->rx_std_buffers[desc_idx].data;
13486 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13487 mapping);
13488 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13489 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13490 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13491 mapping);
13492 } else
13493 goto out;
13494
13495 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13496 PCI_DMA_FROMDEVICE);
13497
13498 rx_data += TG3_RX_OFFSET(tp);
13499 for (i = data_off; i < rx_len; i++, val++) {
13500 if (*(rx_data + i) != (u8) (val & 0xff))
13501 goto out;
13502 }
13503 }
13504
13505 err = 0;
13506
13507 /* tg3_free_rings will unmap and free the rx_data */
13508 out:
13509 return err;
13510 }
13511
13512 #define TG3_STD_LOOPBACK_FAILED 1
13513 #define TG3_JMB_LOOPBACK_FAILED 2
13514 #define TG3_TSO_LOOPBACK_FAILED 4
13515 #define TG3_LOOPBACK_FAILED \
13516 (TG3_STD_LOOPBACK_FAILED | \
13517 TG3_JMB_LOOPBACK_FAILED | \
13518 TG3_TSO_LOOPBACK_FAILED)
13519
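/* Run MAC-internal, PHY-internal and (optionally) external loopback
 * tests for standard, TSO and jumbo frames, encoding each failure as
 * a TG3_*_LOOPBACK_FAILED bit in the per-mode result word.
 */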
13520 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13521 {
13522 int err = -EIO;
13523 u32 eee_cap;
13524 u32 jmb_pkt_sz = 9000;
13525
13526 if (tp->dma_limit)
13527 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13528
13529 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13530 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13531
13532 if (!netif_running(tp->dev)) {
13533 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13534 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13535 if (do_extlpbk)
13536 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13537 goto done;
13538 }
13539
13540 err = tg3_reset_hw(tp, true);
13541 if (err) {
13542 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13543 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13544 if (do_extlpbk)
13545 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13546 goto done;
13547 }
13548
13549 if (tg3_flag(tp, ENABLE_RSS)) {
13550 int i;
13551
13552 /* Reroute all rx packets to the 1st queue */
13553 for (i = MAC_RSS_INDIR_TBL_0;
13554 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13555 tw32(i, 0x0);
13556 }
13557
13558 /* HW errata - MAC loopback fails in some cases on 5780.
13559 * Normal traffic and PHY loopback are not affected by this
13560 * errata. Also, the MAC loopback test is deprecated on all
13561 * newer ASIC revisions.
13562 */
13563 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13564 !tg3_flag(tp, CPMU_PRESENT)) {
13565 tg3_mac_loopback(tp, true);
13566
13567 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13568 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13569
13570 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13571 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13572 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13573
13574 tg3_mac_loopback(tp, false);
13575 }
13576
13577 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13578 !tg3_flag(tp, USE_PHYLIB)) {
13579 int i;
13580
13581 tg3_phy_lpbk_set(tp, 0, false);
13582
13583 /* Wait for link */
13584 for (i = 0; i < 100; i++) {
13585 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13586 break;
13587 mdelay(1);
13588 }
13589
13590 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13591 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13592 if (tg3_flag(tp, TSO_CAPABLE) &&
13593 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13594 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13595 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13596 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13597 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13598
13599 if (do_extlpbk) {
13600 tg3_phy_lpbk_set(tp, 0, true);
13601
13602 /* All link indications report up, but the hardware
13603 * isn't really ready for about 20 msec. Double it
13604 * to be sure.
13605 */
13606 mdelay(40);
13607
13608 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13609 data[TG3_EXT_LOOPB_TEST] |=
13610 TG3_STD_LOOPBACK_FAILED;
13611 if (tg3_flag(tp, TSO_CAPABLE) &&
13612 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13613 data[TG3_EXT_LOOPB_TEST] |=
13614 TG3_TSO_LOOPBACK_FAILED;
13615 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13616 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13617 data[TG3_EXT_LOOPB_TEST] |=
13618 TG3_JMB_LOOPBACK_FAILED;
13619 }
13620
13621 /* Re-enable gphy autopowerdown. */
13622 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13623 tg3_phy_toggle_apd(tp, true);
13624 }
13625
13626 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13627 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13628
13629 done:
13630 tp->phy_flags |= eee_cap;
13631
13632 return err;
13633 }
13634
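/* ethtool self-test entry point. NVRAM and link tests run with the
 * interface live; the register, memory, loopback and interrupt tests
 * require halting the chip and restarting it afterwards.
 */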
13635 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13636 u64 *data)
13637 {
13638 struct tg3 *tp = netdev_priv(dev);
13639 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13640
13641 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13642 if (tg3_power_up(tp)) {
13643 etest->flags |= ETH_TEST_FL_FAILED;
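		/* memset() fills each byte, so the entries become
		 * 0x0101...; any nonzero value marks a test as failed.
		 */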
13644 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13645 return;
13646 }
13647 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13648 }
13649
13650 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13651
13652 if (tg3_test_nvram(tp) != 0) {
13653 etest->flags |= ETH_TEST_FL_FAILED;
13654 data[TG3_NVRAM_TEST] = 1;
13655 }
13656 if (!doextlpbk && tg3_test_link(tp)) {
13657 etest->flags |= ETH_TEST_FL_FAILED;
13658 data[TG3_LINK_TEST] = 1;
13659 }
13660 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13661 int err, err2 = 0, irq_sync = 0;
13662
13663 if (netif_running(dev)) {
13664 tg3_phy_stop(tp);
13665 tg3_netif_stop(tp);
13666 irq_sync = 1;
13667 }
13668
13669 tg3_full_lock(tp, irq_sync);
13670 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13671 err = tg3_nvram_lock(tp);
13672 tg3_halt_cpu(tp, RX_CPU_BASE);
13673 if (!tg3_flag(tp, 5705_PLUS))
13674 tg3_halt_cpu(tp, TX_CPU_BASE);
13675 if (!err)
13676 tg3_nvram_unlock(tp);
13677
13678 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13679 tg3_phy_reset(tp);
13680
13681 if (tg3_test_registers(tp) != 0) {
13682 etest->flags |= ETH_TEST_FL_FAILED;
13683 data[TG3_REGISTER_TEST] = 1;
13684 }
13685
13686 if (tg3_test_memory(tp) != 0) {
13687 etest->flags |= ETH_TEST_FL_FAILED;
13688 data[TG3_MEMORY_TEST] = 1;
13689 }
13690
13691 if (doextlpbk)
13692 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13693
13694 if (tg3_test_loopback(tp, data, doextlpbk))
13695 etest->flags |= ETH_TEST_FL_FAILED;
13696
13697 tg3_full_unlock(tp);
13698
13699 if (tg3_test_interrupt(tp) != 0) {
13700 etest->flags |= ETH_TEST_FL_FAILED;
13701 data[TG3_INTERRUPT_TEST] = 1;
13702 }
13703
13704 tg3_full_lock(tp, 0);
13705
13706 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13707 if (netif_running(dev)) {
13708 tg3_flag_set(tp, INIT_COMPLETE);
13709 err2 = tg3_restart_hw(tp, true);
13710 if (!err2)
13711 tg3_netif_start(tp);
13712 }
13713
13714 tg3_full_unlock(tp);
13715
13716 if (irq_sync && !err2)
13717 tg3_phy_start(tp);
13718 }
13719 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13720 tg3_power_down_prepare(tp);
13722 }
13723
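/* SIOCSHWTSTAMP handler: translate the requested rx filter into the
 * chip's TG3_RX_PTP_CTL bits and enable or disable tx timestamping.
 */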
13724 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13725 {
13726 struct tg3 *tp = netdev_priv(dev);
13727 struct hwtstamp_config stmpconf;
13728
13729 if (!tg3_flag(tp, PTP_CAPABLE))
13730 return -EOPNOTSUPP;
13731
13732 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13733 return -EFAULT;
13734
13735 if (stmpconf.flags)
13736 return -EINVAL;
13737
13738 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13739 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13740 return -ERANGE;
13741
13742 switch (stmpconf.rx_filter) {
13743 case HWTSTAMP_FILTER_NONE:
13744 tp->rxptpctl = 0;
13745 break;
13746 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13747 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13748 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13749 break;
13750 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13751 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13752 TG3_RX_PTP_CTL_SYNC_EVNT;
13753 break;
13754 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13755 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13756 TG3_RX_PTP_CTL_DELAY_REQ;
13757 break;
13758 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13759 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13760 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13761 break;
13762 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13763 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13764 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13765 break;
13766 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13767 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13768 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13769 break;
13770 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13771 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13772 TG3_RX_PTP_CTL_SYNC_EVNT;
13773 break;
13774 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13775 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13776 TG3_RX_PTP_CTL_SYNC_EVNT;
13777 break;
13778 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13779 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13780 TG3_RX_PTP_CTL_SYNC_EVNT;
13781 break;
13782 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13783 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13784 TG3_RX_PTP_CTL_DELAY_REQ;
13785 break;
13786 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13787 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13788 TG3_RX_PTP_CTL_DELAY_REQ;
13789 break;
13790 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13791 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13792 TG3_RX_PTP_CTL_DELAY_REQ;
13793 break;
13794 default:
13795 return -ERANGE;
13796 }
13797
13798 if (netif_running(dev) && tp->rxptpctl)
13799 tw32(TG3_RX_PTP_CTL,
13800 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13801
13802 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13803 tg3_flag_set(tp, TX_TSTAMP_EN);
13804 else
13805 tg3_flag_clear(tp, TX_TSTAMP_EN);
13806
13807 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13808 -EFAULT : 0;
13809 }
13810
13811 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13812 {
13813 struct tg3 *tp = netdev_priv(dev);
13814 struct hwtstamp_config stmpconf;
13815
13816 if (!tg3_flag(tp, PTP_CAPABLE))
13817 return -EOPNOTSUPP;
13818
13819 stmpconf.flags = 0;
13820 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13821 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13822
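/* Reverse-map the active RX PTP control bits back onto the rx_filter
 * value that would have produced them.
 */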
13823 switch (tp->rxptpctl) {
13824 case 0:
13825 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13826 break;
13827 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13828 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13829 break;
13830 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13831 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13832 break;
13833 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13834 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13835 break;
13836 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13837 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13838 break;
13839 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13840 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13841 break;
13842 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13843 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13844 break;
13845 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13846 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13847 break;
13848 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13849 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13850 break;
13851 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13852 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13853 break;
13854 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13855 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13856 break;
13857 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13858 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13859 break;
13860 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13861 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13862 break;
13863 default:
13864 WARN_ON_ONCE(1);
13865 return -ERANGE;
13866 }
13867
13868 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13869 -EFAULT : 0;
13870 }
13871
13872 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13873 {
13874 struct mii_ioctl_data *data = if_mii(ifr);
13875 struct tg3 *tp = netdev_priv(dev);
13876 int err;
13877
13878 if (tg3_flag(tp, USE_PHYLIB)) {
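/* When phylib manages the PHY, hand MII ioctls straight to it. */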
13879 struct phy_device *phydev;
13880 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13881 return -EAGAIN;
13882 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13883 return phy_mii_ioctl(phydev, ifr, cmd);
13884 }
13885
13886 switch (cmd) {
13887 case SIOCGMIIPHY:
13888 data->phy_id = tp->phy_addr;
13889
13890 /* fallthru */
13891 case SIOCGMIIREG: {
13892 u32 mii_regval;
13893
13894 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13895 break; /* We have no PHY */
13896
13897 if (!netif_running(dev))
13898 return -EAGAIN;
13899
13900 spin_lock_bh(&tp->lock);
13901 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13902 data->reg_num & 0x1f, &mii_regval);
13903 spin_unlock_bh(&tp->lock);
13904
13905 data->val_out = mii_regval;
13906
13907 return err;
13908 }
13909
13910 case SIOCSMIIREG:
13911 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13912 break; /* We have no PHY */
13913
13914 if (!netif_running(dev))
13915 return -EAGAIN;
13916
13917 spin_lock_bh(&tp->lock);
13918 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13919 data->reg_num & 0x1f, data->val_in);
13920 spin_unlock_bh(&tp->lock);
13921
13922 return err;
13923
13924 case SIOCSHWTSTAMP:
13925 return tg3_hwtstamp_set(dev, ifr);
13926
13927 case SIOCGHWTSTAMP:
13928 return tg3_hwtstamp_get(dev, ifr);
13929
13930 default:
13931 /* do nothing */
13932 break;
13933 }
13934 return -EOPNOTSUPP;
13935 }
13936
13937 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13938 {
13939 struct tg3 *tp = netdev_priv(dev);
13940
13941 memcpy(ec, &tp->coal, sizeof(*ec));
13942 return 0;
13943 }
13944
13945 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13946 {
13947 struct tg3 *tp = netdev_priv(dev);
13948 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13949 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13950
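/* Only pre-5705 hardware implements the IRQ-level coalescing knobs
 * and the statistics block timer; on newer chips the limits stay at
 * zero, so any nonzero request fails the range check below.
 */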
13951 if (!tg3_flag(tp, 5705_PLUS)) {
13952 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13953 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13954 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13955 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13956 }
13957
13958 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13959 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13960 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13961 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13962 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13963 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13964 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13965 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13966 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13967 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13968 return -EINVAL;
13969
13970 /* No rx interrupts will be generated if both are zero */
13971 if ((ec->rx_coalesce_usecs == 0) &&
13972 (ec->rx_max_coalesced_frames == 0))
13973 return -EINVAL;
13974
13975 /* No tx interrupts will be generated if both are zero */
13976 if ((ec->tx_coalesce_usecs == 0) &&
13977 (ec->tx_max_coalesced_frames == 0))
13978 return -EINVAL;
13979
13980 /* Only copy relevant parameters, ignore all others. */
13981 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13982 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13983 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13984 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13985 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13986 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13987 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13988 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13989 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13990
13991 if (netif_running(dev)) {
13992 tg3_full_lock(tp, 0);
13993 __tg3_set_coalesce(tp, &tp->coal);
13994 tg3_full_unlock(tp);
13995 }
13996 return 0;
13997 }
13998
13999 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14000 {
14001 struct tg3 *tp = netdev_priv(dev);
14002
14003 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14004 netdev_warn(tp->dev, "Board does not support EEE!\n");
14005 return -EOPNOTSUPP;
14006 }
14007
14008 if (edata->advertised != tp->eee.advertised) {
14009 netdev_warn(tp->dev,
14010 "Direct manipulation of EEE advertisement is not supported\n");
14011 return -EINVAL;
14012 }
14013
14014 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14015 netdev_warn(tp->dev,
14016 "Maximal Tx Lpi timer supported is %#x(u)\n",
14017 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14018 return -EINVAL;
14019 }
14020
14021 tp->eee = *edata;
14022
14023 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14024 tg3_warn_mgmt_link_flap(tp);
14025
14026 if (netif_running(tp->dev)) {
14027 tg3_full_lock(tp, 0);
14028 tg3_setup_eee(tp);
14029 tg3_phy_reset(tp);
14030 tg3_full_unlock(tp);
14031 }
14032
14033 return 0;
14034 }
14035
14036 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14037 {
14038 struct tg3 *tp = netdev_priv(dev);
14039
14040 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14041 netdev_warn(tp->dev,
14042 "Board does not support EEE!\n");
14043 return -EOPNOTSUPP;
14044 }
14045
14046 *edata = tp->eee;
14047 return 0;
14048 }
14049
14050 static const struct ethtool_ops tg3_ethtool_ops = {
14051 .get_settings = tg3_get_settings,
14052 .set_settings = tg3_set_settings,
14053 .get_drvinfo = tg3_get_drvinfo,
14054 .get_regs_len = tg3_get_regs_len,
14055 .get_regs = tg3_get_regs,
14056 .get_wol = tg3_get_wol,
14057 .set_wol = tg3_set_wol,
14058 .get_msglevel = tg3_get_msglevel,
14059 .set_msglevel = tg3_set_msglevel,
14060 .nway_reset = tg3_nway_reset,
14061 .get_link = ethtool_op_get_link,
14062 .get_eeprom_len = tg3_get_eeprom_len,
14063 .get_eeprom = tg3_get_eeprom,
14064 .set_eeprom = tg3_set_eeprom,
14065 .get_ringparam = tg3_get_ringparam,
14066 .set_ringparam = tg3_set_ringparam,
14067 .get_pauseparam = tg3_get_pauseparam,
14068 .set_pauseparam = tg3_set_pauseparam,
14069 .self_test = tg3_self_test,
14070 .get_strings = tg3_get_strings,
14071 .set_phys_id = tg3_set_phys_id,
14072 .get_ethtool_stats = tg3_get_ethtool_stats,
14073 .get_coalesce = tg3_get_coalesce,
14074 .set_coalesce = tg3_set_coalesce,
14075 .get_sset_count = tg3_get_sset_count,
14076 .get_rxnfc = tg3_get_rxnfc,
14077 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14078 .get_rxfh = tg3_get_rxfh,
14079 .set_rxfh = tg3_set_rxfh,
14080 .get_channels = tg3_get_channels,
14081 .set_channels = tg3_set_channels,
14082 .get_ts_info = tg3_get_ts_info,
14083 .get_eee = tg3_get_eee,
14084 .set_eee = tg3_set_eee,
14085 };
14086
14087 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14088 struct rtnl_link_stats64 *stats)
14089 {
14090 struct tg3 *tp = netdev_priv(dev);
14091
14092 spin_lock_bh(&tp->lock);
14093 if (!tp->hw_stats) {
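/* The device has been torn down and the hardware counters are
 * gone; report the snapshot saved at that point.
 */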
14094 spin_unlock_bh(&tp->lock);
14095 return &tp->net_stats_prev;
14096 }
14097
14098 tg3_get_nstats(tp, stats);
14099 spin_unlock_bh(&tp->lock);
14100
14101 return stats;
14102 }
14103
14104 static void tg3_set_rx_mode(struct net_device *dev)
14105 {
14106 struct tg3 *tp = netdev_priv(dev);
14107
14108 if (!netif_running(dev))
14109 return;
14110
14111 tg3_full_lock(tp, 0);
14112 __tg3_set_rx_mode(dev);
14113 tg3_full_unlock(tp);
14114 }
14115
14116 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14117 int new_mtu)
14118 {
14119 dev->mtu = new_mtu;
14120
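/* 5780-class chips drop TSO capability while a jumbo MTU is in use;
 * other chips simply switch the dedicated jumbo RX ring on or off.
 */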
14121 if (new_mtu > ETH_DATA_LEN) {
14122 if (tg3_flag(tp, 5780_CLASS)) {
14123 netdev_update_features(dev);
14124 tg3_flag_clear(tp, TSO_CAPABLE);
14125 } else {
14126 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14127 }
14128 } else {
14129 if (tg3_flag(tp, 5780_CLASS)) {
14130 tg3_flag_set(tp, TSO_CAPABLE);
14131 netdev_update_features(dev);
14132 }
14133 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14134 }
14135 }
14136
14137 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14138 {
14139 struct tg3 *tp = netdev_priv(dev);
14140 int err;
14141 bool reset_phy = false;
14142
14143 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14144 return -EINVAL;
14145
14146 if (!netif_running(dev)) {
14147 /* We'll just catch it later when the
14148 * device is brought up.
14149 */
14150 tg3_set_mtu(dev, tp, new_mtu);
14151 return 0;
14152 }
14153
14154 tg3_phy_stop(tp);
14155
14156 tg3_netif_stop(tp);
14157
14158 tg3_set_mtu(dev, tp, new_mtu);
14159
14160 tg3_full_lock(tp, 1);
14161
14162 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14163
14164 /* Reset the PHY; otherwise the read DMA engine will be left in a
14165 * mode that breaks all DMA requests down to 256 bytes.
14166 */
14167 if (tg3_asic_rev(tp) == ASIC_REV_57766)
14168 reset_phy = true;
14169
14170 err = tg3_restart_hw(tp, reset_phy);
14171
14172 if (!err)
14173 tg3_netif_start(tp);
14174
14175 tg3_full_unlock(tp);
14176
14177 if (!err)
14178 tg3_phy_start(tp);
14179
14180 return err;
14181 }
14182
14183 static const struct net_device_ops tg3_netdev_ops = {
14184 .ndo_open = tg3_open,
14185 .ndo_stop = tg3_close,
14186 .ndo_start_xmit = tg3_start_xmit,
14187 .ndo_get_stats64 = tg3_get_stats64,
14188 .ndo_validate_addr = eth_validate_addr,
14189 .ndo_set_rx_mode = tg3_set_rx_mode,
14190 .ndo_set_mac_address = tg3_set_mac_addr,
14191 .ndo_do_ioctl = tg3_ioctl,
14192 .ndo_tx_timeout = tg3_tx_timeout,
14193 .ndo_change_mtu = tg3_change_mtu,
14194 .ndo_fix_features = tg3_fix_features,
14195 .ndo_set_features = tg3_set_features,
14196 #ifdef CONFIG_NET_POLL_CONTROLLER
14197 .ndo_poll_controller = tg3_poll_controller,
14198 #endif
14199 };
14200
14201 static void tg3_get_eeprom_size(struct tg3 *tp)
14202 {
14203 u32 cursize, val, magic;
14204
14205 tp->nvram_size = EEPROM_CHIP_SIZE;
14206
14207 if (tg3_nvram_read(tp, 0, &magic) != 0)
14208 return;
14209
14210 if ((magic != TG3_EEPROM_MAGIC) &&
14211 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14212 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14213 return;
14214
14215 /*
14216 * Size the chip by reading offsets at increasing powers of two.
14217 * When we encounter our validation signature, we know the addressing
14218 * has wrapped around, and thus have our chip size.
14219 */
14220 cursize = 0x10;
14221
14222 while (cursize < tp->nvram_size) {
14223 if (tg3_nvram_read(tp, cursize, &val) != 0)
14224 return;
14225
14226 if (val == magic)
14227 break;
14228
14229 cursize <<= 1;
14230 }
14231
14232 tp->nvram_size = cursize;
14233 }
14234
14235 static void tg3_get_nvram_size(struct tg3 *tp)
14236 {
14237 u32 val;
14238
14239 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14240 return;
14241
14242 /* Selfboot format */
14243 if (val != TG3_EEPROM_MAGIC) {
14244 tg3_get_eeprom_size(tp);
14245 return;
14246 }
14247
14248 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14249 if (val != 0) {
14250 /* This is confusing. We want to operate on the
14251 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14252 * call will read from NVRAM and byteswap the data
14253 * according to the byteswapping settings for all
14254 * other register accesses. This ensures the data we
14255 * want will always reside in the lower 16-bits.
14256 * However, the data in NVRAM is in LE format, which
14257 * means the data from the NVRAM read will always be
14258 * opposite the endianness of the CPU. The 16-bit
14259 * byteswap then brings the data to CPU endianness.
14260 */
14261 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14262 return;
14263 }
14264 }
14265 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14266 }
14267
14268 static void tg3_get_nvram_info(struct tg3 *tp)
14269 {
14270 u32 nvcfg1;
14271
14272 nvcfg1 = tr32(NVRAM_CFG1);
14273 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14274 tg3_flag_set(tp, FLASH);
14275 } else {
14276 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14277 tw32(NVRAM_CFG1, nvcfg1);
14278 }
14279
14280 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14281 tg3_flag(tp, 5780_CLASS)) {
14282 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14283 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14284 tp->nvram_jedecnum = JEDEC_ATMEL;
14285 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14286 tg3_flag_set(tp, NVRAM_BUFFERED);
14287 break;
14288 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14289 tp->nvram_jedecnum = JEDEC_ATMEL;
14290 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14291 break;
14292 case FLASH_VENDOR_ATMEL_EEPROM:
14293 tp->nvram_jedecnum = JEDEC_ATMEL;
14294 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14295 tg3_flag_set(tp, NVRAM_BUFFERED);
14296 break;
14297 case FLASH_VENDOR_ST:
14298 tp->nvram_jedecnum = JEDEC_ST;
14299 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14300 tg3_flag_set(tp, NVRAM_BUFFERED);
14301 break;
14302 case FLASH_VENDOR_SAIFUN:
14303 tp->nvram_jedecnum = JEDEC_SAIFUN;
14304 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14305 break;
14306 case FLASH_VENDOR_SST_SMALL:
14307 case FLASH_VENDOR_SST_LARGE:
14308 tp->nvram_jedecnum = JEDEC_SST;
14309 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14310 break;
14311 }
14312 } else {
14313 tp->nvram_jedecnum = JEDEC_ATMEL;
14314 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14315 tg3_flag_set(tp, NVRAM_BUFFERED);
14316 }
14317 }
14318
14319 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14320 {
14321 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14322 case FLASH_5752PAGE_SIZE_256:
14323 tp->nvram_pagesize = 256;
14324 break;
14325 case FLASH_5752PAGE_SIZE_512:
14326 tp->nvram_pagesize = 512;
14327 break;
14328 case FLASH_5752PAGE_SIZE_1K:
14329 tp->nvram_pagesize = 1024;
14330 break;
14331 case FLASH_5752PAGE_SIZE_2K:
14332 tp->nvram_pagesize = 2048;
14333 break;
14334 case FLASH_5752PAGE_SIZE_4K:
14335 tp->nvram_pagesize = 4096;
14336 break;
14337 case FLASH_5752PAGE_SIZE_264:
14338 tp->nvram_pagesize = 264;
14339 break;
14340 case FLASH_5752PAGE_SIZE_528:
14341 tp->nvram_pagesize = 528;
14342 break;
14343 }
14344 }
14345
14346 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14347 {
14348 u32 nvcfg1;
14349
14350 nvcfg1 = tr32(NVRAM_CFG1);
14351
14352 /* NVRAM protection for TPM */
14353 if (nvcfg1 & (1 << 27))
14354 tg3_flag_set(tp, PROTECTED_NVRAM);
14355
14356 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14357 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14358 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14359 tp->nvram_jedecnum = JEDEC_ATMEL;
14360 tg3_flag_set(tp, NVRAM_BUFFERED);
14361 break;
14362 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14363 tp->nvram_jedecnum = JEDEC_ATMEL;
14364 tg3_flag_set(tp, NVRAM_BUFFERED);
14365 tg3_flag_set(tp, FLASH);
14366 break;
14367 case FLASH_5752VENDOR_ST_M45PE10:
14368 case FLASH_5752VENDOR_ST_M45PE20:
14369 case FLASH_5752VENDOR_ST_M45PE40:
14370 tp->nvram_jedecnum = JEDEC_ST;
14371 tg3_flag_set(tp, NVRAM_BUFFERED);
14372 tg3_flag_set(tp, FLASH);
14373 break;
14374 }
14375
14376 if (tg3_flag(tp, FLASH)) {
14377 tg3_nvram_get_pagesize(tp, nvcfg1);
14378 } else {
14379 /* For eeprom, set pagesize to maximum eeprom size */
14380 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14381
14382 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14383 tw32(NVRAM_CFG1, nvcfg1);
14384 }
14385 }
14386
14387 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14388 {
14389 u32 nvcfg1, protect = 0;
14390
14391 nvcfg1 = tr32(NVRAM_CFG1);
14392
14393 /* NVRAM protection for TPM */
14394 if (nvcfg1 & (1 << 27)) {
14395 tg3_flag_set(tp, PROTECTED_NVRAM);
14396 protect = 1;
14397 }
14398
14399 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14400 switch (nvcfg1) {
14401 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14402 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14403 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14404 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14405 tp->nvram_jedecnum = JEDEC_ATMEL;
14406 tg3_flag_set(tp, NVRAM_BUFFERED);
14407 tg3_flag_set(tp, FLASH);
14408 tp->nvram_pagesize = 264;
14409 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14410 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14411 tp->nvram_size = (protect ? 0x3e200 :
14412 TG3_NVRAM_SIZE_512KB);
14413 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14414 tp->nvram_size = (protect ? 0x1f200 :
14415 TG3_NVRAM_SIZE_256KB);
14416 else
14417 tp->nvram_size = (protect ? 0x1f200 :
14418 TG3_NVRAM_SIZE_128KB);
14419 break;
14420 case FLASH_5752VENDOR_ST_M45PE10:
14421 case FLASH_5752VENDOR_ST_M45PE20:
14422 case FLASH_5752VENDOR_ST_M45PE40:
14423 tp->nvram_jedecnum = JEDEC_ST;
14424 tg3_flag_set(tp, NVRAM_BUFFERED);
14425 tg3_flag_set(tp, FLASH);
14426 tp->nvram_pagesize = 256;
14427 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14428 tp->nvram_size = (protect ?
14429 TG3_NVRAM_SIZE_64KB :
14430 TG3_NVRAM_SIZE_128KB);
14431 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14432 tp->nvram_size = (protect ?
14433 TG3_NVRAM_SIZE_64KB :
14434 TG3_NVRAM_SIZE_256KB);
14435 else
14436 tp->nvram_size = (protect ?
14437 TG3_NVRAM_SIZE_128KB :
14438 TG3_NVRAM_SIZE_512KB);
14439 break;
14440 }
14441 }
14442
14443 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14444 {
14445 u32 nvcfg1;
14446
14447 nvcfg1 = tr32(NVRAM_CFG1);
14448
14449 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14450 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14451 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14452 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14453 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14454 tp->nvram_jedecnum = JEDEC_ATMEL;
14455 tg3_flag_set(tp, NVRAM_BUFFERED);
14456 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14457
14458 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14459 tw32(NVRAM_CFG1, nvcfg1);
14460 break;
14461 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14462 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14463 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14464 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14465 tp->nvram_jedecnum = JEDEC_ATMEL;
14466 tg3_flag_set(tp, NVRAM_BUFFERED);
14467 tg3_flag_set(tp, FLASH);
14468 tp->nvram_pagesize = 264;
14469 break;
14470 case FLASH_5752VENDOR_ST_M45PE10:
14471 case FLASH_5752VENDOR_ST_M45PE20:
14472 case FLASH_5752VENDOR_ST_M45PE40:
14473 tp->nvram_jedecnum = JEDEC_ST;
14474 tg3_flag_set(tp, NVRAM_BUFFERED);
14475 tg3_flag_set(tp, FLASH);
14476 tp->nvram_pagesize = 256;
14477 break;
14478 }
14479 }
14480
14481 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14482 {
14483 u32 nvcfg1, protect = 0;
14484
14485 nvcfg1 = tr32(NVRAM_CFG1);
14486
14487 /* NVRAM protection for TPM */
14488 if (nvcfg1 & (1 << 27)) {
14489 tg3_flag_set(tp, PROTECTED_NVRAM);
14490 protect = 1;
14491 }
14492
14493 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14494 switch (nvcfg1) {
14495 case FLASH_5761VENDOR_ATMEL_ADB021D:
14496 case FLASH_5761VENDOR_ATMEL_ADB041D:
14497 case FLASH_5761VENDOR_ATMEL_ADB081D:
14498 case FLASH_5761VENDOR_ATMEL_ADB161D:
14499 case FLASH_5761VENDOR_ATMEL_MDB021D:
14500 case FLASH_5761VENDOR_ATMEL_MDB041D:
14501 case FLASH_5761VENDOR_ATMEL_MDB081D:
14502 case FLASH_5761VENDOR_ATMEL_MDB161D:
14503 tp->nvram_jedecnum = JEDEC_ATMEL;
14504 tg3_flag_set(tp, NVRAM_BUFFERED);
14505 tg3_flag_set(tp, FLASH);
14506 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14507 tp->nvram_pagesize = 256;
14508 break;
14509 case FLASH_5761VENDOR_ST_A_M45PE20:
14510 case FLASH_5761VENDOR_ST_A_M45PE40:
14511 case FLASH_5761VENDOR_ST_A_M45PE80:
14512 case FLASH_5761VENDOR_ST_A_M45PE16:
14513 case FLASH_5761VENDOR_ST_M_M45PE20:
14514 case FLASH_5761VENDOR_ST_M_M45PE40:
14515 case FLASH_5761VENDOR_ST_M_M45PE80:
14516 case FLASH_5761VENDOR_ST_M_M45PE16:
14517 tp->nvram_jedecnum = JEDEC_ST;
14518 tg3_flag_set(tp, NVRAM_BUFFERED);
14519 tg3_flag_set(tp, FLASH);
14520 tp->nvram_pagesize = 256;
14521 break;
14522 }
14523
14524 if (protect) {
14525 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14526 } else {
14527 switch (nvcfg1) {
14528 case FLASH_5761VENDOR_ATMEL_ADB161D:
14529 case FLASH_5761VENDOR_ATMEL_MDB161D:
14530 case FLASH_5761VENDOR_ST_A_M45PE16:
14531 case FLASH_5761VENDOR_ST_M_M45PE16:
14532 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14533 break;
14534 case FLASH_5761VENDOR_ATMEL_ADB081D:
14535 case FLASH_5761VENDOR_ATMEL_MDB081D:
14536 case FLASH_5761VENDOR_ST_A_M45PE80:
14537 case FLASH_5761VENDOR_ST_M_M45PE80:
14538 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14539 break;
14540 case FLASH_5761VENDOR_ATMEL_ADB041D:
14541 case FLASH_5761VENDOR_ATMEL_MDB041D:
14542 case FLASH_5761VENDOR_ST_A_M45PE40:
14543 case FLASH_5761VENDOR_ST_M_M45PE40:
14544 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14545 break;
14546 case FLASH_5761VENDOR_ATMEL_ADB021D:
14547 case FLASH_5761VENDOR_ATMEL_MDB021D:
14548 case FLASH_5761VENDOR_ST_A_M45PE20:
14549 case FLASH_5761VENDOR_ST_M_M45PE20:
14550 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14551 break;
14552 }
14553 }
14554 }
14555
14556 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14557 {
14558 tp->nvram_jedecnum = JEDEC_ATMEL;
14559 tg3_flag_set(tp, NVRAM_BUFFERED);
14560 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14561 }
14562
14563 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14564 {
14565 u32 nvcfg1;
14566
14567 nvcfg1 = tr32(NVRAM_CFG1);
14568
14569 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14570 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14571 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14572 tp->nvram_jedecnum = JEDEC_ATMEL;
14573 tg3_flag_set(tp, NVRAM_BUFFERED);
14574 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14575
14576 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14577 tw32(NVRAM_CFG1, nvcfg1);
14578 return;
14579 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14580 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14581 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14582 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14583 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14584 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14585 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14586 tp->nvram_jedecnum = JEDEC_ATMEL;
14587 tg3_flag_set(tp, NVRAM_BUFFERED);
14588 tg3_flag_set(tp, FLASH);
14589
14590 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14591 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14592 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14593 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14594 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14595 break;
14596 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14597 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14598 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14599 break;
14600 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14601 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14602 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14603 break;
14604 }
14605 break;
14606 case FLASH_5752VENDOR_ST_M45PE10:
14607 case FLASH_5752VENDOR_ST_M45PE20:
14608 case FLASH_5752VENDOR_ST_M45PE40:
14609 tp->nvram_jedecnum = JEDEC_ST;
14610 tg3_flag_set(tp, NVRAM_BUFFERED);
14611 tg3_flag_set(tp, FLASH);
14612
14613 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14614 case FLASH_5752VENDOR_ST_M45PE10:
14615 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14616 break;
14617 case FLASH_5752VENDOR_ST_M45PE20:
14618 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14619 break;
14620 case FLASH_5752VENDOR_ST_M45PE40:
14621 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14622 break;
14623 }
14624 break;
14625 default:
14626 tg3_flag_set(tp, NO_NVRAM);
14627 return;
14628 }
14629
14630 tg3_nvram_get_pagesize(tp, nvcfg1);
14631 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14632 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14633 }
14634
14636 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14637 {
14638 u32 nvcfg1;
14639
14640 nvcfg1 = tr32(NVRAM_CFG1);
14641
14642 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14643 case FLASH_5717VENDOR_ATMEL_EEPROM:
14644 case FLASH_5717VENDOR_MICRO_EEPROM:
14645 tp->nvram_jedecnum = JEDEC_ATMEL;
14646 tg3_flag_set(tp, NVRAM_BUFFERED);
14647 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14648
14649 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14650 tw32(NVRAM_CFG1, nvcfg1);
14651 return;
14652 case FLASH_5717VENDOR_ATMEL_MDB011D:
14653 case FLASH_5717VENDOR_ATMEL_ADB011B:
14654 case FLASH_5717VENDOR_ATMEL_ADB011D:
14655 case FLASH_5717VENDOR_ATMEL_MDB021D:
14656 case FLASH_5717VENDOR_ATMEL_ADB021B:
14657 case FLASH_5717VENDOR_ATMEL_ADB021D:
14658 case FLASH_5717VENDOR_ATMEL_45USPT:
14659 tp->nvram_jedecnum = JEDEC_ATMEL;
14660 tg3_flag_set(tp, NVRAM_BUFFERED);
14661 tg3_flag_set(tp, FLASH);
14662
14663 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14664 case FLASH_5717VENDOR_ATMEL_MDB021D:
14665 /* Detect size with tg3_get_nvram_size() */
14666 break;
14667 case FLASH_5717VENDOR_ATMEL_ADB021B:
14668 case FLASH_5717VENDOR_ATMEL_ADB021D:
14669 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14670 break;
14671 default:
14672 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14673 break;
14674 }
14675 break;
14676 case FLASH_5717VENDOR_ST_M_M25PE10:
14677 case FLASH_5717VENDOR_ST_A_M25PE10:
14678 case FLASH_5717VENDOR_ST_M_M45PE10:
14679 case FLASH_5717VENDOR_ST_A_M45PE10:
14680 case FLASH_5717VENDOR_ST_M_M25PE20:
14681 case FLASH_5717VENDOR_ST_A_M25PE20:
14682 case FLASH_5717VENDOR_ST_M_M45PE20:
14683 case FLASH_5717VENDOR_ST_A_M45PE20:
14684 case FLASH_5717VENDOR_ST_25USPT:
14685 case FLASH_5717VENDOR_ST_45USPT:
14686 tp->nvram_jedecnum = JEDEC_ST;
14687 tg3_flag_set(tp, NVRAM_BUFFERED);
14688 tg3_flag_set(tp, FLASH);
14689
14690 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14691 case FLASH_5717VENDOR_ST_M_M25PE20:
14692 case FLASH_5717VENDOR_ST_M_M45PE20:
14693 /* Detect size with tg3_get_nvram_size() */
14694 break;
14695 case FLASH_5717VENDOR_ST_A_M25PE20:
14696 case FLASH_5717VENDOR_ST_A_M45PE20:
14697 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14698 break;
14699 default:
14700 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14701 break;
14702 }
14703 break;
14704 default:
14705 tg3_flag_set(tp, NO_NVRAM);
14706 return;
14707 }
14708
14709 tg3_nvram_get_pagesize(tp, nvcfg1);
14710 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14711 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14712 }
14713
14714 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14715 {
14716 u32 nvcfg1, nvmpinstrp;
14717
14718 nvcfg1 = tr32(NVRAM_CFG1);
14719 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14720
14721 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14722 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14723 tg3_flag_set(tp, NO_NVRAM);
14724 return;
14725 }
14726
14727 switch (nvmpinstrp) {
14728 case FLASH_5762_EEPROM_HD:
14729 nvmpinstrp = FLASH_5720_EEPROM_HD;
14730 break;
14731 case FLASH_5762_EEPROM_LD:
14732 nvmpinstrp = FLASH_5720_EEPROM_LD;
14733 break;
14734 case FLASH_5720VENDOR_M_ST_M45PE20:
14735 /* This pinstrap supports multiple sizes, so force it
14736 * to read the actual size from location 0xf0.
14737 */
14738 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14739 break;
14740 }
14741 }
14742
14743 switch (nvmpinstrp) {
14744 case FLASH_5720_EEPROM_HD:
14745 case FLASH_5720_EEPROM_LD:
14746 tp->nvram_jedecnum = JEDEC_ATMEL;
14747 tg3_flag_set(tp, NVRAM_BUFFERED);
14748
14749 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14750 tw32(NVRAM_CFG1, nvcfg1);
14751 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14752 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14753 else
14754 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14755 return;
14756 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14757 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14758 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14759 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14760 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14761 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14762 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14763 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14764 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14765 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14766 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14767 case FLASH_5720VENDOR_ATMEL_45USPT:
14768 tp->nvram_jedecnum = JEDEC_ATMEL;
14769 tg3_flag_set(tp, NVRAM_BUFFERED);
14770 tg3_flag_set(tp, FLASH);
14771
14772 switch (nvmpinstrp) {
14773 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14774 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14775 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14776 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14777 break;
14778 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14779 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14780 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14781 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14782 break;
14783 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14784 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14785 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14786 break;
14787 default:
14788 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14789 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14790 break;
14791 }
14792 break;
14793 case FLASH_5720VENDOR_M_ST_M25PE10:
14794 case FLASH_5720VENDOR_M_ST_M45PE10:
14795 case FLASH_5720VENDOR_A_ST_M25PE10:
14796 case FLASH_5720VENDOR_A_ST_M45PE10:
14797 case FLASH_5720VENDOR_M_ST_M25PE20:
14798 case FLASH_5720VENDOR_M_ST_M45PE20:
14799 case FLASH_5720VENDOR_A_ST_M25PE20:
14800 case FLASH_5720VENDOR_A_ST_M45PE20:
14801 case FLASH_5720VENDOR_M_ST_M25PE40:
14802 case FLASH_5720VENDOR_M_ST_M45PE40:
14803 case FLASH_5720VENDOR_A_ST_M25PE40:
14804 case FLASH_5720VENDOR_A_ST_M45PE40:
14805 case FLASH_5720VENDOR_M_ST_M25PE80:
14806 case FLASH_5720VENDOR_M_ST_M45PE80:
14807 case FLASH_5720VENDOR_A_ST_M25PE80:
14808 case FLASH_5720VENDOR_A_ST_M45PE80:
14809 case FLASH_5720VENDOR_ST_25USPT:
14810 case FLASH_5720VENDOR_ST_45USPT:
14811 tp->nvram_jedecnum = JEDEC_ST;
14812 tg3_flag_set(tp, NVRAM_BUFFERED);
14813 tg3_flag_set(tp, FLASH);
14814
14815 switch (nvmpinstrp) {
14816 case FLASH_5720VENDOR_M_ST_M25PE20:
14817 case FLASH_5720VENDOR_M_ST_M45PE20:
14818 case FLASH_5720VENDOR_A_ST_M25PE20:
14819 case FLASH_5720VENDOR_A_ST_M45PE20:
14820 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14821 break;
14822 case FLASH_5720VENDOR_M_ST_M25PE40:
14823 case FLASH_5720VENDOR_M_ST_M45PE40:
14824 case FLASH_5720VENDOR_A_ST_M25PE40:
14825 case FLASH_5720VENDOR_A_ST_M45PE40:
14826 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14827 break;
14828 case FLASH_5720VENDOR_M_ST_M25PE80:
14829 case FLASH_5720VENDOR_M_ST_M45PE80:
14830 case FLASH_5720VENDOR_A_ST_M25PE80:
14831 case FLASH_5720VENDOR_A_ST_M45PE80:
14832 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14833 break;
14834 default:
14835 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14836 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14837 break;
14838 }
14839 break;
14840 default:
14841 tg3_flag_set(tp, NO_NVRAM);
14842 return;
14843 }
14844
14845 tg3_nvram_get_pagesize(tp, nvcfg1);
14846 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14847 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14848
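/* On the 5762, double-check that the NVRAM actually carries a
 * recognizable image; otherwise treat the part as having no NVRAM.
 */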
14849 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14850 u32 val;
14851
14852 if (tg3_nvram_read(tp, 0, &val))
14853 return;
14854
14855 if (val != TG3_EEPROM_MAGIC &&
14856 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14857 tg3_flag_set(tp, NO_NVRAM);
14858 }
14859 }
14860
14861 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14862 static void tg3_nvram_init(struct tg3 *tp)
14863 {
14864 if (tg3_flag(tp, IS_SSB_CORE)) {
14865 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14866 tg3_flag_clear(tp, NVRAM);
14867 tg3_flag_clear(tp, NVRAM_BUFFERED);
14868 tg3_flag_set(tp, NO_NVRAM);
14869 return;
14870 }
14871
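/* Reset the EEPROM access state machine and program the default
 * clock period before working out what kind of NVRAM is attached.
 */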
14872 tw32_f(GRC_EEPROM_ADDR,
14873 (EEPROM_ADDR_FSM_RESET |
14874 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14875 EEPROM_ADDR_CLKPERD_SHIFT)));
14876
14877 msleep(1);
14878
14879 /* Enable seeprom accesses. */
14880 tw32_f(GRC_LOCAL_CTRL,
14881 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14882 udelay(100);
14883
14884 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14885 tg3_asic_rev(tp) != ASIC_REV_5701) {
14886 tg3_flag_set(tp, NVRAM);
14887
14888 if (tg3_nvram_lock(tp)) {
14889 netdev_warn(tp->dev,
14890 "Cannot get nvram lock, %s failed\n",
14891 __func__);
14892 return;
14893 }
14894 tg3_enable_nvram_access(tp);
14895
14896 tp->nvram_size = 0;
14897
14898 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14899 tg3_get_5752_nvram_info(tp);
14900 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14901 tg3_get_5755_nvram_info(tp);
14902 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14903 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14904 tg3_asic_rev(tp) == ASIC_REV_5785)
14905 tg3_get_5787_nvram_info(tp);
14906 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14907 tg3_get_5761_nvram_info(tp);
14908 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14909 tg3_get_5906_nvram_info(tp);
14910 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14911 tg3_flag(tp, 57765_CLASS))
14912 tg3_get_57780_nvram_info(tp);
14913 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14914 tg3_asic_rev(tp) == ASIC_REV_5719)
14915 tg3_get_5717_nvram_info(tp);
14916 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14917 tg3_asic_rev(tp) == ASIC_REV_5762)
14918 tg3_get_5720_nvram_info(tp);
14919 else
14920 tg3_get_nvram_info(tp);
14921
14922 if (tp->nvram_size == 0)
14923 tg3_get_nvram_size(tp);
14924
14925 tg3_disable_nvram_access(tp);
14926 tg3_nvram_unlock(tp);
14927
14928 } else {
14929 tg3_flag_clear(tp, NVRAM);
14930 tg3_flag_clear(tp, NVRAM_BUFFERED);
14931
14932 tg3_get_eeprom_size(tp);
14933 }
14934 }
14935
14936 struct subsys_tbl_ent {
14937 u16 subsys_vendor, subsys_devid;
14938 u32 phy_id;
14939 };
14940
14941 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14942 /* Broadcom boards. */
14943 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14944 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14945 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14946 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14947 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14948 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14949 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14950 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14951 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14952 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14953 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14954 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14955 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14956 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14957 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14958 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14959 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14960 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14961 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14962 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14963 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14964 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14965
14966 /* 3com boards. */
14967 { TG3PCI_SUBVENDOR_ID_3COM,
14968 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14969 { TG3PCI_SUBVENDOR_ID_3COM,
14970 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14971 { TG3PCI_SUBVENDOR_ID_3COM,
14972 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14973 { TG3PCI_SUBVENDOR_ID_3COM,
14974 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14975 { TG3PCI_SUBVENDOR_ID_3COM,
14976 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14977
14978 /* DELL boards. */
14979 { TG3PCI_SUBVENDOR_ID_DELL,
14980 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14981 { TG3PCI_SUBVENDOR_ID_DELL,
14982 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14983 { TG3PCI_SUBVENDOR_ID_DELL,
14984 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14985 { TG3PCI_SUBVENDOR_ID_DELL,
14986 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14987
14988 /* Compaq boards. */
14989 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14990 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14991 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14992 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14993 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14994 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14995 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14996 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14997 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14998 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14999
15000 /* IBM boards. */
15001 { TG3PCI_SUBVENDOR_ID_IBM,
15002 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15003 };
15004
15005 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15006 {
15007 int i;
15008
15009 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15010 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15011 tp->pdev->subsystem_vendor) &&
15012 (subsys_id_to_phy_id[i].subsys_devid ==
15013 tp->pdev->subsystem_device))
15014 return &subsys_id_to_phy_id[i];
15015 }
15016 return NULL;
15017 }
15018
15019 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15020 {
15021 u32 val;
15022
15023 tp->phy_id = TG3_PHY_ID_INVALID;
15024 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15025
15026 /* Assume an onboard device and WOL capable by default. */
15027 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15028 tg3_flag_set(tp, WOL_CAP);
15029
15030 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15031 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15032 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15033 tg3_flag_set(tp, IS_NIC);
15034 }
15035 val = tr32(VCPU_CFGSHDW);
15036 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15037 tg3_flag_set(tp, ASPM_WORKAROUND);
15038 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15039 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15040 tg3_flag_set(tp, WOL_ENABLE);
15041 device_set_wakeup_enable(&tp->pdev->dev, true);
15042 }
15043 goto done;
15044 }
15045
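/* The remaining configuration lives in NIC SRAM, deposited there by
 * the bootcode; trust it only if the signature magic is present.
 */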
15046 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15047 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15048 u32 nic_cfg, led_cfg;
15049 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15050 u32 nic_phy_id, ver, eeprom_phy_id;
15051 int eeprom_phy_serdes = 0;
15052
15053 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15054 tp->nic_sram_data_cfg = nic_cfg;
15055
15056 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15057 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15058 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15059 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15060 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15061 (ver > 0) && (ver < 0x100))
15062 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15063
15064 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15065 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15066
15067 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15068 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15069 tg3_asic_rev(tp) == ASIC_REV_5720)
15070 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15071
15072 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15073 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15074 eeprom_phy_serdes = 1;
15075
15076 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15077 if (nic_phy_id != 0) {
15078 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15079 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15080
15081 eeprom_phy_id = (id1 >> 16) << 10;
15082 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15083 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15084 } else
15085 eeprom_phy_id = 0;
15086
15087 tp->phy_id = eeprom_phy_id;
15088 if (eeprom_phy_serdes) {
15089 if (!tg3_flag(tp, 5705_PLUS))
15090 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15091 else
15092 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15093 }
15094
15095 if (tg3_flag(tp, 5750_PLUS))
15096 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15097 SHASTA_EXT_LED_MODE_MASK);
15098 else
15099 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15100
15101 switch (led_cfg) {
15102 default:
15103 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15104 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15105 break;
15106
15107 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15108 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15109 break;
15110
15111 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15112 tp->led_ctrl = LED_CTRL_MODE_MAC;
15113
15114 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15115 * read from some older 5700/5701 bootcode.
15116 */
15117 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15118 tg3_asic_rev(tp) == ASIC_REV_5701)
15119 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15120
15121 break;
15122
15123 case SHASTA_EXT_LED_SHARED:
15124 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15125 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15126 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15127 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15128 LED_CTRL_MODE_PHY_2);
15129
15130 if (tg3_flag(tp, 5717_PLUS) ||
15131 tg3_asic_rev(tp) == ASIC_REV_5762)
15132 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15133 LED_CTRL_BLINK_RATE_MASK;
15134
15135 break;
15136
15137 case SHASTA_EXT_LED_MAC:
15138 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15139 break;
15140
15141 case SHASTA_EXT_LED_COMBO:
15142 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15143 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15144 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15145 LED_CTRL_MODE_PHY_2);
15146 break;
15147
15148 }
15149
15150 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15151 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15152 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15153 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15154
15155 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15156 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15157
15158 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15159 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15160 if ((tp->pdev->subsystem_vendor ==
15161 PCI_VENDOR_ID_ARIMA) &&
15162 (tp->pdev->subsystem_device == 0x205a ||
15163 tp->pdev->subsystem_device == 0x2063))
15164 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15165 } else {
15166 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15167 tg3_flag_set(tp, IS_NIC);
15168 }
15169
15170 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15171 tg3_flag_set(tp, ENABLE_ASF);
15172 if (tg3_flag(tp, 5750_PLUS))
15173 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15174 }
15175
15176 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15177 tg3_flag(tp, 5750_PLUS))
15178 tg3_flag_set(tp, ENABLE_APE);
15179
15180 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15181 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15182 tg3_flag_clear(tp, WOL_CAP);
15183
15184 if (tg3_flag(tp, WOL_CAP) &&
15185 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15186 tg3_flag_set(tp, WOL_ENABLE);
15187 device_set_wakeup_enable(&tp->pdev->dev, true);
15188 }
15189
15190 if (cfg2 & (1 << 17))
15191 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15192
15193 /* SerDes signal pre-emphasis in register 0x590 is set by
15194 * the bootcode if bit 18 is set. */
15195 if (cfg2 & (1 << 18))
15196 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15197
15198 if ((tg3_flag(tp, 57765_PLUS) ||
15199 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15200 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15201 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15202 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15203
15204 if (tg3_flag(tp, PCI_EXPRESS)) {
15205 u32 cfg3;
15206
15207 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15208 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15209 !tg3_flag(tp, 57765_PLUS) &&
15210 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15211 tg3_flag_set(tp, ASPM_WORKAROUND);
15212 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15213 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15214 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15215 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15216 }
15217
15218 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15219 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15220 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15221 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15222 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15223 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15224
15225 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15226 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15227 }
15228 done:
15229 if (tg3_flag(tp, WOL_CAP))
15230 device_set_wakeup_enable(&tp->pdev->dev,
15231 tg3_flag(tp, WOL_ENABLE));
15232 else
15233 device_set_wakeup_capable(&tp->pdev->dev, false);
15234 }
15235
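/* Read one word from the APE OTP area. The NVRAM lock serializes
 * access to the OTP interface, and the read command is polled for up
 * to ~1 ms (100 x 10 us) before giving up with -EBUSY.
 */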
15236 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15237 {
15238 int i, err;
15239 u32 val2, off = offset * 8;
15240
15241 err = tg3_nvram_lock(tp);
15242 if (err)
15243 return err;
15244
15245 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15246 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15247 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15248 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15249 udelay(10);
15250
15251 for (i = 0; i < 100; i++) {
15252 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15253 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15254 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15255 break;
15256 }
15257 udelay(10);
15258 }
15259
15260 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15261
15262 tg3_nvram_unlock(tp);
15263 if (val2 & APE_OTP_STATUS_CMD_DONE)
15264 return 0;
15265
15266 return -EBUSY;
15267 }
15268
15269 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15270 {
15271 int i;
15272 u32 val;
15273
15274 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15275 tw32(OTP_CTRL, cmd);
15276
15277 /* Wait for up to 1 ms for command to execute. */
15278 for (i = 0; i < 100; i++) {
15279 val = tr32(OTP_STATUS);
15280 if (val & OTP_STATUS_CMD_DONE)
15281 break;
15282 udelay(10);
15283 }
15284
15285 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15286 }
15287
15288 /* Read the gphy configuration from the OTP region of the chip. The gphy
15289 * configuration is a 32-bit value that straddles the alignment boundary.
15290 * We do two 32-bit reads and then shift and merge the results.
15291 */
15292 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15293 {
15294 u32 bhalf_otp, thalf_otp;
15295
15296 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15297
15298 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15299 return 0;
15300
15301 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15302
15303 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15304 return 0;
15305
15306 thalf_otp = tr32(OTP_READ_DATA);
15307
15308 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15309
15310 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15311 return 0;
15312
15313 bhalf_otp = tr32(OTP_READ_DATA);
15314
15315 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15316 }
15317
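/* Seed the link configuration with every mode the PHY may advertise
 * and leave speed/duplex unknown until autonegotiation resolves them.
 */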
15318 static void tg3_phy_init_link_config(struct tg3 *tp)
15319 {
15320 u32 adv = ADVERTISED_Autoneg;
15321
15322 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15323 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15324 adv |= ADVERTISED_1000baseT_Half;
15325 adv |= ADVERTISED_1000baseT_Full;
15326 }
15327
15328 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15329 adv |= ADVERTISED_100baseT_Half |
15330 ADVERTISED_100baseT_Full |
15331 ADVERTISED_10baseT_Half |
15332 ADVERTISED_10baseT_Full |
15333 ADVERTISED_TP;
15334 else
15335 adv |= ADVERTISED_FIBRE;
15336
15337 tp->link_config.advertising = adv;
15338 tp->link_config.speed = SPEED_UNKNOWN;
15339 tp->link_config.duplex = DUPLEX_UNKNOWN;
15340 tp->link_config.autoneg = AUTONEG_ENABLE;
15341 tp->link_config.active_speed = SPEED_UNKNOWN;
15342 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15343
15344 tp->old_link = -1;
15345 }
15346
15347 static int tg3_phy_probe(struct tg3 *tp)
15348 {
15349 u32 hw_phy_id_1, hw_phy_id_2;
15350 u32 hw_phy_id, hw_phy_id_masked;
15351 int err;
15352
15353 /* flow control autonegotiation is default behavior */
15354 tg3_flag_set(tp, PAUSE_AUTONEG);
15355 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15356
15357 if (tg3_flag(tp, ENABLE_APE)) {
15358 switch (tp->pci_fn) {
15359 case 0:
15360 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15361 break;
15362 case 1:
15363 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15364 break;
15365 case 2:
15366 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15367 break;
15368 case 3:
15369 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15370 break;
15371 }
15372 }
15373
15374 if (!tg3_flag(tp, ENABLE_ASF) &&
15375 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15376 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15377 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15378 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15379
15380 if (tg3_flag(tp, USE_PHYLIB))
15381 return tg3_phy_init(tp);
15382
15383 /* Reading the PHY ID register can conflict with ASF
15384 * firmware access to the PHY hardware.
15385 */
15386 err = 0;
15387 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15388 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15389 } else {
15390 /* Now read the physical PHY_ID from the chip and verify
15391 * that it is sane. If it doesn't look good, we fall back
15392 * first to the hard-coded table based PHY_ID, and failing
15393 * that, to the value found in the eeprom area.
15394 */
15395 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15396 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15397
15398 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15399 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15400 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15401
15402 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15403 }
15404
15405 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15406 tp->phy_id = hw_phy_id;
15407 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15408 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15409 else
15410 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15411 } else {
15412 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15413 /* Do nothing, phy ID already set up in
15414 * tg3_get_eeprom_hw_cfg().
15415 */
15416 } else {
15417 struct subsys_tbl_ent *p;
15418
15419 /* No eeprom signature? Try the hardcoded
15420 * subsys device table.
15421 */
15422 p = tg3_lookup_by_subsys(tp);
15423 if (p) {
15424 tp->phy_id = p->phy_id;
15425 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15426 /* So far we have seen the IDs 0xbc050cd0,
15427 * 0xbc050f80 and 0xbc050c30 on devices
15428 * connected to a BCM4785, and there are
15429 * probably more. For now, just assume that
15430 * the phy is supported when it is connected
15431 * to an SSB core.
15432 */
15433 return -ENODEV;
15434 }
15435
15436 if (!tp->phy_id ||
15437 tp->phy_id == TG3_PHY_ID_BCM8002)
15438 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15439 }
15440 }
15441
15442 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15443 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15444 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15445 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15446 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15447 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15448 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15449 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15450 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15451 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15452
15453 tp->eee.supported = SUPPORTED_100baseT_Full |
15454 SUPPORTED_1000baseT_Full;
15455 tp->eee.advertised = ADVERTISED_100baseT_Full |
15456 ADVERTISED_1000baseT_Full;
15457 tp->eee.eee_enabled = 1;
15458 tp->eee.tx_lpi_enabled = 1;
15459 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15460 }
15461
15462 tg3_phy_init_link_config(tp);
15463
15464 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15465 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15466 !tg3_flag(tp, ENABLE_APE) &&
15467 !tg3_flag(tp, ENABLE_ASF)) {
15468 u32 bmsr, dummy;
15469
15470 tg3_readphy(tp, MII_BMSR, &bmsr);
15471 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15472 (bmsr & BMSR_LSTATUS))
15473 goto skip_phy_reset;
15474
15475 err = tg3_phy_reset(tp);
15476 if (err)
15477 return err;
15478
15479 tg3_phy_set_wirespeed(tp);
15480
15481 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15482 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15483 tp->link_config.flowctrl);
15484
15485 tg3_writephy(tp, MII_BMCR,
15486 BMCR_ANENABLE | BMCR_ANRESTART);
15487 }
15488 }
15489
15490 skip_phy_reset:
15491 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15492 err = tg3_init_5401phy_dsp(tp);
15493 if (err)
15494 return err;
15495
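/* The second DSP init below is deliberate; the code repeats it
 * without recording why, presumably so the settings stick.
 */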
15496 err = tg3_init_5401phy_dsp(tp);
15497 }
15498
15499 return err;
15500 }
15501
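/* Extract the board part number (and, for boards carrying the "1028"
 * manufacturer ID, i.e. Dell, a firmware version prefix) from the PCI
 * VPD read-only section. Without usable VPD, fall back to a name
 * derived from the PCI device ID.
 */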
15502 static void tg3_read_vpd(struct tg3 *tp)
15503 {
15504 u8 *vpd_data;
15505 unsigned int block_end, rosize, len;
15506 u32 vpdlen;
15507 int j, i = 0;
15508
15509 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15510 if (!vpd_data)
15511 goto out_no_vpd;
15512
15513 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15514 if (i < 0)
15515 goto out_not_found;
15516
15517 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15518 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15519 i += PCI_VPD_LRDT_TAG_SIZE;
15520
15521 if (block_end > vpdlen)
15522 goto out_not_found;
15523
15524 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15525 PCI_VPD_RO_KEYWORD_MFR_ID);
15526 if (j > 0) {
15527 len = pci_vpd_info_field_size(&vpd_data[j]);
15528
15529 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15530 if (j + len > block_end || len != 4 ||
15531 memcmp(&vpd_data[j], "1028", 4))
15532 goto partno;
15533
15534 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15535 PCI_VPD_RO_KEYWORD_VENDOR0);
15536 if (j < 0)
15537 goto partno;
15538
15539 len = pci_vpd_info_field_size(&vpd_data[j]);
15540
15541 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15542 if (j + len > block_end)
15543 goto partno;
15544
15545 if (len >= sizeof(tp->fw_ver))
15546 len = sizeof(tp->fw_ver) - 1;
15547 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15548 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15549 &vpd_data[j]);
15550 }
15551
15552 partno:
15553 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15554 PCI_VPD_RO_KEYWORD_PARTNO);
15555 if (i < 0)
15556 goto out_not_found;
15557
15558 len = pci_vpd_info_field_size(&vpd_data[i]);
15559
15560 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15561 if (len > TG3_BPN_SIZE ||
15562 (len + i) > vpdlen)
15563 goto out_not_found;
15564
15565 memcpy(tp->board_part_number, &vpd_data[i], len);
15566
15567 out_not_found:
15568 kfree(vpd_data);
15569 if (tp->board_part_number[0])
15570 return;
15571
15572 out_no_vpd:
15573 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15574 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15575 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15576 strcpy(tp->board_part_number, "BCM5717");
15577 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15578 strcpy(tp->board_part_number, "BCM5718");
15579 else
15580 goto nomatch;
15581 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15582 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15583 strcpy(tp->board_part_number, "BCM57780");
15584 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15585 strcpy(tp->board_part_number, "BCM57760");
15586 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15587 strcpy(tp->board_part_number, "BCM57790");
15588 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15589 strcpy(tp->board_part_number, "BCM57788");
15590 else
15591 goto nomatch;
15592 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15593 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15594 strcpy(tp->board_part_number, "BCM57761");
15595 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15596 strcpy(tp->board_part_number, "BCM57765");
15597 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15598 strcpy(tp->board_part_number, "BCM57781");
15599 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15600 strcpy(tp->board_part_number, "BCM57785");
15601 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15602 strcpy(tp->board_part_number, "BCM57791");
15603 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15604 strcpy(tp->board_part_number, "BCM57795");
15605 else
15606 goto nomatch;
15607 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15608 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15609 strcpy(tp->board_part_number, "BCM57762");
15610 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15611 strcpy(tp->board_part_number, "BCM57766");
15612 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15613 strcpy(tp->board_part_number, "BCM57782");
15614 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15615 strcpy(tp->board_part_number, "BCM57786");
15616 else
15617 goto nomatch;
15618 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15619 strcpy(tp->board_part_number, "BCM95906");
15620 } else {
15621 nomatch:
15622 strcpy(tp->board_part_number, "none");
15623 }
15624 }
15625
15626 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15627 {
15628 u32 val;
15629
15630 if (tg3_nvram_read(tp, offset, &val) ||
15631 (val & 0xfc000000) != 0x0c000000 ||
15632 tg3_nvram_read(tp, offset + 4, &val) ||
15633 val != 0)
15634 return 0;
15635
15636 return 1;
15637 }
15638
15639 static void tg3_read_bc_ver(struct tg3 *tp)
15640 {
15641 u32 val, offset, start, ver_offset;
15642 int i, dst_off;
15643 bool newver = false;
15644
15645 if (tg3_nvram_read(tp, 0xc, &offset) ||
15646 tg3_nvram_read(tp, 0x4, &start))
15647 return;
15648
15649 offset = tg3_nvram_logical_addr(tp, offset);
15650
15651 if (tg3_nvram_read(tp, offset, &val))
15652 return;
15653
15654 if ((val & 0xfc000000) == 0x0c000000) {
15655 if (tg3_nvram_read(tp, offset + 4, &val))
15656 return;
15657
15658 if (val == 0)
15659 newver = true;
15660 }
15661
15662 dst_off = strlen(tp->fw_ver);
15663
15664 if (newver) {
15665 if (TG3_VER_SIZE - dst_off < 16 ||
15666 tg3_nvram_read(tp, offset + 8, &ver_offset))
15667 return;
15668
15669 offset = offset + ver_offset - start;
15670 for (i = 0; i < 16; i += 4) {
15671 __be32 v;
15672 if (tg3_nvram_read_be32(tp, offset + i, &v))
15673 return;
15674
15675 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15676 }
15677 } else {
15678 u32 major, minor;
15679
15680 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15681 return;
15682
15683 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15684 TG3_NVM_BCVER_MAJSFT;
15685 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15686 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15687 "v%d.%02d", major, minor);
15688 }
15689 }
15690
15691 static void tg3_read_hwsb_ver(struct tg3 *tp)
15692 {
15693 u32 val, major, minor;
15694
15695 /* Use native endian representation */
15696 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15697 return;
15698
15699 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15700 TG3_NVM_HWSB_CFG1_MAJSFT;
15701 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15702 TG3_NVM_HWSB_CFG1_MINSFT;
15703
15704 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15705 }
15706
15707 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15708 {
15709 u32 offset, major, minor, build;
15710
15711 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15712
15713 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15714 return;
15715
15716 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15717 case TG3_EEPROM_SB_REVISION_0:
15718 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15719 break;
15720 case TG3_EEPROM_SB_REVISION_2:
15721 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15722 break;
15723 case TG3_EEPROM_SB_REVISION_3:
15724 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15725 break;
15726 case TG3_EEPROM_SB_REVISION_4:
15727 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15728 break;
15729 case TG3_EEPROM_SB_REVISION_5:
15730 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15731 break;
15732 case TG3_EEPROM_SB_REVISION_6:
15733 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15734 break;
15735 default:
15736 return;
15737 }
15738
15739 if (tg3_nvram_read(tp, offset, &val))
15740 return;
15741
15742 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15743 TG3_EEPROM_SB_EDH_BLD_SHFT;
15744 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15745 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15746 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15747
15748 if (minor > 99 || build > 26)
15749 return;
15750
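	/* Builds 1..26 are rendered as a trailing letter 'a'..'z' appended
	 * below; build 0 appends nothing, hence the upper bound rejected
	 * just above.
	 */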
15751 offset = strlen(tp->fw_ver);
15752 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15753 " v%d.%02d", major, minor);
15754
15755 if (build > 0) {
15756 offset = strlen(tp->fw_ver);
15757 if (offset < TG3_VER_SIZE - 1)
15758 tp->fw_ver[offset] = 'a' + build - 1;
15759 }
15760 }
15761
15762 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15763 {
15764 u32 val, offset, start;
15765 int i, vlen;
15766
15767 for (offset = TG3_NVM_DIR_START;
15768 offset < TG3_NVM_DIR_END;
15769 offset += TG3_NVM_DIRENT_SIZE) {
15770 if (tg3_nvram_read(tp, offset, &val))
15771 return;
15772
15773 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15774 break;
15775 }
15776
15777 if (offset == TG3_NVM_DIR_END)
15778 return;
15779
15780 if (!tg3_flag(tp, 5705_PLUS))
15781 start = 0x08000000;
15782 else if (tg3_nvram_read(tp, offset - 4, &start))
15783 return;
15784
15785 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15786 !tg3_fw_img_is_valid(tp, offset) ||
15787 tg3_nvram_read(tp, offset + 8, &val))
15788 return;
15789
15790 offset += val - start;
15791
15792 vlen = strlen(tp->fw_ver);
15793
15794 tp->fw_ver[vlen++] = ',';
15795 tp->fw_ver[vlen++] = ' ';
15796
15797 for (i = 0; i < 4; i++) {
15798 __be32 v;
15799 if (tg3_nvram_read_be32(tp, offset, &v))
15800 return;
15801
15802 offset += sizeof(v);
15803
15804 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15805 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15806 break;
15807 }
15808
15809 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15810 vlen += sizeof(v);
15811 }
15812 }
15813
15814 static void tg3_probe_ncsi(struct tg3 *tp)
15815 {
15816 u32 apedata;
15817
15818 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15819 if (apedata != APE_SEG_SIG_MAGIC)
15820 return;
15821
15822 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15823 if (!(apedata & APE_FW_STATUS_READY))
15824 return;
15825
15826 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15827 tg3_flag_set(tp, APE_HAS_NCSI);
15828 }
15829
15830 static void tg3_read_dash_ver(struct tg3 *tp)
15831 {
15832 int vlen;
15833 u32 apedata;
15834 char *fwtype;
15835
15836 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15837
15838 if (tg3_flag(tp, APE_HAS_NCSI))
15839 fwtype = "NCSI";
15840 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15841 fwtype = "SMASH";
15842 else
15843 fwtype = "DASH";
15844
15845 vlen = strlen(tp->fw_ver);
15846
15847 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15848 fwtype,
15849 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15850 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15851 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15852 (apedata & APE_FW_VERSION_BLDMSK));
15853 }
15854
15855 static void tg3_read_otp_ver(struct tg3 *tp)
15856 {
15857 u32 val, val2;
15858
15859 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15860 return;
15861
15862 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15863 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15864 TG3_OTP_MAGIC0_VALID(val)) {
15865 u64 val64 = (u64) val << 32 | val2;
15866 u32 ver = 0;
15867 int i, vlen;
15868
15869 for (i = 0; i < 7; i++) {
15870 if ((val64 & 0xff) == 0)
15871 break;
15872 ver = val64 & 0xff;
15873 val64 >>= 8;
15874 }
15875 vlen = strlen(tp->fw_ver);
15876 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15877 }
15878 }
15879
15880 static void tg3_read_fw_ver(struct tg3 *tp)
15881 {
15882 u32 val;
15883 bool vpd_vers = false;
15884
15885 if (tp->fw_ver[0] != 0)
15886 vpd_vers = true;
15887
15888 if (tg3_flag(tp, NO_NVRAM)) {
15889 strcat(tp->fw_ver, "sb");
15890 tg3_read_otp_ver(tp);
15891 return;
15892 }
15893
15894 if (tg3_nvram_read(tp, 0, &val))
15895 return;
15896
15897 if (val == TG3_EEPROM_MAGIC)
15898 tg3_read_bc_ver(tp);
15899 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15900 tg3_read_sb_ver(tp, val);
15901 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15902 tg3_read_hwsb_ver(tp);
15903
15904 if (tg3_flag(tp, ENABLE_ASF)) {
15905 if (tg3_flag(tp, ENABLE_APE)) {
15906 tg3_probe_ncsi(tp);
15907 if (!vpd_vers)
15908 tg3_read_dash_ver(tp);
15909 } else if (!vpd_vers) {
15910 tg3_read_mgmtfw_ver(tp);
15911 }
15912 }
15913
15914 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15915 }
15916
15917 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15918 {
15919 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15920 return TG3_RX_RET_MAX_SIZE_5717;
15921 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15922 return TG3_RX_RET_MAX_SIZE_5700;
15923 else
15924 return TG3_RX_RET_MAX_SIZE_5705;
15925 }
15926
15927 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15928 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15929 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15930 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15931 { },
15932 };
15933
15934 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15935 {
15936 struct pci_dev *peer;
15937 unsigned int func, devnr = tp->pdev->devfn & ~7;
15938
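	/* PCI encodes devfn as (slot << 3) | function, so masking off the
	 * low three bits yields function 0 of this slot; the loop below
	 * then scans all eight possible functions for a sibling device.
	 */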
15939 for (func = 0; func < 8; func++) {
15940 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15941 if (peer && peer != tp->pdev)
15942 break;
15943 pci_dev_put(peer);
15944 }
15945 	/* 5704 can be configured in single-port mode; set peer to
15946 * tp->pdev in that case.
15947 */
15948 if (!peer) {
15949 peer = tp->pdev;
15950 return peer;
15951 }
15952
15953 /*
15954 * We don't need to keep the refcount elevated; there's no way
15955 * to remove one half of this device without removing the other
15956 	 * to remove one half of this device without removing the other.
15957 pci_dev_put(peer);
15958
15959 return peer;
15960 }
15961
15962 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15963 {
15964 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15965 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15966 u32 reg;
15967
15968 /* All devices that use the alternate
15969 * ASIC REV location have a CPMU.
15970 */
15971 tg3_flag_set(tp, CPMU_PRESENT);
15972
15973 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15974 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15975 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15976 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15977 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15978 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15979 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15980 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15981 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15982 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15983 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15984 reg = TG3PCI_GEN2_PRODID_ASICREV;
15985 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15986 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15987 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15988 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15989 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15990 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15991 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15992 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15993 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15994 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15995 reg = TG3PCI_GEN15_PRODID_ASICREV;
15996 else
15997 reg = TG3PCI_PRODID_ASICREV;
15998
15999 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16000 }
16001
16002 /* Wrong chip ID in 5752 A0. This code can be removed later
16003 * as A0 is not in production.
16004 */
16005 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16006 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16007
16008 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16009 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16010
16011 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16012 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16013 tg3_asic_rev(tp) == ASIC_REV_5720)
16014 tg3_flag_set(tp, 5717_PLUS);
16015
16016 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16017 tg3_asic_rev(tp) == ASIC_REV_57766)
16018 tg3_flag_set(tp, 57765_CLASS);
16019
16020 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16021 tg3_asic_rev(tp) == ASIC_REV_5762)
16022 tg3_flag_set(tp, 57765_PLUS);
16023
16024 /* Intentionally exclude ASIC_REV_5906 */
16025 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16026 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16027 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16028 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16029 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16030 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16031 tg3_flag(tp, 57765_PLUS))
16032 tg3_flag_set(tp, 5755_PLUS);
16033
16034 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16035 tg3_asic_rev(tp) == ASIC_REV_5714)
16036 tg3_flag_set(tp, 5780_CLASS);
16037
16038 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16039 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16040 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16041 tg3_flag(tp, 5755_PLUS) ||
16042 tg3_flag(tp, 5780_CLASS))
16043 tg3_flag_set(tp, 5750_PLUS);
16044
16045 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16046 tg3_flag(tp, 5750_PLUS))
16047 tg3_flag_set(tp, 5705_PLUS);
16048 }
16049
16050 static bool tg3_10_100_only_device(struct tg3 *tp,
16051 const struct pci_device_id *ent)
16052 {
16053 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16054
16055 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16056 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16057 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16058 return true;
16059
16060 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16061 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16062 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16063 return true;
16064 } else {
16065 return true;
16066 }
16067 }
16068
16069 return false;
16070 }
16071
16072 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16073 {
16074 u32 misc_ctrl_reg;
16075 u32 pci_state_reg, grc_misc_cfg;
16076 u32 val;
16077 u16 pci_cmd;
16078 int err;
16079
16080 /* Force memory write invalidate off. If we leave it on,
16081 * then on 5700_BX chips we have to enable a workaround.
16082 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16083 	 * to match the cacheline size. The Broadcom driver has this
16084 	 * workaround but turns MWI off at all times, so it never uses
16085 	 * it. This seems to suggest that the workaround is insufficient.
16086 */
16087 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16088 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16089 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16090
16091 /* Important! -- Make sure register accesses are byteswapped
16092 * correctly. Also, for those chips that require it, make
16093 * sure that indirect register accesses are enabled before
16094 * the first operation.
16095 */
16096 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16097 &misc_ctrl_reg);
16098 tp->misc_host_ctrl |= (misc_ctrl_reg &
16099 MISC_HOST_CTRL_CHIPREV);
16100 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16101 tp->misc_host_ctrl);
16102
16103 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16104
16105 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16106 	 * we need to disable memory and use configuration cycles
16107 * only to access all registers. The 5702/03 chips
16108 * can mistakenly decode the special cycles from the
16109 * ICH chipsets as memory write cycles, causing corruption
16110 * of register and memory space. Only certain ICH bridges
16111 * will drive special cycles with non-zero data during the
16112 * address phase which can fall within the 5703's address
16113 * range. This is not an ICH bug as the PCI spec allows
16114 * non-zero address during special cycles. However, only
16115 * these ICH bridges are known to drive non-zero addresses
16116 * during special cycles.
16117 *
16118 * Since special cycles do not cross PCI bridges, we only
16119 * enable this workaround if the 5703 is on the secondary
16120 * bus of these ICH bridges.
16121 */
16122 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16123 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16124 static struct tg3_dev_id {
16125 u32 vendor;
16126 u32 device;
16127 u32 rev;
16128 } ich_chipsets[] = {
16129 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16130 PCI_ANY_ID },
16131 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16132 PCI_ANY_ID },
16133 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16134 0xa },
16135 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16136 PCI_ANY_ID },
16137 { },
16138 };
16139 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16140 struct pci_dev *bridge = NULL;
16141
16142 while (pci_id->vendor != 0) {
16143 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16144 bridge);
16145 if (!bridge) {
16146 pci_id++;
16147 continue;
16148 }
16149 if (pci_id->rev != PCI_ANY_ID) {
16150 if (bridge->revision > pci_id->rev)
16151 continue;
16152 }
16153 if (bridge->subordinate &&
16154 (bridge->subordinate->number ==
16155 tp->pdev->bus->number)) {
16156 tg3_flag_set(tp, ICH_WORKAROUND);
16157 pci_dev_put(bridge);
16158 break;
16159 }
16160 }
16161 }
16162
16163 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16164 static struct tg3_dev_id {
16165 u32 vendor;
16166 u32 device;
16167 } bridge_chipsets[] = {
16168 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16169 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16170 { },
16171 };
16172 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16173 struct pci_dev *bridge = NULL;
16174
16175 while (pci_id->vendor != 0) {
16176 bridge = pci_get_device(pci_id->vendor,
16177 pci_id->device,
16178 bridge);
16179 if (!bridge) {
16180 pci_id++;
16181 continue;
16182 }
16183 if (bridge->subordinate &&
16184 (bridge->subordinate->number <=
16185 tp->pdev->bus->number) &&
16186 (bridge->subordinate->busn_res.end >=
16187 tp->pdev->bus->number)) {
16188 tg3_flag_set(tp, 5701_DMA_BUG);
16189 pci_dev_put(bridge);
16190 break;
16191 }
16192 }
16193 }
16194
16195 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16196 	 * DMA addresses > 40-bit. This bridge may have additional
16197 	 * 57xx devices behind it in some 4-port NIC designs, for example.
16198 * Any tg3 device found behind the bridge will also need the 40-bit
16199 * DMA workaround.
16200 */
16201 if (tg3_flag(tp, 5780_CLASS)) {
16202 tg3_flag_set(tp, 40BIT_DMA_BUG);
16203 tp->msi_cap = tp->pdev->msi_cap;
16204 } else {
16205 struct pci_dev *bridge = NULL;
16206
16207 do {
16208 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16209 PCI_DEVICE_ID_SERVERWORKS_EPB,
16210 bridge);
16211 if (bridge && bridge->subordinate &&
16212 (bridge->subordinate->number <=
16213 tp->pdev->bus->number) &&
16214 (bridge->subordinate->busn_res.end >=
16215 tp->pdev->bus->number)) {
16216 tg3_flag_set(tp, 40BIT_DMA_BUG);
16217 pci_dev_put(bridge);
16218 break;
16219 }
16220 } while (bridge);
16221 }
16222
16223 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16224 tg3_asic_rev(tp) == ASIC_REV_5714)
16225 tp->pdev_peer = tg3_find_peer(tp);
16226
16227 /* Determine TSO capabilities */
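	/* Roughly: 57765-plus parts do TSO fully in hardware (HW_TSO_3),
	 * 5755-plus and 5906 use HW_TSO_2, other 5750-plus parts use
	 * HW_TSO_1, and older 5705-class parts fall back to firmware TSO,
	 * which requires one of the FIRMWARE_TG3TSO* images loaded below.
	 */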
16228 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16229 ; /* Do nothing. HW bug. */
16230 else if (tg3_flag(tp, 57765_PLUS))
16231 tg3_flag_set(tp, HW_TSO_3);
16232 else if (tg3_flag(tp, 5755_PLUS) ||
16233 tg3_asic_rev(tp) == ASIC_REV_5906)
16234 tg3_flag_set(tp, HW_TSO_2);
16235 else if (tg3_flag(tp, 5750_PLUS)) {
16236 tg3_flag_set(tp, HW_TSO_1);
16237 tg3_flag_set(tp, TSO_BUG);
16238 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16239 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16240 tg3_flag_clear(tp, TSO_BUG);
16241 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16242 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16243 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16244 tg3_flag_set(tp, FW_TSO);
16245 tg3_flag_set(tp, TSO_BUG);
16246 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16247 tp->fw_needed = FIRMWARE_TG3TSO5;
16248 else
16249 tp->fw_needed = FIRMWARE_TG3TSO;
16250 }
16251
16252 /* Selectively allow TSO based on operating conditions */
16253 if (tg3_flag(tp, HW_TSO_1) ||
16254 tg3_flag(tp, HW_TSO_2) ||
16255 tg3_flag(tp, HW_TSO_3) ||
16256 tg3_flag(tp, FW_TSO)) {
16257 /* For firmware TSO, assume ASF is disabled.
16258 * We'll disable TSO later if we discover ASF
16259 * is enabled in tg3_get_eeprom_hw_cfg().
16260 */
16261 tg3_flag_set(tp, TSO_CAPABLE);
16262 } else {
16263 tg3_flag_clear(tp, TSO_CAPABLE);
16264 tg3_flag_clear(tp, TSO_BUG);
16265 tp->fw_needed = NULL;
16266 }
16267
16268 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16269 tp->fw_needed = FIRMWARE_TG3;
16270
16271 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16272 tp->fw_needed = FIRMWARE_TG357766;
16273
16274 tp->irq_max = 1;
16275
16276 if (tg3_flag(tp, 5750_PLUS)) {
16277 tg3_flag_set(tp, SUPPORT_MSI);
16278 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16279 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16280 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16281 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16282 tp->pdev_peer == tp->pdev))
16283 tg3_flag_clear(tp, SUPPORT_MSI);
16284
16285 if (tg3_flag(tp, 5755_PLUS) ||
16286 tg3_asic_rev(tp) == ASIC_REV_5906) {
16287 tg3_flag_set(tp, 1SHOT_MSI);
16288 }
16289
16290 if (tg3_flag(tp, 57765_PLUS)) {
16291 tg3_flag_set(tp, SUPPORT_MSIX);
16292 tp->irq_max = TG3_IRQ_MAX_VECS;
16293 }
16294 }
16295
16296 tp->txq_max = 1;
16297 tp->rxq_max = 1;
16298 if (tp->irq_max > 1) {
16299 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16300 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16301
16302 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16303 tg3_asic_rev(tp) == ASIC_REV_5720)
16304 tp->txq_max = tp->irq_max - 1;
16305 }
16306
16307 if (tg3_flag(tp, 5755_PLUS) ||
16308 tg3_asic_rev(tp) == ASIC_REV_5906)
16309 tg3_flag_set(tp, SHORT_DMA_BUG);
16310
16311 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16312 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16313
16314 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16315 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16316 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16317 tg3_asic_rev(tp) == ASIC_REV_5762)
16318 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16319
16320 if (tg3_flag(tp, 57765_PLUS) &&
16321 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16322 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16323
16324 if (!tg3_flag(tp, 5705_PLUS) ||
16325 tg3_flag(tp, 5780_CLASS) ||
16326 tg3_flag(tp, USE_JUMBO_BDFLAG))
16327 tg3_flag_set(tp, JUMBO_CAPABLE);
16328
16329 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16330 &pci_state_reg);
16331
16332 if (pci_is_pcie(tp->pdev)) {
16333 u16 lnkctl;
16334
16335 tg3_flag_set(tp, PCI_EXPRESS);
16336
16337 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16338 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16339 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16340 tg3_flag_clear(tp, HW_TSO_2);
16341 tg3_flag_clear(tp, TSO_CAPABLE);
16342 }
16343 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16344 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16345 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16346 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16347 tg3_flag_set(tp, CLKREQ_BUG);
16348 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16349 tg3_flag_set(tp, L1PLLPD_EN);
16350 }
16351 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16352 /* BCM5785 devices are effectively PCIe devices, and should
16353 * follow PCIe codepaths, but do not have a PCIe capabilities
16354 * section.
16355 */
16356 tg3_flag_set(tp, PCI_EXPRESS);
16357 } else if (!tg3_flag(tp, 5705_PLUS) ||
16358 tg3_flag(tp, 5780_CLASS)) {
16359 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16360 if (!tp->pcix_cap) {
16361 dev_err(&tp->pdev->dev,
16362 "Cannot find PCI-X capability, aborting\n");
16363 return -EIO;
16364 }
16365
16366 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16367 tg3_flag_set(tp, PCIX_MODE);
16368 }
16369
16370 /* If we have an AMD 762 or VIA K8T800 chipset, write
16371 * reordering to the mailbox registers done by the host
16372 * controller can cause major troubles. We read back from
16373 * every mailbox register write to force the writes to be
16374 * posted to the chip in order.
16375 */
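	/* The flush idiom used by tg3_write_flush_reg32() amounts to the
	 * following (a sketch, not the exact implementation):
	 *
	 *	writel(val, tp->regs + off);
	 *	readl(tp->regs + off);	   read-back forces posting in order
	 */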
16376 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16377 !tg3_flag(tp, PCI_EXPRESS))
16378 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16379
16380 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16381 &tp->pci_cacheline_sz);
16382 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16383 &tp->pci_lat_timer);
16384 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16385 tp->pci_lat_timer < 64) {
16386 tp->pci_lat_timer = 64;
16387 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16388 tp->pci_lat_timer);
16389 }
16390
16391 /* Important! -- It is critical that the PCI-X hw workaround
16392 * situation is decided before the first MMIO register access.
16393 */
16394 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16395 /* 5700 BX chips need to have their TX producer index
16396 * mailboxes written twice to workaround a bug.
16397 */
16398 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16399
16400 /* If we are in PCI-X mode, enable register write workaround.
16401 *
16402 * The workaround is to use indirect register accesses
16403 * for all chip writes not to mailbox registers.
16404 */
16405 if (tg3_flag(tp, PCIX_MODE)) {
16406 u32 pm_reg;
16407
16408 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16409
16410 			/* The chip can have its power management PCI config
16411 * space registers clobbered due to this bug.
16412 * So explicitly force the chip into D0 here.
16413 */
16414 pci_read_config_dword(tp->pdev,
16415 tp->pdev->pm_cap + PCI_PM_CTRL,
16416 &pm_reg);
16417 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16418 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16419 pci_write_config_dword(tp->pdev,
16420 tp->pdev->pm_cap + PCI_PM_CTRL,
16421 pm_reg);
16422
16423 /* Also, force SERR#/PERR# in PCI command. */
16424 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16425 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16426 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16427 }
16428 }
16429
16430 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16431 tg3_flag_set(tp, PCI_HIGH_SPEED);
16432 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16433 tg3_flag_set(tp, PCI_32BIT);
16434
16435 /* Chip-specific fixup from Broadcom driver */
16436 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16437 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16438 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16439 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16440 }
16441
16442 /* Default fast path register access methods */
16443 tp->read32 = tg3_read32;
16444 tp->write32 = tg3_write32;
16445 tp->read32_mbox = tg3_read32;
16446 tp->write32_mbox = tg3_write32;
16447 tp->write32_tx_mbox = tg3_write32;
16448 tp->write32_rx_mbox = tg3_write32;
16449
16450 /* Various workaround register access methods */
16451 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16452 tp->write32 = tg3_write_indirect_reg32;
16453 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16454 (tg3_flag(tp, PCI_EXPRESS) &&
16455 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16456 /*
16457 * Back to back register writes can cause problems on these
16458 * chips, the workaround is to read back all reg writes
16459 * except those to mailbox regs.
16460 *
16461 * See tg3_write_indirect_reg32().
16462 */
16463 tp->write32 = tg3_write_flush_reg32;
16464 }
16465
16466 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16467 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16468 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16469 tp->write32_rx_mbox = tg3_write_flush_reg32;
16470 }
16471
16472 if (tg3_flag(tp, ICH_WORKAROUND)) {
16473 tp->read32 = tg3_read_indirect_reg32;
16474 tp->write32 = tg3_write_indirect_reg32;
16475 tp->read32_mbox = tg3_read_indirect_mbox;
16476 tp->write32_mbox = tg3_write_indirect_mbox;
16477 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16478 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16479
16480 iounmap(tp->regs);
16481 tp->regs = NULL;
16482
16483 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16484 pci_cmd &= ~PCI_COMMAND_MEMORY;
16485 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16486 }
16487 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16488 tp->read32_mbox = tg3_read32_mbox_5906;
16489 tp->write32_mbox = tg3_write32_mbox_5906;
16490 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16491 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16492 }
16493
16494 if (tp->write32 == tg3_write_indirect_reg32 ||
16495 (tg3_flag(tp, PCIX_MODE) &&
16496 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16497 tg3_asic_rev(tp) == ASIC_REV_5701)))
16498 tg3_flag_set(tp, SRAM_USE_CONFIG);
16499
16500 /* The memory arbiter has to be enabled in order for SRAM accesses
16501 * to succeed. Normally on powerup the tg3 chip firmware will make
16502 * sure it is enabled, but other entities such as system netboot
16503 * code might disable it.
16504 */
16505 val = tr32(MEMARB_MODE);
16506 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16507
16508 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16509 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16510 tg3_flag(tp, 5780_CLASS)) {
16511 if (tg3_flag(tp, PCIX_MODE)) {
16512 pci_read_config_dword(tp->pdev,
16513 tp->pcix_cap + PCI_X_STATUS,
16514 &val);
16515 tp->pci_fn = val & 0x7;
16516 }
16517 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16518 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16519 tg3_asic_rev(tp) == ASIC_REV_5720) {
16520 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16521 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16522 val = tr32(TG3_CPMU_STATUS);
16523
16524 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16525 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16526 else
16527 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16528 TG3_CPMU_STATUS_FSHFT_5719;
16529 }
16530
16531 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16532 tp->write32_tx_mbox = tg3_write_flush_reg32;
16533 tp->write32_rx_mbox = tg3_write_flush_reg32;
16534 }
16535
16536 /* Get eeprom hw config before calling tg3_set_power_state().
16537 * In particular, the TG3_FLAG_IS_NIC flag must be
16538 * determined before calling tg3_set_power_state() so that
16539 * we know whether or not to switch out of Vaux power.
16540 * When the flag is set, it means that GPIO1 is used for eeprom
16541 * write protect and also implies that it is a LOM where GPIOs
16542 * are not used to switch power.
16543 */
16544 tg3_get_eeprom_hw_cfg(tp);
16545
16546 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16547 tg3_flag_clear(tp, TSO_CAPABLE);
16548 tg3_flag_clear(tp, TSO_BUG);
16549 tp->fw_needed = NULL;
16550 }
16551
16552 if (tg3_flag(tp, ENABLE_APE)) {
16553 /* Allow reads and writes to the
16554 * APE register and memory space.
16555 */
16556 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16557 PCISTATE_ALLOW_APE_SHMEM_WR |
16558 PCISTATE_ALLOW_APE_PSPACE_WR;
16559 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16560 pci_state_reg);
16561
16562 tg3_ape_lock_init(tp);
16563 }
16564
16565 /* Set up tp->grc_local_ctrl before calling
16566 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16567 * will bring 5700's external PHY out of reset.
16568 * It is also used as eeprom write protect on LOMs.
16569 */
16570 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16571 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16572 tg3_flag(tp, EEPROM_WRITE_PROT))
16573 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16574 GRC_LCLCTRL_GPIO_OUTPUT1);
16575 /* Unused GPIO3 must be driven as output on 5752 because there
16576 * are no pull-up resistors on unused GPIO pins.
16577 */
16578 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16579 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16580
16581 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16582 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16583 tg3_flag(tp, 57765_CLASS))
16584 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16585
16586 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16587 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16588 /* Turn off the debug UART. */
16589 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16590 if (tg3_flag(tp, IS_NIC))
16591 /* Keep VMain power. */
16592 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16593 GRC_LCLCTRL_GPIO_OUTPUT0;
16594 }
16595
16596 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16597 tp->grc_local_ctrl |=
16598 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16599
16600 /* Switch out of Vaux if it is a NIC */
16601 tg3_pwrsrc_switch_to_vmain(tp);
16602
16603 /* Derive initial jumbo mode from MTU assigned in
16604 * ether_setup() via the alloc_etherdev() call
16605 */
16606 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16607 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16608
16609 /* Determine WakeOnLan speed to use. */
16610 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16611 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16612 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16613 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16614 tg3_flag_clear(tp, WOL_SPEED_100MB);
16615 } else {
16616 tg3_flag_set(tp, WOL_SPEED_100MB);
16617 }
16618
16619 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16620 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16621
16622 	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
16623 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16624 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16625 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16626 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16627 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16628 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16629 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16630
16631 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16632 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16633 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16634 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16635 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16636
16637 if (tg3_flag(tp, 5705_PLUS) &&
16638 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16639 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16640 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16641 !tg3_flag(tp, 57765_PLUS)) {
16642 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16643 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16644 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16645 tg3_asic_rev(tp) == ASIC_REV_5761) {
16646 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16647 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16648 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16649 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16650 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16651 } else
16652 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16653 }
16654
16655 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16656 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16657 tp->phy_otp = tg3_read_otp_phycfg(tp);
16658 if (tp->phy_otp == 0)
16659 tp->phy_otp = TG3_OTP_DEFAULT;
16660 }
16661
16662 if (tg3_flag(tp, CPMU_PRESENT))
16663 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16664 else
16665 tp->mi_mode = MAC_MI_MODE_BASE;
16666
16667 tp->coalesce_mode = 0;
16668 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16669 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16670 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16671
16672 /* Set these bits to enable statistics workaround. */
16673 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16674 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16675 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16676 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16677 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16678 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16679 }
16680
16681 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16682 tg3_asic_rev(tp) == ASIC_REV_57780)
16683 tg3_flag_set(tp, USE_PHYLIB);
16684
16685 err = tg3_mdio_init(tp);
16686 if (err)
16687 return err;
16688
16689 /* Initialize data/descriptor byte/word swapping. */
16690 val = tr32(GRC_MODE);
16691 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16692 tg3_asic_rev(tp) == ASIC_REV_5762)
16693 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16694 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16695 GRC_MODE_B2HRX_ENABLE |
16696 GRC_MODE_HTX2B_ENABLE |
16697 GRC_MODE_HOST_STACKUP);
16698 else
16699 val &= GRC_MODE_HOST_STACKUP;
16700
16701 tw32(GRC_MODE, val | tp->grc_mode);
16702
16703 tg3_switch_clocks(tp);
16704
16705 /* Clear this out for sanity. */
16706 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16707
16708 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16709 tw32(TG3PCI_REG_BASE_ADDR, 0);
16710
16711 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16712 &pci_state_reg);
16713 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16714 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16715 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16716 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16717 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16718 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16719 void __iomem *sram_base;
16720
16721 /* Write some dummy words into the SRAM status block
16722 * area, see if it reads back correctly. If the return
16723 * value is bad, force enable the PCIX workaround.
16724 */
16725 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16726
16727 writel(0x00000000, sram_base);
16728 writel(0x00000000, sram_base + 4);
16729 writel(0xffffffff, sram_base + 4);
16730 if (readl(sram_base) != 0x00000000)
16731 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16732 }
16733 }
16734
16735 udelay(50);
16736 tg3_nvram_init(tp);
16737
16738 /* If the device has an NVRAM, no need to load patch firmware */
16739 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16740 !tg3_flag(tp, NO_NVRAM))
16741 tp->fw_needed = NULL;
16742
16743 grc_misc_cfg = tr32(GRC_MISC_CFG);
16744 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16745
16746 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16747 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16748 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16749 tg3_flag_set(tp, IS_5788);
16750
16751 if (!tg3_flag(tp, IS_5788) &&
16752 tg3_asic_rev(tp) != ASIC_REV_5700)
16753 tg3_flag_set(tp, TAGGED_STATUS);
16754 if (tg3_flag(tp, TAGGED_STATUS)) {
16755 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16756 HOSTCC_MODE_CLRTICK_TXBD);
16757
16758 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16759 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16760 tp->misc_host_ctrl);
16761 }
16762
16763 /* Preserve the APE MAC_MODE bits */
16764 if (tg3_flag(tp, ENABLE_APE))
16765 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16766 else
16767 tp->mac_mode = 0;
16768
16769 if (tg3_10_100_only_device(tp, ent))
16770 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16771
16772 err = tg3_phy_probe(tp);
16773 if (err) {
16774 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16775 /* ... but do not return immediately ... */
16776 tg3_mdio_fini(tp);
16777 }
16778
16779 tg3_read_vpd(tp);
16780 tg3_read_fw_ver(tp);
16781
16782 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16783 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16784 } else {
16785 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16786 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16787 else
16788 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16789 }
16790
16791 /* 5700 {AX,BX} chips have a broken status block link
16792 * change bit implementation, so we must use the
16793 * status register in those cases.
16794 */
16795 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16796 tg3_flag_set(tp, USE_LINKCHG_REG);
16797 else
16798 tg3_flag_clear(tp, USE_LINKCHG_REG);
16799
16800 	/* The led_ctrl is set during tg3_phy_probe; here we might
16801 * have to force the link status polling mechanism based
16802 * upon subsystem IDs.
16803 */
16804 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16805 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16806 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16807 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16808 tg3_flag_set(tp, USE_LINKCHG_REG);
16809 }
16810
16811 /* For all SERDES we poll the MAC status register. */
16812 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16813 tg3_flag_set(tp, POLL_SERDES);
16814 else
16815 tg3_flag_clear(tp, POLL_SERDES);
16816
16817 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16818 tg3_flag_set(tp, POLL_CPMU_LINK);
16819
16820 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16821 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16822 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16823 tg3_flag(tp, PCIX_MODE)) {
16824 tp->rx_offset = NET_SKB_PAD;
16825 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16826 tp->rx_copy_thresh = ~(u16)0;
16827 #endif
16828 }
16829
16830 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16831 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16832 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16833
16834 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16835
16836 /* Increment the rx prod index on the rx std ring by at most
16837 	 * 8 for these chips to work around hw errata.
16838 */
16839 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16840 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16841 tg3_asic_rev(tp) == ASIC_REV_5755)
16842 tp->rx_std_max_post = 8;
16843
16844 if (tg3_flag(tp, ASPM_WORKAROUND))
16845 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16846 PCIE_PWR_MGMT_L1_THRESH_MSK;
16847
16848 return err;
16849 }
16850
16851 #ifdef CONFIG_SPARC
16852 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16853 {
16854 struct net_device *dev = tp->dev;
16855 struct pci_dev *pdev = tp->pdev;
16856 struct device_node *dp = pci_device_to_OF_node(pdev);
16857 const unsigned char *addr;
16858 int len;
16859
16860 addr = of_get_property(dp, "local-mac-address", &len);
16861 if (addr && len == ETH_ALEN) {
16862 memcpy(dev->dev_addr, addr, ETH_ALEN);
16863 return 0;
16864 }
16865 return -ENODEV;
16866 }
16867
16868 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16869 {
16870 struct net_device *dev = tp->dev;
16871
16872 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16873 return 0;
16874 }
16875 #endif
16876
16877 static int tg3_get_device_address(struct tg3 *tp)
16878 {
16879 struct net_device *dev = tp->dev;
16880 u32 hi, lo, mac_offset;
16881 int addr_ok = 0;
16882 int err;
16883
16884 #ifdef CONFIG_SPARC
16885 if (!tg3_get_macaddr_sparc(tp))
16886 return 0;
16887 #endif
16888
16889 if (tg3_flag(tp, IS_SSB_CORE)) {
16890 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16891 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16892 return 0;
16893 }
16894
16895 mac_offset = 0x7c;
16896 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16897 tg3_flag(tp, 5780_CLASS)) {
16898 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16899 mac_offset = 0xcc;
16900 if (tg3_nvram_lock(tp))
16901 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16902 else
16903 tg3_nvram_unlock(tp);
16904 } else if (tg3_flag(tp, 5717_PLUS)) {
16905 if (tp->pci_fn & 1)
16906 mac_offset = 0xcc;
16907 if (tp->pci_fn > 1)
16908 mac_offset += 0x18c;
16909 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16910 mac_offset = 0x10;
16911
16912 /* First try to get it from MAC address mailbox. */
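	/* Bootcode stores 0x484b ("HK" in ASCII) in the upper half of the
	 * high mailbox word as a validity signature for the address bytes
	 * that follow.
	 */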
16913 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16914 if ((hi >> 16) == 0x484b) {
16915 dev->dev_addr[0] = (hi >> 8) & 0xff;
16916 dev->dev_addr[1] = (hi >> 0) & 0xff;
16917
16918 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16919 dev->dev_addr[2] = (lo >> 24) & 0xff;
16920 dev->dev_addr[3] = (lo >> 16) & 0xff;
16921 dev->dev_addr[4] = (lo >> 8) & 0xff;
16922 dev->dev_addr[5] = (lo >> 0) & 0xff;
16923
16924 /* Some old bootcode may report a 0 MAC address in SRAM */
16925 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16926 }
16927 if (!addr_ok) {
16928 /* Next, try NVRAM. */
16929 if (!tg3_flag(tp, NO_NVRAM) &&
16930 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16931 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16932 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16933 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16934 }
16935 /* Finally just fetch it out of the MAC control regs. */
16936 else {
16937 hi = tr32(MAC_ADDR_0_HIGH);
16938 lo = tr32(MAC_ADDR_0_LOW);
16939
16940 dev->dev_addr[5] = lo & 0xff;
16941 dev->dev_addr[4] = (lo >> 8) & 0xff;
16942 dev->dev_addr[3] = (lo >> 16) & 0xff;
16943 dev->dev_addr[2] = (lo >> 24) & 0xff;
16944 dev->dev_addr[1] = hi & 0xff;
16945 dev->dev_addr[0] = (hi >> 8) & 0xff;
16946 }
16947 }
16948
16949 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16950 #ifdef CONFIG_SPARC
16951 if (!tg3_get_default_macaddr_sparc(tp))
16952 return 0;
16953 #endif
16954 return -EINVAL;
16955 }
16956 return 0;
16957 }
16958
16959 #define BOUNDARY_SINGLE_CACHELINE 1
16960 #define BOUNDARY_MULTI_CACHELINE 2
16961
16962 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16963 {
16964 int cacheline_size;
16965 u8 byte;
16966 int goal;
16967
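	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, hence the
	 * multiply by four below; a value of zero means the size is
	 * unknown, and the driver conservatively assumes 1024 bytes.
	 */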
16968 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16969 if (byte == 0)
16970 cacheline_size = 1024;
16971 else
16972 cacheline_size = (int) byte * 4;
16973
16974 /* On 5703 and later chips, the boundary bits have no
16975 * effect.
16976 */
16977 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16978 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16979 !tg3_flag(tp, PCI_EXPRESS))
16980 goto out;
16981
16982 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16983 goal = BOUNDARY_MULTI_CACHELINE;
16984 #else
16985 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16986 goal = BOUNDARY_SINGLE_CACHELINE;
16987 #else
16988 goal = 0;
16989 #endif
16990 #endif
16991
16992 if (tg3_flag(tp, 57765_PLUS)) {
16993 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16994 goto out;
16995 }
16996
16997 if (!goal)
16998 goto out;
16999
17000 /* PCI controllers on most RISC systems tend to disconnect
17001 * when a device tries to burst across a cache-line boundary.
17002 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17003 *
17004 * Unfortunately, for PCI-E there are only limited
17005 * write-side controls for this, and thus for reads
17006 * we will still get the disconnects. We'll also waste
17007 * these PCI cycles for both read and write for chips
17008 * other than 5700 and 5701 which do not implement the
17009 * boundary bits.
17010 */
17011 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17012 switch (cacheline_size) {
17013 case 16:
17014 case 32:
17015 case 64:
17016 case 128:
17017 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17018 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17019 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17020 } else {
17021 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17022 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17023 }
17024 break;
17025
17026 case 256:
17027 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17028 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17029 break;
17030
17031 default:
17032 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17033 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17034 break;
17035 }
17036 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17037 switch (cacheline_size) {
17038 case 16:
17039 case 32:
17040 case 64:
17041 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17042 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17043 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17044 break;
17045 }
17046 /* fallthrough */
17047 case 128:
17048 default:
17049 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17050 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17051 break;
17052 }
17053 } else {
17054 switch (cacheline_size) {
17055 case 16:
17056 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17057 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17058 DMA_RWCTRL_WRITE_BNDRY_16);
17059 break;
17060 }
17061 /* fallthrough */
17062 case 32:
17063 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17064 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17065 DMA_RWCTRL_WRITE_BNDRY_32);
17066 break;
17067 }
17068 /* fallthrough */
17069 case 64:
17070 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17071 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17072 DMA_RWCTRL_WRITE_BNDRY_64);
17073 break;
17074 }
17075 /* fallthrough */
17076 case 128:
17077 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17078 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17079 DMA_RWCTRL_WRITE_BNDRY_128);
17080 break;
17081 }
17082 /* fallthrough */
17083 case 256:
17084 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17085 DMA_RWCTRL_WRITE_BNDRY_256);
17086 break;
17087 case 512:
17088 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17089 DMA_RWCTRL_WRITE_BNDRY_512);
17090 break;
17091 case 1024:
17092 default:
17093 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17094 DMA_RWCTRL_WRITE_BNDRY_1024);
17095 break;
17096 }
17097 }
17098
17099 out:
17100 return val;
17101 }
17102
17103 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17104 int size, bool to_device)
17105 {
17106 struct tg3_internal_buffer_desc test_desc;
17107 u32 sram_dma_descs;
17108 int i, ret;
17109
17110 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17111
17112 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17113 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17114 tw32(RDMAC_STATUS, 0);
17115 tw32(WDMAC_STATUS, 0);
17116
17117 tw32(BUFMGR_MODE, 0);
17118 tw32(FTQ_RESET, 0);
17119
17120 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17121 test_desc.addr_lo = buf_dma & 0xffffffff;
17122 test_desc.nic_mbuf = 0x00002100;
17123 test_desc.len = size;
17124
17125 /*
17126 	 * HP ZX1 was seeing test failures for 5701 cards running at 33 MHz
17127 * the *second* time the tg3 driver was getting loaded after an
17128 * initial scan.
17129 *
17130 * Broadcom tells me:
17131 * ...the DMA engine is connected to the GRC block and a DMA
17132 * reset may affect the GRC block in some unpredictable way...
17133 * The behavior of resets to individual blocks has not been tested.
17134 *
17135 * Broadcom noted the GRC reset will also reset all sub-components.
17136 */
17137 if (to_device) {
17138 test_desc.cqid_sqid = (13 << 8) | 2;
17139
17140 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17141 udelay(40);
17142 } else {
17143 test_desc.cqid_sqid = (16 << 8) | 7;
17144
17145 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17146 udelay(40);
17147 }
17148 test_desc.flags = 0x00000005;
17149
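	/* The descriptor is copied into NIC SRAM one word at a time via
	 * the indirect memory window: the target SRAM address goes into
	 * TG3PCI_MEM_WIN_BASE_ADDR and the data word into
	 * TG3PCI_MEM_WIN_DATA, both as plain PCI config-space writes.
	 */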
17150 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17151 u32 val;
17152
17153 val = *(((u32 *)&test_desc) + i);
17154 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17155 sram_dma_descs + (i * sizeof(u32)));
17156 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17157 }
17158 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17159
17160 if (to_device)
17161 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17162 else
17163 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17164
17165 ret = -ENODEV;
17166 for (i = 0; i < 40; i++) {
17167 u32 val;
17168
17169 if (to_device)
17170 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17171 else
17172 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17173 if ((val & 0xffff) == sram_dma_descs) {
17174 ret = 0;
17175 break;
17176 }
17177
17178 udelay(100);
17179 }
17180
17181 return ret;
17182 }
17183
17184 #define TEST_BUFFER_SIZE 0x2000
17185
17186 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
17187 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17188 { },
17189 };
17190
17191 static int tg3_test_dma(struct tg3 *tp)
17192 {
17193 dma_addr_t buf_dma;
17194 u32 *buf, saved_dma_rwctrl;
17195 int ret = 0;
17196
17197 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17198 &buf_dma, GFP_KERNEL);
17199 if (!buf) {
17200 ret = -ENOMEM;
17201 goto out_nofree;
17202 }
17203
17204 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17205 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17206
17207 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17208
17209 if (tg3_flag(tp, 57765_PLUS))
17210 goto out;
17211
17212 if (tg3_flag(tp, PCI_EXPRESS)) {
17213 /* DMA read watermark not used on PCIE */
17214 tp->dma_rwctrl |= 0x00180000;
17215 } else if (!tg3_flag(tp, PCIX_MODE)) {
17216 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17217 tg3_asic_rev(tp) == ASIC_REV_5750)
17218 tp->dma_rwctrl |= 0x003f0000;
17219 else
17220 tp->dma_rwctrl |= 0x003f000f;
17221 } else {
17222 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17223 tg3_asic_rev(tp) == ASIC_REV_5704) {
17224 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17225 u32 read_water = 0x7;
17226
17227 /* If the 5704 is behind the EPB bridge, we can
17228 * do the less restrictive ONE_DMA workaround for
17229 * better performance.
17230 */
17231 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17232 tg3_asic_rev(tp) == ASIC_REV_5704)
17233 tp->dma_rwctrl |= 0x8000;
17234 else if (ccval == 0x6 || ccval == 0x7)
17235 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17236
17237 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17238 read_water = 4;
17239 /* Set bit 23 to enable PCIX hw bug fix */
17240 tp->dma_rwctrl |=
17241 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17242 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17243 (1 << 23);
17244 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17245 /* 5780 always in PCIX mode */
17246 tp->dma_rwctrl |= 0x00144000;
17247 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17248 /* 5714 always in PCIX mode */
17249 tp->dma_rwctrl |= 0x00148000;
17250 } else {
17251 tp->dma_rwctrl |= 0x001b000f;
17252 }
17253 }
17254 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17255 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17256
17257 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17258 tg3_asic_rev(tp) == ASIC_REV_5704)
17259 tp->dma_rwctrl &= 0xfffffff0;
17260
17261 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17262 tg3_asic_rev(tp) == ASIC_REV_5701) {
17263 /* Remove this if it causes problems for some boards. */
17264 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17265
17266 /* On 5700/5701 chips, we need to set this bit.
17267 * Otherwise the chip will issue cacheline transactions
17268 * to streamable DMA memory with not all the byte
17269 * enables turned on. This is an error on several
17270 * RISC PCI controllers, in particular sparc64.
17271 *
17272 * On 5703/5704 chips, this bit has been reassigned
17273 * a different meaning. In particular, it is used
17274 * on those chips to enable a PCI-X workaround.
17275 */
17276 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17277 }
17278
17279 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17280
17281
17282 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17283 tg3_asic_rev(tp) != ASIC_REV_5701)
17284 goto out;
17285
17286 	/* It is best to perform the DMA test with the maximum write burst
17287 	 * size to expose the 5700/5701 write DMA bug.
17288 */
17289 saved_dma_rwctrl = tp->dma_rwctrl;
17290 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17291 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17292
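	/* Test strategy: fill the buffer with an index pattern, DMA it to
	 * the chip and back, and verify. On a mismatch, tighten the write
	 * boundary to 16 bytes and retry once; a second failure is fatal.
	 */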
17293 while (1) {
17294 u32 *p = buf, i;
17295
17296 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17297 p[i] = i;
17298
17299 /* Send the buffer to the chip. */
17300 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17301 if (ret) {
17302 dev_err(&tp->pdev->dev,
17303 "%s: Buffer write failed. err = %d\n",
17304 __func__, ret);
17305 break;
17306 }
17307
17308 /* Now read it back. */
17309 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17310 if (ret) {
17311 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17312 "err = %d\n", __func__, ret);
17313 break;
17314 }
17315
17316 /* Verify it. */
17317 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17318 if (p[i] == i)
17319 continue;
17320
17321 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17322 DMA_RWCTRL_WRITE_BNDRY_16) {
17323 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17324 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17325 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17326 break;
17327 } else {
17328 dev_err(&tp->pdev->dev,
17329 "%s: Buffer corrupted on read back! "
17330 "(%d != %d)\n", __func__, p[i], i);
17331 ret = -ENODEV;
17332 goto out;
17333 }
17334 }
17335
17336 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17337 /* Success. */
17338 ret = 0;
17339 break;
17340 }
17341 }
17342 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17343 DMA_RWCTRL_WRITE_BNDRY_16) {
17344 		/* DMA test passed without adjusting the DMA boundary;
17345 * now look for chipsets that are known to expose the
17346 * DMA bug without failing the test.
17347 */
17348 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17349 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17350 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17351 } else {
17352 /* Safe to use the calculated DMA boundary. */
17353 tp->dma_rwctrl = saved_dma_rwctrl;
17354 }
17355
17356 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17357 }
17358
17359 out:
17360 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17361 out_nofree:
17362 return ret;
17363 }
17364
17365 static void tg3_init_bufmgr_config(struct tg3 *tp)
17366 {
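/* Select mbuf/DMA watermark defaults by chip family; the jumbo-frame
 * watermarks are tracked separately since jumbo traffic needs deeper
 * buffering.
 */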
17367 if (tg3_flag(tp, 57765_PLUS)) {
17368 tp->bufmgr_config.mbuf_read_dma_low_water =
17369 DEFAULT_MB_RDMA_LOW_WATER_5705;
17370 tp->bufmgr_config.mbuf_mac_rx_low_water =
17371 DEFAULT_MB_MACRX_LOW_WATER_57765;
17372 tp->bufmgr_config.mbuf_high_water =
17373 DEFAULT_MB_HIGH_WATER_57765;
17374
17375 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17376 DEFAULT_MB_RDMA_LOW_WATER_5705;
17377 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17378 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17379 tp->bufmgr_config.mbuf_high_water_jumbo =
17380 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17381 } else if (tg3_flag(tp, 5705_PLUS)) {
17382 tp->bufmgr_config.mbuf_read_dma_low_water =
17383 DEFAULT_MB_RDMA_LOW_WATER_5705;
17384 tp->bufmgr_config.mbuf_mac_rx_low_water =
17385 DEFAULT_MB_MACRX_LOW_WATER_5705;
17386 tp->bufmgr_config.mbuf_high_water =
17387 DEFAULT_MB_HIGH_WATER_5705;
17388 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17389 tp->bufmgr_config.mbuf_mac_rx_low_water =
17390 DEFAULT_MB_MACRX_LOW_WATER_5906;
17391 tp->bufmgr_config.mbuf_high_water =
17392 DEFAULT_MB_HIGH_WATER_5906;
17393 }
17394
17395 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17396 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17397 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17398 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17399 tp->bufmgr_config.mbuf_high_water_jumbo =
17400 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17401 } else {
17402 tp->bufmgr_config.mbuf_read_dma_low_water =
17403 DEFAULT_MB_RDMA_LOW_WATER;
17404 tp->bufmgr_config.mbuf_mac_rx_low_water =
17405 DEFAULT_MB_MACRX_LOW_WATER;
17406 tp->bufmgr_config.mbuf_high_water =
17407 DEFAULT_MB_HIGH_WATER;
17408
17409 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17410 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17411 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17412 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17413 tp->bufmgr_config.mbuf_high_water_jumbo =
17414 DEFAULT_MB_HIGH_WATER_JUMBO;
17415 }
17416
17417 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17418 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17419 }
17420
17421 static char *tg3_phy_string(struct tg3 *tp)
17422 {
17423 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17424 case TG3_PHY_ID_BCM5400: return "5400";
17425 case TG3_PHY_ID_BCM5401: return "5401";
17426 case TG3_PHY_ID_BCM5411: return "5411";
17427 case TG3_PHY_ID_BCM5701: return "5701";
17428 case TG3_PHY_ID_BCM5703: return "5703";
17429 case TG3_PHY_ID_BCM5704: return "5704";
17430 case TG3_PHY_ID_BCM5705: return "5705";
17431 case TG3_PHY_ID_BCM5750: return "5750";
17432 case TG3_PHY_ID_BCM5752: return "5752";
17433 case TG3_PHY_ID_BCM5714: return "5714";
17434 case TG3_PHY_ID_BCM5780: return "5780";
17435 case TG3_PHY_ID_BCM5755: return "5755";
17436 case TG3_PHY_ID_BCM5787: return "5787";
17437 case TG3_PHY_ID_BCM5784: return "5784";
17438 case TG3_PHY_ID_BCM5756: return "5722/5756";
17439 case TG3_PHY_ID_BCM5906: return "5906";
17440 case TG3_PHY_ID_BCM5761: return "5761";
17441 case TG3_PHY_ID_BCM5718C: return "5718C";
17442 case TG3_PHY_ID_BCM5718S: return "5718S";
17443 case TG3_PHY_ID_BCM57765: return "57765";
17444 case TG3_PHY_ID_BCM5719C: return "5719C";
17445 case TG3_PHY_ID_BCM5720C: return "5720C";
17446 case TG3_PHY_ID_BCM5762: return "5762C";
17447 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17448 case 0: return "serdes";
17449 default: return "unknown";
17450 }
17451 }
17452
17453 static char *tg3_bus_string(struct tg3 *tp, char *str)
17454 {
17455 if (tg3_flag(tp, PCI_EXPRESS)) {
17456 strcpy(str, "PCI Express");
17457 return str;
17458 } else if (tg3_flag(tp, PCIX_MODE)) {
17459 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17460
17461 strcpy(str, "PCIX:");
17462
17463 if ((clock_ctrl == 7) ||
17464 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17465 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17466 strcat(str, "133MHz");
17467 else if (clock_ctrl == 0)
17468 strcat(str, "33MHz");
17469 else if (clock_ctrl == 2)
17470 strcat(str, "50MHz");
17471 else if (clock_ctrl == 4)
17472 strcat(str, "66MHz");
17473 else if (clock_ctrl == 6)
17474 strcat(str, "100MHz");
17475 } else {
17476 strcpy(str, "PCI:");
17477 if (tg3_flag(tp, PCI_HIGH_SPEED))
17478 strcat(str, "66MHz");
17479 else
17480 strcat(str, "33MHz");
17481 }
17482 if (tg3_flag(tp, PCI_32BIT))
17483 strcat(str, ":32-bit");
17484 else
17485 strcat(str, ":64-bit");
17486 return str;
17487 }
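/* Example (hypothetical part): a PCI-X device clocked at 133MHz with
 * a 64-bit bus yields "PCIX:133MHz:64-bit". Callers supply the
 * buffer; see the char str[40] in tg3_init_one() below.
 */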
17488
17489 static void tg3_init_coal(struct tg3 *tp)
17490 {
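/* Seed the ethtool coalescing parameters with the driver's
 * low-latency defaults; CLRTICK-capable and 5705+ parts get their
 * adjustments below.
 */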
17491 struct ethtool_coalesce *ec = &tp->coal;
17492
17493 memset(ec, 0, sizeof(*ec));
17494 ec->cmd = ETHTOOL_GCOALESCE;
17495 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17496 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17497 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17498 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17499 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17500 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17501 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17502 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17503 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17504
17505 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17506 HOSTCC_MODE_CLRTICK_TXBD)) {
17507 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17508 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17509 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17510 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17511 }
17512
17513 if (tg3_flag(tp, 5705_PLUS)) {
17514 ec->rx_coalesce_usecs_irq = 0;
17515 ec->tx_coalesce_usecs_irq = 0;
17516 ec->stats_block_coalesce_usecs = 0;
17517 }
17518 }
17519
17520 static int tg3_init_one(struct pci_dev *pdev,
17521 const struct pci_device_id *ent)
17522 {
17523 struct net_device *dev;
17524 struct tg3 *tp;
17525 int i, err;
17526 u32 sndmbx, rcvmbx, intmbx;
17527 char str[40];
17528 u64 dma_mask, persist_dma_mask;
17529 netdev_features_t features = 0;
17530
17531 printk_once(KERN_INFO "%s\n", version);
17532
17533 err = pci_enable_device(pdev);
17534 if (err) {
17535 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17536 return err;
17537 }
17538
17539 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17540 if (err) {
17541 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17542 goto err_out_disable_pdev;
17543 }
17544
17545 pci_set_master(pdev);
17546
17547 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17548 if (!dev) {
17549 err = -ENOMEM;
17550 goto err_out_free_res;
17551 }
17552
17553 SET_NETDEV_DEV(dev, &pdev->dev);
17554
17555 tp = netdev_priv(dev);
17556 tp->pdev = pdev;
17557 tp->dev = dev;
17558 tp->rx_mode = TG3_DEF_RX_MODE;
17559 tp->tx_mode = TG3_DEF_TX_MODE;
17560 tp->irq_sync = 1;
17561
17562 if (tg3_debug > 0)
17563 tp->msg_enable = tg3_debug;
17564 else
17565 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17566
17567 if (pdev_is_ssb_gige_core(pdev)) {
17568 tg3_flag_set(tp, IS_SSB_CORE);
17569 if (ssb_gige_must_flush_posted_writes(pdev))
17570 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17571 if (ssb_gige_one_dma_at_once(pdev))
17572 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17573 if (ssb_gige_have_roboswitch(pdev)) {
17574 tg3_flag_set(tp, USE_PHYLIB);
17575 tg3_flag_set(tp, ROBOSWITCH);
17576 }
17577 if (ssb_gige_is_rgmii(pdev))
17578 tg3_flag_set(tp, RGMII_MODE);
17579 }
17580
17581 /* The word/byte swap controls here control register access byte
17582 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17583 * setting below.
17584 */
17585 tp->misc_host_ctrl =
17586 MISC_HOST_CTRL_MASK_PCI_INT |
17587 MISC_HOST_CTRL_WORD_SWAP |
17588 MISC_HOST_CTRL_INDIR_ACCESS |
17589 MISC_HOST_CTRL_PCISTATE_RW;
17590
17591 /* The NONFRM (non-frame) byte/word swap controls take effect
17592 * on descriptor entries, anything which isn't packet data.
17593 *
17594 * The StrongARM chips on the board (one for tx, one for rx)
17595 * are running in big-endian mode.
17596 */
17597 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17598 GRC_MODE_WSWAP_NONFRM_DATA);
17599 #ifdef __BIG_ENDIAN
17600 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17601 #endif
17602 spin_lock_init(&tp->lock);
17603 spin_lock_init(&tp->indirect_lock);
17604 INIT_WORK(&tp->reset_task, tg3_reset_task);
17605
17606 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17607 if (!tp->regs) {
17608 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17609 err = -ENOMEM;
17610 goto err_out_free_dev;
17611 }
17612
17613 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17614 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17615 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17616 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17617 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17618 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17619 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17620 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17621 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17622 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17623 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17624 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17625 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17626 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17627 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17628 tg3_flag_set(tp, ENABLE_APE);
17629 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17630 if (!tp->aperegs) {
17631 dev_err(&pdev->dev,
17632 "Cannot map APE registers, aborting\n");
17633 err = -ENOMEM;
17634 goto err_out_iounmap;
17635 }
17636 }
17637
17638 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17639 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17640
17641 dev->ethtool_ops = &tg3_ethtool_ops;
17642 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17643 dev->netdev_ops = &tg3_netdev_ops;
17644 dev->irq = pdev->irq;
17645
17646 err = tg3_get_invariants(tp, ent);
17647 if (err) {
17648 dev_err(&pdev->dev,
17649 "Problem fetching invariants of chip, aborting\n");
17650 goto err_out_apeunmap;
17651 }
17652
17653 /* The EPB bridge inside 5714, 5715, and 5780 and any
17654 * device behind the EPB cannot support DMA addresses > 40-bit.
17655 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17656 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17657 * do DMA address check in tg3_start_xmit().
17658 */
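/* Resulting policy: IS_5788 parts are limited to 32-bit DMA;
 * 40BIT_DMA_BUG parts keep a 40-bit coherent mask (the streaming
 * mask may be 64-bit under CONFIG_HIGHMEM, relying on the
 * tg3_start_xmit() address check); all other parts use full
 * 64-bit masks.
 */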
17659 if (tg3_flag(tp, IS_5788))
17660 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17661 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17662 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17663 #ifdef CONFIG_HIGHMEM
17664 dma_mask = DMA_BIT_MASK(64);
17665 #endif
17666 } else
17667 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17668
17669 /* Configure DMA attributes. */
17670 if (dma_mask > DMA_BIT_MASK(32)) {
17671 err = pci_set_dma_mask(pdev, dma_mask);
17672 if (!err) {
17673 features |= NETIF_F_HIGHDMA;
17674 err = pci_set_consistent_dma_mask(pdev,
17675 persist_dma_mask);
17676 if (err < 0) {
17677 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17678 "DMA for consistent allocations\n");
17679 goto err_out_apeunmap;
17680 }
17681 }
17682 }
17683 if (err || dma_mask == DMA_BIT_MASK(32)) {
17684 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17685 if (err) {
17686 dev_err(&pdev->dev,
17687 "No usable DMA configuration, aborting\n");
17688 goto err_out_apeunmap;
17689 }
17690 }
17691
17692 tg3_init_bufmgr_config(tp);
17693
17694 /* 5700 B0 chips do not support checksumming correctly due
17695 * to hardware bugs.
17696 */
17697 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17698 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17699
17700 if (tg3_flag(tp, 5755_PLUS))
17701 features |= NETIF_F_IPV6_CSUM;
17702 }
17703
17704 /* TSO is on by default on chips that support hardware TSO.
17705 * Firmware TSO on older chips gives lower performance, so it
17706 * is off by default, but can be enabled using ethtool.
17707 */
17708 if ((tg3_flag(tp, HW_TSO_1) ||
17709 tg3_flag(tp, HW_TSO_2) ||
17710 tg3_flag(tp, HW_TSO_3)) &&
17711 (features & NETIF_F_IP_CSUM))
17712 features |= NETIF_F_TSO;
17713 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17714 if (features & NETIF_F_IPV6_CSUM)
17715 features |= NETIF_F_TSO6;
17716 if (tg3_flag(tp, HW_TSO_3) ||
17717 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17718 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17719 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17720 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17721 tg3_asic_rev(tp) == ASIC_REV_57780)
17722 features |= NETIF_F_TSO_ECN;
17723 }
17724
17725 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17726 NETIF_F_HW_VLAN_CTAG_RX;
17727 dev->vlan_features |= features;
17728
17729 /*
17730 * Add loopback capability only for a subset of devices that support
17731 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17732 * loopback for the remaining devices.
17733 */
17734 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17735 !tg3_flag(tp, CPMU_PRESENT))
17736 /* Add the loopback capability */
17737 features |= NETIF_F_LOOPBACK;
17738
17739 dev->hw_features |= features;
17740 dev->priv_flags |= IFF_UNICAST_FLT;
17741
17742 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17743 !tg3_flag(tp, TSO_CAPABLE) &&
17744 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17745 tg3_flag_set(tp, MAX_RXPEND_64);
17746 tp->rx_pending = 63;
17747 }
17748
17749 err = tg3_get_device_address(tp);
17750 if (err) {
17751 dev_err(&pdev->dev,
17752 "Could not obtain valid ethernet address, aborting\n");
17753 goto err_out_apeunmap;
17754 }
17755
17756 /*
17757 * Reset the chip in case a UNDI or EFI driver did not shut down
17758 * DMA. The DMA self test will enable WDMAC and we'll see
17759 * (spurious) pending DMA on the PCI bus at that point.
17760 */
17761 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17762 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17763 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17764 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17765 }
17766
17767 err = tg3_test_dma(tp);
17768 if (err) {
17769 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17770 goto err_out_apeunmap;
17771 }
17772
17773 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17774 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17775 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
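/* Per-vector NAPI setup: each interrupt vector gets its own
 * interrupt, receive-return and send producer mailboxes, stepped
 * from the register bases above.
 */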
17776 for (i = 0; i < tp->irq_max; i++) {
17777 struct tg3_napi *tnapi = &tp->napi[i];
17778
17779 tnapi->tp = tp;
17780 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17781
17782 tnapi->int_mbox = intmbx;
17783 if (i <= 4)
17784 intmbx += 0x8;
17785 else
17786 intmbx += 0x4;
17787
17788 tnapi->consmbox = rcvmbx;
17789 tnapi->prodmbox = sndmbx;
17790
17791 if (i)
17792 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17793 else
17794 tnapi->coal_now = HOSTCC_MODE_NOW;
17795
17796 if (!tg3_flag(tp, SUPPORT_MSIX))
17797 break;
17798
17799 /*
17800 * If we support MSIX, we'll be using RSS. If we're using
17801 * RSS, the first vector only handles link interrupts and the
17802 * remaining vectors handle rx and tx interrupts. Reuse the
17803 * mailbox values for the next iteration. The values we set up
17804 * above are still useful for the single-vector mode.
17805 */
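/* Note: the receive-return mailboxes are consecutive 8-byte
 * registers, while the send producer mailboxes appear to alternate
 * between the two 4-byte halves of successive 64-bit registers,
 * which is the pattern the +0xc/-0x4 stepping below walks.
 */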
17806 if (!i)
17807 continue;
17808
17809 rcvmbx += 0x8;
17810
17811 if (sndmbx & 0x4)
17812 sndmbx -= 0x4;
17813 else
17814 sndmbx += 0xc;
17815 }
17816
17817 tg3_init_coal(tp);
17818
17819 pci_set_drvdata(pdev, dev);
17820
17821 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17822 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17823 tg3_asic_rev(tp) == ASIC_REV_5762)
17824 tg3_flag_set(tp, PTP_CAPABLE);
17825
17826 tg3_timer_init(tp);
17827
17828 tg3_carrier_off(tp);
17829
17830 err = register_netdev(dev);
17831 if (err) {
17832 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17833 goto err_out_apeunmap;
17834 }
17835
17836 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17837 tp->board_part_number,
17838 tg3_chip_rev_id(tp),
17839 tg3_bus_string(tp, str),
17840 dev->dev_addr);
17841
17842 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17843 struct phy_device *phydev;
17844 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17845 netdev_info(dev,
17846 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17847 phydev->drv->name, dev_name(&phydev->dev));
17848 } else {
17849 char *ethtype;
17850
17851 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17852 ethtype = "10/100Base-TX";
17853 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17854 ethtype = "1000Base-SX";
17855 else
17856 ethtype = "10/100/1000Base-T";
17857
17858 netdev_info(dev,
17859 "attached PHY is %s (%s Ethernet) (WireSpeed[%d], EEE[%d])\n",
17860 tg3_phy_string(tp), ethtype,
17861 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17862 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17863 }
17864
17865 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17866 (dev->features & NETIF_F_RXCSUM) != 0,
17867 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17868 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17869 tg3_flag(tp, ENABLE_ASF) != 0,
17870 tg3_flag(tp, TSO_CAPABLE) != 0);
17871 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17872 tp->dma_rwctrl,
17873 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17874 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17875
17876 pci_save_state(pdev);
17877
17878 return 0;
17879
17880 err_out_apeunmap:
17881 if (tp->aperegs) {
17882 iounmap(tp->aperegs);
17883 tp->aperegs = NULL;
17884 }
17885
17886 err_out_iounmap:
17887 if (tp->regs) {
17888 iounmap(tp->regs);
17889 tp->regs = NULL;
17890 }
17891
17892 err_out_free_dev:
17893 free_netdev(dev);
17894
17895 err_out_free_res:
17896 pci_release_regions(pdev);
17897
17898 err_out_disable_pdev:
17899 if (pci_is_enabled(pdev))
17900 pci_disable_device(pdev);
17901 return err;
17902 }
17903
17904 static void tg3_remove_one(struct pci_dev *pdev)
17905 {
17906 struct net_device *dev = pci_get_drvdata(pdev);
17907
17908 if (dev) {
17909 struct tg3 *tp = netdev_priv(dev);
17910
17911 release_firmware(tp->fw);
17912
17913 tg3_reset_task_cancel(tp);
17914
17915 if (tg3_flag(tp, USE_PHYLIB)) {
17916 tg3_phy_fini(tp);
17917 tg3_mdio_fini(tp);
17918 }
17919
17920 unregister_netdev(dev);
17921 if (tp->aperegs) {
17922 iounmap(tp->aperegs);
17923 tp->aperegs = NULL;
17924 }
17925 if (tp->regs) {
17926 iounmap(tp->regs);
17927 tp->regs = NULL;
17928 }
17929 free_netdev(dev);
17930 pci_release_regions(pdev);
17931 pci_disable_device(pdev);
17932 }
17933 }
17934
17935 #ifdef CONFIG_PM_SLEEP
17936 static int tg3_suspend(struct device *device)
17937 {
17938 struct pci_dev *pdev = to_pci_dev(device);
17939 struct net_device *dev = pci_get_drvdata(pdev);
17940 struct tg3 *tp = netdev_priv(dev);
17941 int err = 0;
17942
17943 rtnl_lock();
17944
17945 if (!netif_running(dev))
17946 goto unlock;
17947
17948 tg3_reset_task_cancel(tp);
17949 tg3_phy_stop(tp);
17950 tg3_netif_stop(tp);
17951
17952 tg3_timer_stop(tp);
17953
17954 tg3_full_lock(tp, 1);
17955 tg3_disable_ints(tp);
17956 tg3_full_unlock(tp);
17957
17958 netif_device_detach(dev);
17959
17960 tg3_full_lock(tp, 0);
17961 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17962 tg3_flag_clear(tp, INIT_COMPLETE);
17963 tg3_full_unlock(tp);
17964
17965 err = tg3_power_down_prepare(tp);
17966 if (err) {
17967 int err2;
17968
17969 tg3_full_lock(tp, 0);
17970
17971 tg3_flag_set(tp, INIT_COMPLETE);
17972 err2 = tg3_restart_hw(tp, true);
17973 if (err2)
17974 goto out;
17975
17976 tg3_timer_start(tp);
17977
17978 netif_device_attach(dev);
17979 tg3_netif_start(tp);
17980
17981 out:
17982 tg3_full_unlock(tp);
17983
17984 if (!err2)
17985 tg3_phy_start(tp);
17986 }
17987
17988 unlock:
17989 rtnl_unlock();
17990 return err;
17991 }
17992
17993 static int tg3_resume(struct device *device)
17994 {
17995 struct pci_dev *pdev = to_pci_dev(device);
17996 struct net_device *dev = pci_get_drvdata(pdev);
17997 struct tg3 *tp = netdev_priv(dev);
17998 int err = 0;
17999
18000 rtnl_lock();
18001
18002 if (!netif_running(dev))
18003 goto unlock;
18004
18005 netif_device_attach(dev);
18006
18007 tg3_full_lock(tp, 0);
18008
18009 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18010
18011 tg3_flag_set(tp, INIT_COMPLETE);
18012 err = tg3_restart_hw(tp,
18013 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18014 if (err)
18015 goto out;
18016
18017 tg3_timer_start(tp);
18018
18019 tg3_netif_start(tp);
18020
18021 out:
18022 tg3_full_unlock(tp);
18023
18024 if (!err)
18025 tg3_phy_start(tp);
18026
18027 unlock:
18028 rtnl_unlock();
18029 return err;
18030 }
18031 #endif /* CONFIG_PM_SLEEP */
18032
18033 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
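/* SIMPLE_DEV_PM_OPS wires the suspend/resume callbacks above into
 * tg3_driver.driver.pm; when CONFIG_PM_SLEEP is not set, the macro
 * leaves the system-sleep hooks empty, matching the #ifdef above.
 */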
18034
18035 static void tg3_shutdown(struct pci_dev *pdev)
18036 {
18037 struct net_device *dev = pci_get_drvdata(pdev);
18038 struct tg3 *tp = netdev_priv(dev);
18039
18040 rtnl_lock();
18041 netif_device_detach(dev);
18042
18043 if (netif_running(dev))
18044 dev_close(dev);
18045
18046 if (system_state == SYSTEM_POWER_OFF)
18047 tg3_power_down(tp);
18048
18049 rtnl_unlock();
18050 }
18051
18052 /**
18053 * tg3_io_error_detected - called when PCI error is detected
18054 * @pdev: Pointer to PCI device
18055 * @state: The current pci connection state
18056 *
18057 * This function is called after a PCI bus error affecting
18058 * this device has been detected.
18059 */
18060 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18061 pci_channel_state_t state)
18062 {
18063 struct net_device *netdev = pci_get_drvdata(pdev);
18064 struct tg3 *tp = netdev_priv(netdev);
18065 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18066
18067 netdev_info(netdev, "PCI I/O error detected\n");
18068
18069 rtnl_lock();
18070
18071 /* We probably don't have netdev yet */
18072 if (!netdev || !netif_running(netdev))
18073 goto done;
18074
18075 tg3_phy_stop(tp);
18076
18077 tg3_netif_stop(tp);
18078
18079 tg3_timer_stop(tp);
18080
18081 /* Want to make sure that the reset task doesn't run */
18082 tg3_reset_task_cancel(tp);
18083
18084 netif_device_detach(netdev);
18085
18086 /* Clean up software state, even if MMIO is blocked */
18087 tg3_full_lock(tp, 0);
18088 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18089 tg3_full_unlock(tp);
18090
18091 done:
18092 if (state == pci_channel_io_perm_failure) {
18093 if (netdev) {
18094 tg3_napi_enable(tp);
18095 dev_close(netdev);
18096 }
18097 err = PCI_ERS_RESULT_DISCONNECT;
18098 } else {
18099 pci_disable_device(pdev);
18100 }
18101
18102 rtnl_unlock();
18103
18104 return err;
18105 }
18106
18107 /**
18108 * tg3_io_slot_reset - called after the PCI bus has been reset.
18109 * @pdev: Pointer to PCI device
18110 *
18111 * Restart the card from scratch, as if from a cold-boot.
18112 * At this point, the card has experienced a hard reset,
18113 * followed by fixups by BIOS, and has its config space
18114 * set up identically to what it was at cold boot.
18115 */
18116 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18117 {
18118 struct net_device *netdev = pci_get_drvdata(pdev);
18119 struct tg3 *tp = netdev_priv(netdev);
18120 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18121 int err;
18122
18123 rtnl_lock();
18124
18125 if (pci_enable_device(pdev)) {
18126 dev_err(&pdev->dev,
18127 "Cannot re-enable PCI device after reset.\n");
18128 goto done;
18129 }
18130
18131 pci_set_master(pdev);
18132 pci_restore_state(pdev);
18133 pci_save_state(pdev);
18134
18135 if (!netdev || !netif_running(netdev)) {
18136 rc = PCI_ERS_RESULT_RECOVERED;
18137 goto done;
18138 }
18139
18140 err = tg3_power_up(tp);
18141 if (err)
18142 goto done;
18143
18144 rc = PCI_ERS_RESULT_RECOVERED;
18145
18146 done:
18147 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18148 tg3_napi_enable(tp);
18149 dev_close(netdev);
18150 }
18151 rtnl_unlock();
18152
18153 return rc;
18154 }
18155
18156 /**
18157 * tg3_io_resume - called when traffic can start flowing again.
18158 * @pdev: Pointer to PCI device
18159 *
18160 * This callback is called when the error recovery driver tells
18161 * us that it's OK to resume normal operation.
18162 */
18163 static void tg3_io_resume(struct pci_dev *pdev)
18164 {
18165 struct net_device *netdev = pci_get_drvdata(pdev);
18166 struct tg3 *tp = netdev_priv(netdev);
18167 int err;
18168
18169 rtnl_lock();
18170
18171 if (!netif_running(netdev))
18172 goto done;
18173
18174 tg3_full_lock(tp, 0);
18175 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18176 tg3_flag_set(tp, INIT_COMPLETE);
18177 err = tg3_restart_hw(tp, true);
18178 if (err) {
18179 tg3_full_unlock(tp);
18180 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18181 goto done;
18182 }
18183
18184 netif_device_attach(netdev);
18185
18186 tg3_timer_start(tp);
18187
18188 tg3_netif_start(tp);
18189
18190 tg3_full_unlock(tp);
18191
18192 tg3_phy_start(tp);
18193
18194 done:
18195 rtnl_unlock();
18196 }
18197
18198 static const struct pci_error_handlers tg3_err_handler = {
18199 .error_detected = tg3_io_error_detected,
18200 .slot_reset = tg3_io_slot_reset,
18201 .resume = tg3_io_resume
18202 };
18203
18204 static struct pci_driver tg3_driver = {
18205 .name = DRV_MODULE_NAME,
18206 .id_table = tg3_pci_tbl,
18207 .probe = tg3_init_one,
18208 .remove = tg3_remove_one,
18209 .err_handler = &tg3_err_handler,
18210 .driver.pm = &tg3_pm_ops,
18211 .shutdown = tg3_shutdown,
18212 };
18213
18214 module_pci_driver(tg3_driver);