/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
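
/* Example: tg3_flag(tp, ENABLE_APE) expands to a test_bit() of
 * TG3_FLAG_ENABLE_APE in tp->tg3_flags, so flag checks and the
 * set/clear variants stay cheap atomic bitops on one word.
 */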

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		136
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Jan 03, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
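
/* Example: with TG3_TX_RING_SIZE == 512, NEXT_TX(511) == 0.  Because the
 * ring size is a power of two, wrap-around is a single AND with the mask
 * rather than a modulo.
 */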

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

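/* Indirect register access: the target offset goes into the
 * TG3PCI_REG_BASE_ADDR config word and the data moves through
 * TG3PCI_REG_DATA; indirect_lock keeps the address/data pair atomic.
 */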
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

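/* Mailbox writes in indirect mode: adding 0x5600 here appears to translate
 * a mailbox offset into its address inside the GRC mailbox window (compare
 * GRCMBOX_BASE used by the 5906 mailbox accessors below).
 */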
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

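/* Shorthand accessors: tw32/tr32 and the mailbox variants dispatch through
 * the per-chip read/write methods installed at probe time; the _f and
 * _wait_f forms flush posted writes by reading the register back.
 */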
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

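/* APE lock protocol: post our request bit in the REQ register, then poll
 * the matching GRANT register for up to ~1 ms.  If the grant never shows
 * up, the request is revoked by writing the same bit to GRANT.
 */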
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
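		/* fall through */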
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
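		/* fall through */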
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

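/* Read 'len' bytes from the APE scratchpad at base_off into 'data': post a
 * SCRTCHPD_READ driver event for each chunk, then copy the reply out of the
 * shared message buffer one word at a time.
 */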
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

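/* Writing 1 to each interrupt mailbox, with MASK_PCI_INT set in the misc
 * host control register, tells the chip to hold off further interrupts.
 */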
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

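/* MII register access goes through the MAC's MI_COM register: compose a
 * frame with the PHY and register addresses plus a read/write command,
 * then poll MI_COM_BUSY.  At ~10 us per iteration, PHY_BUSY_LOOPS bounds
 * the wait at roughly 50 ms.
 */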
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

1909 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1910 {
1911 u32 flowctrl = 0;
1912
1913 if (adv & ADVERTISE_PAUSE_CAP) {
1914 flowctrl |= FLOW_CTRL_RX;
1915 if (!(adv & ADVERTISE_PAUSE_ASYM))
1916 flowctrl |= FLOW_CTRL_TX;
1917 } else if (adv & ADVERTISE_PAUSE_ASYM)
1918 flowctrl |= FLOW_CTRL_TX;
1919
1920 return flowctrl;
1921 }
1922
1923 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1924 {
1925 u16 miireg;
1926
1927 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1928 miireg = ADVERTISE_1000XPAUSE;
1929 else if (flow_ctrl & FLOW_CTRL_TX)
1930 miireg = ADVERTISE_1000XPSE_ASYM;
1931 else if (flow_ctrl & FLOW_CTRL_RX)
1932 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1933 else
1934 miireg = 0;
1935
1936 return miireg;
1937 }
1938
1939 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1940 {
1941 u32 flowctrl = 0;
1942
1943 if (adv & ADVERTISE_1000XPAUSE) {
1944 flowctrl |= FLOW_CTRL_RX;
1945 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1946 flowctrl |= FLOW_CTRL_TX;
1947 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1948 flowctrl |= FLOW_CTRL_TX;
1949
1950 return flowctrl;
1951 }
1952
1953 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1954 {
1955 u8 cap = 0;
1956
1957 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1958 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1959 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1960 if (lcladv & ADVERTISE_1000XPAUSE)
1961 cap = FLOW_CTRL_RX;
1962 if (rmtadv & ADVERTISE_1000XPAUSE)
1963 cap = FLOW_CTRL_TX;
1964 }
1965
1966 return cap;
1967 }
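/* The three helpers above implement the standard 802.3 pause
 * resolution: a symmetric-pause bit alone yields flow control in
 * both directions, while the asymmetric bit lets one side send
 * PAUSE frames without honoring them. A build-excluded sketch of
 * the same truth table; the EX_* constants stand in for the
 * linux/mii.h definitions only so the snippet stands alone:
 */
#if 0
#include <stdio.h>

#define EX_1000XPAUSE		0x0080	/* symmetric pause  */
#define EX_1000XPSE_ASYM	0x0100	/* asymmetric pause */
#define EX_FLOW_CTRL_TX		0x01
#define EX_FLOW_CTRL_RX		0x02

/* Same rule as tg3_resolve_flowctrl_1000X() above. */
static unsigned ex_resolve(unsigned lcl, unsigned rmt)
{
	if (lcl & rmt & EX_1000XPAUSE)
		return EX_FLOW_CTRL_TX | EX_FLOW_CTRL_RX;
	if (lcl & rmt & EX_1000XPSE_ASYM) {
		if (lcl & EX_1000XPAUSE)
			return EX_FLOW_CTRL_RX;	/* we honor PAUSE */
		if (rmt & EX_1000XPAUSE)
			return EX_FLOW_CTRL_TX;	/* we send PAUSE  */
	}
	return 0;
}

int main(void)
{
	/* Local side: asym only; peer: sym + asym -> we may send. */
	printf("%u\n", ex_resolve(EX_1000XPSE_ASYM,
				  EX_1000XPAUSE | EX_1000XPSE_ASYM));
	return 0;
}
#endif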
1968
1969 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1970 {
1971 u8 autoneg;
1972 u8 flowctrl = 0;
1973 u32 old_rx_mode = tp->rx_mode;
1974 u32 old_tx_mode = tp->tx_mode;
1975
1976 if (tg3_flag(tp, USE_PHYLIB))
1977 autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1978 else
1979 autoneg = tp->link_config.autoneg;
1980
1981 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1982 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1983 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1984 else
1985 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1986 } else
1987 flowctrl = tp->link_config.flowctrl;
1988
1989 tp->link_config.active_flowctrl = flowctrl;
1990
1991 if (flowctrl & FLOW_CTRL_RX)
1992 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1993 else
1994 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1995
1996 if (old_rx_mode != tp->rx_mode)
1997 tw32_f(MAC_RX_MODE, tp->rx_mode);
1998
1999 if (flowctrl & FLOW_CTRL_TX)
2000 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2001 else
2002 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2003
2004 if (old_tx_mode != tp->tx_mode)
2005 tw32_f(MAC_TX_MODE, tp->tx_mode);
2006 }
2007
2008 static void tg3_adjust_link(struct net_device *dev)
2009 {
2010 u8 oldflowctrl, linkmesg = 0;
2011 u32 mac_mode, lcl_adv, rmt_adv;
2012 struct tg3 *tp = netdev_priv(dev);
2013 struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2014
2015 spin_lock_bh(&tp->lock);
2016
2017 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2018 MAC_MODE_HALF_DUPLEX);
2019
2020 oldflowctrl = tp->link_config.active_flowctrl;
2021
2022 if (phydev->link) {
2023 lcl_adv = 0;
2024 rmt_adv = 0;
2025
2026 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2027 mac_mode |= MAC_MODE_PORT_MODE_MII;
2028 else if (phydev->speed == SPEED_1000 ||
2029 tg3_asic_rev(tp) != ASIC_REV_5785)
2030 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2031 else
2032 mac_mode |= MAC_MODE_PORT_MODE_MII;
2033
2034 if (phydev->duplex == DUPLEX_HALF)
2035 mac_mode |= MAC_MODE_HALF_DUPLEX;
2036 else {
2037 lcl_adv = mii_advertise_flowctrl(
2038 tp->link_config.flowctrl);
2039
2040 if (phydev->pause)
2041 rmt_adv = LPA_PAUSE_CAP;
2042 if (phydev->asym_pause)
2043 rmt_adv |= LPA_PAUSE_ASYM;
2044 }
2045
2046 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2047 } else
2048 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2049
2050 if (mac_mode != tp->mac_mode) {
2051 tp->mac_mode = mac_mode;
2052 tw32_f(MAC_MODE, tp->mac_mode);
2053 udelay(40);
2054 }
2055
2056 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2057 if (phydev->speed == SPEED_10)
2058 tw32(MAC_MI_STAT,
2059 MAC_MI_STAT_10MBPS_MODE |
2060 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2061 else
2062 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2063 }
2064
2065 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2066 tw32(MAC_TX_LENGTHS,
2067 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2068 (6 << TX_LENGTHS_IPG_SHIFT) |
2069 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2070 else
2071 tw32(MAC_TX_LENGTHS,
2072 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2073 (6 << TX_LENGTHS_IPG_SHIFT) |
2074 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2075
2076 if (phydev->link != tp->old_link ||
2077 phydev->speed != tp->link_config.active_speed ||
2078 phydev->duplex != tp->link_config.active_duplex ||
2079 oldflowctrl != tp->link_config.active_flowctrl)
2080 linkmesg = 1;
2081
2082 tp->old_link = phydev->link;
2083 tp->link_config.active_speed = phydev->speed;
2084 tp->link_config.active_duplex = phydev->duplex;
2085
2086 spin_unlock_bh(&tp->lock);
2087
2088 if (linkmesg)
2089 tg3_link_report(tp);
2090 }
2091
2092 static int tg3_phy_init(struct tg3 *tp)
2093 {
2094 struct phy_device *phydev;
2095
2096 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2097 return 0;
2098
2099 /* Bring the PHY back to a known state. */
2100 tg3_bmcr_reset(tp);
2101
2102 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2103
2104 /* Attach the MAC to the PHY. */
2105 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2106 tg3_adjust_link, phydev->interface);
2107 if (IS_ERR(phydev)) {
2108 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2109 return PTR_ERR(phydev);
2110 }
2111
2112 /* Mask with MAC supported features. */
2113 switch (phydev->interface) {
2114 case PHY_INTERFACE_MODE_GMII:
2115 case PHY_INTERFACE_MODE_RGMII:
2116 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2117 phydev->supported &= (PHY_GBIT_FEATURES |
2118 SUPPORTED_Pause |
2119 SUPPORTED_Asym_Pause);
2120 break;
2121 }
2122 /* fallthru */
2123 case PHY_INTERFACE_MODE_MII:
2124 phydev->supported &= (PHY_BASIC_FEATURES |
2125 SUPPORTED_Pause |
2126 SUPPORTED_Asym_Pause);
2127 break;
2128 default:
2129 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2130 return -EINVAL;
2131 }
2132
2133 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2134
2135 phydev->advertising = phydev->supported;
2136
2137 return 0;
2138 }
2139
2140 static void tg3_phy_start(struct tg3 *tp)
2141 {
2142 struct phy_device *phydev;
2143
2144 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2145 return;
2146
2147 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2148
2149 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2150 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2151 phydev->speed = tp->link_config.speed;
2152 phydev->duplex = tp->link_config.duplex;
2153 phydev->autoneg = tp->link_config.autoneg;
2154 phydev->advertising = tp->link_config.advertising;
2155 }
2156
2157 phy_start(phydev);
2158
2159 phy_start_aneg(phydev);
2160 }
2161
2162 static void tg3_phy_stop(struct tg3 *tp)
2163 {
2164 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2165 return;
2166
2167 phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2168 }
2169
2170 static void tg3_phy_fini(struct tg3 *tp)
2171 {
2172 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2173 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2174 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2175 }
2176 }
2177
2178 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2179 {
2180 int err;
2181 u32 val;
2182
2183 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2184 return 0;
2185
2186 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2187 /* Cannot do read-modify-write on 5401 */
2188 err = tg3_phy_auxctl_write(tp,
2189 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2190 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2191 0x4c20);
2192 goto done;
2193 }
2194
2195 err = tg3_phy_auxctl_read(tp,
2196 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2197 if (err)
2198 return err;
2199
2200 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2201 err = tg3_phy_auxctl_write(tp,
2202 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2203
2204 done:
2205 return err;
2206 }
2207
2208 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2209 {
2210 u32 phytest;
2211
2212 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2213 u32 phy;
2214
2215 tg3_writephy(tp, MII_TG3_FET_TEST,
2216 phytest | MII_TG3_FET_SHADOW_EN);
2217 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2218 if (enable)
2219 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2220 else
2221 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2222 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2223 }
2224 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2225 }
2226 }
2227
2228 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2229 {
2230 u32 reg;
2231
2232 if (!tg3_flag(tp, 5705_PLUS) ||
2233 (tg3_flag(tp, 5717_PLUS) &&
2234 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2235 return;
2236
2237 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2238 tg3_phy_fet_toggle_apd(tp, enable);
2239 return;
2240 }
2241
2242 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2243 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2244 MII_TG3_MISC_SHDW_SCR5_SDTL |
2245 MII_TG3_MISC_SHDW_SCR5_C125OE;
2246 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2247 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2248
2249 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2250
2251
2252 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2253 if (enable)
2254 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2255
2256 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2257 }
2258
2259 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2260 {
2261 u32 phy;
2262
2263 if (!tg3_flag(tp, 5705_PLUS) ||
2264 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2265 return;
2266
2267 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2268 u32 ephy;
2269
2270 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2271 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2272
2273 tg3_writephy(tp, MII_TG3_FET_TEST,
2274 ephy | MII_TG3_FET_SHADOW_EN);
2275 if (!tg3_readphy(tp, reg, &phy)) {
2276 if (enable)
2277 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2278 else
2279 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2280 tg3_writephy(tp, reg, phy);
2281 }
2282 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2283 }
2284 } else {
2285 int ret;
2286
2287 ret = tg3_phy_auxctl_read(tp,
2288 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2289 if (!ret) {
2290 if (enable)
2291 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2292 else
2293 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2294 tg3_phy_auxctl_write(tp,
2295 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2296 }
2297 }
2298 }
2299
2300 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2301 {
2302 int ret;
2303 u32 val;
2304
2305 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2306 return;
2307
2308 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2309 if (!ret)
2310 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2311 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2312 }
2313
2314 static void tg3_phy_apply_otp(struct tg3 *tp)
2315 {
2316 u32 otp, phy;
2317
2318 if (!tp->phy_otp)
2319 return;
2320
2321 otp = tp->phy_otp;
2322
2323 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2324 return;
2325
2326 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2327 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2328 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2329
2330 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2331 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2333
2334 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2335 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2336 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2337
2338 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2339 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2340
2341 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2342 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2343
2344 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2345 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2346 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2347
2348 tg3_phy_toggle_auxctl_smdsp(tp, false);
2349 }
2350
2351 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2352 {
2353 u32 val;
2354 struct ethtool_eee *dest = &tp->eee;
2355
2356 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2357 return;
2358
2359 if (eee)
2360 dest = eee;
2361
2362 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2363 return;
2364
2365 /* Pull eee_active */
2366 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2367 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2368 dest->eee_active = 1;
2369 } else
2370 dest->eee_active = 0;
2371
2372 /* Pull lp advertised settings */
2373 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2374 return;
2375 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2376
2377 /* Pull advertised and eee_enabled settings */
2378 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2379 return;
2380 dest->eee_enabled = !!val;
2381 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2382
2383 /* Pull tx_lpi_enabled */
2384 val = tr32(TG3_CPMU_EEE_MODE);
2385 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2386
2387 /* Pull lpi timer value */
2388 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2389 }
2390
2391 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2392 {
2393 u32 val;
2394
2395 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2396 return;
2397
2398 tp->setlpicnt = 0;
2399
2400 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2401 current_link_up &&
2402 tp->link_config.active_duplex == DUPLEX_FULL &&
2403 (tp->link_config.active_speed == SPEED_100 ||
2404 tp->link_config.active_speed == SPEED_1000)) {
2405 u32 eeectl;
2406
2407 if (tp->link_config.active_speed == SPEED_1000)
2408 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2409 else
2410 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2411
2412 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2413
2414 tg3_eee_pull_config(tp, NULL);
2415 if (tp->eee.eee_active)
2416 tp->setlpicnt = 2;
2417 }
2418
2419 if (!tp->setlpicnt) {
2420 if (current_link_up &&
2421 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2422 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2423 tg3_phy_toggle_auxctl_smdsp(tp, false);
2424 }
2425
2426 val = tr32(TG3_CPMU_EEE_MODE);
2427 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2428 }
2429 }
2430
2431 static void tg3_phy_eee_enable(struct tg3 *tp)
2432 {
2433 u32 val;
2434
2435 if (tp->link_config.active_speed == SPEED_1000 &&
2436 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2437 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2438 tg3_flag(tp, 57765_CLASS)) &&
2439 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2440 val = MII_TG3_DSP_TAP26_ALNOKO |
2441 MII_TG3_DSP_TAP26_RMRXSTO;
2442 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2443 tg3_phy_toggle_auxctl_smdsp(tp, false);
2444 }
2445
2446 val = tr32(TG3_CPMU_EEE_MODE);
2447 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2448 }
2449
2450 static int tg3_wait_macro_done(struct tg3 *tp)
2451 {
2452 int limit = 100;
2453
2454 while (limit--) {
2455 u32 tmp32;
2456
2457 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2458 if ((tmp32 & 0x1000) == 0)
2459 break;
2460 }
2461 }
2462 if (limit < 0)
2463 return -EBUSY;
2464
2465 return 0;
2466 }
2467
2468 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2469 {
2470 static const u32 test_pat[4][6] = {
2471 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2472 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2473 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2474 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2475 };
2476 int chan;
2477
2478 for (chan = 0; chan < 4; chan++) {
2479 int i;
2480
2481 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2482 (chan * 0x2000) | 0x0200);
2483 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2484
2485 for (i = 0; i < 6; i++)
2486 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2487 test_pat[chan][i]);
2488
2489 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2490 if (tg3_wait_macro_done(tp)) {
2491 *resetp = 1;
2492 return -EBUSY;
2493 }
2494
2495 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2496 (chan * 0x2000) | 0x0200);
2497 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2498 if (tg3_wait_macro_done(tp)) {
2499 *resetp = 1;
2500 return -EBUSY;
2501 }
2502
2503 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2504 if (tg3_wait_macro_done(tp)) {
2505 *resetp = 1;
2506 return -EBUSY;
2507 }
2508
2509 for (i = 0; i < 6; i += 2) {
2510 u32 low, high;
2511
2512 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2513 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2514 tg3_wait_macro_done(tp)) {
2515 *resetp = 1;
2516 return -EBUSY;
2517 }
2518 low &= 0x7fff;
2519 high &= 0x000f;
2520 if (low != test_pat[chan][i] ||
2521 high != test_pat[chan][i+1]) {
2522 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2523 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2524 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2525
2526 return -EBUSY;
2527 }
2528 }
2529 }
2530
2531 return 0;
2532 }
2533
2534 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2535 {
2536 int chan;
2537
2538 for (chan = 0; chan < 4; chan++) {
2539 int i;
2540
2541 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2542 (chan * 0x2000) | 0x0200);
2543 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2544 for (i = 0; i < 6; i++)
2545 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2546 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2547 if (tg3_wait_macro_done(tp))
2548 return -EBUSY;
2549 }
2550
2551 return 0;
2552 }
2553
2554 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2555 {
2556 u32 reg32, phy9_orig;
2557 int retries, do_phy_reset, err;
2558
2559 retries = 10;
2560 do_phy_reset = 1;
2561 do {
2562 if (do_phy_reset) {
2563 err = tg3_bmcr_reset(tp);
2564 if (err)
2565 return err;
2566 do_phy_reset = 0;
2567 }
2568
2569 /* Disable transmitter and interrupt. */
2570 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2571 continue;
2572
2573 reg32 |= 0x3000;
2574 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2575
2576 /* Set full-duplex, 1000 Mbps. */
2577 tg3_writephy(tp, MII_BMCR,
2578 BMCR_FULLDPLX | BMCR_SPEED1000);
2579
2580 /* Set to master mode. */
2581 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2582 continue;
2583
2584 tg3_writephy(tp, MII_CTRL1000,
2585 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2586
2587 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2588 if (err)
2589 return err;
2590
2591 /* Block the PHY control access. */
2592 tg3_phydsp_write(tp, 0x8005, 0x0800);
2593
2594 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2595 if (!err)
2596 break;
2597 } while (--retries);
2598
2599 err = tg3_phy_reset_chanpat(tp);
2600 if (err)
2601 return err;
2602
2603 tg3_phydsp_write(tp, 0x8005, 0x0000);
2604
2605 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2606 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2607
2608 tg3_phy_toggle_auxctl_smdsp(tp, false);
2609
2610 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2611
2612 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2613 if (err)
2614 return err;
2615
2616 reg32 &= ~0x3000;
2617 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2618
2619 return 0;
2620 }
2621
2622 static void tg3_carrier_off(struct tg3 *tp)
2623 {
2624 netif_carrier_off(tp->dev);
2625 tp->link_up = false;
2626 }
2627
2628 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2629 {
2630 if (tg3_flag(tp, ENABLE_ASF))
2631 netdev_warn(tp->dev,
2632 "Management side-band traffic will be interrupted during phy settings change\n");
2633 }
2634
2635 /* Reset the tigon3 PHY and reapply any chip-specific
2636 * workarounds.
2637 */
2638 static int tg3_phy_reset(struct tg3 *tp)
2639 {
2640 u32 val, cpmuctrl;
2641 int err;
2642
2643 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2644 val = tr32(GRC_MISC_CFG);
2645 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2646 udelay(40);
2647 }
2648 err = tg3_readphy(tp, MII_BMSR, &val);
2649 err |= tg3_readphy(tp, MII_BMSR, &val);
2650 if (err != 0)
2651 return -EBUSY;
2652
2653 if (netif_running(tp->dev) && tp->link_up) {
2654 netif_carrier_off(tp->dev);
2655 tg3_link_report(tp);
2656 }
2657
2658 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2659 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2660 tg3_asic_rev(tp) == ASIC_REV_5705) {
2661 err = tg3_phy_reset_5703_4_5(tp);
2662 if (err)
2663 return err;
2664 goto out;
2665 }
2666
2667 cpmuctrl = 0;
2668 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2669 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2670 cpmuctrl = tr32(TG3_CPMU_CTRL);
2671 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2672 tw32(TG3_CPMU_CTRL,
2673 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2674 }
2675
2676 err = tg3_bmcr_reset(tp);
2677 if (err)
2678 return err;
2679
2680 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2681 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2682 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2683
2684 tw32(TG3_CPMU_CTRL, cpmuctrl);
2685 }
2686
2687 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2688 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2689 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2690 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2691 CPMU_LSPD_1000MB_MACCLK_12_5) {
2692 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2693 udelay(40);
2694 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2695 }
2696 }
2697
2698 if (tg3_flag(tp, 5717_PLUS) &&
2699 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2700 return 0;
2701
2702 tg3_phy_apply_otp(tp);
2703
2704 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2705 tg3_phy_toggle_apd(tp, true);
2706 else
2707 tg3_phy_toggle_apd(tp, false);
2708
2709 out:
2710 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2711 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2712 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2713 tg3_phydsp_write(tp, 0x000a, 0x0323);
2714 tg3_phy_toggle_auxctl_smdsp(tp, false);
2715 }
2716
2717 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2718 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2719 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2720 }
2721
2722 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2723 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2724 tg3_phydsp_write(tp, 0x000a, 0x310b);
2725 tg3_phydsp_write(tp, 0x201f, 0x9506);
2726 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2727 tg3_phy_toggle_auxctl_smdsp(tp, false);
2728 }
2729 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2730 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2731 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2732 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2733 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2734 tg3_writephy(tp, MII_TG3_TEST1,
2735 MII_TG3_TEST1_TRIM_EN | 0x4);
2736 } else
2737 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2738
2739 tg3_phy_toggle_auxctl_smdsp(tp, false);
2740 }
2741 }
2742
2743 /* Set Extended packet length bit (bit 14) on all chips
2744 * that support jumbo frames. */
2745 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2746 /* Cannot do read-modify-write on 5401 */
2747 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2748 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2749 /* Set bit 14 with read-modify-write to preserve other bits */
2750 err = tg3_phy_auxctl_read(tp,
2751 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2752 if (!err)
2753 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2754 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2755 }
2756
2757 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2758 * jumbo frame transmission.
2759 */
2760 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2761 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2762 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2763 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2764 }
2765
2766 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2767 /* adjust output voltage */
2768 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2769 }
2770
2771 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2772 tg3_phydsp_write(tp, 0xffb, 0x4000);
2773
2774 tg3_phy_toggle_automdix(tp, true);
2775 tg3_phy_set_wirespeed(tp);
2776 return 0;
2777 }
2778
2779 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2780 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2781 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2782 TG3_GPIO_MSG_NEED_VAUX)
2783 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2784 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2785 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2786 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2787 (TG3_GPIO_MSG_DRVR_PRES << 12))
2788
2789 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2790 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2791 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2792 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2793 (TG3_GPIO_MSG_NEED_VAUX << 12))
2794
2795 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2796 {
2797 u32 status, shift;
2798
2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800 tg3_asic_rev(tp) == ASIC_REV_5719)
2801 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2802 else
2803 status = tr32(TG3_CPMU_DRV_STATUS);
2804
2805 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2806 status &= ~(TG3_GPIO_MSG_MASK << shift);
2807 status |= (newstat << shift);
2808
2809 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2810 tg3_asic_rev(tp) == ASIC_REV_5719)
2811 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2812 else
2813 tw32(TG3_CPMU_DRV_STATUS, status);
2814
2815 return status >> TG3_APE_GPIO_MSG_SHIFT;
2816 }
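/* tg3_set_function_status() gives each of up to four PCI functions
 * its own nibble in the shared status word: function N's two message
 * bits sit at bit offset 4 * N above the device-specific base shift.
 * A build-excluded sketch of that read-modify-write with the base
 * shift taken as zero; ex_* names are stand-ins:
 */
#if 0
#include <stdio.h>

#define EX_MSG_MASK	0x3u	/* DRVR_PRES | NEED_VAUX, as above */

static unsigned ex_set_fn_status(unsigned status, unsigned pci_fn,
				 unsigned newstat)
{
	unsigned shift = 4 * pci_fn;

	status &= ~(EX_MSG_MASK << shift);	/* clear our nibble */
	status |= newstat << shift;		/* install message  */
	return status;
}

int main(void)
{
	/* Function 2 reports "driver present"; function 0 already did. */
	printf("0x%x\n", ex_set_fn_status(0x001, 2, 0x1)); /* 0x101 */
	return 0;
}
#endif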
2817
2818 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2819 {
2820 if (!tg3_flag(tp, IS_NIC))
2821 return 0;
2822
2823 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2824 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2825 tg3_asic_rev(tp) == ASIC_REV_5720) {
2826 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2827 return -EIO;
2828
2829 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2830
2831 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2832 TG3_GRC_LCLCTL_PWRSW_DELAY);
2833
2834 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2835 } else {
2836 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2837 TG3_GRC_LCLCTL_PWRSW_DELAY);
2838 }
2839
2840 return 0;
2841 }
2842
2843 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2844 {
2845 u32 grc_local_ctrl;
2846
2847 if (!tg3_flag(tp, IS_NIC) ||
2848 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2849 tg3_asic_rev(tp) == ASIC_REV_5701)
2850 return;
2851
2852 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2853
2854 tw32_wait_f(GRC_LOCAL_CTRL,
2855 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2856 TG3_GRC_LCLCTL_PWRSW_DELAY);
2857
2858 tw32_wait_f(GRC_LOCAL_CTRL,
2859 grc_local_ctrl,
2860 TG3_GRC_LCLCTL_PWRSW_DELAY);
2861
2862 tw32_wait_f(GRC_LOCAL_CTRL,
2863 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2864 TG3_GRC_LCLCTL_PWRSW_DELAY);
2865 }
2866
2867 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2868 {
2869 if (!tg3_flag(tp, IS_NIC))
2870 return;
2871
2872 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2873 tg3_asic_rev(tp) == ASIC_REV_5701) {
2874 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2875 (GRC_LCLCTRL_GPIO_OE0 |
2876 GRC_LCLCTRL_GPIO_OE1 |
2877 GRC_LCLCTRL_GPIO_OE2 |
2878 GRC_LCLCTRL_GPIO_OUTPUT0 |
2879 GRC_LCLCTRL_GPIO_OUTPUT1),
2880 TG3_GRC_LCLCTL_PWRSW_DELAY);
2881 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2882 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2883 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2884 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2885 GRC_LCLCTRL_GPIO_OE1 |
2886 GRC_LCLCTRL_GPIO_OE2 |
2887 GRC_LCLCTRL_GPIO_OUTPUT0 |
2888 GRC_LCLCTRL_GPIO_OUTPUT1 |
2889 tp->grc_local_ctrl;
2890 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891 TG3_GRC_LCLCTL_PWRSW_DELAY);
2892
2893 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2894 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895 TG3_GRC_LCLCTL_PWRSW_DELAY);
2896
2897 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2898 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2899 TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 } else {
2901 u32 no_gpio2;
2902 u32 grc_local_ctrl = 0;
2903
2904 /* Workaround to prevent overdrawing Amps. */
2905 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2907 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2908 grc_local_ctrl,
2909 TG3_GRC_LCLCTL_PWRSW_DELAY);
2910 }
2911
2912 /* On 5753 and variants, GPIO2 cannot be used. */
2913 no_gpio2 = tp->nic_sram_data_cfg &
2914 NIC_SRAM_DATA_CFG_NO_GPIO2;
2915
2916 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2917 GRC_LCLCTRL_GPIO_OE1 |
2918 GRC_LCLCTRL_GPIO_OE2 |
2919 GRC_LCLCTRL_GPIO_OUTPUT1 |
2920 GRC_LCLCTRL_GPIO_OUTPUT2;
2921 if (no_gpio2) {
2922 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2923 GRC_LCLCTRL_GPIO_OUTPUT2);
2924 }
2925 tw32_wait_f(GRC_LOCAL_CTRL,
2926 tp->grc_local_ctrl | grc_local_ctrl,
2927 TG3_GRC_LCLCTL_PWRSW_DELAY);
2928
2929 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2930
2931 tw32_wait_f(GRC_LOCAL_CTRL,
2932 tp->grc_local_ctrl | grc_local_ctrl,
2933 TG3_GRC_LCLCTL_PWRSW_DELAY);
2934
2935 if (!no_gpio2) {
2936 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2937 tw32_wait_f(GRC_LOCAL_CTRL,
2938 tp->grc_local_ctrl | grc_local_ctrl,
2939 TG3_GRC_LCLCTL_PWRSW_DELAY);
2940 }
2941 }
2942 }
2943
2944 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2945 {
2946 u32 msg = 0;
2947
2948 /* Serialize power state transitions */
2949 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2950 return;
2951
2952 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2953 msg = TG3_GPIO_MSG_NEED_VAUX;
2954
2955 msg = tg3_set_function_status(tp, msg);
2956
2957 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2958 goto done;
2959
2960 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2961 tg3_pwrsrc_switch_to_vaux(tp);
2962 else
2963 tg3_pwrsrc_die_with_vmain(tp);
2964
2965 done:
2966 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2967 }
2968
2969 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2970 {
2971 bool need_vaux = false;
2972
2973 /* The GPIOs do something completely different on 57765. */
2974 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2975 return;
2976
2977 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2978 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2979 tg3_asic_rev(tp) == ASIC_REV_5720) {
2980 tg3_frob_aux_power_5717(tp, include_wol ?
2981 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2982 return;
2983 }
2984
2985 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2986 struct net_device *dev_peer;
2987
2988 dev_peer = pci_get_drvdata(tp->pdev_peer);
2989
2990 /* remove_one() may have been run on the peer. */
2991 if (dev_peer) {
2992 struct tg3 *tp_peer = netdev_priv(dev_peer);
2993
2994 if (tg3_flag(tp_peer, INIT_COMPLETE))
2995 return;
2996
2997 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2998 tg3_flag(tp_peer, ENABLE_ASF))
2999 need_vaux = true;
3000 }
3001 }
3002
3003 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3004 tg3_flag(tp, ENABLE_ASF))
3005 need_vaux = true;
3006
3007 if (need_vaux)
3008 tg3_pwrsrc_switch_to_vaux(tp);
3009 else
3010 tg3_pwrsrc_die_with_vmain(tp);
3011 }
3012
3013 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3014 {
3015 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3016 return 1;
3017 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3018 if (speed != SPEED_10)
3019 return 1;
3020 } else if (speed == SPEED_10)
3021 return 1;
3022
3023 return 0;
3024 }
3025
3026 static bool tg3_phy_power_bug(struct tg3 *tp)
3027 {
3028 switch (tg3_asic_rev(tp)) {
3029 case ASIC_REV_5700:
3030 case ASIC_REV_5704:
3031 return true;
3032 case ASIC_REV_5780:
3033 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3034 return true;
3035 return false;
3036 case ASIC_REV_5717:
3037 if (!tp->pci_fn)
3038 return true;
3039 return false;
3040 case ASIC_REV_5719:
3041 case ASIC_REV_5720:
3042 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3043 !tp->pci_fn)
3044 return true;
3045 return false;
3046 }
3047
3048 return false;
3049 }
3050
3051 static bool tg3_phy_led_bug(struct tg3 *tp)
3052 {
3053 switch (tg3_asic_rev(tp)) {
3054 case ASIC_REV_5719:
3055 case ASIC_REV_5720:
3056 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3057 !tp->pci_fn)
3058 return true;
3059 return false;
3060 }
3061
3062 return false;
3063 }
3064
3065 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3066 {
3067 u32 val;
3068
3069 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3070 return;
3071
3072 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3073 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3074 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3075 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3076
3077 sg_dig_ctrl |=
3078 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3079 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3080 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3081 }
3082 return;
3083 }
3084
3085 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3086 tg3_bmcr_reset(tp);
3087 val = tr32(GRC_MISC_CFG);
3088 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3089 udelay(40);
3090 return;
3091 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3092 u32 phytest;
3093 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3094 u32 phy;
3095
3096 tg3_writephy(tp, MII_ADVERTISE, 0);
3097 tg3_writephy(tp, MII_BMCR,
3098 BMCR_ANENABLE | BMCR_ANRESTART);
3099
3100 tg3_writephy(tp, MII_TG3_FET_TEST,
3101 phytest | MII_TG3_FET_SHADOW_EN);
3102 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3103 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3104 tg3_writephy(tp,
3105 MII_TG3_FET_SHDW_AUXMODE4,
3106 phy);
3107 }
3108 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3109 }
3110 return;
3111 } else if (do_low_power) {
3112 if (!tg3_phy_led_bug(tp))
3113 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3114 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3115
3116 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3117 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3118 MII_TG3_AUXCTL_PCTL_VREG_11V;
3119 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3120 }
3121
3122 /* The PHY should not be powered down on some chips because
3123 * of bugs.
3124 */
3125 if (tg3_phy_power_bug(tp))
3126 return;
3127
3128 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3129 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3130 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3131 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3132 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3133 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3134 }
3135
3136 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3137 }
3138
3139 /* tp->lock is held. */
3140 static int tg3_nvram_lock(struct tg3 *tp)
3141 {
3142 if (tg3_flag(tp, NVRAM)) {
3143 int i;
3144
3145 if (tp->nvram_lock_cnt == 0) {
3146 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3147 for (i = 0; i < 8000; i++) {
3148 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3149 break;
3150 udelay(20);
3151 }
3152 if (i == 8000) {
3153 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3154 return -ENODEV;
3155 }
3156 }
3157 tp->nvram_lock_cnt++;
3158 }
3159 return 0;
3160 }
3161
3162 /* tp->lock is held. */
3163 static void tg3_nvram_unlock(struct tg3 *tp)
3164 {
3165 if (tg3_flag(tp, NVRAM)) {
3166 if (tp->nvram_lock_cnt > 0)
3167 tp->nvram_lock_cnt--;
3168 if (tp->nvram_lock_cnt == 0)
3169 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3170 }
3171 }
3172
3173 /* tp->lock is held. */
3174 static void tg3_enable_nvram_access(struct tg3 *tp)
3175 {
3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177 u32 nvaccess = tr32(NVRAM_ACCESS);
3178
3179 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3180 }
3181 }
3182
3183 /* tp->lock is held. */
3184 static void tg3_disable_nvram_access(struct tg3 *tp)
3185 {
3186 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3187 u32 nvaccess = tr32(NVRAM_ACCESS);
3188
3189 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3190 }
3191 }
3192
3193 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3194 u32 offset, u32 *val)
3195 {
3196 u32 tmp;
3197 int i;
3198
3199 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3200 return -EINVAL;
3201
3202 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3203 EEPROM_ADDR_DEVID_MASK |
3204 EEPROM_ADDR_READ);
3205 tw32(GRC_EEPROM_ADDR,
3206 tmp |
3207 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3208 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3209 EEPROM_ADDR_ADDR_MASK) |
3210 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3211
3212 for (i = 0; i < 1000; i++) {
3213 tmp = tr32(GRC_EEPROM_ADDR);
3214
3215 if (tmp & EEPROM_ADDR_COMPLETE)
3216 break;
3217 msleep(1);
3218 }
3219 if (!(tmp & EEPROM_ADDR_COMPLETE))
3220 return -EBUSY;
3221
3222 tmp = tr32(GRC_EEPROM_DATA);
3223
3224 /*
3225 * The data will always be opposite the native endian
3226 * format. Perform a blind byteswap to compensate.
3227 */
3228 *val = swab32(tmp);
3229
3230 return 0;
3231 }
3232
3233 #define NVRAM_CMD_TIMEOUT 10000
3234
3235 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3236 {
3237 int i;
3238
3239 tw32(NVRAM_CMD, nvram_cmd);
3240 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3241 udelay(10);
3242 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3243 udelay(10);
3244 break;
3245 }
3246 }
3247
3248 if (i == NVRAM_CMD_TIMEOUT)
3249 return -EBUSY;
3250
3251 return 0;
3252 }
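/* tg3_nvram_exec_cmd() is the driver's standard busy-wait shape:
 * kick the command register, then poll the DONE bit a bounded
 * number of times (10 us per iteration above) so a wedged flash
 * part cannot hang the machine. A build-excluded sketch with a
 * simulated register; all ex_* names are stand-ins:
 */
#if 0
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define EX_CMD_DONE	(1u << 3)	/* stand-in DONE bit    */
#define EX_TIMEOUT	10000		/* poll bound, as above */

static uint32_t ex_reg;			/* models NVRAM_CMD     */
static int ex_polls;

static uint32_t ex_read(void)
{
	/* Pretend the device finishes after three polls. */
	if (++ex_polls >= 3)
		ex_reg |= EX_CMD_DONE;
	return ex_reg;
}

static int ex_exec_cmd(uint32_t cmd)
{
	int i;

	ex_reg = cmd;				/* kick the command  */
	for (i = 0; i < EX_TIMEOUT; i++)	/* bounded busy-wait */
		if (ex_read() & EX_CMD_DONE)
			return 0;
	return -EBUSY;				/* give up, no hang  */
}

int main(void)
{
	printf("%d\n", ex_exec_cmd(0x1));	/* prints 0 */
	return 0;
}
#endif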
3253
3254 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3255 {
3256 if (tg3_flag(tp, NVRAM) &&
3257 tg3_flag(tp, NVRAM_BUFFERED) &&
3258 tg3_flag(tp, FLASH) &&
3259 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3260 (tp->nvram_jedecnum == JEDEC_ATMEL))
3261
3262 addr = ((addr / tp->nvram_pagesize) <<
3263 ATMEL_AT45DB0X1B_PAGE_POS) +
3264 (addr % tp->nvram_pagesize);
3265
3266 return addr;
3267 }
3268
3269 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3270 {
3271 if (tg3_flag(tp, NVRAM) &&
3272 tg3_flag(tp, NVRAM_BUFFERED) &&
3273 tg3_flag(tp, FLASH) &&
3274 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3275 (tp->nvram_jedecnum == JEDEC_ATMEL))
3276
3277 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3278 tp->nvram_pagesize) +
3279 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3280
3281 return addr;
3282 }
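/* The two helpers above translate between linear NVRAM offsets and
 * the Atmel AT45DB0x1B's native addressing: its 264-byte page is not
 * a power of two, so the device address carries the page index at
 * and above ATMEL_AT45DB0X1B_PAGE_POS and the byte-within-page below
 * it. A build-excluded sketch of the forward mapping; a PAGE_POS of
 * 9 is assumed here to match tg3.h:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_PAGESIZE	264u
#define EX_PAGE_POS	9	/* assumed ATMEL_AT45DB0X1B_PAGE_POS */

static uint32_t ex_phys_addr(uint32_t addr)
{
	return ((addr / EX_PAGESIZE) << EX_PAGE_POS) +
	       (addr % EX_PAGESIZE);
}

int main(void)
{
	/* Offset 1000 = page 3, byte 208 -> (3 << 9) + 208 = 0x6d0. */
	printf("0x%x\n", ex_phys_addr(1000));
	return 0;
}
#endif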
3283
3284 /* NOTE: Data read in from NVRAM is byteswapped according to
3285 * the byteswapping settings for all other register accesses.
3286 * tg3 devices are BE devices, so on a BE machine, the data
3287 * returned will be exactly as it is seen in NVRAM. On a LE
3288 * machine, the 32-bit value will be byteswapped.
3289 */
3290 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3291 {
3292 int ret;
3293
3294 if (!tg3_flag(tp, NVRAM))
3295 return tg3_nvram_read_using_eeprom(tp, offset, val);
3296
3297 offset = tg3_nvram_phys_addr(tp, offset);
3298
3299 if (offset > NVRAM_ADDR_MSK)
3300 return -EINVAL;
3301
3302 ret = tg3_nvram_lock(tp);
3303 if (ret)
3304 return ret;
3305
3306 tg3_enable_nvram_access(tp);
3307
3308 tw32(NVRAM_ADDR, offset);
3309 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3310 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3311
3312 if (ret == 0)
3313 *val = tr32(NVRAM_RDDATA);
3314
3315 tg3_disable_nvram_access(tp);
3316
3317 tg3_nvram_unlock(tp);
3318
3319 return ret;
3320 }
3321
3322 /* Ensures NVRAM data is in bytestream format. */
3323 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3324 {
3325 u32 v;
3326 int res = tg3_nvram_read(tp, offset, &v);
3327 if (!res)
3328 *val = cpu_to_be32(v);
3329 return res;
3330 }
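/* Since tg3_nvram_read() returns the word in the same byte order as
 * any other register read, tg3_nvram_read_be32() only needs
 * cpu_to_be32() to recover the raw NVRAM byte stream. A
 * build-excluded sketch of that final step using the portable
 * shift-and-store idiom in place of the kernel helper:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Store a host-order word as big-endian bytes (what __be32 holds). */
static void ex_store_be32(uint8_t out[4], uint32_t v)
{
	out[0] = v >> 24;
	out[1] = v >> 16;
	out[2] = v >> 8;
	out[3] = v;
}

int main(void)
{
	uint8_t b[4];

	ex_store_be32(b, 0x11223344);
	printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;	/* 11 22 33 44 regardless of host endianness */
}
#endif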
3331
3332 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3333 u32 offset, u32 len, u8 *buf)
3334 {
3335 int i, j, rc = 0;
3336 u32 val;
3337
3338 for (i = 0; i < len; i += 4) {
3339 u32 addr;
3340 __be32 data;
3341
3342 addr = offset + i;
3343
3344 memcpy(&data, buf + i, 4);
3345
3346 /*
3347 * The SEEPROM interface expects the data to always be opposite
3348 * the native endian format. We accomplish this by reversing
3349 * all the operations that would have been performed on the
3350 * data from a call to tg3_nvram_read_be32().
3351 */
3352 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3353
3354 val = tr32(GRC_EEPROM_ADDR);
3355 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3356
3357 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3358 EEPROM_ADDR_READ);
3359 tw32(GRC_EEPROM_ADDR, val |
3360 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3361 (addr & EEPROM_ADDR_ADDR_MASK) |
3362 EEPROM_ADDR_START |
3363 EEPROM_ADDR_WRITE);
3364
3365 for (j = 0; j < 1000; j++) {
3366 val = tr32(GRC_EEPROM_ADDR);
3367
3368 if (val & EEPROM_ADDR_COMPLETE)
3369 break;
3370 msleep(1);
3371 }
3372 if (!(val & EEPROM_ADDR_COMPLETE)) {
3373 rc = -EBUSY;
3374 break;
3375 }
3376 }
3377
3378 return rc;
3379 }
3380
3381 /* offset and length are dword aligned */
3382 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3383 u8 *buf)
3384 {
3385 int ret = 0;
3386 u32 pagesize = tp->nvram_pagesize;
3387 u32 pagemask = pagesize - 1;
3388 u32 nvram_cmd;
3389 u8 *tmp;
3390
3391 tmp = kmalloc(pagesize, GFP_KERNEL);
3392 if (tmp == NULL)
3393 return -ENOMEM;
3394
3395 while (len) {
3396 int j;
3397 u32 phy_addr, page_off, size;
3398
3399 phy_addr = offset & ~pagemask;
3400
3401 for (j = 0; j < pagesize; j += 4) {
3402 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3403 (__be32 *) (tmp + j));
3404 if (ret)
3405 break;
3406 }
3407 if (ret)
3408 break;
3409
3410 page_off = offset & pagemask;
3411 size = pagesize - page_off;
3412 if (len < size)
3413 size = len;
3414
3415 len -= size;
3416
3417 memcpy(tmp + page_off, buf, size);
3418 buf += size;
3419 offset = offset + (pagesize - page_off);
3420
3421 tg3_enable_nvram_access(tp);
3422
3423 /*
3424 * Before we can erase the flash page, we need
3425 * to issue a special "write enable" command.
3426 */
3427 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3428
3429 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 break;
3431
3432 /* Erase the target page */
3433 tw32(NVRAM_ADDR, phy_addr);
3434
3435 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3436 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3437
3438 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3439 break;
3440
3441 /* Issue another write enable to start the write. */
3442 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3443
3444 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3445 break;
3446
3447 for (j = 0; j < pagesize; j += 4) {
3448 __be32 data;
3449
3450 data = *((__be32 *) (tmp + j));
3451
3452 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3453
3454 tw32(NVRAM_ADDR, phy_addr + j);
3455
3456 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3457 NVRAM_CMD_WR;
3458
3459 if (j == 0)
3460 nvram_cmd |= NVRAM_CMD_FIRST;
3461 else if (j == (pagesize - 4))
3462 nvram_cmd |= NVRAM_CMD_LAST;
3463
3464 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3465 if (ret)
3466 break;
3467 }
3468 if (ret)
3469 break;
3470 }
3471
3472 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3473 tg3_nvram_exec_cmd(tp, nvram_cmd);
3474
3475 kfree(tmp);
3476
3477 return ret;
3478 }
3479
3480 /* offset and length are dword aligned */
3481 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3482 u8 *buf)
3483 {
3484 int i, ret = 0;
3485
3486 for (i = 0; i < len; i += 4, offset += 4) {
3487 u32 page_off, phy_addr, nvram_cmd;
3488 __be32 data;
3489
3490 memcpy(&data, buf + i, 4);
3491 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3492
3493 page_off = offset % tp->nvram_pagesize;
3494
3495 phy_addr = tg3_nvram_phys_addr(tp, offset);
3496
3497 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3498
3499 if (page_off == 0 || i == 0)
3500 nvram_cmd |= NVRAM_CMD_FIRST;
3501 if (page_off == (tp->nvram_pagesize - 4))
3502 nvram_cmd |= NVRAM_CMD_LAST;
3503
3504 if (i == (len - 4))
3505 nvram_cmd |= NVRAM_CMD_LAST;
3506
3507 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3508 !tg3_flag(tp, FLASH) ||
3509 !tg3_flag(tp, 57765_PLUS))
3510 tw32(NVRAM_ADDR, phy_addr);
3511
3512 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3513 !tg3_flag(tp, 5755_PLUS) &&
3514 (tp->nvram_jedecnum == JEDEC_ST) &&
3515 (nvram_cmd & NVRAM_CMD_FIRST)) {
3516 u32 cmd;
3517
3518 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3519 ret = tg3_nvram_exec_cmd(tp, cmd);
3520 if (ret)
3521 break;
3522 }
3523 if (!tg3_flag(tp, FLASH)) {
3524 /* We always do complete word writes to eeprom. */
3525 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3526 }
3527
3528 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3529 if (ret)
3530 break;
3531 }
3532 return ret;
3533 }
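/* In the buffered write loop above, NVRAM_CMD_FIRST must accompany
 * the first word of the transfer or of each flash page, and
 * NVRAM_CMD_LAST the final word of a page or of the whole buffer.
 * A build-excluded sketch of just that flag computation; the page
 * size and flag values are arbitrary stand-ins:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_CMD_FIRST	0x1u
#define EX_CMD_LAST	0x2u

static unsigned ex_wr_flags(uint32_t i, uint32_t offset, uint32_t len,
			    uint32_t pagesize)
{
	uint32_t page_off = offset % pagesize;
	unsigned cmd = 0;

	if (page_off == 0 || i == 0)
		cmd |= EX_CMD_FIRST;
	if (page_off == pagesize - 4 || i == len - 4)
		cmd |= EX_CMD_LAST;
	return cmd;
}

int main(void)
{
	/* 16-byte write starting 4 bytes short of a 256-byte page
	 * boundary: expect FIRST|LAST, FIRST, 0, LAST (3, 1, 0, 2).
	 */
	uint32_t i, off;

	for (i = 0, off = 252; i < 16; i += 4, off += 4)
		printf("i=%2u flags=%u\n", i,
		       ex_wr_flags(i, off, 16, 256));
	return 0;
}
#endif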
3534
3535 /* offset and length are dword aligned */
3536 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3537 {
3538 int ret;
3539
3540 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3541 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3542 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3543 udelay(40);
3544 }
3545
3546 if (!tg3_flag(tp, NVRAM)) {
3547 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3548 } else {
3549 u32 grc_mode;
3550
3551 ret = tg3_nvram_lock(tp);
3552 if (ret)
3553 return ret;
3554
3555 tg3_enable_nvram_access(tp);
3556 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3557 tw32(NVRAM_WRITE1, 0x406);
3558
3559 grc_mode = tr32(GRC_MODE);
3560 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3561
3562 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3563 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3564 buf);
3565 } else {
3566 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3567 buf);
3568 }
3569
3570 grc_mode = tr32(GRC_MODE);
3571 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3572
3573 tg3_disable_nvram_access(tp);
3574 tg3_nvram_unlock(tp);
3575 }
3576
3577 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3578 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3579 udelay(40);
3580 }
3581
3582 return ret;
3583 }
3584
3585 #define RX_CPU_SCRATCH_BASE 0x30000
3586 #define RX_CPU_SCRATCH_SIZE 0x04000
3587 #define TX_CPU_SCRATCH_BASE 0x34000
3588 #define TX_CPU_SCRATCH_SIZE 0x04000
3589
3590 /* tp->lock is held. */
3591 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3592 {
3593 int i;
3594 const int iters = 10000;
3595
3596 for (i = 0; i < iters; i++) {
3597 tw32(cpu_base + CPU_STATE, 0xffffffff);
3598 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3599 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3600 break;
3601 if (pci_channel_offline(tp->pdev))
3602 return -EBUSY;
3603 }
3604
3605 return (i == iters) ? -EBUSY : 0;
3606 }
3607
3608 /* tp->lock is held. */
3609 static int tg3_rxcpu_pause(struct tg3 *tp)
3610 {
3611 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3612
3613 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3614 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3615 udelay(10);
3616
3617 return rc;
3618 }
3619
3620 /* tp->lock is held. */
3621 static int tg3_txcpu_pause(struct tg3 *tp)
3622 {
3623 return tg3_pause_cpu(tp, TX_CPU_BASE);
3624 }
3625
3626 /* tp->lock is held. */
3627 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3628 {
3629 tw32(cpu_base + CPU_STATE, 0xffffffff);
3630 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3631 }
3632
3633 /* tp->lock is held. */
3634 static void tg3_rxcpu_resume(struct tg3 *tp)
3635 {
3636 tg3_resume_cpu(tp, RX_CPU_BASE);
3637 }
3638
3639 /* tp->lock is held. */
3640 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3641 {
3642 int rc;
3643
3644 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3645
3646 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3647 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3648
3649 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3650 return 0;
3651 }
3652 if (cpu_base == RX_CPU_BASE) {
3653 rc = tg3_rxcpu_pause(tp);
3654 } else {
3655 /*
3656 * There is only an Rx CPU for the 5750 derivative in the
3657 * BCM4785.
3658 */
3659 if (tg3_flag(tp, IS_SSB_CORE))
3660 return 0;
3661
3662 rc = tg3_txcpu_pause(tp);
3663 }
3664
3665 if (rc) {
3666 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3667 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3668 return -ENODEV;
3669 }
3670
3671 /* Clear firmware's nvram arbitration. */
3672 if (tg3_flag(tp, NVRAM))
3673 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3674 return 0;
3675 }
3676
3677 static int tg3_fw_data_len(struct tg3 *tp,
3678 const struct tg3_firmware_hdr *fw_hdr)
3679 {
3680 int fw_len;
3681
3682 /* Non-fragmented firmware has one firmware header followed by a
3683 * contiguous chunk of data to be written. The length field in that
3684 * header is not the length of the data to be written but the
3685 * complete length of the bss. The data length is derived from
3686 * tp->fw->size minus the headers.
3687 *
3688 * Fragmented firmware has a main header followed by multiple
3689 * fragments. Each fragment is identical to non-fragmented firmware,
3690 * with a firmware header followed by a contiguous chunk of data. In
3691 * the main header, the length field is unused and set to 0xffffffff.
3692 * In each fragment header the length is the entire size of that
3693 * fragment, i.e. fragment data plus header length. The data length
3694 * is therefore the header's length field minus TG3_FW_HDR_LEN.
3695 */
3696 if (tp->fw_len == 0xffffffff)
3697 fw_len = be32_to_cpu(fw_hdr->len);
3698 else
3699 fw_len = tp->fw->size;
3700
3701 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3702 }
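/* A build-excluded worked example of the rule above, assuming the
 * header is the 12-byte version/base_addr/len triple: a fragment
 * whose header reports len = 268 carries (268 - 12) / 4 = 64 data
 * words, whereas a non-fragmented image takes its byte count from
 * the file size rather than the header:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_FW_HDR_LEN	12u	/* version + base_addr + len (assumed) */

static unsigned ex_fw_data_words(uint32_t hdr_len, uint32_t file_sz,
				 int fragmented)
{
	uint32_t fw_len = fragmented ? hdr_len : file_sz;

	return (fw_len - EX_FW_HDR_LEN) / sizeof(uint32_t);
}

int main(void)
{
	printf("%u\n", ex_fw_data_words(268, 0, 1));	/* prints 64 */
	return 0;
}
#endif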
3703
3704 /* tp->lock is held. */
3705 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3706 u32 cpu_scratch_base, int cpu_scratch_size,
3707 const struct tg3_firmware_hdr *fw_hdr)
3708 {
3709 int err, i;
3710 void (*write_op)(struct tg3 *, u32, u32);
3711 int total_len = tp->fw->size;
3712
3713 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3714 netdev_err(tp->dev,
3715 "%s: Trying to load TX cpu firmware which is 5705\n",
3716 __func__);
3717 return -EINVAL;
3718 }
3719
3720 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3721 write_op = tg3_write_mem;
3722 else
3723 write_op = tg3_write_indirect_reg32;
3724
3725 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3726 /* It is possible that bootcode is still loading at this point.
3727 * Get the nvram lock first before halting the cpu.
3728 */
3729 int lock_err = tg3_nvram_lock(tp);
3730 err = tg3_halt_cpu(tp, cpu_base);
3731 if (!lock_err)
3732 tg3_nvram_unlock(tp);
3733 if (err)
3734 goto out;
3735
3736 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3737 write_op(tp, cpu_scratch_base + i, 0);
3738 tw32(cpu_base + CPU_STATE, 0xffffffff);
3739 tw32(cpu_base + CPU_MODE,
3740 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3741 } else {
3742 /* Subtract additional main header for fragmented firmware and
3743 * advance to the first fragment
3744 */
3745 total_len -= TG3_FW_HDR_LEN;
3746 fw_hdr++;
3747 }
3748
3749 do {
3750 u32 *fw_data = (u32 *)(fw_hdr + 1);
3751 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3752 write_op(tp, cpu_scratch_base +
3753 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3754 (i * sizeof(u32)),
3755 be32_to_cpu(fw_data[i]));
3756
3757 total_len -= be32_to_cpu(fw_hdr->len);
3758
3759 /* Advance to next fragment */
3760 fw_hdr = (struct tg3_firmware_hdr *)
3761 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3762 } while (total_len > 0);
3763
3764 err = 0;
3765
3766 out:
3767 return err;
3768 }
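/* The do/while loop above walks fragments by byte length: each
 * fragment header's len field covers the header itself plus its
 * data, so advancing the header pointer by len lands on the next
 * fragment. A build-excluded sketch of that walk over an in-memory
 * image; for simplicity the fields here are host-order, whereas the
 * real blob stores them big-endian:
 */
#if 0
#include <stdio.h>
#include <string.h>

struct ex_fw_hdr {		/* the version/base/len triple */
	unsigned version;
	unsigned base_addr;
	unsigned len;		/* header + data, in bytes */
};

int main(void)
{
	/* Two fragments with 8 and 4 bytes of payload. */
	unsigned char blob[36] = { 0 };
	struct ex_fw_hdr h1 = { 1, 0x8000, 20 }, h2 = { 1, 0x9000, 16 };
	const unsigned char *p = blob, *end = blob + sizeof(blob);

	memcpy(blob, &h1, sizeof(h1));
	memcpy(blob + 20, &h2, sizeof(h2));

	while (p < end) {
		struct ex_fw_hdr h;

		memcpy(&h, p, sizeof(h));
		printf("fragment @%#x, %u data bytes\n",
		       h.base_addr, h.len - 12);
		p += h.len;	/* len covers header + data */
	}
	return 0;
}
#endif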
3769
3770 /* tp->lock is held. */
3771 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3772 {
3773 int i;
3774 const int iters = 5;
3775
3776 tw32(cpu_base + CPU_STATE, 0xffffffff);
3777 tw32_f(cpu_base + CPU_PC, pc);
3778
3779 for (i = 0; i < iters; i++) {
3780 if (tr32(cpu_base + CPU_PC) == pc)
3781 break;
3782 tw32(cpu_base + CPU_STATE, 0xffffffff);
3783 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3784 tw32_f(cpu_base + CPU_PC, pc);
3785 udelay(1000);
3786 }
3787
3788 return (i == iters) ? -EBUSY : 0;
3789 }
3790
3791 /* tp->lock is held. */
3792 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3793 {
3794 const struct tg3_firmware_hdr *fw_hdr;
3795 int err;
3796
3797 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3798
3799 /* Firmware blob starts with version numbers, followed by
3800 * start address and length. We are setting complete length.
3801 * length = end_address_of_bss - start_address_of_text.
3802 * Remainder is the blob to be loaded contiguously
3803 * from start address. */
3804
3805 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3806 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3807 fw_hdr);
3808 if (err)
3809 return err;
3810
3811 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3812 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3813 fw_hdr);
3814 if (err)
3815 return err;
3816
3817 /* Now startup only the RX cpu. */
3818 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3819 be32_to_cpu(fw_hdr->base_addr));
3820 if (err) {
3821 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3822 "should be %08x\n", __func__,
3823 tr32(RX_CPU_BASE + CPU_PC),
3824 be32_to_cpu(fw_hdr->base_addr));
3825 return -ENODEV;
3826 }
3827
3828 tg3_rxcpu_resume(tp);
3829
3830 return 0;
3831 }
3832
3833 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3834 {
3835 const int iters = 1000;
3836 int i;
3837 u32 val;
3838
3839 /* Wait for boot code to complete initialization and enter service
3840 * loop. It is then safe to download service patches.
3841 */
3842 for (i = 0; i < iters; i++) {
3843 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3844 break;
3845
3846 udelay(10);
3847 }
3848
3849 if (i == iters) {
3850 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3851 return -EBUSY;
3852 }
3853
3854 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3855 if (val & 0xff) {
3856 netdev_warn(tp->dev,
3857 "Other patches exist. Not downloading EEE patch\n");
3858 return -EEXIST;
3859 }
3860
3861 return 0;
3862 }
3863
3864 /* tp->lock is held. */
3865 static void tg3_load_57766_firmware(struct tg3 *tp)
3866 {
3867 struct tg3_firmware_hdr *fw_hdr;
3868
3869 if (!tg3_flag(tp, NO_NVRAM))
3870 return;
3871
3872 if (tg3_validate_rxcpu_state(tp))
3873 return;
3874
3875 if (!tp->fw)
3876 return;
3877
3878 /* This firmware blob has a different format from older firmware
3879 * releases, as described below. The main difference is that the
3880 * data is fragmented and written to non-contiguous locations.
3881 *
3882 * The blob begins with a firmware header identical to other
3883 * firmware, consisting of version, base addr and length. The length
3884 * here is unused and set to 0xffffffff.
3885 *
3886 * This is followed by a series of firmware fragments, each
3887 * individually identical to older firmware: a firmware header
3888 * followed by the data for that fragment. The version field of
3889 * each fragment header is unused.
3890 */
3891
3892 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3893 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3894 return;
3895
3896 if (tg3_rxcpu_pause(tp))
3897 return;
3898
3899 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3900 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3901
3902 tg3_rxcpu_resume(tp);
3903 }
3904
3905 /* tp->lock is held. */
3906 static int tg3_load_tso_firmware(struct tg3 *tp)
3907 {
3908 const struct tg3_firmware_hdr *fw_hdr;
3909 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3910 int err;
3911
3912 if (!tg3_flag(tp, FW_TSO))
3913 return 0;
3914
3915 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3916
3917 /* Firmware blob starts with version numbers, followed by
3918 * start address and length. We are setting complete length.
3919 * length = end_address_of_bss - start_address_of_text.
3920 * Remainder is the blob to be loaded contiguously
3921 * from start address. */
3922
3923 cpu_scratch_size = tp->fw_len;
3924
3925 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3926 cpu_base = RX_CPU_BASE;
3927 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3928 } else {
3929 cpu_base = TX_CPU_BASE;
3930 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3931 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3932 }
3933
3934 err = tg3_load_firmware_cpu(tp, cpu_base,
3935 cpu_scratch_base, cpu_scratch_size,
3936 fw_hdr);
3937 if (err)
3938 return err;
3939
3940 /* Now startup the cpu. */
3941 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3942 be32_to_cpu(fw_hdr->base_addr));
3943 if (err) {
3944 netdev_err(tp->dev,
3945 "%s fails to set CPU PC, is %08x should be %08x\n",
3946 __func__, tr32(cpu_base + CPU_PC),
3947 be32_to_cpu(fw_hdr->base_addr));
3948 return -ENODEV;
3949 }
3950
3951 tg3_resume_cpu(tp, cpu_base);
3952 return 0;
3953 }
3954
3955 /* tp->lock is held. */
3956 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3957 {
3958 u32 addr_high, addr_low;
3959
3960 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3961 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3962 (mac_addr[4] << 8) | mac_addr[5]);
3963
3964 if (index < 4) {
3965 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3966 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3967 } else {
3968 index -= 4;
3969 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3970 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3971 }
3972 }
3973
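/* Worked example of the packing above, for a made-up MAC address
 * 00:10:18:aa:bb:cc:
 *
 *   addr_high = (0x00 << 8) | 0x10 = 0x00000010
 *   addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
 *             = 0x18aabbcc
 *
 * Indices 0-3 land in the MAC_ADDR_0_* register pairs; indices 4-15
 * fall through to the MAC_EXTADDR_0_* range.
 */
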
3974 /* tp->lock is held. */
3975 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3976 {
3977 u32 addr_high;
3978 int i;
3979
3980 for (i = 0; i < 4; i++) {
3981 if (i == 1 && skip_mac_1)
3982 continue;
3983 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3984 }
3985
3986 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3987 tg3_asic_rev(tp) == ASIC_REV_5704) {
3988 for (i = 4; i < 16; i++)
3989 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3990 }
3991
3992 addr_high = (tp->dev->dev_addr[0] +
3993 tp->dev->dev_addr[1] +
3994 tp->dev->dev_addr[2] +
3995 tp->dev->dev_addr[3] +
3996 tp->dev->dev_addr[4] +
3997 tp->dev->dev_addr[5]) &
3998 TX_BACKOFF_SEED_MASK;
3999 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4000 }
4001
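/* The TX backoff seed is simply the byte-wise sum of the MAC address
 * masked with TX_BACKOFF_SEED_MASK; for the made-up address above,
 * 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259 before masking.
 * Deriving the seed from the address keeps half-duplex backoff
 * timing from marching in lockstep across NICs on one segment.
 */
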
4002 static void tg3_enable_register_access(struct tg3 *tp)
4003 {
4004 /*
4005 * Make sure register accesses (indirect or otherwise) will function
4006 * correctly.
4007 */
4008 pci_write_config_dword(tp->pdev,
4009 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4010 }
4011
4012 static int tg3_power_up(struct tg3 *tp)
4013 {
4014 int err;
4015
4016 tg3_enable_register_access(tp);
4017
4018 err = pci_set_power_state(tp->pdev, PCI_D0);
4019 if (!err) {
4020 /* Switch out of Vaux if it is a NIC */
4021 tg3_pwrsrc_switch_to_vmain(tp);
4022 } else {
4023 netdev_err(tp->dev, "Transition to D0 failed\n");
4024 }
4025
4026 return err;
4027 }
4028
4029 static int tg3_setup_phy(struct tg3 *, bool);
4030
4031 static int tg3_power_down_prepare(struct tg3 *tp)
4032 {
4033 u32 misc_host_ctrl;
4034 bool device_should_wake, do_low_power;
4035
4036 tg3_enable_register_access(tp);
4037
4038 /* Restore the CLKREQ setting. */
4039 if (tg3_flag(tp, CLKREQ_BUG))
4040 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4041 PCI_EXP_LNKCTL_CLKREQ_EN);
4042
4043 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4044 tw32(TG3PCI_MISC_HOST_CTRL,
4045 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4046
4047 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4048 tg3_flag(tp, WOL_ENABLE);
4049
4050 if (tg3_flag(tp, USE_PHYLIB)) {
4051 do_low_power = false;
4052 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4053 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4054 struct phy_device *phydev;
4055 u32 phyid, advertising;
4056
4057 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4058
4059 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4060
4061 tp->link_config.speed = phydev->speed;
4062 tp->link_config.duplex = phydev->duplex;
4063 tp->link_config.autoneg = phydev->autoneg;
4064 tp->link_config.advertising = phydev->advertising;
4065
4066 advertising = ADVERTISED_TP |
4067 ADVERTISED_Pause |
4068 ADVERTISED_Autoneg |
4069 ADVERTISED_10baseT_Half;
4070
4071 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4072 if (tg3_flag(tp, WOL_SPEED_100MB))
4073 advertising |=
4074 ADVERTISED_100baseT_Half |
4075 ADVERTISED_100baseT_Full |
4076 ADVERTISED_10baseT_Full;
4077 else
4078 advertising |= ADVERTISED_10baseT_Full;
4079 }
4080
4081 phydev->advertising = advertising;
4082
4083 phy_start_aneg(phydev);
4084
4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086 if (phyid != PHY_ID_BCMAC131) {
4087 phyid &= PHY_BCM_OUI_MASK;
4088 if (phyid == PHY_BCM_OUI_1 ||
4089 phyid == PHY_BCM_OUI_2 ||
4090 phyid == PHY_BCM_OUI_3)
4091 do_low_power = true;
4092 }
4093 }
4094 } else {
4095 do_low_power = true;
4096
4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099
4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101 tg3_setup_phy(tp, false);
4102 }
4103
4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105 u32 val;
4106
4107 val = tr32(GRC_VCPU_EXT_CTRL);
4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4110 int i;
4111 u32 val;
4112
4113 for (i = 0; i < 200; i++) {
4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116 break;
4117 msleep(1);
4118 }
4119 }
4120 if (tg3_flag(tp, WOL_CAP))
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122 WOL_DRV_STATE_SHUTDOWN |
4123 WOL_DRV_WOL |
4124 WOL_SET_MAGIC_PKT);
4125
4126 if (device_should_wake) {
4127 u32 mac_mode;
4128
4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130 if (do_low_power &&
4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132 tg3_phy_auxctl_write(tp,
4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134 MII_TG3_AUXCTL_PCTL_WOL_EN |
4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137 udelay(40);
4138 }
4139
4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142 else if (tp->phy_flags &
4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144 if (tp->link_config.active_speed == SPEED_1000)
4145 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 else
4147 mac_mode = MAC_MODE_PORT_MODE_MII;
4148 } else
4149 mac_mode = MAC_MODE_PORT_MODE_MII;
4150
4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154 SPEED_100 : SPEED_10;
4155 if (tg3_5700_link_polarity(tp, speed))
4156 mac_mode |= MAC_MODE_LINK_POLARITY;
4157 else
4158 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159 }
4160 } else {
4161 mac_mode = MAC_MODE_PORT_MODE_TBI;
4162 }
4163
4164 if (!tg3_flag(tp, 5750_PLUS))
4165 tw32(MAC_LED_CTRL, tp->led_ctrl);
4166
4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171
4172 if (tg3_flag(tp, ENABLE_APE))
4173 mac_mode |= MAC_MODE_APE_TX_EN |
4174 MAC_MODE_APE_RX_EN |
4175 MAC_MODE_TDE_ENABLE;
4176
4177 tw32_f(MAC_MODE, mac_mode);
4178 udelay(100);
4179
4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181 udelay(10);
4182 }
4183
4184 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187 u32 base_val;
4188
4189 base_val = tp->pci_clock_ctrl;
4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191 CLOCK_CTRL_TXCLK_DISABLE);
4192
4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195 } else if (tg3_flag(tp, 5780_CLASS) ||
4196 tg3_flag(tp, CPMU_PRESENT) ||
4197 tg3_asic_rev(tp) == ASIC_REV_5906) {
4198 /* do nothing */
4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200 u32 newbits1, newbits2;
4201
4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203 tg3_asic_rev(tp) == ASIC_REV_5701) {
4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205 CLOCK_CTRL_TXCLK_DISABLE |
4206 CLOCK_CTRL_ALTCLK);
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 } else if (tg3_flag(tp, 5705_PLUS)) {
4209 newbits1 = CLOCK_CTRL_625_CORE;
4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211 } else {
4212 newbits1 = CLOCK_CTRL_ALTCLK;
4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 }
4215
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217 40);
4218
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220 40);
4221
4222 if (!tg3_flag(tp, 5705_PLUS)) {
4223 u32 newbits3;
4224
4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226 tg3_asic_rev(tp) == ASIC_REV_5701) {
4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228 CLOCK_CTRL_TXCLK_DISABLE |
4229 CLOCK_CTRL_44MHZ_CORE);
4230 } else {
4231 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232 }
4233
4234 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235 tp->pci_clock_ctrl | newbits3, 40);
4236 }
4237 }
4238
4239 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240 tg3_power_down_phy(tp, do_low_power);
4241
4242 tg3_frob_aux_power(tp, true);
4243
4244 /* Workaround for unstable PLL clock */
4245 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248 u32 val = tr32(0x7d00);
4249
4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251 tw32(0x7d00, val);
4252 if (!tg3_flag(tp, ENABLE_ASF)) {
4253 int err;
4254
4255 err = tg3_nvram_lock(tp);
4256 tg3_halt_cpu(tp, RX_CPU_BASE);
4257 if (!err)
4258 tg3_nvram_unlock(tp);
4259 }
4260 }
4261
4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263
4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265
4266 return 0;
4267 }
4268
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272 pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4276 {
4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278 case MII_TG3_AUX_STAT_10HALF:
4279 *speed = SPEED_10;
4280 *duplex = DUPLEX_HALF;
4281 break;
4282
4283 case MII_TG3_AUX_STAT_10FULL:
4284 *speed = SPEED_10;
4285 *duplex = DUPLEX_FULL;
4286 break;
4287
4288 case MII_TG3_AUX_STAT_100HALF:
4289 *speed = SPEED_100;
4290 *duplex = DUPLEX_HALF;
4291 break;
4292
4293 case MII_TG3_AUX_STAT_100FULL:
4294 *speed = SPEED_100;
4295 *duplex = DUPLEX_FULL;
4296 break;
4297
4298 case MII_TG3_AUX_STAT_1000HALF:
4299 *speed = SPEED_1000;
4300 *duplex = DUPLEX_HALF;
4301 break;
4302
4303 case MII_TG3_AUX_STAT_1000FULL:
4304 *speed = SPEED_1000;
4305 *duplex = DUPLEX_FULL;
4306 break;
4307
4308 default:
4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311 SPEED_10;
4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313 DUPLEX_HALF;
4314 break;
4315 }
4316 *speed = SPEED_UNKNOWN;
4317 *duplex = DUPLEX_UNKNOWN;
4318 break;
4319 }
4320 }
4321
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324 int err = 0;
4325 u32 val, new_adv;
4326
4327 new_adv = ADVERTISE_CSMA;
4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329 new_adv |= mii_advertise_flowctrl(flowctrl);
4330
4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332 if (err)
4333 goto done;
4334
4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337
4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341
4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343 if (err)
4344 goto done;
4345 }
4346
4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348 goto done;
4349
4350 tw32(TG3_CPMU_EEE_MODE,
4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352
4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354 if (!err) {
4355 u32 err2;
4356
4357 val = 0;
4358 /* Advertise 100-BaseTX EEE ability */
4359 if (advertise & ADVERTISED_100baseT_Full)
4360 val |= MDIO_AN_EEE_ADV_100TX;
4361 /* Advertise 1000-BaseT EEE ability */
4362 if (advertise & ADVERTISED_1000baseT_Full)
4363 val |= MDIO_AN_EEE_ADV_1000T;
4364
4365 if (!tp->eee.eee_enabled) {
4366 val = 0;
4367 tp->eee.advertised = 0;
4368 } else {
4369 tp->eee.advertised = advertise &
4370 (ADVERTISED_100baseT_Full |
4371 ADVERTISED_1000baseT_Full);
4372 }
4373
4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4375 if (err)
4376 val = 0;
4377
4378 switch (tg3_asic_rev(tp)) {
4379 case ASIC_REV_5717:
4380 case ASIC_REV_57765:
4381 case ASIC_REV_57766:
4382 case ASIC_REV_5719:
4383 			/* If we advertised any EEE abilities above... */
4384 if (val)
4385 val = MII_TG3_DSP_TAP26_ALNOKO |
4386 MII_TG3_DSP_TAP26_RMRXSTO |
4387 MII_TG3_DSP_TAP26_OPCSINPT;
4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4389 /* Fall through */
4390 case ASIC_REV_5720:
4391 case ASIC_REV_5762:
4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4394 MII_TG3_DSP_CH34TP2_HIBW01);
4395 }
4396
4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4398 if (!err)
4399 err = err2;
4400 }
4401
4402 done:
4403 return err;
4404 }
4405
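/* Concrete example of the ethtool-to-MII translation used above:
 * advertise = ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full
 * becomes ADVERTISE_100FULL in MII_ADVERTISE (via
 * ethtool_adv_to_mii_adv_t(), masked to ADVERTISE_ALL) and
 * ADVERTISE_1000FULL in MII_CTRL1000 (via
 * ethtool_adv_to_mii_ctrl1000_t()).
 */
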
4406 static void tg3_phy_copper_begin(struct tg3 *tp)
4407 {
4408 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4410 u32 adv, fc;
4411
4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4414 adv = ADVERTISED_10baseT_Half |
4415 ADVERTISED_10baseT_Full;
4416 if (tg3_flag(tp, WOL_SPEED_100MB))
4417 adv |= ADVERTISED_100baseT_Half |
4418 ADVERTISED_100baseT_Full;
4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4420 if (!(tp->phy_flags &
4421 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4422 adv |= ADVERTISED_1000baseT_Half;
4423 adv |= ADVERTISED_1000baseT_Full;
4424 }
4425
4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4427 } else {
4428 adv = tp->link_config.advertising;
4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4430 adv &= ~(ADVERTISED_1000baseT_Half |
4431 ADVERTISED_1000baseT_Full);
4432
4433 fc = tp->link_config.flowctrl;
4434 }
4435
4436 tg3_phy_autoneg_cfg(tp, adv, fc);
4437
4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4440 /* Normally during power down we want to autonegotiate
4441 * the lowest possible speed for WOL. However, to avoid
4442 * link flap, we leave it untouched.
4443 */
4444 return;
4445 }
4446
4447 tg3_writephy(tp, MII_BMCR,
4448 BMCR_ANENABLE | BMCR_ANRESTART);
4449 } else {
4450 int i;
4451 u32 bmcr, orig_bmcr;
4452
4453 tp->link_config.active_speed = tp->link_config.speed;
4454 tp->link_config.active_duplex = tp->link_config.duplex;
4455
4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4457 /* With autoneg disabled, 5715 only links up when the
4458 * advertisement register has the configured speed
4459 * enabled.
4460 */
4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4462 }
4463
4464 bmcr = 0;
4465 switch (tp->link_config.speed) {
4466 default:
4467 case SPEED_10:
4468 break;
4469
4470 case SPEED_100:
4471 bmcr |= BMCR_SPEED100;
4472 break;
4473
4474 case SPEED_1000:
4475 bmcr |= BMCR_SPEED1000;
4476 break;
4477 }
4478
4479 if (tp->link_config.duplex == DUPLEX_FULL)
4480 bmcr |= BMCR_FULLDPLX;
4481
4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4483 (bmcr != orig_bmcr)) {
4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4485 for (i = 0; i < 1500; i++) {
4486 u32 tmp;
4487
4488 udelay(10);
4489 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4490 tg3_readphy(tp, MII_BMSR, &tmp))
4491 continue;
4492 if (!(tmp & BMSR_LSTATUS)) {
4493 udelay(40);
4494 break;
4495 }
4496 }
4497 tg3_writephy(tp, MII_BMCR, bmcr);
4498 udelay(40);
4499 }
4500 }
4501 }
4502
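/* Note on the forced-speed path above: BMCR_LOOPBACK is written
 * first to take the link down (the 1500-iteration poll waits up to
 * ~15 ms for BMSR_LSTATUS to clear), so the final BMCR write with
 * the requested speed/duplex starts from a clean link-down state.
 */
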
4503 static int tg3_phy_pull_config(struct tg3 *tp)
4504 {
4505 int err;
4506 u32 val;
4507
4508 err = tg3_readphy(tp, MII_BMCR, &val);
4509 if (err)
4510 goto done;
4511
4512 if (!(val & BMCR_ANENABLE)) {
4513 tp->link_config.autoneg = AUTONEG_DISABLE;
4514 tp->link_config.advertising = 0;
4515 tg3_flag_clear(tp, PAUSE_AUTONEG);
4516
4517 err = -EIO;
4518
4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4520 case 0:
4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522 goto done;
4523
4524 tp->link_config.speed = SPEED_10;
4525 break;
4526 case BMCR_SPEED100:
4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4528 goto done;
4529
4530 tp->link_config.speed = SPEED_100;
4531 break;
4532 case BMCR_SPEED1000:
4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4534 tp->link_config.speed = SPEED_1000;
4535 break;
4536 }
4537 /* Fall through */
4538 default:
4539 goto done;
4540 }
4541
4542 if (val & BMCR_FULLDPLX)
4543 tp->link_config.duplex = DUPLEX_FULL;
4544 else
4545 tp->link_config.duplex = DUPLEX_HALF;
4546
4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4548
4549 err = 0;
4550 goto done;
4551 }
4552
4553 tp->link_config.autoneg = AUTONEG_ENABLE;
4554 tp->link_config.advertising = ADVERTISED_Autoneg;
4555 tg3_flag_set(tp, PAUSE_AUTONEG);
4556
4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4558 u32 adv;
4559
4560 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4561 if (err)
4562 goto done;
4563
4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4565 tp->link_config.advertising |= adv | ADVERTISED_TP;
4566
4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4568 } else {
4569 tp->link_config.advertising |= ADVERTISED_FIBRE;
4570 }
4571
4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4573 u32 adv;
4574
4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4576 err = tg3_readphy(tp, MII_CTRL1000, &val);
4577 if (err)
4578 goto done;
4579
4580 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4581 } else {
4582 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4583 if (err)
4584 goto done;
4585
4586 adv = tg3_decode_flowctrl_1000X(val);
4587 tp->link_config.flowctrl = adv;
4588
4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4590 adv = mii_adv_to_ethtool_adv_x(val);
4591 }
4592
4593 tp->link_config.advertising |= adv;
4594 }
4595
4596 done:
4597 return err;
4598 }
4599
4600 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4601 {
4602 int err;
4603
4604 	/* Turn off tap power management and set the
4605 	 * extended packet length bit. */
4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4607
4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4613
4614 udelay(40);
4615
4616 return err;
4617 }
4618
4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4620 {
4621 struct ethtool_eee eee;
4622
4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4624 return true;
4625
4626 tg3_eee_pull_config(tp, &eee);
4627
4628 if (tp->eee.eee_enabled) {
4629 if (tp->eee.advertised != eee.advertised ||
4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4632 return false;
4633 } else {
4634 /* EEE is disabled but we're advertising */
4635 if (eee.advertised)
4636 return false;
4637 }
4638
4639 return true;
4640 }
4641
4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4643 {
4644 u32 advmsk, tgtadv, advertising;
4645
4646 advertising = tp->link_config.advertising;
4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4648
4649 advmsk = ADVERTISE_ALL;
4650 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4653 }
4654
4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4656 return false;
4657
4658 if ((*lcladv & advmsk) != tgtadv)
4659 return false;
4660
4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4662 u32 tg3_ctrl;
4663
4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4665
4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4667 return false;
4668
4669 if (tgtadv &&
4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4675 } else {
4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4677 }
4678
4679 if (tg3_ctrl != tgtadv)
4680 return false;
4681 }
4682
4683 return true;
4684 }
4685
4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4687 {
4688 u32 lpeth = 0;
4689
4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4691 u32 val;
4692
4693 if (tg3_readphy(tp, MII_STAT1000, &val))
4694 return false;
4695
4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4697 }
4698
4699 if (tg3_readphy(tp, MII_LPA, rmtadv))
4700 return false;
4701
4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4703 tp->link_config.rmt_adv = lpeth;
4704
4705 return true;
4706 }
4707
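/* Example of the decode above: a partner advertising 1000FULL and
 * 100FULL yields LPA_1000FULL in MII_STAT1000, mapped to
 * ADVERTISED_1000baseT_Full by mii_stat1000_to_ethtool_lpa_t(),
 * OR'd with LPA_100FULL from MII_LPA, mapped to
 * ADVERTISED_100baseT_Full by mii_lpa_to_ethtool_lpa_t(); the union
 * is recorded in tp->link_config.rmt_adv.
 */
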
4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4709 {
4710 if (curr_link_up != tp->link_up) {
4711 if (curr_link_up) {
4712 netif_carrier_on(tp->dev);
4713 } else {
4714 netif_carrier_off(tp->dev);
4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4717 }
4718
4719 tg3_link_report(tp);
4720 return true;
4721 }
4722
4723 return false;
4724 }
4725
4726 static void tg3_clear_mac_status(struct tg3 *tp)
4727 {
4728 tw32(MAC_EVENT, 0);
4729
4730 tw32_f(MAC_STATUS,
4731 MAC_STATUS_SYNC_CHANGED |
4732 MAC_STATUS_CFG_CHANGED |
4733 MAC_STATUS_MI_COMPLETION |
4734 MAC_STATUS_LNKSTATE_CHANGED);
4735 udelay(40);
4736 }
4737
4738 static void tg3_setup_eee(struct tg3 *tp)
4739 {
4740 u32 val;
4741
4742 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4743 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4746
4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4748
4749 tw32_f(TG3_CPMU_EEE_CTRL,
4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4751
4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4754 TG3_CPMU_EEEMD_LPI_IN_RX |
4755 TG3_CPMU_EEEMD_EEE_ENABLE;
4756
4757 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4759
4760 if (tg3_flag(tp, ENABLE_APE))
4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4762
4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4764
4765 tw32_f(TG3_CPMU_EEE_DBTMR1,
4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4767 (tp->eee.tx_lpi_timer & 0xffff));
4768
4769 tw32_f(TG3_CPMU_EEE_DBTMR2,
4770 TG3_CPMU_DBTMR2_APE_TX_2047US |
4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4772 }
4773
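/* The debounce timers above appear to be in microseconds: the low
 * 16 bits of DBTMR1 carry tp->eee.tx_lpi_timer, and the *_2047US
 * constant names suggest the PCIEXIT, APE_TX and TXIDXEQ windows
 * are fixed at ~2047 us each.
 */
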
4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4775 {
4776 bool current_link_up;
4777 u32 bmsr, val;
4778 u32 lcl_adv, rmt_adv;
4779 u16 current_speed;
4780 u8 current_duplex;
4781 int i, err;
4782
4783 tg3_clear_mac_status(tp);
4784
4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4786 tw32_f(MAC_MI_MODE,
4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4788 udelay(80);
4789 }
4790
4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4792
4793 /* Some third-party PHYs need to be reset on link going
4794 * down.
4795 */
4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4797 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4798 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4799 tp->link_up) {
4800 tg3_readphy(tp, MII_BMSR, &bmsr);
4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4802 !(bmsr & BMSR_LSTATUS))
4803 force_reset = true;
4804 }
4805 if (force_reset)
4806 tg3_phy_reset(tp);
4807
4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4809 tg3_readphy(tp, MII_BMSR, &bmsr);
4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4811 !tg3_flag(tp, INIT_COMPLETE))
4812 bmsr = 0;
4813
4814 if (!(bmsr & BMSR_LSTATUS)) {
4815 err = tg3_init_5401phy_dsp(tp);
4816 if (err)
4817 return err;
4818
4819 tg3_readphy(tp, MII_BMSR, &bmsr);
4820 for (i = 0; i < 1000; i++) {
4821 udelay(10);
4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823 (bmsr & BMSR_LSTATUS)) {
4824 udelay(40);
4825 break;
4826 }
4827 }
4828
4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4830 TG3_PHY_REV_BCM5401_B0 &&
4831 !(bmsr & BMSR_LSTATUS) &&
4832 tp->link_config.active_speed == SPEED_1000) {
4833 err = tg3_phy_reset(tp);
4834 if (!err)
4835 err = tg3_init_5401phy_dsp(tp);
4836 if (err)
4837 return err;
4838 }
4839 }
4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4842 /* 5701 {A0,B0} CRC bug workaround */
4843 tg3_writephy(tp, 0x15, 0x0a75);
4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4847 }
4848
4849 /* Clear pending interrupts... */
4850 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852
4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4856 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4857
4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4859 tg3_asic_rev(tp) == ASIC_REV_5701) {
4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4861 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4863 else
4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4865 }
4866
4867 current_link_up = false;
4868 current_speed = SPEED_UNKNOWN;
4869 current_duplex = DUPLEX_UNKNOWN;
4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4871 tp->link_config.rmt_adv = 0;
4872
4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4874 err = tg3_phy_auxctl_read(tp,
4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4876 &val);
4877 if (!err && !(val & (1 << 10))) {
4878 tg3_phy_auxctl_write(tp,
4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880 val | (1 << 10));
4881 goto relink;
4882 }
4883 }
4884
4885 bmsr = 0;
4886 for (i = 0; i < 100; i++) {
4887 tg3_readphy(tp, MII_BMSR, &bmsr);
4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4889 (bmsr & BMSR_LSTATUS))
4890 break;
4891 udelay(40);
4892 }
4893
4894 if (bmsr & BMSR_LSTATUS) {
4895 u32 aux_stat, bmcr;
4896
4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4898 for (i = 0; i < 2000; i++) {
4899 udelay(10);
4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4901 aux_stat)
4902 break;
4903 }
4904
4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4906 &current_speed,
4907 &current_duplex);
4908
4909 bmcr = 0;
4910 for (i = 0; i < 200; i++) {
4911 tg3_readphy(tp, MII_BMCR, &bmcr);
4912 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4913 continue;
4914 if (bmcr && bmcr != 0x7fff)
4915 break;
4916 udelay(10);
4917 }
4918
4919 lcl_adv = 0;
4920 rmt_adv = 0;
4921
4922 tp->link_config.active_speed = current_speed;
4923 tp->link_config.active_duplex = current_duplex;
4924
4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4927
4928 if ((bmcr & BMCR_ANENABLE) &&
4929 eee_config_ok &&
4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4932 current_link_up = true;
4933
4934 			/* Changes to EEE settings take effect only after a PHY
4935 			 * reset. If we have skipped a reset due to Link Flap
4936 			 * Avoidance being enabled, do it now.
4937 			 */
4938 if (!eee_config_ok &&
4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4940 !force_reset) {
4941 tg3_setup_eee(tp);
4942 tg3_phy_reset(tp);
4943 }
4944 } else {
4945 if (!(bmcr & BMCR_ANENABLE) &&
4946 tp->link_config.speed == current_speed &&
4947 tp->link_config.duplex == current_duplex) {
4948 current_link_up = true;
4949 }
4950 }
4951
4952 if (current_link_up &&
4953 tp->link_config.active_duplex == DUPLEX_FULL) {
4954 u32 reg, bit;
4955
4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4957 reg = MII_TG3_FET_GEN_STAT;
4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4959 } else {
4960 reg = MII_TG3_EXT_STAT;
4961 bit = MII_TG3_EXT_STAT_MDIX;
4962 }
4963
4964 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4966
4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4968 }
4969 }
4970
4971 relink:
4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4973 tg3_phy_copper_begin(tp);
4974
4975 if (tg3_flag(tp, ROBOSWITCH)) {
4976 current_link_up = true;
4977 			/* FIXME: when the BCM5325 switch is used, use 100 Mbit/s */
4978 current_speed = SPEED_1000;
4979 current_duplex = DUPLEX_FULL;
4980 tp->link_config.active_speed = current_speed;
4981 tp->link_config.active_duplex = current_duplex;
4982 }
4983
4984 tg3_readphy(tp, MII_BMSR, &bmsr);
4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4987 current_link_up = true;
4988 }
4989
4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4991 if (current_link_up) {
4992 if (tp->link_config.active_speed == SPEED_100 ||
4993 tp->link_config.active_speed == SPEED_10)
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4995 else
4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999 else
5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001
5002 	/* In order for the 5750 core in the BCM4785 chip to work properly
5003 	 * in RGMII mode, the LED Control Register must be set up.
5004 	 */
5005 if (tg3_flag(tp, RGMII_MODE)) {
5006 u32 led_ctrl = tr32(MAC_LED_CTRL);
5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5008
5009 if (tp->link_config.active_speed == SPEED_10)
5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5011 else if (tp->link_config.active_speed == SPEED_100)
5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013 LED_CTRL_100MBPS_ON);
5014 else if (tp->link_config.active_speed == SPEED_1000)
5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016 LED_CTRL_1000MBPS_ON);
5017
5018 tw32(MAC_LED_CTRL, led_ctrl);
5019 udelay(40);
5020 }
5021
5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5023 if (tp->link_config.active_duplex == DUPLEX_HALF)
5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5025
5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5027 if (current_link_up &&
5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5030 else
5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5032 }
5033
5034 /* ??? Without this setting Netgear GA302T PHY does not
5035 * ??? send/receive packets...
5036 */
5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5040 tw32_f(MAC_MI_MODE, tp->mi_mode);
5041 udelay(80);
5042 }
5043
5044 tw32_f(MAC_MODE, tp->mac_mode);
5045 udelay(40);
5046
5047 tg3_phy_eee_adjust(tp, current_link_up);
5048
5049 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5050 /* Polled via timer. */
5051 tw32_f(MAC_EVENT, 0);
5052 } else {
5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5054 }
5055 udelay(40);
5056
5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5058 current_link_up &&
5059 tp->link_config.active_speed == SPEED_1000 &&
5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5061 udelay(120);
5062 tw32_f(MAC_STATUS,
5063 (MAC_STATUS_SYNC_CHANGED |
5064 MAC_STATUS_CFG_CHANGED));
5065 udelay(40);
5066 tg3_write_mem(tp,
5067 NIC_SRAM_FIRMWARE_MBOX,
5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5069 }
5070
5071 /* Prevent send BD corruption. */
5072 if (tg3_flag(tp, CLKREQ_BUG)) {
5073 if (tp->link_config.active_speed == SPEED_100 ||
5074 tp->link_config.active_speed == SPEED_10)
5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5076 PCI_EXP_LNKCTL_CLKREQ_EN);
5077 else
5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5079 PCI_EXP_LNKCTL_CLKREQ_EN);
5080 }
5081
5082 tg3_test_and_report_link_chg(tp, current_link_up);
5083
5084 return 0;
5085 }
5086
5087 struct tg3_fiber_aneginfo {
5088 int state;
5089 #define ANEG_STATE_UNKNOWN 0
5090 #define ANEG_STATE_AN_ENABLE 1
5091 #define ANEG_STATE_RESTART_INIT 2
5092 #define ANEG_STATE_RESTART 3
5093 #define ANEG_STATE_DISABLE_LINK_OK 4
5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5095 #define ANEG_STATE_ABILITY_DETECT 6
5096 #define ANEG_STATE_ACK_DETECT_INIT 7
5097 #define ANEG_STATE_ACK_DETECT 8
5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5099 #define ANEG_STATE_COMPLETE_ACK 10
5100 #define ANEG_STATE_IDLE_DETECT_INIT 11
5101 #define ANEG_STATE_IDLE_DETECT 12
5102 #define ANEG_STATE_LINK_OK 13
5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5105
5106 u32 flags;
5107 #define MR_AN_ENABLE 0x00000001
5108 #define MR_RESTART_AN 0x00000002
5109 #define MR_AN_COMPLETE 0x00000004
5110 #define MR_PAGE_RX 0x00000008
5111 #define MR_NP_LOADED 0x00000010
5112 #define MR_TOGGLE_TX 0x00000020
5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5120 #define MR_TOGGLE_RX 0x00002000
5121 #define MR_NP_RX 0x00004000
5122
5123 #define MR_LINK_OK 0x80000000
5124
5125 unsigned long link_time, cur_time;
5126
5127 u32 ability_match_cfg;
5128 int ability_match_count;
5129
5130 char ability_match, idle_match, ack_match;
5131
5132 u32 txconfig, rxconfig;
5133 #define ANEG_CFG_NP 0x00000080
5134 #define ANEG_CFG_ACK 0x00000040
5135 #define ANEG_CFG_RF2 0x00000020
5136 #define ANEG_CFG_RF1 0x00000010
5137 #define ANEG_CFG_PS2 0x00000001
5138 #define ANEG_CFG_PS1 0x00008000
5139 #define ANEG_CFG_HD 0x00004000
5140 #define ANEG_CFG_FD 0x00002000
5141 #define ANEG_CFG_INVAL 0x00001f06
5142
5143 };
5144 #define ANEG_OK 0
5145 #define ANEG_DONE 1
5146 #define ANEG_TIMER_ENAB 2
5147 #define ANEG_FAILED -1
5148
5149 #define ANEG_STATE_SETTLE_TIME 10000
5150
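/* Rough happy-path flow of the state machine below (a sketch of the
 * 802.3z-style arbitration, not a complete transition diagram):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART
 *     -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *     -> ACK_DETECT_INIT -> ACK_DETECT
 *     -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *     -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * ANEG_STATE_SETTLE_TIME is measured in cur_time ticks; with the
 * ~1 us tick used by fiber_autoneg() below, each settle period is
 * roughly 10 ms.
 */
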
5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5152 struct tg3_fiber_aneginfo *ap)
5153 {
5154 u16 flowctrl;
5155 unsigned long delta;
5156 u32 rx_cfg_reg;
5157 int ret;
5158
5159 if (ap->state == ANEG_STATE_UNKNOWN) {
5160 ap->rxconfig = 0;
5161 ap->link_time = 0;
5162 ap->cur_time = 0;
5163 ap->ability_match_cfg = 0;
5164 ap->ability_match_count = 0;
5165 ap->ability_match = 0;
5166 ap->idle_match = 0;
5167 ap->ack_match = 0;
5168 }
5169 ap->cur_time++;
5170
5171 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5173
5174 if (rx_cfg_reg != ap->ability_match_cfg) {
5175 ap->ability_match_cfg = rx_cfg_reg;
5176 ap->ability_match = 0;
5177 ap->ability_match_count = 0;
5178 } else {
5179 if (++ap->ability_match_count > 1) {
5180 ap->ability_match = 1;
5181 ap->ability_match_cfg = rx_cfg_reg;
5182 }
5183 }
5184 if (rx_cfg_reg & ANEG_CFG_ACK)
5185 ap->ack_match = 1;
5186 else
5187 ap->ack_match = 0;
5188
5189 ap->idle_match = 0;
5190 } else {
5191 ap->idle_match = 1;
5192 ap->ability_match_cfg = 0;
5193 ap->ability_match_count = 0;
5194 ap->ability_match = 0;
5195 ap->ack_match = 0;
5196
5197 rx_cfg_reg = 0;
5198 }
5199
5200 ap->rxconfig = rx_cfg_reg;
5201 ret = ANEG_OK;
5202
5203 switch (ap->state) {
5204 case ANEG_STATE_UNKNOWN:
5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5206 ap->state = ANEG_STATE_AN_ENABLE;
5207
5208 /* fallthru */
5209 case ANEG_STATE_AN_ENABLE:
5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5211 if (ap->flags & MR_AN_ENABLE) {
5212 ap->link_time = 0;
5213 ap->cur_time = 0;
5214 ap->ability_match_cfg = 0;
5215 ap->ability_match_count = 0;
5216 ap->ability_match = 0;
5217 ap->idle_match = 0;
5218 ap->ack_match = 0;
5219
5220 ap->state = ANEG_STATE_RESTART_INIT;
5221 } else {
5222 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5223 }
5224 break;
5225
5226 case ANEG_STATE_RESTART_INIT:
5227 ap->link_time = ap->cur_time;
5228 ap->flags &= ~(MR_NP_LOADED);
5229 ap->txconfig = 0;
5230 tw32(MAC_TX_AUTO_NEG, 0);
5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5232 tw32_f(MAC_MODE, tp->mac_mode);
5233 udelay(40);
5234
5235 ret = ANEG_TIMER_ENAB;
5236 ap->state = ANEG_STATE_RESTART;
5237
5238 /* fallthru */
5239 case ANEG_STATE_RESTART:
5240 delta = ap->cur_time - ap->link_time;
5241 if (delta > ANEG_STATE_SETTLE_TIME)
5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5243 else
5244 ret = ANEG_TIMER_ENAB;
5245 break;
5246
5247 case ANEG_STATE_DISABLE_LINK_OK:
5248 ret = ANEG_DONE;
5249 break;
5250
5251 case ANEG_STATE_ABILITY_DETECT_INIT:
5252 ap->flags &= ~(MR_TOGGLE_TX);
5253 ap->txconfig = ANEG_CFG_FD;
5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5255 if (flowctrl & ADVERTISE_1000XPAUSE)
5256 ap->txconfig |= ANEG_CFG_PS1;
5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5258 ap->txconfig |= ANEG_CFG_PS2;
5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5261 tw32_f(MAC_MODE, tp->mac_mode);
5262 udelay(40);
5263
5264 ap->state = ANEG_STATE_ABILITY_DETECT;
5265 break;
5266
5267 case ANEG_STATE_ABILITY_DETECT:
5268 if (ap->ability_match != 0 && ap->rxconfig != 0)
5269 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5270 break;
5271
5272 case ANEG_STATE_ACK_DETECT_INIT:
5273 ap->txconfig |= ANEG_CFG_ACK;
5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5276 tw32_f(MAC_MODE, tp->mac_mode);
5277 udelay(40);
5278
5279 ap->state = ANEG_STATE_ACK_DETECT;
5280
5281 /* fallthru */
5282 case ANEG_STATE_ACK_DETECT:
5283 if (ap->ack_match != 0) {
5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5287 } else {
5288 ap->state = ANEG_STATE_AN_ENABLE;
5289 }
5290 } else if (ap->ability_match != 0 &&
5291 ap->rxconfig == 0) {
5292 ap->state = ANEG_STATE_AN_ENABLE;
5293 }
5294 break;
5295
5296 case ANEG_STATE_COMPLETE_ACK_INIT:
5297 if (ap->rxconfig & ANEG_CFG_INVAL) {
5298 ret = ANEG_FAILED;
5299 break;
5300 }
5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5302 MR_LP_ADV_HALF_DUPLEX |
5303 MR_LP_ADV_SYM_PAUSE |
5304 MR_LP_ADV_ASYM_PAUSE |
5305 MR_LP_ADV_REMOTE_FAULT1 |
5306 MR_LP_ADV_REMOTE_FAULT2 |
5307 MR_LP_ADV_NEXT_PAGE |
5308 MR_TOGGLE_RX |
5309 MR_NP_RX);
5310 if (ap->rxconfig & ANEG_CFG_FD)
5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5312 if (ap->rxconfig & ANEG_CFG_HD)
5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5314 if (ap->rxconfig & ANEG_CFG_PS1)
5315 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5316 if (ap->rxconfig & ANEG_CFG_PS2)
5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5318 if (ap->rxconfig & ANEG_CFG_RF1)
5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5320 if (ap->rxconfig & ANEG_CFG_RF2)
5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5322 if (ap->rxconfig & ANEG_CFG_NP)
5323 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5324
5325 ap->link_time = ap->cur_time;
5326
5327 ap->flags ^= (MR_TOGGLE_TX);
5328 if (ap->rxconfig & 0x0008)
5329 ap->flags |= MR_TOGGLE_RX;
5330 if (ap->rxconfig & ANEG_CFG_NP)
5331 ap->flags |= MR_NP_RX;
5332 ap->flags |= MR_PAGE_RX;
5333
5334 ap->state = ANEG_STATE_COMPLETE_ACK;
5335 ret = ANEG_TIMER_ENAB;
5336 break;
5337
5338 case ANEG_STATE_COMPLETE_ACK:
5339 if (ap->ability_match != 0 &&
5340 ap->rxconfig == 0) {
5341 ap->state = ANEG_STATE_AN_ENABLE;
5342 break;
5343 }
5344 delta = ap->cur_time - ap->link_time;
5345 if (delta > ANEG_STATE_SETTLE_TIME) {
5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5348 } else {
5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5350 !(ap->flags & MR_NP_RX)) {
5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352 } else {
5353 ret = ANEG_FAILED;
5354 }
5355 }
5356 }
5357 break;
5358
5359 case ANEG_STATE_IDLE_DETECT_INIT:
5360 ap->link_time = ap->cur_time;
5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5362 tw32_f(MAC_MODE, tp->mac_mode);
5363 udelay(40);
5364
5365 ap->state = ANEG_STATE_IDLE_DETECT;
5366 ret = ANEG_TIMER_ENAB;
5367 break;
5368
5369 case ANEG_STATE_IDLE_DETECT:
5370 if (ap->ability_match != 0 &&
5371 ap->rxconfig == 0) {
5372 ap->state = ANEG_STATE_AN_ENABLE;
5373 break;
5374 }
5375 delta = ap->cur_time - ap->link_time;
5376 if (delta > ANEG_STATE_SETTLE_TIME) {
5377 /* XXX another gem from the Broadcom driver :( */
5378 ap->state = ANEG_STATE_LINK_OK;
5379 }
5380 break;
5381
5382 case ANEG_STATE_LINK_OK:
5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5384 ret = ANEG_DONE;
5385 break;
5386
5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5388 /* ??? unimplemented */
5389 break;
5390
5391 case ANEG_STATE_NEXT_PAGE_WAIT:
5392 /* ??? unimplemented */
5393 break;
5394
5395 default:
5396 ret = ANEG_FAILED;
5397 break;
5398 }
5399
5400 return ret;
5401 }
5402
5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5404 {
5405 int res = 0;
5406 struct tg3_fiber_aneginfo aninfo;
5407 int status = ANEG_FAILED;
5408 unsigned int tick;
5409 u32 tmp;
5410
5411 tw32_f(MAC_TX_AUTO_NEG, 0);
5412
5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5415 udelay(40);
5416
5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5418 udelay(40);
5419
5420 memset(&aninfo, 0, sizeof(aninfo));
5421 aninfo.flags |= MR_AN_ENABLE;
5422 aninfo.state = ANEG_STATE_UNKNOWN;
5423 aninfo.cur_time = 0;
5424 tick = 0;
5425 while (++tick < 195000) {
5426 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5427 if (status == ANEG_DONE || status == ANEG_FAILED)
5428 break;
5429
5430 udelay(1);
5431 }
5432
5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5434 tw32_f(MAC_MODE, tp->mac_mode);
5435 udelay(40);
5436
5437 *txflags = aninfo.txconfig;
5438 *rxflags = aninfo.flags;
5439
5440 if (status == ANEG_DONE &&
5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5442 MR_LP_ADV_FULL_DUPLEX)))
5443 res = 1;
5444
5445 return res;
5446 }
5447
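/* The polling budget above (195000 ticks at ~1 us each) bounds
 * software autoneg at roughly 195 ms, comfortably more than the two
 * ~10 ms ANEG_STATE_SETTLE_TIME windows the state machine needs on
 * its happy path.
 */
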
5448 static void tg3_init_bcm8002(struct tg3 *tp)
5449 {
5450 u32 mac_status = tr32(MAC_STATUS);
5451 int i;
5452
5453 	/* Reset when initializing for the first time, or when we have a link. */
5454 if (tg3_flag(tp, INIT_COMPLETE) &&
5455 !(mac_status & MAC_STATUS_PCS_SYNCED))
5456 return;
5457
5458 /* Set PLL lock range. */
5459 tg3_writephy(tp, 0x16, 0x8007);
5460
5461 /* SW reset */
5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5463
5464 /* Wait for reset to complete. */
5465 /* XXX schedule_timeout() ... */
5466 for (i = 0; i < 500; i++)
5467 udelay(10);
5468
5469 /* Config mode; select PMA/Ch 1 regs. */
5470 tg3_writephy(tp, 0x10, 0x8411);
5471
5472 /* Enable auto-lock and comdet, select txclk for tx. */
5473 tg3_writephy(tp, 0x11, 0x0a10);
5474
5475 tg3_writephy(tp, 0x18, 0x00a0);
5476 tg3_writephy(tp, 0x16, 0x41ff);
5477
5478 /* Assert and deassert POR. */
5479 tg3_writephy(tp, 0x13, 0x0400);
5480 udelay(40);
5481 tg3_writephy(tp, 0x13, 0x0000);
5482
5483 tg3_writephy(tp, 0x11, 0x0a50);
5484 udelay(40);
5485 tg3_writephy(tp, 0x11, 0x0a10);
5486
5487 /* Wait for signal to stabilize */
5488 /* XXX schedule_timeout() ... */
5489 for (i = 0; i < 15000; i++)
5490 udelay(10);
5491
5492 /* Deselect the channel register so we can read the PHYID
5493 * later.
5494 */
5495 tg3_writephy(tp, 0x10, 0x8011);
5496 }
5497
5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5499 {
5500 u16 flowctrl;
5501 bool current_link_up;
5502 u32 sg_dig_ctrl, sg_dig_status;
5503 u32 serdes_cfg, expected_sg_dig_ctrl;
5504 int workaround, port_a;
5505
5506 serdes_cfg = 0;
5507 expected_sg_dig_ctrl = 0;
5508 workaround = 0;
5509 port_a = 1;
5510 current_link_up = false;
5511
5512 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5513 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5514 workaround = 1;
5515 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5516 port_a = 0;
5517
5518 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5519 /* preserve bits 20-23 for voltage regulator */
5520 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5521 }
5522
5523 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5524
5525 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5526 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5527 if (workaround) {
5528 u32 val = serdes_cfg;
5529
5530 if (port_a)
5531 val |= 0xc010000;
5532 else
5533 val |= 0x4010000;
5534 tw32_f(MAC_SERDES_CFG, val);
5535 }
5536
5537 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5538 }
5539 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5540 tg3_setup_flow_control(tp, 0, 0);
5541 current_link_up = true;
5542 }
5543 goto out;
5544 }
5545
5546 /* Want auto-negotiation. */
5547 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5548
5549 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5550 if (flowctrl & ADVERTISE_1000XPAUSE)
5551 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5552 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5553 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5554
5555 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5556 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5557 tp->serdes_counter &&
5558 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5559 MAC_STATUS_RCVD_CFG)) ==
5560 MAC_STATUS_PCS_SYNCED)) {
5561 tp->serdes_counter--;
5562 current_link_up = true;
5563 goto out;
5564 }
5565 restart_autoneg:
5566 if (workaround)
5567 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5568 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5569 udelay(5);
5570 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5571
5572 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5573 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5574 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5575 MAC_STATUS_SIGNAL_DET)) {
5576 sg_dig_status = tr32(SG_DIG_STATUS);
5577 mac_status = tr32(MAC_STATUS);
5578
5579 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5580 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5581 u32 local_adv = 0, remote_adv = 0;
5582
5583 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5584 local_adv |= ADVERTISE_1000XPAUSE;
5585 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5586 local_adv |= ADVERTISE_1000XPSE_ASYM;
5587
5588 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5589 remote_adv |= LPA_1000XPAUSE;
5590 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5591 remote_adv |= LPA_1000XPAUSE_ASYM;
5592
5593 tp->link_config.rmt_adv =
5594 mii_adv_to_ethtool_adv_x(remote_adv);
5595
5596 tg3_setup_flow_control(tp, local_adv, remote_adv);
5597 current_link_up = true;
5598 tp->serdes_counter = 0;
5599 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5600 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5601 if (tp->serdes_counter)
5602 tp->serdes_counter--;
5603 else {
5604 if (workaround) {
5605 u32 val = serdes_cfg;
5606
5607 if (port_a)
5608 val |= 0xc010000;
5609 else
5610 val |= 0x4010000;
5611
5612 tw32_f(MAC_SERDES_CFG, val);
5613 }
5614
5615 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5616 udelay(40);
5617
5618 				/* Link parallel detection: link is up
5619 				 * only if we have PCS_SYNC and are not
5620 				 * receiving config code words. */
5621 mac_status = tr32(MAC_STATUS);
5622 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5623 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5624 tg3_setup_flow_control(tp, 0, 0);
5625 current_link_up = true;
5626 tp->phy_flags |=
5627 TG3_PHYFLG_PARALLEL_DETECT;
5628 tp->serdes_counter =
5629 SERDES_PARALLEL_DET_TIMEOUT;
5630 } else
5631 goto restart_autoneg;
5632 }
5633 }
5634 } else {
5635 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5636 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5637 }
5638
5639 out:
5640 return current_link_up;
5641 }
5642
5643 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5644 {
5645 bool current_link_up = false;
5646
5647 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5648 goto out;
5649
5650 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5651 u32 txflags, rxflags;
5652 int i;
5653
5654 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5655 u32 local_adv = 0, remote_adv = 0;
5656
5657 if (txflags & ANEG_CFG_PS1)
5658 local_adv |= ADVERTISE_1000XPAUSE;
5659 if (txflags & ANEG_CFG_PS2)
5660 local_adv |= ADVERTISE_1000XPSE_ASYM;
5661
5662 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5663 remote_adv |= LPA_1000XPAUSE;
5664 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5665 remote_adv |= LPA_1000XPAUSE_ASYM;
5666
5667 tp->link_config.rmt_adv =
5668 mii_adv_to_ethtool_adv_x(remote_adv);
5669
5670 tg3_setup_flow_control(tp, local_adv, remote_adv);
5671
5672 current_link_up = true;
5673 }
5674 for (i = 0; i < 30; i++) {
5675 udelay(20);
5676 tw32_f(MAC_STATUS,
5677 (MAC_STATUS_SYNC_CHANGED |
5678 MAC_STATUS_CFG_CHANGED));
5679 udelay(40);
5680 if ((tr32(MAC_STATUS) &
5681 (MAC_STATUS_SYNC_CHANGED |
5682 MAC_STATUS_CFG_CHANGED)) == 0)
5683 break;
5684 }
5685
5686 mac_status = tr32(MAC_STATUS);
5687 if (!current_link_up &&
5688 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5689 !(mac_status & MAC_STATUS_RCVD_CFG))
5690 current_link_up = true;
5691 } else {
5692 tg3_setup_flow_control(tp, 0, 0);
5693
5694 /* Forcing 1000FD link up. */
5695 current_link_up = true;
5696
5697 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5698 udelay(40);
5699
5700 tw32_f(MAC_MODE, tp->mac_mode);
5701 udelay(40);
5702 }
5703
5704 out:
5705 return current_link_up;
5706 }
5707
5708 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5709 {
5710 u32 orig_pause_cfg;
5711 u16 orig_active_speed;
5712 u8 orig_active_duplex;
5713 u32 mac_status;
5714 bool current_link_up;
5715 int i;
5716
5717 orig_pause_cfg = tp->link_config.active_flowctrl;
5718 orig_active_speed = tp->link_config.active_speed;
5719 orig_active_duplex = tp->link_config.active_duplex;
5720
5721 if (!tg3_flag(tp, HW_AUTONEG) &&
5722 tp->link_up &&
5723 tg3_flag(tp, INIT_COMPLETE)) {
5724 mac_status = tr32(MAC_STATUS);
5725 mac_status &= (MAC_STATUS_PCS_SYNCED |
5726 MAC_STATUS_SIGNAL_DET |
5727 MAC_STATUS_CFG_CHANGED |
5728 MAC_STATUS_RCVD_CFG);
5729 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5730 MAC_STATUS_SIGNAL_DET)) {
5731 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5732 MAC_STATUS_CFG_CHANGED));
5733 return 0;
5734 }
5735 }
5736
5737 tw32_f(MAC_TX_AUTO_NEG, 0);
5738
5739 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5740 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5741 tw32_f(MAC_MODE, tp->mac_mode);
5742 udelay(40);
5743
5744 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5745 tg3_init_bcm8002(tp);
5746
5747 	/* Enable link change events even while serdes polling is in use. */
5748 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5749 udelay(40);
5750
5751 current_link_up = false;
5752 tp->link_config.rmt_adv = 0;
5753 mac_status = tr32(MAC_STATUS);
5754
5755 if (tg3_flag(tp, HW_AUTONEG))
5756 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5757 else
5758 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5759
5760 tp->napi[0].hw_status->status =
5761 (SD_STATUS_UPDATED |
5762 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5763
5764 for (i = 0; i < 100; i++) {
5765 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5766 MAC_STATUS_CFG_CHANGED));
5767 udelay(5);
5768 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5769 MAC_STATUS_CFG_CHANGED |
5770 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5771 break;
5772 }
5773
5774 mac_status = tr32(MAC_STATUS);
5775 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5776 current_link_up = false;
5777 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5778 tp->serdes_counter == 0) {
5779 tw32_f(MAC_MODE, (tp->mac_mode |
5780 MAC_MODE_SEND_CONFIGS));
5781 udelay(1);
5782 tw32_f(MAC_MODE, tp->mac_mode);
5783 }
5784 }
5785
5786 if (current_link_up) {
5787 tp->link_config.active_speed = SPEED_1000;
5788 tp->link_config.active_duplex = DUPLEX_FULL;
5789 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5790 LED_CTRL_LNKLED_OVERRIDE |
5791 LED_CTRL_1000MBPS_ON));
5792 } else {
5793 tp->link_config.active_speed = SPEED_UNKNOWN;
5794 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5795 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5796 LED_CTRL_LNKLED_OVERRIDE |
5797 LED_CTRL_TRAFFIC_OVERRIDE));
5798 }
5799
5800 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5801 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5802 if (orig_pause_cfg != now_pause_cfg ||
5803 orig_active_speed != tp->link_config.active_speed ||
5804 orig_active_duplex != tp->link_config.active_duplex)
5805 tg3_link_report(tp);
5806 }
5807
5808 return 0;
5809 }
5810
5811 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5812 {
5813 int err = 0;
5814 u32 bmsr, bmcr;
5815 u16 current_speed = SPEED_UNKNOWN;
5816 u8 current_duplex = DUPLEX_UNKNOWN;
5817 bool current_link_up = false;
5818 u32 local_adv, remote_adv, sgsr;
5819
5820 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5821 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5822 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5823 (sgsr & SERDES_TG3_SGMII_MODE)) {
5824
5825 if (force_reset)
5826 tg3_phy_reset(tp);
5827
5828 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5829
5830 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5831 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5832 } else {
5833 current_link_up = true;
5834 if (sgsr & SERDES_TG3_SPEED_1000) {
5835 current_speed = SPEED_1000;
5836 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5837 } else if (sgsr & SERDES_TG3_SPEED_100) {
5838 current_speed = SPEED_100;
5839 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5840 } else {
5841 current_speed = SPEED_10;
5842 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5843 }
5844
5845 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5846 current_duplex = DUPLEX_FULL;
5847 else
5848 current_duplex = DUPLEX_HALF;
5849 }
5850
5851 tw32_f(MAC_MODE, tp->mac_mode);
5852 udelay(40);
5853
5854 tg3_clear_mac_status(tp);
5855
5856 goto fiber_setup_done;
5857 }
5858
5859 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5860 tw32_f(MAC_MODE, tp->mac_mode);
5861 udelay(40);
5862
5863 tg3_clear_mac_status(tp);
5864
5865 if (force_reset)
5866 tg3_phy_reset(tp);
5867
5868 tp->link_config.rmt_adv = 0;
5869
5870 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5871 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5872 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5873 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5874 bmsr |= BMSR_LSTATUS;
5875 else
5876 bmsr &= ~BMSR_LSTATUS;
5877 }
5878
5879 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5880
5881 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5882 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5883 /* do nothing, just check for link up at the end */
5884 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5885 u32 adv, newadv;
5886
5887 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5888 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5889 ADVERTISE_1000XPAUSE |
5890 ADVERTISE_1000XPSE_ASYM |
5891 ADVERTISE_SLCT);
5892
5893 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5894 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5895
5896 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5897 tg3_writephy(tp, MII_ADVERTISE, newadv);
5898 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5899 tg3_writephy(tp, MII_BMCR, bmcr);
5900
5901 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5902 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5903 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5904
5905 return err;
5906 }
5907 } else {
5908 u32 new_bmcr;
5909
5910 bmcr &= ~BMCR_SPEED1000;
5911 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5912
5913 if (tp->link_config.duplex == DUPLEX_FULL)
5914 new_bmcr |= BMCR_FULLDPLX;
5915
5916 if (new_bmcr != bmcr) {
5917 /* BMCR_SPEED1000 is a reserved bit that needs
5918 * to be set on write.
5919 */
5920 new_bmcr |= BMCR_SPEED1000;
5921
5922 /* Force a linkdown */
5923 if (tp->link_up) {
5924 u32 adv;
5925
5926 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5927 adv &= ~(ADVERTISE_1000XFULL |
5928 ADVERTISE_1000XHALF |
5929 ADVERTISE_SLCT);
5930 tg3_writephy(tp, MII_ADVERTISE, adv);
5931 tg3_writephy(tp, MII_BMCR, bmcr |
5932 BMCR_ANRESTART |
5933 BMCR_ANENABLE);
5934 udelay(10);
5935 tg3_carrier_off(tp);
5936 }
5937 tg3_writephy(tp, MII_BMCR, new_bmcr);
5938 bmcr = new_bmcr;
5939 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5940 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5941 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5942 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5943 bmsr |= BMSR_LSTATUS;
5944 else
5945 bmsr &= ~BMSR_LSTATUS;
5946 }
5947 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5948 }
5949 }
5950
5951 if (bmsr & BMSR_LSTATUS) {
5952 current_speed = SPEED_1000;
5953 current_link_up = true;
5954 if (bmcr & BMCR_FULLDPLX)
5955 current_duplex = DUPLEX_FULL;
5956 else
5957 current_duplex = DUPLEX_HALF;
5958
5959 local_adv = 0;
5960 remote_adv = 0;
5961
5962 if (bmcr & BMCR_ANENABLE) {
5963 u32 common;
5964
5965 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5966 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5967 common = local_adv & remote_adv;
5968 if (common & (ADVERTISE_1000XHALF |
5969 ADVERTISE_1000XFULL)) {
5970 if (common & ADVERTISE_1000XFULL)
5971 current_duplex = DUPLEX_FULL;
5972 else
5973 current_duplex = DUPLEX_HALF;
5974
5975 tp->link_config.rmt_adv =
5976 mii_adv_to_ethtool_adv_x(remote_adv);
5977 } else if (!tg3_flag(tp, 5780_CLASS)) {
5978 /* Link is up via parallel detect */
5979 } else {
5980 current_link_up = false;
5981 }
5982 }
5983 }
5984
5985 fiber_setup_done:
5986 if (current_link_up && current_duplex == DUPLEX_FULL)
5987 tg3_setup_flow_control(tp, local_adv, remote_adv);
5988
5989 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5990 if (tp->link_config.active_duplex == DUPLEX_HALF)
5991 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5992
5993 tw32_f(MAC_MODE, tp->mac_mode);
5994 udelay(40);
5995
5996 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5997
5998 tp->link_config.active_speed = current_speed;
5999 tp->link_config.active_duplex = current_duplex;
6000
6001 tg3_test_and_report_link_chg(tp, current_link_up);
6002 return err;
6003 }
6004
6005 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6006 {
6007 if (tp->serdes_counter) {
6008 /* Give autoneg time to complete. */
6009 tp->serdes_counter--;
6010 return;
6011 }
6012
6013 if (!tp->link_up &&
6014 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6015 u32 bmcr;
6016
6017 tg3_readphy(tp, MII_BMCR, &bmcr);
6018 if (bmcr & BMCR_ANENABLE) {
6019 u32 phy1, phy2;
6020
6021 /* Select shadow register 0x1f */
6022 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6023 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6024
6025 /* Select expansion interrupt status register */
6026 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6027 MII_TG3_DSP_EXP1_INT_STAT);
6028 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6029 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6030
6031 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6032 /* We have signal detect and are not receiving
6033 * config code words; the link is up via parallel
6034 * detection.
6035 */
6036
6037 bmcr &= ~BMCR_ANENABLE;
6038 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6039 tg3_writephy(tp, MII_BMCR, bmcr);
6040 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6041 }
6042 }
6043 } else if (tp->link_up &&
6044 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6045 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6046 u32 phy2;
6047
6048 /* Select expansion interrupt status register */
6049 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6050 MII_TG3_DSP_EXP1_INT_STAT);
6051 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6052 if (phy2 & 0x20) {
6053 u32 bmcr;
6054
6055 /* Config code words received, turn on autoneg. */
6056 tg3_readphy(tp, MII_BMCR, &bmcr);
6057 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6058
6059 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6061 }
6062 }
6063 }
6064
6065 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6066 {
6067 u32 val;
6068 int err;
6069
6070 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6071 err = tg3_setup_fiber_phy(tp, force_reset);
6072 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6073 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6074 else
6075 err = tg3_setup_copper_phy(tp, force_reset);
6076
6077 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6078 u32 scale;
6079
6080 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6081 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6082 scale = 65;
6083 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6084 scale = 6;
6085 else
6086 scale = 12;
6087
6088 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6089 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6090 tw32(GRC_MISC_CFG, val);
6091 }
6092
6093 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6094 (6 << TX_LENGTHS_IPG_SHIFT);
6095 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6096 tg3_asic_rev(tp) == ASIC_REV_5762)
6097 val |= tr32(MAC_TX_LENGTHS) &
6098 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6099 TX_LENGTHS_CNT_DWN_VAL_MSK);
6100
6101 if (tp->link_config.active_speed == SPEED_1000 &&
6102 tp->link_config.active_duplex == DUPLEX_HALF)
6103 tw32(MAC_TX_LENGTHS, val |
6104 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6105 else
6106 tw32(MAC_TX_LENGTHS, val |
6107 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6108
6109 if (!tg3_flag(tp, 5705_PLUS)) {
6110 if (tp->link_up) {
6111 tw32(HOSTCC_STAT_COAL_TICKS,
6112 tp->coal.stats_block_coalesce_usecs);
6113 } else {
6114 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6115 }
6116 }
6117
6118 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6119 val = tr32(PCIE_PWR_MGMT_THRESH);
6120 if (!tp->link_up)
6121 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6122 tp->pwrmgmt_thresh;
6123 else
6124 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6125 tw32(PCIE_PWR_MGMT_THRESH, val);
6126 }
6127
6128 return err;
6129 }
6130
6131 /* tp->lock must be held */
6132 static u64 tg3_refclk_read(struct tg3 *tp)
6133 {
6134 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6135 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6136 }
6137
6138 /* tp->lock must be held */
6139 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6140 {
6141 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6142
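/* Presumably the STOP/RESUME bracket below keeps the counter from
 * advancing between the two 32-bit halves of the write, so the chip
 * loads the 64-bit value atomically from its point of view (an
 * inference from the register names, not stated in the source).
 */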
6143 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6144 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6145 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6146 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6147 }
6148
6149 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6150 static inline void tg3_full_unlock(struct tg3 *tp);
6151 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6152 {
6153 struct tg3 *tp = netdev_priv(dev);
6154
6155 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6156 SOF_TIMESTAMPING_RX_SOFTWARE |
6157 SOF_TIMESTAMPING_SOFTWARE;
6158
6159 if (tg3_flag(tp, PTP_CAPABLE)) {
6160 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6161 SOF_TIMESTAMPING_RX_HARDWARE |
6162 SOF_TIMESTAMPING_RAW_HARDWARE;
6163 }
6164
6165 if (tp->ptp_clock)
6166 info->phc_index = ptp_clock_index(tp->ptp_clock);
6167 else
6168 info->phc_index = -1;
6169
6170 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6171
6172 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6173 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6174 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6175 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6176 return 0;
6177 }
6178
6179 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6180 {
6181 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6182 bool neg_adj = false;
6183 u32 correction = 0;
6184
6185 if (ppb < 0) {
6186 neg_adj = true;
6187 ppb = -ppb;
6188 }
6189
6190 /* Frequency adjustment is performed using hardware with a 24-bit
6191 * accumulator and a programmable correction value. On each clock, the
6192 * correction value gets added to the accumulator and, when it
6193 * overflows, the time counter is incremented/decremented.
6194 *
6195 * So the conversion from ppb to a correction value is
6196 * ppb * (1 << 24) / 1000000000
6197 */
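/* Worked example with illustrative numbers: ppb = 5000 (i.e. +5 ppm)
 * gives correction = 5000 * (1 << 24) / 1000000000 = 83, so the
 * accumulator overflows on roughly 83 of every 2^24 clocks, nudging
 * the counter by about 5 ppm as requested.
 */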
6198 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6199 TG3_EAV_REF_CLK_CORRECT_MASK;
6200
6201 tg3_full_lock(tp, 0);
6202
6203 if (correction)
6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6205 TG3_EAV_REF_CLK_CORRECT_EN |
6206 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6207 else
6208 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6209
6210 tg3_full_unlock(tp);
6211
6212 return 0;
6213 }
6214
6215 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6216 {
6217 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6218
6219 tg3_full_lock(tp, 0);
6220 tp->ptp_adjust += delta;
6221 tg3_full_unlock(tp);
6222
6223 return 0;
6224 }
6225
6226 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6227 {
6228 u64 ns;
6229 u32 remainder;
6230 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6231
6232 tg3_full_lock(tp, 0);
6233 ns = tg3_refclk_read(tp);
6234 ns += tp->ptp_adjust;
6235 tg3_full_unlock(tp);
6236
6237 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6238 ts->tv_nsec = remainder;
6239
6240 return 0;
6241 }
6242
6243 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6244 const struct timespec *ts)
6245 {
6246 u64 ns;
6247 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6248
6249 ns = timespec_to_ns(ts);
6250
6251 tg3_full_lock(tp, 0);
6252 tg3_refclk_write(tp, ns);
6253 tp->ptp_adjust = 0;
6254 tg3_full_unlock(tp);
6255
6256 return 0;
6257 }
6258
6259 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6260 struct ptp_clock_request *rq, int on)
6261 {
6262 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6263 u32 clock_ctl;
6264 int rval = 0;
6265
6266 switch (rq->type) {
6267 case PTP_CLK_REQ_PEROUT:
6268 if (rq->perout.index != 0)
6269 return -EINVAL;
6270
6271 tg3_full_lock(tp, 0);
6272 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6273 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6274
6275 if (on) {
6276 u64 nsec;
6277
6278 nsec = rq->perout.start.sec * 1000000000ULL +
6279 rq->perout.start.nsec;
6280
6281 if (rq->perout.period.sec || rq->perout.period.nsec) {
6282 netdev_warn(tp->dev,
6283 "Device supports only a one-shot timesync output, period must be 0\n");
6284 rval = -EINVAL;
6285 goto err_out;
6286 }
6287
6288 if (nsec & (1ULL << 63)) {
6289 netdev_warn(tp->dev,
6290 "Start value (nsec) is over the limit; start must fit in 63 bits\n");
6291 rval = -EINVAL;
6292 goto err_out;
6293 }
6294
6295 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6296 tw32(TG3_EAV_WATCHDOG0_MSB,
6297 TG3_EAV_WATCHDOG0_EN |
6298 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6299
6300 tw32(TG3_EAV_REF_CLCK_CTL,
6301 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6302 } else {
6303 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6304 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6305 }
6306
6307 err_out:
6308 tg3_full_unlock(tp);
6309 return rval;
6310
6311 default:
6312 break;
6313 }
6314
6315 return -EOPNOTSUPP;
6316 }
6317
6318 static const struct ptp_clock_info tg3_ptp_caps = {
6319 .owner = THIS_MODULE,
6320 .name = "tg3 clock",
6321 .max_adj = 250000000,
6322 .n_alarm = 0,
6323 .n_ext_ts = 0,
6324 .n_per_out = 1,
6325 .pps = 0,
6326 .adjfreq = tg3_ptp_adjfreq,
6327 .adjtime = tg3_ptp_adjtime,
6328 .gettime = tg3_ptp_gettime,
6329 .settime = tg3_ptp_settime,
6330 .enable = tg3_ptp_enable,
6331 };
6332
6333 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6334 struct skb_shared_hwtstamps *timestamp)
6335 {
6336 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6337 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6338 tp->ptp_adjust);
6339 }
6340
6341 /* tp->lock must be held */
6342 static void tg3_ptp_init(struct tg3 *tp)
6343 {
6344 if (!tg3_flag(tp, PTP_CAPABLE))
6345 return;
6346
6347 /* Initialize the hardware clock to the system time. */
6348 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6349 tp->ptp_adjust = 0;
6350 tp->ptp_info = tg3_ptp_caps;
6351 }
6352
6353 /* tp->lock must be held */
6354 static void tg3_ptp_resume(struct tg3 *tp)
6355 {
6356 if (!tg3_flag(tp, PTP_CAPABLE))
6357 return;
6358
6359 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6360 tp->ptp_adjust = 0;
6361 }
6362
6363 static void tg3_ptp_fini(struct tg3 *tp)
6364 {
6365 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6366 return;
6367
6368 ptp_clock_unregister(tp->ptp_clock);
6369 tp->ptp_clock = NULL;
6370 tp->ptp_adjust = 0;
6371 }
6372
6373 static inline int tg3_irq_sync(struct tg3 *tp)
6374 {
6375 return tp->irq_sync;
6376 }
6377
6378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6379 {
6380 int i;
6381
6382 dst = (u32 *)((u8 *)dst + off);
6383 for (i = 0; i < len; i += sizeof(u32))
6384 *dst++ = tr32(off + i);
6385 }
6386
6387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6388 {
6389 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6390 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6391 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6392 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6393 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6394 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6395 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6396 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6397 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6398 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6399 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6400 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6401 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6402 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6403 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6404 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6405 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6406 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6407 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6408
6409 if (tg3_flag(tp, SUPPORT_MSIX))
6410 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6411
6412 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6413 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6414 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6415 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6416 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6417 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6418 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6419 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6420
6421 if (!tg3_flag(tp, 5705_PLUS)) {
6422 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6423 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6424 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6425 }
6426
6427 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6428 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6429 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6430 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6431 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6432
6433 if (tg3_flag(tp, NVRAM))
6434 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6435 }
6436
6437 static void tg3_dump_state(struct tg3 *tp)
6438 {
6439 int i;
6440 u32 *regs;
6441
6442 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6443 if (!regs)
6444 return;
6445
6446 if (tg3_flag(tp, PCI_EXPRESS)) {
6447 /* Read up to but not including private PCI registers */
6448 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6449 regs[i / sizeof(u32)] = tr32(i);
6450 } else
6451 tg3_dump_legacy_regs(tp, regs);
6452
6453 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6454 if (!regs[i + 0] && !regs[i + 1] &&
6455 !regs[i + 2] && !regs[i + 3])
6456 continue;
6457
6458 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6459 i * 4,
6460 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6461 }
6462
6463 kfree(regs);
6464
6465 for (i = 0; i < tp->irq_cnt; i++) {
6466 struct tg3_napi *tnapi = &tp->napi[i];
6467
6468 /* SW status block */
6469 netdev_err(tp->dev,
6470 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6471 i,
6472 tnapi->hw_status->status,
6473 tnapi->hw_status->status_tag,
6474 tnapi->hw_status->rx_jumbo_consumer,
6475 tnapi->hw_status->rx_consumer,
6476 tnapi->hw_status->rx_mini_consumer,
6477 tnapi->hw_status->idx[0].rx_producer,
6478 tnapi->hw_status->idx[0].tx_consumer);
6479
6480 netdev_err(tp->dev,
6481 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6482 i,
6483 tnapi->last_tag, tnapi->last_irq_tag,
6484 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6485 tnapi->rx_rcb_ptr,
6486 tnapi->prodring.rx_std_prod_idx,
6487 tnapi->prodring.rx_std_cons_idx,
6488 tnapi->prodring.rx_jmb_prod_idx,
6489 tnapi->prodring.rx_jmb_cons_idx);
6490 }
6491 }
6492
6493 /* This is called whenever we suspect that the system chipset is re-
6494 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6495 * is bogus tx completions. We try to recover by setting the
6496 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6497 * in the workqueue.
6498 */
6499 static void tg3_tx_recover(struct tg3 *tp)
6500 {
6501 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6502 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6503
6504 netdev_warn(tp->dev,
6505 "The system may be re-ordering memory-mapped I/O "
6506 "cycles to the network device, attempting to recover. "
6507 "Please report the problem to the driver maintainer "
6508 "and include system chipset information.\n");
6509
6510 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6511 }
6512
6513 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6514 {
6515 /* Tell compiler to fetch tx indices from memory. */
6516 barrier();
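/* The ring occupancy is (tx_prod - tx_cons) modulo the ring size.
 * Illustrative numbers: with TG3_TX_RING_SIZE = 512, tx_prod = 5 and
 * tx_cons = 510, (5 - 510) & 511 = 7 descriptors are still in flight,
 * so 7 is subtracted from tx_pending.
 */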
6517 return tnapi->tx_pending -
6518 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6519 }
6520
6521 /* Tigon3 never reports partial packet sends. So we do not
6522 * need special logic to handle SKBs that have not had all
6523 * of their frags sent yet, like SunGEM does.
6524 */
6525 static void tg3_tx(struct tg3_napi *tnapi)
6526 {
6527 struct tg3 *tp = tnapi->tp;
6528 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6529 u32 sw_idx = tnapi->tx_cons;
6530 struct netdev_queue *txq;
6531 int index = tnapi - tp->napi;
6532 unsigned int pkts_compl = 0, bytes_compl = 0;
6533
6534 if (tg3_flag(tp, ENABLE_TSS))
6535 index--;
6536
6537 txq = netdev_get_tx_queue(tp->dev, index);
6538
6539 while (sw_idx != hw_idx) {
6540 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6541 struct sk_buff *skb = ri->skb;
6542 int i, tx_bug = 0;
6543
6544 if (unlikely(skb == NULL)) {
6545 tg3_tx_recover(tp);
6546 return;
6547 }
6548
6549 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6550 struct skb_shared_hwtstamps timestamp;
6551 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6552 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6553
6554 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6555
6556 skb_tstamp_tx(skb, &timestamp);
6557 }
6558
6559 pci_unmap_single(tp->pdev,
6560 dma_unmap_addr(ri, mapping),
6561 skb_headlen(skb),
6562 PCI_DMA_TODEVICE);
6563
6564 ri->skb = NULL;
6565
6566 while (ri->fragmented) {
6567 ri->fragmented = false;
6568 sw_idx = NEXT_TX(sw_idx);
6569 ri = &tnapi->tx_buffers[sw_idx];
6570 }
6571
6572 sw_idx = NEXT_TX(sw_idx);
6573
6574 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6575 ri = &tnapi->tx_buffers[sw_idx];
6576 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6577 tx_bug = 1;
6578
6579 pci_unmap_page(tp->pdev,
6580 dma_unmap_addr(ri, mapping),
6581 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6582 PCI_DMA_TODEVICE);
6583
6584 while (ri->fragmented) {
6585 ri->fragmented = false;
6586 sw_idx = NEXT_TX(sw_idx);
6587 ri = &tnapi->tx_buffers[sw_idx];
6588 }
6589
6590 sw_idx = NEXT_TX(sw_idx);
6591 }
6592
6593 pkts_compl++;
6594 bytes_compl += skb->len;
6595
6596 dev_kfree_skb(skb);
6597
6598 if (unlikely(tx_bug)) {
6599 tg3_tx_recover(tp);
6600 return;
6601 }
6602 }
6603
6604 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6605
6606 tnapi->tx_cons = sw_idx;
6607
6608 /* Need to make the tx_cons update visible to tg3_start_xmit()
6609 * before checking for netif_queue_stopped(). Without the
6610 * memory barrier, there is a small possibility that tg3_start_xmit()
6611 * will miss it and cause the queue to be stopped forever.
6612 */
6613 smp_mb();
6614
6615 if (unlikely(netif_tx_queue_stopped(txq) &&
6616 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6617 __netif_tx_lock(txq, smp_processor_id());
6618 if (netif_tx_queue_stopped(txq) &&
6619 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6620 netif_tx_wake_queue(txq);
6621 __netif_tx_unlock(txq);
6622 }
6623 }
6624
6625 static void tg3_frag_free(bool is_frag, void *data)
6626 {
6627 if (is_frag)
6628 put_page(virt_to_head_page(data));
6629 else
6630 kfree(data);
6631 }
6632
6633 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6634 {
6635 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6636 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6637
6638 if (!ri->data)
6639 return;
6640
6641 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6642 map_sz, PCI_DMA_FROMDEVICE);
6643 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6644 ri->data = NULL;
6645 }
6646
6648 /* Returns size of skb allocated or < 0 on error.
6649 *
6650 * We only need to fill in the address because the other members
6651 * of the RX descriptor are invariant, see tg3_init_rings.
6652 *
6653 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6654 * posting buffers we only dirty the first cache line of the RX
6655 * descriptor (containing the address), whereas for the RX status
6656 * buffers the cpu only reads the last cache line of the RX descriptor
6657 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6658 */
6659 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6660 u32 opaque_key, u32 dest_idx_unmasked,
6661 unsigned int *frag_size)
6662 {
6663 struct tg3_rx_buffer_desc *desc;
6664 struct ring_info *map;
6665 u8 *data;
6666 dma_addr_t mapping;
6667 int skb_size, data_size, dest_idx;
6668
6669 switch (opaque_key) {
6670 case RXD_OPAQUE_RING_STD:
6671 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6672 desc = &tpr->rx_std[dest_idx];
6673 map = &tpr->rx_std_buffers[dest_idx];
6674 data_size = tp->rx_pkt_map_sz;
6675 break;
6676
6677 case RXD_OPAQUE_RING_JUMBO:
6678 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6679 desc = &tpr->rx_jmb[dest_idx].std;
6680 map = &tpr->rx_jmb_buffers[dest_idx];
6681 data_size = TG3_RX_JMB_MAP_SZ;
6682 break;
6683
6684 default:
6685 return -EINVAL;
6686 }
6687
6688 /* Do not overwrite any of the map or rp information
6689 * until we are sure we can commit to a new buffer.
6690 *
6691 * Callers depend upon this behavior and assume that
6692 * we leave everything unchanged if we fail.
6693 */
6694 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6695 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
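/* Buffers that fit in a page come from the per-CPU page-frag
 * allocator; larger (e.g. jumbo) buffers fall back to kmalloc.
 * *frag_size records which path was taken so that build_skb() and
 * tg3_frag_free() later choose the matching free path.
 */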
6696 if (skb_size <= PAGE_SIZE) {
6697 data = netdev_alloc_frag(skb_size);
6698 *frag_size = skb_size;
6699 } else {
6700 data = kmalloc(skb_size, GFP_ATOMIC);
6701 *frag_size = 0;
6702 }
6703 if (!data)
6704 return -ENOMEM;
6705
6706 mapping = pci_map_single(tp->pdev,
6707 data + TG3_RX_OFFSET(tp),
6708 data_size,
6709 PCI_DMA_FROMDEVICE);
6710 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6711 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6712 return -EIO;
6713 }
6714
6715 map->data = data;
6716 dma_unmap_addr_set(map, mapping, mapping);
6717
6718 desc->addr_hi = ((u64)mapping >> 32);
6719 desc->addr_lo = ((u64)mapping & 0xffffffff);
6720
6721 return data_size;
6722 }
6723
6724 /* We only need to move over in the address because the other
6725 * members of the RX descriptor are invariant. See notes above
6726 * tg3_alloc_rx_data for full details.
6727 */
6728 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6729 struct tg3_rx_prodring_set *dpr,
6730 u32 opaque_key, int src_idx,
6731 u32 dest_idx_unmasked)
6732 {
6733 struct tg3 *tp = tnapi->tp;
6734 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6735 struct ring_info *src_map, *dest_map;
6736 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6737 int dest_idx;
6738
6739 switch (opaque_key) {
6740 case RXD_OPAQUE_RING_STD:
6741 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6742 dest_desc = &dpr->rx_std[dest_idx];
6743 dest_map = &dpr->rx_std_buffers[dest_idx];
6744 src_desc = &spr->rx_std[src_idx];
6745 src_map = &spr->rx_std_buffers[src_idx];
6746 break;
6747
6748 case RXD_OPAQUE_RING_JUMBO:
6749 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6750 dest_desc = &dpr->rx_jmb[dest_idx].std;
6751 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6752 src_desc = &spr->rx_jmb[src_idx].std;
6753 src_map = &spr->rx_jmb_buffers[src_idx];
6754 break;
6755
6756 default:
6757 return;
6758 }
6759
6760 dest_map->data = src_map->data;
6761 dma_unmap_addr_set(dest_map, mapping,
6762 dma_unmap_addr(src_map, mapping));
6763 dest_desc->addr_hi = src_desc->addr_hi;
6764 dest_desc->addr_lo = src_desc->addr_lo;
6765
6766 /* Ensure that the update to the skb happens after the physical
6767 * addresses have been transferred to the new BD location.
6768 */
6769 smp_wmb();
6770
6771 src_map->data = NULL;
6772 }
6773
6774 /* The RX ring scheme is composed of multiple rings which post fresh
6775 * buffers to the chip, and one special ring the chip uses to report
6776 * status back to the host.
6777 *
6778 * The special ring reports the status of received packets to the
6779 * host. The chip does not write into the original descriptor the
6780 * RX buffer was obtained from. The chip simply takes the original
6781 * descriptor as provided by the host, updates the status and length
6782 * field, then writes this into the next status ring entry.
6783 *
6784 * Each ring the host uses to post buffers to the chip is described
6785 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6786 * it is first placed into the on-chip RAM. When the packet's length
6787 * is known, the chip walks down the TG3_BDINFO entries to select the
6788 * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
6789 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6790 *
6791 * The "separate ring for rx status" scheme may sound odd, but it makes
6792 * sense from a cache coherency perspective. If only the host writes
6793 * to the buffer post rings, and only the chip writes to the rx status
6794 * rings, then cache lines never move beyond shared-modified state.
6795 * If both the host and chip were to write into the same ring, cache line
6796 * eviction could occur since both entities want it in an exclusive state.
6797 */
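/* A sketch of the round trip described above: the host posts a buffer
 * whose descriptor carries an opaque cookie encoding the ring type and
 * index; the chip echoes that cookie in the status ring entry, and
 * tg3_rx() below decodes it with RXD_OPAQUE_RING_MASK and
 * RXD_OPAQUE_INDEX_MASK to locate the original ring_info.
 */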
6798 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6799 {
6800 struct tg3 *tp = tnapi->tp;
6801 u32 work_mask, rx_std_posted = 0;
6802 u32 std_prod_idx, jmb_prod_idx;
6803 u32 sw_idx = tnapi->rx_rcb_ptr;
6804 u16 hw_idx;
6805 int received;
6806 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6807
6808 hw_idx = *(tnapi->rx_rcb_prod_idx);
6809 /*
6810 * We need to order the read of hw_idx and the read of
6811 * the opaque cookie.
6812 */
6813 rmb();
6814 work_mask = 0;
6815 received = 0;
6816 std_prod_idx = tpr->rx_std_prod_idx;
6817 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6818 while (sw_idx != hw_idx && budget > 0) {
6819 struct ring_info *ri;
6820 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6821 unsigned int len;
6822 struct sk_buff *skb;
6823 dma_addr_t dma_addr;
6824 u32 opaque_key, desc_idx, *post_ptr;
6825 u8 *data;
6826 u64 tstamp = 0;
6827
6828 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6829 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6830 if (opaque_key == RXD_OPAQUE_RING_STD) {
6831 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6832 dma_addr = dma_unmap_addr(ri, mapping);
6833 data = ri->data;
6834 post_ptr = &std_prod_idx;
6835 rx_std_posted++;
6836 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6837 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6838 dma_addr = dma_unmap_addr(ri, mapping);
6839 data = ri->data;
6840 post_ptr = &jmb_prod_idx;
6841 } else
6842 goto next_pkt_nopost;
6843
6844 work_mask |= opaque_key;
6845
6846 if (desc->err_vlan & RXD_ERR_MASK) {
6847 drop_it:
6848 tg3_recycle_rx(tnapi, tpr, opaque_key,
6849 desc_idx, *post_ptr);
6850 drop_it_no_recycle:
6851 /* Other statistics kept track of by card. */
6852 tp->rx_dropped++;
6853 goto next_pkt;
6854 }
6855
6856 prefetch(data + TG3_RX_OFFSET(tp));
6857 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6858 ETH_FCS_LEN;
6859
6860 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6861 RXD_FLAG_PTPSTAT_PTPV1 ||
6862 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6863 RXD_FLAG_PTPSTAT_PTPV2) {
6864 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6865 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6866 }
6867
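/* Copy-break: packets larger than TG3_RX_COPY_THRESH keep their DMA
 * buffer and a replacement buffer is posted (zero-copy receive);
 * smaller packets are memcpy'd into a fresh small skb and the original
 * buffer is recycled back onto the producer ring.
 */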
6868 if (len > TG3_RX_COPY_THRESH(tp)) {
6869 int skb_size;
6870 unsigned int frag_size;
6871
6872 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6873 *post_ptr, &frag_size);
6874 if (skb_size < 0)
6875 goto drop_it;
6876
6877 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6878 PCI_DMA_FROMDEVICE);
6879
6880 /* Ensure that the update to the data happens
6881 * after the usage of the old DMA mapping.
6882 */
6883 smp_wmb();
6884
6885 ri->data = NULL;
6886
6887 skb = build_skb(data, frag_size);
6888 if (!skb) {
6889 tg3_frag_free(frag_size != 0, data);
6890 goto drop_it_no_recycle;
6891 }
6892 skb_reserve(skb, TG3_RX_OFFSET(tp));
6893 } else {
6894 tg3_recycle_rx(tnapi, tpr, opaque_key,
6895 desc_idx, *post_ptr);
6896
6897 skb = netdev_alloc_skb(tp->dev,
6898 len + TG3_RAW_IP_ALIGN);
6899 if (skb == NULL)
6900 goto drop_it_no_recycle;
6901
6902 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6903 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6904 memcpy(skb->data,
6905 data + TG3_RX_OFFSET(tp),
6906 len);
6907 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6908 }
6909
6910 skb_put(skb, len);
6911 if (tstamp)
6912 tg3_hwclock_to_timestamp(tp, tstamp,
6913 skb_hwtstamps(skb));
6914
6915 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6916 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6917 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6918 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6919 skb->ip_summed = CHECKSUM_UNNECESSARY;
6920 else
6921 skb_checksum_none_assert(skb);
6922
6923 skb->protocol = eth_type_trans(skb, tp->dev);
6924
6925 if (len > (tp->dev->mtu + ETH_HLEN) &&
6926 skb->protocol != htons(ETH_P_8021Q)) {
6927 dev_kfree_skb(skb);
6928 goto drop_it_no_recycle;
6929 }
6930
6931 if (desc->type_flags & RXD_FLAG_VLAN &&
6932 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6933 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6934 desc->err_vlan & RXD_VLAN_MASK);
6935
6936 napi_gro_receive(&tnapi->napi, skb);
6937
6938 received++;
6939 budget--;
6940
6941 next_pkt:
6942 (*post_ptr)++;
6943
6944 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6945 tpr->rx_std_prod_idx = std_prod_idx &
6946 tp->rx_std_ring_mask;
6947 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6948 tpr->rx_std_prod_idx);
6949 work_mask &= ~RXD_OPAQUE_RING_STD;
6950 rx_std_posted = 0;
6951 }
6952 next_pkt_nopost:
6953 sw_idx++;
6954 sw_idx &= tp->rx_ret_ring_mask;
6955
6956 /* Refresh hw_idx to see if there is new work */
6957 if (sw_idx == hw_idx) {
6958 hw_idx = *(tnapi->rx_rcb_prod_idx);
6959 rmb();
6960 }
6961 }
6962
6963 /* ACK the status ring. */
6964 tnapi->rx_rcb_ptr = sw_idx;
6965 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6966
6967 /* Refill RX ring(s). */
6968 if (!tg3_flag(tp, ENABLE_RSS)) {
6969 /* Sync BD data before updating mailbox */
6970 wmb();
6971
6972 if (work_mask & RXD_OPAQUE_RING_STD) {
6973 tpr->rx_std_prod_idx = std_prod_idx &
6974 tp->rx_std_ring_mask;
6975 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6976 tpr->rx_std_prod_idx);
6977 }
6978 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6979 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6980 tp->rx_jmb_ring_mask;
6981 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6982 tpr->rx_jmb_prod_idx);
6983 }
6984 mmiowb();
6985 } else if (work_mask) {
6986 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6987 * updated before the producer indices can be updated.
6988 */
6989 smp_wmb();
6990
6991 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6992 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6993
6994 if (tnapi != &tp->napi[1]) {
6995 tp->rx_refill = true;
6996 napi_schedule(&tp->napi[1].napi);
6997 }
6998 }
6999
7000 return received;
7001 }
7002
7003 static void tg3_poll_link(struct tg3 *tp)
7004 {
7005 /* handle link change and other phy events */
7006 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7007 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7008
7009 if (sblk->status & SD_STATUS_LINK_CHG) {
7010 sblk->status = SD_STATUS_UPDATED |
7011 (sblk->status & ~SD_STATUS_LINK_CHG);
7012 spin_lock(&tp->lock);
7013 if (tg3_flag(tp, USE_PHYLIB)) {
7014 tw32_f(MAC_STATUS,
7015 (MAC_STATUS_SYNC_CHANGED |
7016 MAC_STATUS_CFG_CHANGED |
7017 MAC_STATUS_MI_COMPLETION |
7018 MAC_STATUS_LNKSTATE_CHANGED));
7019 udelay(40);
7020 } else
7021 tg3_setup_phy(tp, false);
7022 spin_unlock(&tp->lock);
7023 }
7024 }
7025 }
7026
7027 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7028 struct tg3_rx_prodring_set *dpr,
7029 struct tg3_rx_prodring_set *spr)
7030 {
7031 u32 si, di, cpycnt, src_prod_idx;
7032 int i, err = 0;
7033
7034 while (1) {
7035 src_prod_idx = spr->rx_std_prod_idx;
7036
7037 /* Make sure updates to the rx_std_buffers[] entries and the
7038 * standard producer index are seen in the correct order.
7039 */
7040 smp_rmb();
7041
7042 if (spr->rx_std_cons_idx == src_prod_idx)
7043 break;
7044
7045 if (spr->rx_std_cons_idx < src_prod_idx)
7046 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7047 else
7048 cpycnt = tp->rx_std_ring_mask + 1 -
7049 spr->rx_std_cons_idx;
7050
7051 cpycnt = min(cpycnt,
7052 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
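/* Example with assumed values: for a 512-entry ring (mask 511),
 * cons_idx = 500 and prod_idx = 10 wrap, so this pass copies
 * cpycnt = 512 - 500 = 12 entries and the remaining 10 are picked up
 * on the next loop iteration; cpycnt is then also capped by the free
 * space in the destination ring.
 */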
7053
7054 si = spr->rx_std_cons_idx;
7055 di = dpr->rx_std_prod_idx;
7056
7057 for (i = di; i < di + cpycnt; i++) {
7058 if (dpr->rx_std_buffers[i].data) {
7059 cpycnt = i - di;
7060 err = -ENOSPC;
7061 break;
7062 }
7063 }
7064
7065 if (!cpycnt)
7066 break;
7067
7068 /* Ensure that updates to the rx_std_buffers ring and the
7069 * shadowed hardware producer ring from tg3_recycle_skb() are
7070 * ordered correctly WRT the skb check above.
7071 */
7072 smp_rmb();
7073
7074 memcpy(&dpr->rx_std_buffers[di],
7075 &spr->rx_std_buffers[si],
7076 cpycnt * sizeof(struct ring_info));
7077
7078 for (i = 0; i < cpycnt; i++, di++, si++) {
7079 struct tg3_rx_buffer_desc *sbd, *dbd;
7080 sbd = &spr->rx_std[si];
7081 dbd = &dpr->rx_std[di];
7082 dbd->addr_hi = sbd->addr_hi;
7083 dbd->addr_lo = sbd->addr_lo;
7084 }
7085
7086 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7087 tp->rx_std_ring_mask;
7088 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7089 tp->rx_std_ring_mask;
7090 }
7091
7092 while (1) {
7093 src_prod_idx = spr->rx_jmb_prod_idx;
7094
7095 /* Make sure updates to the rx_jmb_buffers[] entries and
7096 * the jumbo producer index are seen in the correct order.
7097 */
7098 smp_rmb();
7099
7100 if (spr->rx_jmb_cons_idx == src_prod_idx)
7101 break;
7102
7103 if (spr->rx_jmb_cons_idx < src_prod_idx)
7104 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7105 else
7106 cpycnt = tp->rx_jmb_ring_mask + 1 -
7107 spr->rx_jmb_cons_idx;
7108
7109 cpycnt = min(cpycnt,
7110 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7111
7112 si = spr->rx_jmb_cons_idx;
7113 di = dpr->rx_jmb_prod_idx;
7114
7115 for (i = di; i < di + cpycnt; i++) {
7116 if (dpr->rx_jmb_buffers[i].data) {
7117 cpycnt = i - di;
7118 err = -ENOSPC;
7119 break;
7120 }
7121 }
7122
7123 if (!cpycnt)
7124 break;
7125
7126 /* Ensure that updates to the rx_jmb_buffers ring and the
7127 * shadowed hardware producer ring from tg3_recycle_skb() are
7128 * ordered correctly WRT the skb check above.
7129 */
7130 smp_rmb();
7131
7132 memcpy(&dpr->rx_jmb_buffers[di],
7133 &spr->rx_jmb_buffers[si],
7134 cpycnt * sizeof(struct ring_info));
7135
7136 for (i = 0; i < cpycnt; i++, di++, si++) {
7137 struct tg3_rx_buffer_desc *sbd, *dbd;
7138 sbd = &spr->rx_jmb[si].std;
7139 dbd = &dpr->rx_jmb[di].std;
7140 dbd->addr_hi = sbd->addr_hi;
7141 dbd->addr_lo = sbd->addr_lo;
7142 }
7143
7144 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7145 tp->rx_jmb_ring_mask;
7146 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7147 tp->rx_jmb_ring_mask;
7148 }
7149
7150 return err;
7151 }
7152
7153 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7154 {
7155 struct tg3 *tp = tnapi->tp;
7156
7157 /* run TX completion thread */
7158 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7159 tg3_tx(tnapi);
7160 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7161 return work_done;
7162 }
7163
7164 if (!tnapi->rx_rcb_prod_idx)
7165 return work_done;
7166
7167 /* run RX thread, within the bounds set by NAPI.
7168 * All RX "locking" is done by ensuring outside
7169 * code synchronizes with tg3->napi.poll()
7170 */
7171 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7172 work_done += tg3_rx(tnapi, budget - work_done);
7173
7174 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7175 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7176 int i, err = 0;
7177 u32 std_prod_idx = dpr->rx_std_prod_idx;
7178 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7179
7180 tp->rx_refill = false;
7181 for (i = 1; i <= tp->rxq_cnt; i++)
7182 err |= tg3_rx_prodring_xfer(tp, dpr,
7183 &tp->napi[i].prodring);
7184
7185 wmb();
7186
7187 if (std_prod_idx != dpr->rx_std_prod_idx)
7188 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7189 dpr->rx_std_prod_idx);
7190
7191 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7192 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7193 dpr->rx_jmb_prod_idx);
7194
7195 mmiowb();
7196
7197 if (err)
7198 tw32_f(HOSTCC_MODE, tp->coal_now);
7199 }
7200
7201 return work_done;
7202 }
7203
7204 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7205 {
7206 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7207 schedule_work(&tp->reset_task);
7208 }
7209
7210 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7211 {
7212 cancel_work_sync(&tp->reset_task);
7213 tg3_flag_clear(tp, RESET_TASK_PENDING);
7214 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7215 }
7216
7217 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7218 {
7219 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7220 struct tg3 *tp = tnapi->tp;
7221 int work_done = 0;
7222 struct tg3_hw_status *sblk = tnapi->hw_status;
7223
7224 while (1) {
7225 work_done = tg3_poll_work(tnapi, work_done, budget);
7226
7227 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7228 goto tx_recovery;
7229
7230 if (unlikely(work_done >= budget))
7231 break;
7232
7233 /* tnapi->last_tag is written to the interrupt mailbox
7234 * below to tell the hw how much work has been processed,
7235 * so we must read the status tag before checking for more work.
7236 */
7237 tnapi->last_tag = sblk->status_tag;
7238 tnapi->last_irq_tag = tnapi->last_tag;
7239 rmb();
7240
7241 /* check for RX/TX work to do */
7242 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7243 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7244
7245 /* This test is not race-free, but looping again
7246 * reduces the number of interrupts.
7247 */
7248 if (tnapi == &tp->napi[1] && tp->rx_refill)
7249 continue;
7250
7251 napi_complete(napi);
7252 /* Reenable interrupts. */
7253 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7254
7255 /* This test here is synchronized by napi_schedule()
7256 * and napi_complete() to close the race condition.
7257 */
7258 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7259 tw32(HOSTCC_MODE, tp->coalesce_mode |
7260 HOSTCC_MODE_ENABLE |
7261 tnapi->coal_now);
7262 }
7263 mmiowb();
7264 break;
7265 }
7266 }
7267
7268 return work_done;
7269
7270 tx_recovery:
7271 /* work_done is guaranteed to be less than budget. */
7272 napi_complete(napi);
7273 tg3_reset_task_schedule(tp);
7274 return work_done;
7275 }
7276
7277 static void tg3_process_error(struct tg3 *tp)
7278 {
7279 u32 val;
7280 bool real_error = false;
7281
7282 if (tg3_flag(tp, ERROR_PROCESSED))
7283 return;
7284
7285 /* Check Flow Attention register */
7286 val = tr32(HOSTCC_FLOW_ATTN);
7287 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7288 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7289 real_error = true;
7290 }
7291
7292 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7293 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7294 real_error = true;
7295 }
7296
7297 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7298 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7299 real_error = true;
7300 }
7301
7302 if (!real_error)
7303 return;
7304
7305 tg3_dump_state(tp);
7306
7307 tg3_flag_set(tp, ERROR_PROCESSED);
7308 tg3_reset_task_schedule(tp);
7309 }
7310
7311 static int tg3_poll(struct napi_struct *napi, int budget)
7312 {
7313 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7314 struct tg3 *tp = tnapi->tp;
7315 int work_done = 0;
7316 struct tg3_hw_status *sblk = tnapi->hw_status;
7317
7318 while (1) {
7319 if (sblk->status & SD_STATUS_ERROR)
7320 tg3_process_error(tp);
7321
7322 tg3_poll_link(tp);
7323
7324 work_done = tg3_poll_work(tnapi, work_done, budget);
7325
7326 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7327 goto tx_recovery;
7328
7329 if (unlikely(work_done >= budget))
7330 break;
7331
7332 if (tg3_flag(tp, TAGGED_STATUS)) {
7333 /* tnapi->last_tag is used in tg3_int_reenable() below
7334 * to tell the hw how much work has been processed,
7335 * so we must read it before checking for more work.
7336 */
7337 tnapi->last_tag = sblk->status_tag;
7338 tnapi->last_irq_tag = tnapi->last_tag;
7339 rmb();
7340 } else
7341 sblk->status &= ~SD_STATUS_UPDATED;
7342
7343 if (likely(!tg3_has_work(tnapi))) {
7344 napi_complete(napi);
7345 tg3_int_reenable(tnapi);
7346 break;
7347 }
7348 }
7349
7350 return work_done;
7351
7352 tx_recovery:
7353 /* work_done is guaranteed to be less than budget. */
7354 napi_complete(napi);
7355 tg3_reset_task_schedule(tp);
7356 return work_done;
7357 }
7358
7359 static void tg3_napi_disable(struct tg3 *tp)
7360 {
7361 int i;
7362
7363 for (i = tp->irq_cnt - 1; i >= 0; i--)
7364 napi_disable(&tp->napi[i].napi);
7365 }
7366
7367 static void tg3_napi_enable(struct tg3 *tp)
7368 {
7369 int i;
7370
7371 for (i = 0; i < tp->irq_cnt; i++)
7372 napi_enable(&tp->napi[i].napi);
7373 }
7374
7375 static void tg3_napi_init(struct tg3 *tp)
7376 {
7377 int i;
7378
7379 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7380 for (i = 1; i < tp->irq_cnt; i++)
7381 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7382 }
7383
7384 static void tg3_napi_fini(struct tg3 *tp)
7385 {
7386 int i;
7387
7388 for (i = 0; i < tp->irq_cnt; i++)
7389 netif_napi_del(&tp->napi[i].napi);
7390 }
7391
7392 static inline void tg3_netif_stop(struct tg3 *tp)
7393 {
7394 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7395 tg3_napi_disable(tp);
7396 netif_carrier_off(tp->dev);
7397 netif_tx_disable(tp->dev);
7398 }
7399
7400 /* tp->lock must be held */
7401 static inline void tg3_netif_start(struct tg3 *tp)
7402 {
7403 tg3_ptp_resume(tp);
7404
7405 /* NOTE: unconditional netif_tx_wake_all_queues is only
7406 * appropriate so long as all callers are assured to
7407 * have free tx slots (such as after tg3_init_hw)
7408 */
7409 netif_tx_wake_all_queues(tp->dev);
7410
7411 if (tp->link_up)
7412 netif_carrier_on(tp->dev);
7413
7414 tg3_napi_enable(tp);
7415 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7416 tg3_enable_ints(tp);
7417 }
7418
7419 static void tg3_irq_quiesce(struct tg3 *tp)
7420 {
7421 int i;
7422
7423 BUG_ON(tp->irq_sync);
7424
7425 tp->irq_sync = 1;
7426 smp_mb();
7427
7428 for (i = 0; i < tp->irq_cnt; i++)
7429 synchronize_irq(tp->napi[i].irq_vec);
7430 }
7431
7432 /* Fully shut down all tg3 driver activity elsewhere in the system.
7433 * If irq_sync is non-zero, the IRQ handlers are synchronized with as
7434 * well. Most of the time this is only necessary when shutting down
7435 * the device.
7436 */
7437 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7438 {
7439 spin_lock_bh(&tp->lock);
7440 if (irq_sync)
7441 tg3_irq_quiesce(tp);
7442 }
7443
7444 static inline void tg3_full_unlock(struct tg3 *tp)
7445 {
7446 spin_unlock_bh(&tp->lock);
7447 }
7448
7449 /* One-shot MSI handler - the chip automatically disables the interrupt
7450 * after sending the MSI, so the driver doesn't have to do it.
7451 */
7452 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7453 {
7454 struct tg3_napi *tnapi = dev_id;
7455 struct tg3 *tp = tnapi->tp;
7456
7457 prefetch(tnapi->hw_status);
7458 if (tnapi->rx_rcb)
7459 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7460
7461 if (likely(!tg3_irq_sync(tp)))
7462 napi_schedule(&tnapi->napi);
7463
7464 return IRQ_HANDLED;
7465 }
7466
7467 /* MSI ISR - No need to check for interrupt sharing and no need to
7468 * flush status block and interrupt mailbox. PCI ordering rules
7469 * guarantee that MSI will arrive after the status block.
7470 */
7471 static irqreturn_t tg3_msi(int irq, void *dev_id)
7472 {
7473 struct tg3_napi *tnapi = dev_id;
7474 struct tg3 *tp = tnapi->tp;
7475
7476 prefetch(tnapi->hw_status);
7477 if (tnapi->rx_rcb)
7478 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7479 /*
7480 * Writing any value to intr-mbox-0 clears PCI INTA# and
7481 * chip-internal interrupt pending events.
7482 * Writing non-zero to intr-mbox-0 additionally tells the
7483 * NIC to stop sending us irqs, engaging "in-intr-handler"
7484 * event coalescing.
7485 */
7486 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7487 if (likely(!tg3_irq_sync(tp)))
7488 napi_schedule(&tnapi->napi);
7489
7490 return IRQ_RETVAL(1);
7491 }
7492
7493 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7494 {
7495 struct tg3_napi *tnapi = dev_id;
7496 struct tg3 *tp = tnapi->tp;
7497 struct tg3_hw_status *sblk = tnapi->hw_status;
7498 unsigned int handled = 1;
7499
7500 /* In INTx mode, it is possible for the interrupt to arrive at
7501 * the CPU before the status block, which is posted prior to the
7502 * interrupt, becomes visible. Reading the PCI State register will
7503 * confirm whether the interrupt is ours and will flush the status block.
7504 */
7505 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7506 if (tg3_flag(tp, CHIP_RESETTING) ||
7507 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7508 handled = 0;
7509 goto out;
7510 }
7511 }
7512
7513 /*
7514 * Writing any value to intr-mbox-0 clears PCI INTA# and
7515 * chip-internal interrupt pending events.
7516 * Writing non-zero to intr-mbox-0 additionally tells the
7517 * NIC to stop sending us irqs, engaging "in-intr-handler"
7518 * event coalescing.
7519 *
7520 * Flush the mailbox to de-assert the IRQ immediately to prevent
7521 * spurious interrupts. The flush impacts performance but
7522 * excessive spurious interrupts can be worse in some cases.
7523 */
7524 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7525 if (tg3_irq_sync(tp))
7526 goto out;
7527 sblk->status &= ~SD_STATUS_UPDATED;
7528 if (likely(tg3_has_work(tnapi))) {
7529 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7530 napi_schedule(&tnapi->napi);
7531 } else {
7532 /* No work, shared interrupt perhaps? Re-enable
7533 * interrupts, and flush that PCI write.
7534 */
7535 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7536 0x00000000);
7537 }
7538 out:
7539 return IRQ_RETVAL(handled);
7540 }
7541
7542 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7543 {
7544 struct tg3_napi *tnapi = dev_id;
7545 struct tg3 *tp = tnapi->tp;
7546 struct tg3_hw_status *sblk = tnapi->hw_status;
7547 unsigned int handled = 1;
7548
7549 /* In INTx mode, it is possible for the interrupt to arrive at
7550 * the CPU before the status block, which is posted prior to the
7551 * interrupt, becomes visible. Reading the PCI State register will
7552 * confirm whether the interrupt is ours and will flush the status block.
7553 */
7554 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7555 if (tg3_flag(tp, CHIP_RESETTING) ||
7556 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7557 handled = 0;
7558 goto out;
7559 }
7560 }
7561
7562 /*
7563 * Writing any value to intr-mbox-0 clears PCI INTA# and
7564 * chip-internal interrupt pending events.
7565 * Writing non-zero to intr-mbox-0 additionally tells the
7566 * NIC to stop sending us irqs, engaging "in-intr-handler"
7567 * event coalescing.
7568 *
7569 * Flush the mailbox to de-assert the IRQ immediately to prevent
7570 * spurious interrupts. The flush impacts performance but
7571 * excessive spurious interrupts can be worse in some cases.
7572 */
7573 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7574
7575 /*
7576 * In a shared interrupt configuration, sometimes other devices'
7577 * interrupts will scream. We record the current status tag here
7578 * so that the above check can report that the screaming interrupts
7579 * are unhandled. Eventually they will be silenced.
7580 */
7581 tnapi->last_irq_tag = sblk->status_tag;
7582
7583 if (tg3_irq_sync(tp))
7584 goto out;
7585
7586 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7587
7588 napi_schedule(&tnapi->napi);
7589
7590 out:
7591 return IRQ_RETVAL(handled);
7592 }
7593
7594 /* ISR for interrupt test */
7595 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7596 {
7597 struct tg3_napi *tnapi = dev_id;
7598 struct tg3 *tp = tnapi->tp;
7599 struct tg3_hw_status *sblk = tnapi->hw_status;
7600
7601 if ((sblk->status & SD_STATUS_UPDATED) ||
7602 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7603 tg3_disable_ints(tp);
7604 return IRQ_RETVAL(1);
7605 }
7606 return IRQ_RETVAL(0);
7607 }
7608
7609 #ifdef CONFIG_NET_POLL_CONTROLLER
7610 static void tg3_poll_controller(struct net_device *dev)
7611 {
7612 int i;
7613 struct tg3 *tp = netdev_priv(dev);
7614
7615 if (tg3_irq_sync(tp))
7616 return;
7617
7618 for (i = 0; i < tp->irq_cnt; i++)
7619 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7620 }
7621 #endif
7622
7623 static void tg3_tx_timeout(struct net_device *dev)
7624 {
7625 struct tg3 *tp = netdev_priv(dev);
7626
7627 if (netif_msg_tx_err(tp)) {
7628 netdev_err(dev, "transmit timed out, resetting\n");
7629 tg3_dump_state(tp);
7630 }
7631
7632 tg3_reset_task_schedule(tp);
7633 }
7634
7635 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7636 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7637 {
7638 u32 base = (u32) mapping & 0xffffffff;
7639
7640 return base + len + 8 < base;
7641 }
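/* Illustrative case with hypothetical values: mapping = 0xfffffff0,
 * len = 0x20. Then base + len + 8 = 0x100000018, which truncates to
 * 0x18 in 32 bits and is < base, so the buffer is flagged as crossing
 * a 4GB boundary; the extra 8 bytes appear to act as a safety margin.
 */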
7642
7643 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7644 * of any 4GB boundaries: 4G, 8G, etc
7645 */
7646 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7647 u32 len, u32 mss)
7648 {
7649 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7650 u32 base = (u32) mapping & 0xffffffff;
7651
7652 return ((base + len + (mss & 0x3fff)) < base);
7653 }
7654 return 0;
7655 }
7656
7657 /* Test for DMA addresses > 40-bit */
7658 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7659 int len)
7660 {
7661 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7662 if (tg3_flag(tp, 40BIT_DMA_BUG))
7663 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7664 return 0;
7665 #else
7666 return 0;
7667 #endif
7668 }
7669
7670 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7671 dma_addr_t mapping, u32 len, u32 flags,
7672 u32 mss, u32 vlan)
7673 {
7674 txbd->addr_hi = ((u64) mapping >> 32);
7675 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7676 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7677 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7678 }
7679
7680 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7681 dma_addr_t map, u32 len, u32 flags,
7682 u32 mss, u32 vlan)
7683 {
7684 struct tg3 *tp = tnapi->tp;
7685 bool hwbug = false;
7686
7687 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7688 hwbug = true;
7689
7690 if (tg3_4g_overflow_test(map, len))
7691 hwbug = true;
7692
7693 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7694 hwbug = true;
7695
7696 if (tg3_40bit_overflow_test(tp, map, len))
7697 hwbug = true;
7698
7699 if (tp->dma_limit) {
7700 u32 prvidx = *entry;
7701 u32 tmp_flag = flags & ~TXD_FLAG_END;
7702 while (len > tp->dma_limit && *budget) {
7703 u32 frag_len = tp->dma_limit;
7704 len -= tp->dma_limit;
7705
7706 /* Avoid the 8-byte DMA problem */
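/* Sketch with assumed numbers: dma_limit = 4096 and len = 4100 would
 * leave a 4-byte tail, so this pass posts 2048 bytes instead and
 * carries 2052 forward, keeping every posted segment larger than
 * 8 bytes.
 */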
7707 if (len <= 8) {
7708 len += tp->dma_limit / 2;
7709 frag_len = tp->dma_limit / 2;
7710 }
7711
7712 tnapi->tx_buffers[*entry].fragmented = true;
7713
7714 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7715 frag_len, tmp_flag, mss, vlan);
7716 *budget -= 1;
7717 prvidx = *entry;
7718 *entry = NEXT_TX(*entry);
7719
7720 map += frag_len;
7721 }
7722
7723 if (len) {
7724 if (*budget) {
7725 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7726 len, flags, mss, vlan);
7727 *budget -= 1;
7728 *entry = NEXT_TX(*entry);
7729 } else {
7730 hwbug = true;
7731 tnapi->tx_buffers[prvidx].fragmented = false;
7732 }
7733 }
7734 } else {
7735 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7736 len, flags, mss, vlan);
7737 *entry = NEXT_TX(*entry);
7738 }
7739
7740 return hwbug;
7741 }
7742
7743 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7744 {
7745 int i;
7746 struct sk_buff *skb;
7747 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7748
7749 skb = txb->skb;
7750 txb->skb = NULL;
7751
7752 pci_unmap_single(tnapi->tp->pdev,
7753 dma_unmap_addr(txb, mapping),
7754 skb_headlen(skb),
7755 PCI_DMA_TODEVICE);
7756
7757 while (txb->fragmented) {
7758 txb->fragmented = false;
7759 entry = NEXT_TX(entry);
7760 txb = &tnapi->tx_buffers[entry];
7761 }
7762
7763 for (i = 0; i <= last; i++) {
7764 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7765
7766 entry = NEXT_TX(entry);
7767 txb = &tnapi->tx_buffers[entry];
7768
7769 pci_unmap_page(tnapi->tp->pdev,
7770 dma_unmap_addr(txb, mapping),
7771 skb_frag_size(frag), PCI_DMA_TODEVICE);
7772
7773 while (txb->fragmented) {
7774 txb->fragmented = false;
7775 entry = NEXT_TX(entry);
7776 txb = &tnapi->tx_buffers[entry];
7777 }
7778 }
7779 }
7780
7781 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7782 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7783 struct sk_buff **pskb,
7784 u32 *entry, u32 *budget,
7785 u32 base_flags, u32 mss, u32 vlan)
7786 {
7787 struct tg3 *tp = tnapi->tp;
7788 struct sk_buff *new_skb, *skb = *pskb;
7789 dma_addr_t new_addr = 0;
7790 int ret = 0;
7791
7792 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7793 new_skb = skb_copy(skb, GFP_ATOMIC);
7794 else {
7795 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7796
7797 new_skb = skb_copy_expand(skb,
7798 skb_headroom(skb) + more_headroom,
7799 skb_tailroom(skb), GFP_ATOMIC);
7800 }
7801
7802 if (!new_skb) {
7803 ret = -1;
7804 } else {
7805 /* New SKB is guaranteed to be linear. */
7806 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7807 PCI_DMA_TODEVICE);
7808 /* Make sure the mapping succeeded */
7809 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7810 dev_kfree_skb(new_skb);
7811 ret = -1;
7812 } else {
7813 u32 save_entry = *entry;
7814
7815 base_flags |= TXD_FLAG_END;
7816
7817 tnapi->tx_buffers[*entry].skb = new_skb;
7818 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7819 mapping, new_addr);
7820
7821 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7822 new_skb->len, base_flags,
7823 mss, vlan)) {
7824 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7825 dev_kfree_skb(new_skb);
7826 ret = -1;
7827 }
7828 }
7829 }
7830
7831 dev_kfree_skb(skb);
7832 *pskb = new_skb;
7833 return ret;
7834 }
7835
7836 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7837
7838 /* Use GSO to work around a rare TSO bug that may be triggered when the
7839 * TSO header is greater than 80 bytes.
7840 */
7841 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7842 {
7843 struct sk_buff *segs, *nskb;
7844 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7845
7846 /* Estimate the number of fragments in the worst case */
7847 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7848 netif_stop_queue(tp->dev);
7849
7850 /* netif_tx_stop_queue() must be done before checking
7851 * the tx index in tg3_tx_avail() below, because in
7852 * tg3_tx(), we update tx index before checking for
7853 * netif_tx_queue_stopped().
7854 */
7855 smp_mb();
7856 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7857 return NETDEV_TX_BUSY;
7858
7859 netif_wake_queue(tp->dev);
7860 }
7861
7862 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7863 if (IS_ERR(segs))
7864 goto tg3_tso_bug_end;
7865
7866 do {
7867 nskb = segs;
7868 segs = segs->next;
7869 nskb->next = NULL;
7870 tg3_start_xmit(nskb, tp->dev);
7871 } while (segs);
7872
7873 tg3_tso_bug_end:
7874 dev_kfree_skb(skb);
7875
7876 return NETDEV_TX_OK;
7877 }
7878
7879 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7880 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7881 */
7882 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7883 {
7884 struct tg3 *tp = netdev_priv(dev);
7885 u32 len, entry, base_flags, mss, vlan = 0;
7886 u32 budget;
7887 int i = -1, would_hit_hwbug;
7888 dma_addr_t mapping;
7889 struct tg3_napi *tnapi;
7890 struct netdev_queue *txq;
7891 unsigned int last;
7892
7893 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7894 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7895 if (tg3_flag(tp, ENABLE_TSS))
7896 tnapi++;
7897
7898 budget = tg3_tx_avail(tnapi);
7899
7900 /* We are running in BH disabled context with netif_tx_lock
7901 * and TX reclaim runs via tp->napi.poll inside of a software
7902 * interrupt. Furthermore, IRQ processing runs lockless so we have
7903 * no IRQ context deadlocks to worry about either. Rejoice!
7904 */
7905 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7906 if (!netif_tx_queue_stopped(txq)) {
7907 netif_tx_stop_queue(txq);
7908
7909 /* This is a hard error, log it. */
7910 netdev_err(dev,
7911 "BUG! Tx Ring full when queue awake!\n");
7912 }
7913 return NETDEV_TX_BUSY;
7914 }
7915
7916 entry = tnapi->tx_prod;
7917 base_flags = 0;
7918 if (skb->ip_summed == CHECKSUM_PARTIAL)
7919 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7920
7921 mss = skb_shinfo(skb)->gso_size;
7922 if (mss) {
7923 struct iphdr *iph;
7924 u32 tcp_opt_len, hdr_len;
7925
7926 if (skb_header_cloned(skb) &&
7927 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7928 goto drop;
7929
7930 iph = ip_hdr(skb);
7931 tcp_opt_len = tcp_optlen(skb);
7932
7933 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7934
7935 if (!skb_is_gso_v6(skb)) {
7936 iph->check = 0;
7937 iph->tot_len = htons(mss + hdr_len);
7938 }
7939
7940 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7941 tg3_flag(tp, TSO_BUG))
7942 return tg3_tso_bug(tp, skb);
7943
7944 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7945 TXD_FLAG_CPU_POST_DMA);
7946
7947 if (tg3_flag(tp, HW_TSO_1) ||
7948 tg3_flag(tp, HW_TSO_2) ||
7949 tg3_flag(tp, HW_TSO_3)) {
7950 tcp_hdr(skb)->check = 0;
7951 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7952 } else
7953 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7954 iph->daddr, 0,
7955 IPPROTO_TCP,
7956 0);
7957
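/* The hardware wants the TSO header length folded into the mss and
 * flags fields, and the encoding differs per TSO generation. For
 * HW_TSO_3, hdr_len (always a multiple of 4 here, since IP and TCP
 * header lengths are counted in 32-bit words) is scattered as:
 *   mss[15:14]        = hdr_len[3:2]
 *   base_flags[4]     = hdr_len[4]
 *   base_flags[14:10] = hdr_len[9:5]
 * HW_TSO_2 simply places hdr_len at mss bit 9 and up, while the
 * older parts below encode only the extra option words.
 */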
7958 if (tg3_flag(tp, HW_TSO_3)) {
7959 mss |= (hdr_len & 0xc) << 12;
7960 if (hdr_len & 0x10)
7961 base_flags |= 0x00000010;
7962 base_flags |= (hdr_len & 0x3e0) << 5;
7963 } else if (tg3_flag(tp, HW_TSO_2))
7964 mss |= hdr_len << 9;
7965 else if (tg3_flag(tp, HW_TSO_1) ||
7966 tg3_asic_rev(tp) == ASIC_REV_5705) {
7967 if (tcp_opt_len || iph->ihl > 5) {
7968 int tsflags;
7969
7970 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7971 mss |= (tsflags << 11);
7972 }
7973 } else {
7974 if (tcp_opt_len || iph->ihl > 5) {
7975 int tsflags;
7976
7977 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7978 base_flags |= tsflags << 12;
7979 }
7980 }
7981 }
7982
7983 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7984 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7985 base_flags |= TXD_FLAG_JMB_PKT;
7986
7987 if (vlan_tx_tag_present(skb)) {
7988 base_flags |= TXD_FLAG_VLAN;
7989 vlan = vlan_tx_tag_get(skb);
7990 }
7991
7992 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7993 tg3_flag(tp, TX_TSTAMP_EN)) {
7994 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7995 base_flags |= TXD_FLAG_HWTSTAMP;
7996 }
7997
7998 len = skb_headlen(skb);
7999
8000 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8001 if (pci_dma_mapping_error(tp->pdev, mapping))
8002 goto drop;
8003 
8005 tnapi->tx_buffers[entry].skb = skb;
8006 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8007
8008 would_hit_hwbug = 0;
8009
8010 if (tg3_flag(tp, 5701_DMA_BUG))
8011 would_hit_hwbug = 1;
8012
8013 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8014 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8015 mss, vlan)) {
8016 would_hit_hwbug = 1;
8017 } else if (skb_shinfo(skb)->nr_frags > 0) {
8018 u32 tmp_mss = mss;
8019
8020 if (!tg3_flag(tp, HW_TSO_1) &&
8021 !tg3_flag(tp, HW_TSO_2) &&
8022 !tg3_flag(tp, HW_TSO_3))
8023 tmp_mss = 0;
8024
8025 /* Now loop through additional data
8026 * fragments, and queue them.
8027 */
8028 last = skb_shinfo(skb)->nr_frags - 1;
8029 for (i = 0; i <= last; i++) {
8030 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8031
8032 len = skb_frag_size(frag);
8033 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8034 len, DMA_TO_DEVICE);
8035
8036 tnapi->tx_buffers[entry].skb = NULL;
8037 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8038 mapping);
8039 if (dma_mapping_error(&tp->pdev->dev, mapping))
8040 goto dma_error;
8041
8042 if (!budget ||
8043 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8044 len, base_flags |
8045 ((i == last) ? TXD_FLAG_END : 0),
8046 tmp_mss, vlan)) {
8047 would_hit_hwbug = 1;
8048 break;
8049 }
8050 }
8051 }
8052
8053 if (would_hit_hwbug) {
8054 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8055
8056 /* If the workaround fails due to memory/mapping
8057 * failure, silently drop this packet.
8058 */
8059 entry = tnapi->tx_prod;
8060 budget = tg3_tx_avail(tnapi);
8061 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8062 base_flags, mss, vlan))
8063 goto drop_nofree;
8064 }
8065
8066 skb_tx_timestamp(skb);
8067 netdev_tx_sent_queue(txq, skb->len);
8068
8069 /* Sync BD data before updating mailbox */
8070 wmb();
8071
8072 /* Packets are ready; update the Tx producer idx locally and on card. */
8073 tw32_tx_mbox(tnapi->prodmbox, entry);
8074
8075 tnapi->tx_prod = entry;
8076 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8077 netif_tx_stop_queue(txq);
8078
8079 /* netif_tx_stop_queue() must be done before checking
8080 * the tx index in tg3_tx_avail() below, because in
8081 * tg3_tx(), we update tx index before checking for
8082 * netif_tx_queue_stopped().
8083 */
8084 smp_mb();
8085 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8086 netif_tx_wake_queue(txq);
8087 }
8088
8089 mmiowb();
8090 return NETDEV_TX_OK;
8091
8092 dma_error:
8093 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8094 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8095 drop:
8096 dev_kfree_skb(skb);
8097 drop_nofree:
8098 tp->tx_dropped++;
8099 return NETDEV_TX_OK;
8100 }
8101
8102 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8103 {
8104 if (enable) {
8105 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8106 MAC_MODE_PORT_MODE_MASK);
8107
8108 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8109
8110 if (!tg3_flag(tp, 5705_PLUS))
8111 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8112
8113 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8114 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8115 else
8116 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8117 } else {
8118 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8119
8120 if (tg3_flag(tp, 5705_PLUS) ||
8121 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8122 tg3_asic_rev(tp) == ASIC_REV_5700)
8123 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8124 }
8125
8126 tw32(MAC_MODE, tp->mac_mode);
8127 udelay(40);
8128 }
8129
8130 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8131 {
8132 u32 val, bmcr, mac_mode, ptest = 0;
8133
8134 tg3_phy_toggle_apd(tp, false);
8135 tg3_phy_toggle_automdix(tp, false);
8136
8137 if (extlpbk && tg3_phy_set_extloopbk(tp))
8138 return -EIO;
8139
8140 bmcr = BMCR_FULLDPLX;
8141 switch (speed) {
8142 case SPEED_10:
8143 break;
8144 case SPEED_100:
8145 bmcr |= BMCR_SPEED100;
8146 break;
8147 case SPEED_1000:
8148 default:
8149 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8150 speed = SPEED_100;
8151 bmcr |= BMCR_SPEED100;
8152 } else {
8153 speed = SPEED_1000;
8154 bmcr |= BMCR_SPEED1000;
8155 }
8156 }
8157
8158 if (extlpbk) {
8159 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8160 tg3_readphy(tp, MII_CTRL1000, &val);
8161 val |= CTL1000_AS_MASTER |
8162 CTL1000_ENABLE_MASTER;
8163 tg3_writephy(tp, MII_CTRL1000, val);
8164 } else {
8165 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8166 MII_TG3_FET_PTEST_TRIM_2;
8167 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8168 }
8169 } else
8170 bmcr |= BMCR_LOOPBACK;
8171
8172 tg3_writephy(tp, MII_BMCR, bmcr);
8173
8174 /* The write needs to be flushed for the FETs */
8175 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8176 tg3_readphy(tp, MII_BMCR, &bmcr);
8177
8178 udelay(40);
8179
8180 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8181 tg3_asic_rev(tp) == ASIC_REV_5785) {
8182 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8183 MII_TG3_FET_PTEST_FRC_TX_LINK |
8184 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8185
8186 /* The write needs to be flushed for the AC131 */
8187 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8188 }
8189
8190 /* Reset to prevent losing 1st rx packet intermittently */
8191 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8192 tg3_flag(tp, 5780_CLASS)) {
8193 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8194 udelay(10);
8195 tw32_f(MAC_RX_MODE, tp->rx_mode);
8196 }
8197
8198 mac_mode = tp->mac_mode &
8199 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8200 if (speed == SPEED_1000)
8201 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8202 else
8203 mac_mode |= MAC_MODE_PORT_MODE_MII;
8204
8205 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8206 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8207
8208 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8209 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8210 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8211 mac_mode |= MAC_MODE_LINK_POLARITY;
8212
8213 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8214 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8215 }
8216
8217 tw32(MAC_MODE, mac_mode);
8218 udelay(40);
8219
8220 return 0;
8221 }
8222
8223 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8224 {
8225 struct tg3 *tp = netdev_priv(dev);
8226
8227 if (features & NETIF_F_LOOPBACK) {
8228 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8229 return;
8230
8231 spin_lock_bh(&tp->lock);
8232 tg3_mac_loopback(tp, true);
8233 netif_carrier_on(tp->dev);
8234 spin_unlock_bh(&tp->lock);
8235 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8236 } else {
8237 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8238 return;
8239
8240 spin_lock_bh(&tp->lock);
8241 tg3_mac_loopback(tp, false);
8242 /* Force link status check */
8243 tg3_setup_phy(tp, true);
8244 spin_unlock_bh(&tp->lock);
8245 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8246 }
8247 }
8248
8249 static netdev_features_t tg3_fix_features(struct net_device *dev,
8250 netdev_features_t features)
8251 {
8252 struct tg3 *tp = netdev_priv(dev);
8253
8254 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8255 features &= ~NETIF_F_ALL_TSO;
8256
8257 return features;
8258 }
8259
8260 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8261 {
8262 netdev_features_t changed = dev->features ^ features;
8263
8264 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8265 tg3_set_loopback(dev, features);
8266
8267 return 0;
8268 }
8269
8270 static void tg3_rx_prodring_free(struct tg3 *tp,
8271 struct tg3_rx_prodring_set *tpr)
8272 {
8273 int i;
8274
8275 if (tpr != &tp->napi[0].prodring) {
8276 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8277 i = (i + 1) & tp->rx_std_ring_mask)
8278 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8279 tp->rx_pkt_map_sz);
8280
8281 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8282 for (i = tpr->rx_jmb_cons_idx;
8283 i != tpr->rx_jmb_prod_idx;
8284 i = (i + 1) & tp->rx_jmb_ring_mask) {
8285 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8286 TG3_RX_JMB_MAP_SZ);
8287 }
8288 }
8289
8290 return;
8291 }
8292
8293 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8294 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8295 tp->rx_pkt_map_sz);
8296
8297 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8298 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8299 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8300 TG3_RX_JMB_MAP_SZ);
8301 }
8302 }
8303
8304 /* Initialize rx rings for packet processing.
8305 *
8306 * The chip has been shut down and the driver detached from
8307 * the networking, so no interrupts or new tx packets will
8308 * end up in the driver. tp->{tx,}lock are held and thus
8309 * we may not sleep.
8310 */
8311 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8312 struct tg3_rx_prodring_set *tpr)
8313 {
8314 u32 i, rx_pkt_dma_sz;
8315
8316 tpr->rx_std_cons_idx = 0;
8317 tpr->rx_std_prod_idx = 0;
8318 tpr->rx_jmb_cons_idx = 0;
8319 tpr->rx_jmb_prod_idx = 0;
8320
8321 if (tpr != &tp->napi[0].prodring) {
8322 memset(&tpr->rx_std_buffers[0], 0,
8323 TG3_RX_STD_BUFF_RING_SIZE(tp));
8324 if (tpr->rx_jmb_buffers)
8325 memset(&tpr->rx_jmb_buffers[0], 0,
8326 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8327 goto done;
8328 }
8329
8330 /* Zero out all descriptors. */
8331 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8332
8333 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8334 if (tg3_flag(tp, 5780_CLASS) &&
8335 tp->dev->mtu > ETH_DATA_LEN)
8336 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8337 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8338
8339 /* Initialize invariants of the rings; we only set this
8340 * stuff once. This works because the card does not
8341 * write into the rx buffer posting rings.
8342 */
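/* The opaque cookie in each descriptor encodes the ring type plus
 * the slot index, e.g. slot 5 of the standard ring carries
 * RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT), which lets
 * completion handling locate the matching rx_std_buffers[] entry.
 */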
8343 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8344 struct tg3_rx_buffer_desc *rxd;
8345
8346 rxd = &tpr->rx_std[i];
8347 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8348 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8349 rxd->opaque = (RXD_OPAQUE_RING_STD |
8350 (i << RXD_OPAQUE_INDEX_SHIFT));
8351 }
8352
8353 /* Now allocate fresh SKBs for each rx ring. */
8354 for (i = 0; i < tp->rx_pending; i++) {
8355 unsigned int frag_size;
8356
8357 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8358 &frag_size) < 0) {
8359 netdev_warn(tp->dev,
8360 "Using a smaller RX standard ring. Only "
8361 "%d out of %d buffers were allocated "
8362 "successfully\n", i, tp->rx_pending);
8363 if (i == 0)
8364 goto initfail;
8365 tp->rx_pending = i;
8366 break;
8367 }
8368 }
8369
8370 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8371 goto done;
8372
8373 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8374
8375 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8376 goto done;
8377
8378 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8379 struct tg3_rx_buffer_desc *rxd;
8380
8381 rxd = &tpr->rx_jmb[i].std;
8382 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8383 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8384 RXD_FLAG_JUMBO;
8385 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8386 (i << RXD_OPAQUE_INDEX_SHIFT));
8387 }
8388
8389 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8390 unsigned int frag_size;
8391
8392 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8393 &frag_size) < 0) {
8394 netdev_warn(tp->dev,
8395 "Using a smaller RX jumbo ring. Only %d "
8396 "out of %d buffers were allocated "
8397 "successfully\n", i, tp->rx_jumbo_pending);
8398 if (i == 0)
8399 goto initfail;
8400 tp->rx_jumbo_pending = i;
8401 break;
8402 }
8403 }
8404
8405 done:
8406 return 0;
8407
8408 initfail:
8409 tg3_rx_prodring_free(tp, tpr);
8410 return -ENOMEM;
8411 }
8412
8413 static void tg3_rx_prodring_fini(struct tg3 *tp,
8414 struct tg3_rx_prodring_set *tpr)
8415 {
8416 kfree(tpr->rx_std_buffers);
8417 tpr->rx_std_buffers = NULL;
8418 kfree(tpr->rx_jmb_buffers);
8419 tpr->rx_jmb_buffers = NULL;
8420 if (tpr->rx_std) {
8421 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8422 tpr->rx_std, tpr->rx_std_mapping);
8423 tpr->rx_std = NULL;
8424 }
8425 if (tpr->rx_jmb) {
8426 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8427 tpr->rx_jmb, tpr->rx_jmb_mapping);
8428 tpr->rx_jmb = NULL;
8429 }
8430 }
8431
8432 static int tg3_rx_prodring_init(struct tg3 *tp,
8433 struct tg3_rx_prodring_set *tpr)
8434 {
8435 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8436 GFP_KERNEL);
8437 if (!tpr->rx_std_buffers)
8438 return -ENOMEM;
8439
8440 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8441 TG3_RX_STD_RING_BYTES(tp),
8442 &tpr->rx_std_mapping,
8443 GFP_KERNEL);
8444 if (!tpr->rx_std)
8445 goto err_out;
8446
8447 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8448 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8449 GFP_KERNEL);
8450 if (!tpr->rx_jmb_buffers)
8451 goto err_out;
8452
8453 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8454 TG3_RX_JMB_RING_BYTES(tp),
8455 &tpr->rx_jmb_mapping,
8456 GFP_KERNEL);
8457 if (!tpr->rx_jmb)
8458 goto err_out;
8459 }
8460
8461 return 0;
8462
8463 err_out:
8464 tg3_rx_prodring_fini(tp, tpr);
8465 return -ENOMEM;
8466 }
8467
8468 /* Free up pending packets in all rx/tx rings.
8469 *
8470 * The chip has been shut down and the driver detached from
8471 * the networking, so no interrupts or new tx packets will
8472 * end up in the driver. tp->{tx,}lock is not held and we are not
8473 * in an interrupt context and thus may sleep.
8474 */
8475 static void tg3_free_rings(struct tg3 *tp)
8476 {
8477 int i, j;
8478
8479 for (j = 0; j < tp->irq_cnt; j++) {
8480 struct tg3_napi *tnapi = &tp->napi[j];
8481
8482 tg3_rx_prodring_free(tp, &tnapi->prodring);
8483
8484 if (!tnapi->tx_buffers)
8485 continue;
8486
8487 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8488 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8489
8490 if (!skb)
8491 continue;
8492
8493 tg3_tx_skb_unmap(tnapi, i,
8494 skb_shinfo(skb)->nr_frags - 1);
8495
8496 dev_kfree_skb_any(skb);
8497 }
8498 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8499 }
8500 }
8501
8502 /* Initialize tx/rx rings for packet processing.
8503 *
8504 * The chip has been shut down and the driver detached from
8505 * the networking, so no interrupts or new tx packets will
8506 * end up in the driver. tp->{tx,}lock are held and thus
8507 * we may not sleep.
8508 */
8509 static int tg3_init_rings(struct tg3 *tp)
8510 {
8511 int i;
8512
8513 /* Free up all the SKBs. */
8514 tg3_free_rings(tp);
8515
8516 for (i = 0; i < tp->irq_cnt; i++) {
8517 struct tg3_napi *tnapi = &tp->napi[i];
8518
8519 tnapi->last_tag = 0;
8520 tnapi->last_irq_tag = 0;
8521 tnapi->hw_status->status = 0;
8522 tnapi->hw_status->status_tag = 0;
8523 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8524
8525 tnapi->tx_prod = 0;
8526 tnapi->tx_cons = 0;
8527 if (tnapi->tx_ring)
8528 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8529
8530 tnapi->rx_rcb_ptr = 0;
8531 if (tnapi->rx_rcb)
8532 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8533
8534 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8535 tg3_free_rings(tp);
8536 return -ENOMEM;
8537 }
8538 }
8539
8540 return 0;
8541 }
8542
8543 static void tg3_mem_tx_release(struct tg3 *tp)
8544 {
8545 int i;
8546
8547 for (i = 0; i < tp->irq_max; i++) {
8548 struct tg3_napi *tnapi = &tp->napi[i];
8549
8550 if (tnapi->tx_ring) {
8551 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8552 tnapi->tx_ring, tnapi->tx_desc_mapping);
8553 tnapi->tx_ring = NULL;
8554 }
8555
8556 kfree(tnapi->tx_buffers);
8557 tnapi->tx_buffers = NULL;
8558 }
8559 }
8560
8561 static int tg3_mem_tx_acquire(struct tg3 *tp)
8562 {
8563 int i;
8564 struct tg3_napi *tnapi = &tp->napi[0];
8565
8566 /* If multivector TSS is enabled, vector 0 does not handle
8567 * tx interrupts. Don't allocate any resources for it.
8568 */
8569 if (tg3_flag(tp, ENABLE_TSS))
8570 tnapi++;
8571
8572 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8573 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8574 TG3_TX_RING_SIZE, GFP_KERNEL);
8575 if (!tnapi->tx_buffers)
8576 goto err_out;
8577
8578 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8579 TG3_TX_RING_BYTES,
8580 &tnapi->tx_desc_mapping,
8581 GFP_KERNEL);
8582 if (!tnapi->tx_ring)
8583 goto err_out;
8584 }
8585
8586 return 0;
8587
8588 err_out:
8589 tg3_mem_tx_release(tp);
8590 return -ENOMEM;
8591 }
8592
8593 static void tg3_mem_rx_release(struct tg3 *tp)
8594 {
8595 int i;
8596
8597 for (i = 0; i < tp->irq_max; i++) {
8598 struct tg3_napi *tnapi = &tp->napi[i];
8599
8600 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8601
8602 if (!tnapi->rx_rcb)
8603 continue;
8604
8605 dma_free_coherent(&tp->pdev->dev,
8606 TG3_RX_RCB_RING_BYTES(tp),
8607 tnapi->rx_rcb,
8608 tnapi->rx_rcb_mapping);
8609 tnapi->rx_rcb = NULL;
8610 }
8611 }
8612
8613 static int tg3_mem_rx_acquire(struct tg3 *tp)
8614 {
8615 unsigned int i, limit;
8616
8617 limit = tp->rxq_cnt;
8618
8619 /* If RSS is enabled, we need a (dummy) producer ring
8620 * set on vector zero. This is the true hw prodring.
8621 */
8622 if (tg3_flag(tp, ENABLE_RSS))
8623 limit++;
8624
8625 for (i = 0; i < limit; i++) {
8626 struct tg3_napi *tnapi = &tp->napi[i];
8627
8628 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8629 goto err_out;
8630
8631 /* If multivector RSS is enabled, vector 0
8632 * does not handle rx or tx interrupts.
8633 * Don't allocate any resources for it.
8634 */
8635 if (!i && tg3_flag(tp, ENABLE_RSS))
8636 continue;
8637
8638 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8639 TG3_RX_RCB_RING_BYTES(tp),
8640 &tnapi->rx_rcb_mapping,
8641 GFP_KERNEL);
8642 if (!tnapi->rx_rcb)
8643 goto err_out;
8644 }
8645
8646 return 0;
8647
8648 err_out:
8649 tg3_mem_rx_release(tp);
8650 return -ENOMEM;
8651 }
8652
8653 /*
8654 * Must not be invoked with interrupt sources disabled and
8655 * the hardware shut down.
8656 */
8657 static void tg3_free_consistent(struct tg3 *tp)
8658 {
8659 int i;
8660
8661 for (i = 0; i < tp->irq_cnt; i++) {
8662 struct tg3_napi *tnapi = &tp->napi[i];
8663
8664 if (tnapi->hw_status) {
8665 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8666 tnapi->hw_status,
8667 tnapi->status_mapping);
8668 tnapi->hw_status = NULL;
8669 }
8670 }
8671
8672 tg3_mem_rx_release(tp);
8673 tg3_mem_tx_release(tp);
8674
8675 if (tp->hw_stats) {
8676 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8677 tp->hw_stats, tp->stats_mapping);
8678 tp->hw_stats = NULL;
8679 }
8680 }
8681
8682 /*
8683 * Must not be invoked with interrupt sources disabled and
8684 * the hardware shut down. Can sleep.
8685 */
8686 static int tg3_alloc_consistent(struct tg3 *tp)
8687 {
8688 int i;
8689
8690 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8691 sizeof(struct tg3_hw_stats),
8692 &tp->stats_mapping, GFP_KERNEL);
8693 if (!tp->hw_stats)
8694 goto err_out;
8695
8696 for (i = 0; i < tp->irq_cnt; i++) {
8697 struct tg3_napi *tnapi = &tp->napi[i];
8698 struct tg3_hw_status *sblk;
8699
8700 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8701 TG3_HW_STATUS_SIZE,
8702 &tnapi->status_mapping,
8703 GFP_KERNEL);
8704 if (!tnapi->hw_status)
8705 goto err_out;
8706
8707 sblk = tnapi->hw_status;
8708
8709 if (tg3_flag(tp, ENABLE_RSS)) {
8710 u16 *prodptr = NULL;
8711
8712 /*
8713 * When RSS is enabled, the status block format changes
8714 * slightly. The "rx_jumbo_consumer", "reserved",
8715 * and "rx_mini_consumer" members get mapped to the
8716 * other three rx return ring producer indexes.
8717 */
8718 switch (i) {
8719 case 1:
8720 prodptr = &sblk->idx[0].rx_producer;
8721 break;
8722 case 2:
8723 prodptr = &sblk->rx_jumbo_consumer;
8724 break;
8725 case 3:
8726 prodptr = &sblk->reserved;
8727 break;
8728 case 4:
8729 prodptr = &sblk->rx_mini_consumer;
8730 break;
8731 }
8732 tnapi->rx_rcb_prod_idx = prodptr;
8733 } else {
8734 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8735 }
8736 }
8737
8738 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8739 goto err_out;
8740
8741 return 0;
8742
8743 err_out:
8744 tg3_free_consistent(tp);
8745 return -ENOMEM;
8746 }
8747
8748 #define MAX_WAIT_CNT 1000
8749
8750 /* To stop a block, clear the enable bit and poll till it
8751 * clears. tp->lock is held.
8752 */
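/* MAX_WAIT_CNT polls of udelay(100) each bound the wait below at
 * roughly 100 ms before we give up and report -ENODEV.
 */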
8753 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8754 {
8755 unsigned int i;
8756 u32 val;
8757
8758 if (tg3_flag(tp, 5705_PLUS)) {
8759 switch (ofs) {
8760 case RCVLSC_MODE:
8761 case DMAC_MODE:
8762 case MBFREE_MODE:
8763 case BUFMGR_MODE:
8764 case MEMARB_MODE:
8765 /* We can't enable/disable these bits of the
8766 * 5705/5750, so just report success.
8767 */
8768 return 0;
8769
8770 default:
8771 break;
8772 }
8773 }
8774
8775 val = tr32(ofs);
8776 val &= ~enable_bit;
8777 tw32_f(ofs, val);
8778
8779 for (i = 0; i < MAX_WAIT_CNT; i++) {
8780 if (pci_channel_offline(tp->pdev)) {
8781 dev_err(&tp->pdev->dev,
8782 "tg3_stop_block device offline, "
8783 "ofs=%lx enable_bit=%x\n",
8784 ofs, enable_bit);
8785 return -ENODEV;
8786 }
8787
8788 udelay(100);
8789 val = tr32(ofs);
8790 if ((val & enable_bit) == 0)
8791 break;
8792 }
8793
8794 if (i == MAX_WAIT_CNT && !silent) {
8795 dev_err(&tp->pdev->dev,
8796 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8797 ofs, enable_bit);
8798 return -ENODEV;
8799 }
8800
8801 return 0;
8802 }
8803
8804 /* tp->lock is held. */
8805 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8806 {
8807 int i, err;
8808
8809 tg3_disable_ints(tp);
8810
8811 if (pci_channel_offline(tp->pdev)) {
8812 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8813 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8814 err = -ENODEV;
8815 goto err_no_dev;
8816 }
8817
8818 tp->rx_mode &= ~RX_MODE_ENABLE;
8819 tw32_f(MAC_RX_MODE, tp->rx_mode);
8820 udelay(10);
8821
8822 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8823 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8824 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8825 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8826 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8827 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8828
8829 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8830 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8831 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8832 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8833 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8834 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8835 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8836
8837 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8838 tw32_f(MAC_MODE, tp->mac_mode);
8839 udelay(40);
8840
8841 tp->tx_mode &= ~TX_MODE_ENABLE;
8842 tw32_f(MAC_TX_MODE, tp->tx_mode);
8843
8844 for (i = 0; i < MAX_WAIT_CNT; i++) {
8845 udelay(100);
8846 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8847 break;
8848 }
8849 if (i >= MAX_WAIT_CNT) {
8850 dev_err(&tp->pdev->dev,
8851 "%s timed out, TX_MODE_ENABLE will not clear "
8852 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8853 err |= -ENODEV;
8854 }
8855
8856 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8857 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8858 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8859
8860 tw32(FTQ_RESET, 0xffffffff);
8861 tw32(FTQ_RESET, 0x00000000);
8862
8863 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8864 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8865
8866 err_no_dev:
8867 for (i = 0; i < tp->irq_cnt; i++) {
8868 struct tg3_napi *tnapi = &tp->napi[i];
8869 if (tnapi->hw_status)
8870 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8871 }
8872
8873 return err;
8874 }
8875
8876 /* Save PCI command register before chip reset */
8877 static void tg3_save_pci_state(struct tg3 *tp)
8878 {
8879 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8880 }
8881
8882 /* Restore PCI state after chip reset */
8883 static void tg3_restore_pci_state(struct tg3 *tp)
8884 {
8885 u32 val;
8886
8887 /* Re-enable indirect register accesses. */
8888 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8889 tp->misc_host_ctrl);
8890
8891 /* Set MAX PCI retry to zero. */
8892 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8893 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8894 tg3_flag(tp, PCIX_MODE))
8895 val |= PCISTATE_RETRY_SAME_DMA;
8896 /* Allow reads and writes to the APE register and memory space. */
8897 if (tg3_flag(tp, ENABLE_APE))
8898 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8899 PCISTATE_ALLOW_APE_SHMEM_WR |
8900 PCISTATE_ALLOW_APE_PSPACE_WR;
8901 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8902
8903 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8904
8905 if (!tg3_flag(tp, PCI_EXPRESS)) {
8906 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8907 tp->pci_cacheline_sz);
8908 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8909 tp->pci_lat_timer);
8910 }
8911
8912 /* Make sure PCI-X relaxed ordering bit is clear. */
8913 if (tg3_flag(tp, PCIX_MODE)) {
8914 u16 pcix_cmd;
8915
8916 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8917 &pcix_cmd);
8918 pcix_cmd &= ~PCI_X_CMD_ERO;
8919 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8920 pcix_cmd);
8921 }
8922
8923 if (tg3_flag(tp, 5780_CLASS)) {
8924
8925 /* Chip reset on 5780 will reset MSI enable bit,
8926 * so we need to restore it.
8927 */
8928 if (tg3_flag(tp, USING_MSI)) {
8929 u16 ctrl;
8930
8931 pci_read_config_word(tp->pdev,
8932 tp->msi_cap + PCI_MSI_FLAGS,
8933 &ctrl);
8934 pci_write_config_word(tp->pdev,
8935 tp->msi_cap + PCI_MSI_FLAGS,
8936 ctrl | PCI_MSI_FLAGS_ENABLE);
8937 val = tr32(MSGINT_MODE);
8938 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8939 }
8940 }
8941 }
8942
8943 static void tg3_override_clk(struct tg3 *tp)
8944 {
8945 u32 val;
8946
8947 switch (tg3_asic_rev(tp)) {
8948 case ASIC_REV_5717:
8949 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8950 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8951 TG3_CPMU_MAC_ORIDE_ENABLE);
8952 break;
8953
8954 case ASIC_REV_5719:
8955 case ASIC_REV_5720:
8956 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8957 break;
8958
8959 default:
8960 return;
8961 }
8962 }
8963
8964 static void tg3_restore_clk(struct tg3 *tp)
8965 {
8966 u32 val;
8967
8968 switch (tg3_asic_rev(tp)) {
8969 case ASIC_REV_5717:
8970 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8971 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
8972 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
8973 break;
8974
8975 case ASIC_REV_5719:
8976 case ASIC_REV_5720:
8977 val = tr32(TG3_CPMU_CLCK_ORIDE);
8978 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8979 break;
8980
8981 default:
8982 return;
8983 }
8984 }
8985
8986 /* tp->lock is held. */
8987 static int tg3_chip_reset(struct tg3 *tp)
8988 {
8989 u32 val;
8990 void (*write_op)(struct tg3 *, u32, u32);
8991 int i, err;
8992
8993 if (!pci_device_is_present(tp->pdev))
8994 return -ENODEV;
8995
8996 tg3_nvram_lock(tp);
8997
8998 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8999
9000 /* No matching tg3_nvram_unlock() after this because
9001 * chip reset below will undo the nvram lock.
9002 */
9003 tp->nvram_lock_cnt = 0;
9004
9005 /* GRC_MISC_CFG core clock reset will clear the memory
9006 * enable bit in PCI register 4 and the MSI enable bit
9007 * on some chips, so we save relevant registers here.
9008 */
9009 tg3_save_pci_state(tp);
9010
9011 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9012 tg3_flag(tp, 5755_PLUS))
9013 tw32(GRC_FASTBOOT_PC, 0);
9014
9015 /*
9016 * We must avoid the readl() that normally takes place.
9017 * It locks machines, causes machine checks, and other
9018 * fun things. So, temporarily disable the 5701
9019 * hardware workaround, while we do the reset.
9020 */
9021 write_op = tp->write32;
9022 if (write_op == tg3_write_flush_reg32)
9023 tp->write32 = tg3_write32;
9024
9025 /* Prevent the irq handler from reading or writing PCI registers
9026 * during chip reset when the memory enable bit in the PCI command
9027 * register may be cleared. The chip does not generate interrupts
9028 * at this time, but the irq handler may still be called due to irq
9029 * sharing or irqpoll.
9030 */
9031 tg3_flag_set(tp, CHIP_RESETTING);
9032 for (i = 0; i < tp->irq_cnt; i++) {
9033 struct tg3_napi *tnapi = &tp->napi[i];
9034 if (tnapi->hw_status) {
9035 tnapi->hw_status->status = 0;
9036 tnapi->hw_status->status_tag = 0;
9037 }
9038 tnapi->last_tag = 0;
9039 tnapi->last_irq_tag = 0;
9040 }
9041 smp_mb();
9042
9043 for (i = 0; i < tp->irq_cnt; i++)
9044 synchronize_irq(tp->napi[i].irq_vec);
9045
9046 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9047 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9048 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9049 }
9050
9051 /* do the reset */
9052 val = GRC_MISC_CFG_CORECLK_RESET;
9053
9054 if (tg3_flag(tp, PCI_EXPRESS)) {
9055 /* Force PCIe 1.0a mode */
9056 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9057 !tg3_flag(tp, 57765_PLUS) &&
9058 tr32(TG3_PCIE_PHY_TSTCTL) ==
9059 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9060 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9061
9062 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9063 tw32(GRC_MISC_CFG, (1 << 29));
9064 val |= (1 << 29);
9065 }
9066 }
9067
9068 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9069 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9070 tw32(GRC_VCPU_EXT_CTRL,
9071 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9072 }
9073
9074 /* Set the clock to the highest frequency to avoid timeouts. With link
9075 * aware mode, the clock speed could be slow and bootcode does not
9076 * complete within the expected time. Override the clock to allow the
9077 * bootcode to finish sooner and then restore it.
9078 */
9079 tg3_override_clk(tp);
9080
9081 /* Manage gphy power for all CPMU-absent PCIe devices. */
9082 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9083 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9084
9085 tw32(GRC_MISC_CFG, val);
9086
9087 /* restore 5701 hardware bug workaround write method */
9088 tp->write32 = write_op;
9089
9090 /* Unfortunately, we have to delay before the PCI read back.
9091 * Some 575X chips even will not respond to a PCI cfg access
9092 * when the reset command is given to the chip.
9093 *
9094 * How do these hardware designers expect things to work
9095 * properly if the PCI write is posted for a long period
9096 * of time? It is always necessary to have some method by
9097 * which a register read back can occur to push the write
9098 * out which does the reset.
9099 *
9100 * For most tg3 variants the trick below was working.
9101 * Ho hum...
9102 */
9103 udelay(120);
9104
9105 /* Flush PCI posted writes. The normal MMIO registers
9106 * are inaccessible at this time so this is the only
9107 * way to do this reliably (actually, this is no longer
9108 * the case, see above). I tried to use indirect
9109 * register read/write but this upset some 5701 variants.
9110 */
9111 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9112
9113 udelay(120);
9114
9115 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9116 u16 val16;
9117
9118 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9119 int j;
9120 u32 cfg_val;
9121
9122 /* Wait for link training to complete. */
9123 for (j = 0; j < 5000; j++)
9124 udelay(100);
9125
9126 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9127 pci_write_config_dword(tp->pdev, 0xc4,
9128 cfg_val | (1 << 15));
9129 }
9130
9131 /* Clear the "no snoop" and "relaxed ordering" bits. */
9132 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9133 /*
9134 * Older PCIe devices only support the 128 byte
9135 * MPS setting. Enforce the restriction.
9136 */
9137 if (!tg3_flag(tp, CPMU_PRESENT))
9138 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
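/* Clearing the 3-bit PAYLOAD field selects MPS encoding 0,
 * i.e. a 128-byte max payload size.
 */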
9139 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9140
9141 /* Clear error status */
9142 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9143 PCI_EXP_DEVSTA_CED |
9144 PCI_EXP_DEVSTA_NFED |
9145 PCI_EXP_DEVSTA_FED |
9146 PCI_EXP_DEVSTA_URD);
9147 }
9148
9149 tg3_restore_pci_state(tp);
9150
9151 tg3_flag_clear(tp, CHIP_RESETTING);
9152 tg3_flag_clear(tp, ERROR_PROCESSED);
9153
9154 val = 0;
9155 if (tg3_flag(tp, 5780_CLASS))
9156 val = tr32(MEMARB_MODE);
9157 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9158
9159 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9160 tg3_stop_fw(tp);
9161 tw32(0x5000, 0x400);
9162 }
9163
9164 if (tg3_flag(tp, IS_SSB_CORE)) {
9165 /*
9166 * BCM4785: In order to avoid repercussions from using the
9167 * potentially defective internal ROM, stop the Rx RISC CPU,
9168 * which is not required for normal operation.
9169 */
9170 tg3_stop_fw(tp);
9171 tg3_halt_cpu(tp, RX_CPU_BASE);
9172 }
9173
9174 err = tg3_poll_fw(tp);
9175 if (err)
9176 return err;
9177
9178 tw32(GRC_MODE, tp->grc_mode);
9179
9180 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9181 val = tr32(0xc4);
9182
9183 tw32(0xc4, val | (1 << 15));
9184 }
9185
9186 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9187 tg3_asic_rev(tp) == ASIC_REV_5705) {
9188 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9189 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9190 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9191 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9192 }
9193
9194 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9195 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9196 val = tp->mac_mode;
9197 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9198 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9199 val = tp->mac_mode;
9200 } else
9201 val = 0;
9202
9203 tw32_f(MAC_MODE, val);
9204 udelay(40);
9205
9206 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9207
9208 tg3_mdio_start(tp);
9209
9210 if (tg3_flag(tp, PCI_EXPRESS) &&
9211 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9212 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9213 !tg3_flag(tp, 57765_PLUS)) {
9214 val = tr32(0x7c00);
9215
9216 tw32(0x7c00, val | (1 << 25));
9217 }
9218
9219 tg3_restore_clk(tp);
9220
9221 /* Reprobe ASF enable state. */
9222 tg3_flag_clear(tp, ENABLE_ASF);
9223 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9224 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9225
9226 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9227 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9228 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9229 u32 nic_cfg;
9230
9231 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9232 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9233 tg3_flag_set(tp, ENABLE_ASF);
9234 tp->last_event_jiffies = jiffies;
9235 if (tg3_flag(tp, 5750_PLUS))
9236 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9237
9238 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9239 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9240 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9241 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9242 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9243 }
9244 }
9245
9246 return 0;
9247 }
9248
9249 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9250 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9251 static void __tg3_set_rx_mode(struct net_device *);
9252
9253 /* tp->lock is held. */
9254 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9255 {
9256 int err;
9257
9258 tg3_stop_fw(tp);
9259
9260 tg3_write_sig_pre_reset(tp, kind);
9261
9262 tg3_abort_hw(tp, silent);
9263 err = tg3_chip_reset(tp);
9264
9265 __tg3_set_mac_addr(tp, false);
9266
9267 tg3_write_sig_legacy(tp, kind);
9268 tg3_write_sig_post_reset(tp, kind);
9269
9270 if (tp->hw_stats) {
9271 /* Save the stats across chip resets... */
9272 tg3_get_nstats(tp, &tp->net_stats_prev);
9273 tg3_get_estats(tp, &tp->estats_prev);
9274
9275 /* And make sure the next sample is new data */
9276 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9277 }
9278
9279 return err;
9280 }
9281
9282 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9283 {
9284 struct tg3 *tp = netdev_priv(dev);
9285 struct sockaddr *addr = p;
9286 int err = 0;
9287 bool skip_mac_1 = false;
9288
9289 if (!is_valid_ether_addr(addr->sa_data))
9290 return -EADDRNOTAVAIL;
9291
9292 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9293
9294 if (!netif_running(dev))
9295 return 0;
9296
9297 if (tg3_flag(tp, ENABLE_ASF)) {
9298 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9299
9300 addr0_high = tr32(MAC_ADDR_0_HIGH);
9301 addr0_low = tr32(MAC_ADDR_0_LOW);
9302 addr1_high = tr32(MAC_ADDR_1_HIGH);
9303 addr1_low = tr32(MAC_ADDR_1_LOW);
9304
9305 /* Skip MAC addr 1 if ASF is using it. */
9306 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9307 !(addr1_high == 0 && addr1_low == 0))
9308 skip_mac_1 = true;
9309 }
9310 spin_lock_bh(&tp->lock);
9311 __tg3_set_mac_addr(tp, skip_mac_1);
9312 __tg3_set_rx_mode(dev);
9313 spin_unlock_bh(&tp->lock);
9314
9315 return err;
9316 }
9317
9318 /* tp->lock is held. */
9319 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9320 dma_addr_t mapping, u32 maxlen_flags,
9321 u32 nic_addr)
9322 {
9323 tg3_write_mem(tp,
9324 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9325 ((u64) mapping >> 32));
9326 tg3_write_mem(tp,
9327 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9328 ((u64) mapping & 0xffffffff));
9329 tg3_write_mem(tp,
9330 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9331 maxlen_flags);
9332
9333 if (!tg3_flag(tp, 5705_PLUS))
9334 tg3_write_mem(tp,
9335 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9336 nic_addr);
9337 }
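/* A BDINFO block in NIC SRAM is four consecutive 32-bit words:
 * host ring address (high word, then low), the maxlen/flags word,
 * and the NIC address word, hence the fixed offsets used above.
 */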
9338 
9340 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9341 {
9342 int i = 0;
9343
9344 if (!tg3_flag(tp, ENABLE_TSS)) {
9345 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9346 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9347 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9348 } else {
9349 tw32(HOSTCC_TXCOL_TICKS, 0);
9350 tw32(HOSTCC_TXMAX_FRAMES, 0);
9351 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9352
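/* Each additional HOSTCC vector owns a block of coalescing
 * registers spaced 0x18 bytes apart, so vector n (n >= 1) is
 * programmed at HOSTCC_TXCOL_TICKS_VEC1 + (n - 1) * 0x18 and the
 * matching FRAMES/MAXF_INT offsets.
 */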
9353 for (; i < tp->txq_cnt; i++) {
9354 u32 reg;
9355
9356 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9357 tw32(reg, ec->tx_coalesce_usecs);
9358 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9359 tw32(reg, ec->tx_max_coalesced_frames);
9360 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9361 tw32(reg, ec->tx_max_coalesced_frames_irq);
9362 }
9363 }
9364
9365 for (; i < tp->irq_max - 1; i++) {
9366 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9367 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9368 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9369 }
9370 }
9371
9372 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9373 {
9374 int i = 0;
9375 u32 limit = tp->rxq_cnt;
9376
9377 if (!tg3_flag(tp, ENABLE_RSS)) {
9378 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9379 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9380 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9381 limit--;
9382 } else {
9383 tw32(HOSTCC_RXCOL_TICKS, 0);
9384 tw32(HOSTCC_RXMAX_FRAMES, 0);
9385 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9386 }
9387
9388 for (; i < limit; i++) {
9389 u32 reg;
9390
9391 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9392 tw32(reg, ec->rx_coalesce_usecs);
9393 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9394 tw32(reg, ec->rx_max_coalesced_frames);
9395 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9396 tw32(reg, ec->rx_max_coalesced_frames_irq);
9397 }
9398
9399 for (; i < tp->irq_max - 1; i++) {
9400 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9401 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9402 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9403 }
9404 }
9405
9406 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9407 {
9408 tg3_coal_tx_init(tp, ec);
9409 tg3_coal_rx_init(tp, ec);
9410
9411 if (!tg3_flag(tp, 5705_PLUS)) {
9412 u32 val = ec->stats_block_coalesce_usecs;
9413
9414 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9415 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9416
9417 if (!tp->link_up)
9418 val = 0;
9419
9420 tw32(HOSTCC_STAT_COAL_TICKS, val);
9421 }
9422 }
9423
9424 /* tp->lock is held. */
9425 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9426 {
9427 u32 txrcb, limit;
9428
9429 /* Disable all transmit rings but the first. */
9430 if (!tg3_flag(tp, 5705_PLUS))
9431 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9432 else if (tg3_flag(tp, 5717_PLUS))
9433 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9434 else if (tg3_flag(tp, 57765_CLASS) ||
9435 tg3_asic_rev(tp) == ASIC_REV_5762)
9436 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9437 else
9438 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9439
9440 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9441 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9442 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9443 BDINFO_FLAGS_DISABLED);
9444 }
9445
9446 /* tp->lock is held. */
9447 static void tg3_tx_rcbs_init(struct tg3 *tp)
9448 {
9449 int i = 0;
9450 u32 txrcb = NIC_SRAM_SEND_RCB;
9451
9452 if (tg3_flag(tp, ENABLE_TSS))
9453 i++;
9454
9455 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9456 struct tg3_napi *tnapi = &tp->napi[i];
9457
9458 if (!tnapi->tx_ring)
9459 continue;
9460
9461 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9462 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9463 NIC_SRAM_TX_BUFFER_DESC);
9464 }
9465 }
9466
9467 /* tp->lock is held. */
9468 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9469 {
9470 u32 rxrcb, limit;
9471
9472 /* Disable all receive return rings but the first. */
9473 if (tg3_flag(tp, 5717_PLUS))
9474 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9475 else if (!tg3_flag(tp, 5705_PLUS))
9476 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9477 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9478 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9479 tg3_flag(tp, 57765_CLASS))
9480 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9481 else
9482 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9483
9484 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9485 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9486 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9487 BDINFO_FLAGS_DISABLED);
9488 }
9489
9490 /* tp->lock is held. */
9491 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9492 {
9493 int i = 0;
9494 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9495
9496 if (tg3_flag(tp, ENABLE_RSS))
9497 i++;
9498
9499 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9500 struct tg3_napi *tnapi = &tp->napi[i];
9501
9502 if (!tnapi->rx_rcb)
9503 continue;
9504
9505 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9506 (tp->rx_ret_ring_mask + 1) <<
9507 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9508 }
9509 }
9510
9511 /* tp->lock is held. */
9512 static void tg3_rings_reset(struct tg3 *tp)
9513 {
9514 int i;
9515 u32 stblk;
9516 struct tg3_napi *tnapi = &tp->napi[0];
9517
9518 tg3_tx_rcbs_disable(tp);
9519
9520 tg3_rx_ret_rcbs_disable(tp);
9521
9522 /* Disable interrupts */
9523 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9524 tp->napi[0].chk_msi_cnt = 0;
9525 tp->napi[0].last_rx_cons = 0;
9526 tp->napi[0].last_tx_cons = 0;
9527
9528 /* Zero mailbox registers. */
9529 if (tg3_flag(tp, SUPPORT_MSIX)) {
9530 for (i = 1; i < tp->irq_max; i++) {
9531 tp->napi[i].tx_prod = 0;
9532 tp->napi[i].tx_cons = 0;
9533 if (tg3_flag(tp, ENABLE_TSS))
9534 tw32_mailbox(tp->napi[i].prodmbox, 0);
9535 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9536 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9537 tp->napi[i].chk_msi_cnt = 0;
9538 tp->napi[i].last_rx_cons = 0;
9539 tp->napi[i].last_tx_cons = 0;
9540 }
9541 if (!tg3_flag(tp, ENABLE_TSS))
9542 tw32_mailbox(tp->napi[0].prodmbox, 0);
9543 } else {
9544 tp->napi[0].tx_prod = 0;
9545 tp->napi[0].tx_cons = 0;
9546 tw32_mailbox(tp->napi[0].prodmbox, 0);
9547 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9548 }
9549
9550 /* Make sure the NIC-based send BD rings are disabled. */
9551 if (!tg3_flag(tp, 5705_PLUS)) {
9552 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9553 for (i = 0; i < 16; i++)
9554 tw32_tx_mbox(mbox + i * 8, 0);
9555 }
9556
9557 /* Clear status block in ram. */
9558 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9559
9560 /* Set status block DMA address */
9561 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9562 ((u64) tnapi->status_mapping >> 32));
9563 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9564 ((u64) tnapi->status_mapping & 0xffffffff));
9565
9566 stblk = HOSTCC_STATBLCK_RING1;
9567
9568 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9569 u64 mapping = (u64)tnapi->status_mapping;
9570 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9571 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9572 stblk += 8;
9573
9574 /* Clear status block in ram. */
9575 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9576 }
9577
9578 tg3_tx_rcbs_init(tp);
9579 tg3_rx_ret_rcbs_init(tp);
9580 }
9581
9582 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9583 {
9584 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9585
9586 if (!tg3_flag(tp, 5750_PLUS) ||
9587 tg3_flag(tp, 5780_CLASS) ||
9588 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9589 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9590 tg3_flag(tp, 57765_PLUS))
9591 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9592 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9593 tg3_asic_rev(tp) == ASIC_REV_5787)
9594 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9595 else
9596 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9597
9598 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9599 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9600
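/* Replenish at the more conservative of what the NIC's BD cache
 * can absorb and 1/8 of the host ring; e.g. with the default
 * rx_pending of 200 the host-side threshold works out to 25 BDs.
 */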
9601 val = min(nic_rep_thresh, host_rep_thresh);
9602 tw32(RCVBDI_STD_THRESH, val);
9603
9604 if (tg3_flag(tp, 57765_PLUS))
9605 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9606
9607 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9608 return;
9609
9610 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9611
9612 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9613
9614 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9615 tw32(RCVBDI_JUMBO_THRESH, val);
9616
9617 if (tg3_flag(tp, 57765_PLUS))
9618 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9619 }
9620
9621 static inline u32 calc_crc(unsigned char *buf, int len)
9622 {
9623 u32 reg;
9624 u32 tmp;
9625 int j, k;
9626
9627 reg = 0xffffffff;
9628
9629 for (j = 0; j < len; j++) {
9630 reg ^= buf[j];
9631
9632 for (k = 0; k < 8; k++) {
9633 tmp = reg & 0x01;
9634
9635 reg >>= 1;
9636
9637 if (tmp)
9638 reg ^= 0xedb88320;
9639 }
9640 }
9641
9642 return ~reg;
9643 }
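/* This is the standard IEEE 802.3 CRC-32 (reflected, initial value
 * 0xffffffff, final complement); as a sanity check,
 * calc_crc("123456789", 9) yields the well-known check value
 * 0xcbf43926.
 */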
9644
9645 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9646 {
9647 /* accept or reject all multicast frames */
9648 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9649 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9650 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9651 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9652 }
9653
9654 static void __tg3_set_rx_mode(struct net_device *dev)
9655 {
9656 struct tg3 *tp = netdev_priv(dev);
9657 u32 rx_mode;
9658
9659 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9660 RX_MODE_KEEP_VLAN_TAG);
9661
9662 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9663 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9664 * flag clear.
9665 */
9666 if (!tg3_flag(tp, ENABLE_ASF))
9667 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9668 #endif
9669
9670 if (dev->flags & IFF_PROMISC) {
9671 /* Promiscuous mode. */
9672 rx_mode |= RX_MODE_PROMISC;
9673 } else if (dev->flags & IFF_ALLMULTI) {
9674 /* Accept all multicast. */
9675 tg3_set_multi(tp, 1);
9676 } else if (netdev_mc_empty(dev)) {
9677 /* Reject all multicast. */
9678 tg3_set_multi(tp, 0);
9679 } else {
9680 /* Accept one or more multicast(s). */
9681 struct netdev_hw_addr *ha;
9682 u32 mc_filter[4] = { 0, };
9683 u32 regidx;
9684 u32 bit;
9685 u32 crc;
9686
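/* The low 7 bits of the complemented CRC pick one of the 128
 * multicast hash bits: bits 6:5 select the register and bits 4:0
 * the bit within it, so e.g. a hash value of 0x6b sets bit 11 of
 * MAC_HASH_REG_3.
 */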
9687 netdev_for_each_mc_addr(ha, dev) {
9688 crc = calc_crc(ha->addr, ETH_ALEN);
9689 bit = ~crc & 0x7f;
9690 regidx = (bit & 0x60) >> 5;
9691 bit &= 0x1f;
9692 mc_filter[regidx] |= (1 << bit);
9693 }
9694
9695 tw32(MAC_HASH_REG_0, mc_filter[0]);
9696 tw32(MAC_HASH_REG_1, mc_filter[1]);
9697 tw32(MAC_HASH_REG_2, mc_filter[2]);
9698 tw32(MAC_HASH_REG_3, mc_filter[3]);
9699 }
9700
9701 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9702 rx_mode |= RX_MODE_PROMISC;
9703 } else if (!(dev->flags & IFF_PROMISC)) {
9704 /* Add all entries to the MAC addr filter list */
9705 int i = 0;
9706 struct netdev_hw_addr *ha;
9707
9708 netdev_for_each_uc_addr(ha, dev) {
9709 __tg3_set_one_mac_addr(tp, ha->addr,
9710 i + TG3_UCAST_ADDR_IDX(tp));
9711 i++;
9712 }
9713 }
9714
9715 if (rx_mode != tp->rx_mode) {
9716 tp->rx_mode = rx_mode;
9717 tw32_f(MAC_RX_MODE, rx_mode);
9718 udelay(10);
9719 }
9720 }
9721
9722 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9723 {
9724 int i;
9725
9726 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9727 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9728 }
9729
9730 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9731 {
9732 int i;
9733
9734 if (!tg3_flag(tp, SUPPORT_MSIX))
9735 return;
9736
9737 if (tp->rxq_cnt == 1) {
9738 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9739 return;
9740 }
9741
9742 /* Validate table against current IRQ count */
9743 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9744 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9745 break;
9746 }
9747
9748 if (i != TG3_RSS_INDIR_TBL_SIZE)
9749 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9750 }
9751
9752 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9753 {
9754 int i = 0;
9755 u32 reg = MAC_RSS_INDIR_TBL_0;
9756
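/* Eight 4-bit indirection entries are packed per 32-bit register,
 * first entry in the most significant nibble; e.g. the entries
 * { 1, 0, 3, 2, 1, 0, 3, 2 } are written out as 0x10321032.
 */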
9757 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9758 u32 val = tp->rss_ind_tbl[i];
9759 i++;
9760 for (; i % 8; i++) {
9761 val <<= 4;
9762 val |= tp->rss_ind_tbl[i];
9763 }
9764 tw32(reg, val);
9765 reg += 4;
9766 }
9767 }
9768
9769 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9770 {
9771 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9772 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9773 else
9774 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9775 }
9776
9777 /* tp->lock is held. */
9778 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9779 {
9780 u32 val, rdmac_mode;
9781 int i, err, limit;
9782 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9783
9784 tg3_disable_ints(tp);
9785
9786 tg3_stop_fw(tp);
9787
9788 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9789
9790 if (tg3_flag(tp, INIT_COMPLETE))
9791 tg3_abort_hw(tp, 1);
9792
9793 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9794 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9795 tg3_phy_pull_config(tp);
9796 tg3_eee_pull_config(tp, NULL);
9797 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9798 }
9799
9800 /* Enable MAC control of LPI */
9801 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9802 tg3_setup_eee(tp);
9803
9804 if (reset_phy)
9805 tg3_phy_reset(tp);
9806
9807 err = tg3_chip_reset(tp);
9808 if (err)
9809 return err;
9810
9811 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9812
9813 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9814 val = tr32(TG3_CPMU_CTRL);
9815 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9816 tw32(TG3_CPMU_CTRL, val);
9817
9818 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9819 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9820 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9821 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9822
9823 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9824 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9825 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9826 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9827
9828 val = tr32(TG3_CPMU_HST_ACC);
9829 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9830 val |= CPMU_HST_ACC_MACCLK_6_25;
9831 tw32(TG3_CPMU_HST_ACC, val);
9832 }
9833
9834 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9835 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9836 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9837 PCIE_PWR_MGMT_L1_THRESH_4MS;
9838 tw32(PCIE_PWR_MGMT_THRESH, val);
9839
9840 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9841 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9842
9843 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9844
9845 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9846 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9847 }
9848
9849 if (tg3_flag(tp, L1PLLPD_EN)) {
9850 u32 grc_mode = tr32(GRC_MODE);
9851
9852 /* Access the lower 1K of PL PCIE block registers. */
9853 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9854 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9855
9856 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9857 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9858 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9859
9860 tw32(GRC_MODE, grc_mode);
9861 }
9862
9863 if (tg3_flag(tp, 57765_CLASS)) {
9864 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9865 u32 grc_mode = tr32(GRC_MODE);
9866
9867 /* Access the lower 1K of PL PCIE block registers. */
9868 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9869 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9870
9871 val = tr32(TG3_PCIE_TLDLPL_PORT +
9872 TG3_PCIE_PL_LO_PHYCTL5);
9873 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9874 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9875
9876 tw32(GRC_MODE, grc_mode);
9877 }
9878
9879 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9880 u32 grc_mode;
9881
9882 /* Fix transmit hangs */
9883 val = tr32(TG3_CPMU_PADRNG_CTL);
9884 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9885 tw32(TG3_CPMU_PADRNG_CTL, val);
9886
9887 grc_mode = tr32(GRC_MODE);
9888
9889 /* Access the lower 1K of DL PCIE block registers. */
9890 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9891 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9892
9893 val = tr32(TG3_PCIE_TLDLPL_PORT +
9894 TG3_PCIE_DL_LO_FTSMAX);
9895 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9896 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9897 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9898
9899 tw32(GRC_MODE, grc_mode);
9900 }
9901
9902 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9903 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9904 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9905 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9906 }
9907
9908 /* This works around an issue with Athlon chipsets on
9909 * B3 tigon3 silicon. This bit has no effect on any
9910 * other revision. But do not set this on PCI Express
9911 * chips and don't even touch the clocks if the CPMU is present.
9912 */
9913 if (!tg3_flag(tp, CPMU_PRESENT)) {
9914 if (!tg3_flag(tp, PCI_EXPRESS))
9915 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9916 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9917 }
9918
9919 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9920 tg3_flag(tp, PCIX_MODE)) {
9921 val = tr32(TG3PCI_PCISTATE);
9922 val |= PCISTATE_RETRY_SAME_DMA;
9923 tw32(TG3PCI_PCISTATE, val);
9924 }
9925
9926 if (tg3_flag(tp, ENABLE_APE)) {
9927 /* Allow reads and writes to the
9928 * APE register and memory space.
9929 */
9930 val = tr32(TG3PCI_PCISTATE);
9931 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9932 PCISTATE_ALLOW_APE_SHMEM_WR |
9933 PCISTATE_ALLOW_APE_PSPACE_WR;
9934 tw32(TG3PCI_PCISTATE, val);
9935 }
9936
9937 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9938 /* Enable some hw fixes. */
9939 val = tr32(TG3PCI_MSI_DATA);
9940 val |= (1 << 26) | (1 << 28) | (1 << 29);
9941 tw32(TG3PCI_MSI_DATA, val);
9942 }
9943
9944 /* Descriptor ring init may make accesses to the
9945 * NIC SRAM area to set up the TX descriptors, so we
9946 * can only do this after the hardware has been
9947 * successfully reset.
9948 */
9949 err = tg3_init_rings(tp);
9950 if (err)
9951 return err;
9952
9953 if (tg3_flag(tp, 57765_PLUS)) {
9954 val = tr32(TG3PCI_DMA_RW_CTRL) &
9955 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9956 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9957 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9958 if (!tg3_flag(tp, 57765_CLASS) &&
9959 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9960 tg3_asic_rev(tp) != ASIC_REV_5762)
9961 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9962 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9963 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9964 tg3_asic_rev(tp) != ASIC_REV_5761) {
9965 /* This value is determined during the probe-time DMA
9966 * engine test, tg3_test_dma.
9967 */
9968 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9969 }
9970
9971 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9972 GRC_MODE_4X_NIC_SEND_RINGS |
9973 GRC_MODE_NO_TX_PHDR_CSUM |
9974 GRC_MODE_NO_RX_PHDR_CSUM);
9975 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9976
9977 /* Pseudo-header checksum is done by hardware logic and not
9978 * the offload processors, so make the chip do the pseudo-
9979 * header checksums on receive. For transmit it is more
9980 * convenient to do the pseudo-header checksum in software
9981 * as Linux does that on transmit for us in all cases.
9982 */
9983 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9984
9985 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9986 if (tp->rxptpctl)
9987 tw32(TG3_RX_PTP_CTL,
9988 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9989
9990 if (tg3_flag(tp, PTP_CAPABLE))
9991 val |= GRC_MODE_TIME_SYNC_ENABLE;
9992
9993 tw32(GRC_MODE, tp->grc_mode | val);
9994
9995 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9996 val = tr32(GRC_MISC_CFG);
9997 val &= ~0xff;
9998 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9999 tw32(GRC_MISC_CFG, val);
10000
10001 /* Initialize MBUF/DESC pool. */
10002 if (tg3_flag(tp, 5750_PLUS)) {
10003 /* Do nothing. */
10004 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10005 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10006 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10007 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10008 else
10009 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10010 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10011 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10012 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10013 int fw_len;
10014
10015 fw_len = tp->fw_len;
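/* Round the TSO firmware length up to the next 128-byte (0x80)
 * boundary before carving it out of the 5705 mbuf pool below.
 */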
10016 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10017 tw32(BUFMGR_MB_POOL_ADDR,
10018 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10019 tw32(BUFMGR_MB_POOL_SIZE,
10020 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10021 }
10022
10023 if (tp->dev->mtu <= ETH_DATA_LEN) {
10024 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10025 tp->bufmgr_config.mbuf_read_dma_low_water);
10026 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10027 tp->bufmgr_config.mbuf_mac_rx_low_water);
10028 tw32(BUFMGR_MB_HIGH_WATER,
10029 tp->bufmgr_config.mbuf_high_water);
10030 } else {
10031 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10032 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10033 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10034 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10035 tw32(BUFMGR_MB_HIGH_WATER,
10036 tp->bufmgr_config.mbuf_high_water_jumbo);
10037 }
10038 tw32(BUFMGR_DMA_LOW_WATER,
10039 tp->bufmgr_config.dma_low_water);
10040 tw32(BUFMGR_DMA_HIGH_WATER,
10041 tp->bufmgr_config.dma_high_water);
10042
10043 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10044 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10045 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10046 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10047 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10048 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10049 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10050 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10051 tw32(BUFMGR_MODE, val);
10052 for (i = 0; i < 2000; i++) {
10053 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10054 break;
10055 udelay(10);
10056 }
10057 if (i >= 2000) {
10058 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10059 return -ENODEV;
10060 }
10061
10062 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10063 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10064
10065 tg3_setup_rxbd_thresholds(tp);
10066
10067 /* Initialize TG3_BDINFOs at:
10068 * RCVDBDI_STD_BD: standard eth size rx ring
10069 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10070 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10071 *
10072 * like so:
10073 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10074 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10075 * ring attribute flags
10076 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10077 *
10078 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10079 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10080 *
10081 * The size of each ring is fixed in the firmware, but the location is
10082 * configurable.
10083 */
10084 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10085 ((u64) tpr->rx_std_mapping >> 32));
10086 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10087 ((u64) tpr->rx_std_mapping & 0xffffffff));
10088 if (!tg3_flag(tp, 5717_PLUS))
10089 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10090 NIC_SRAM_RX_BUFFER_DESC);
10091
10092 /* Disable the mini ring */
10093 if (!tg3_flag(tp, 5705_PLUS))
10094 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10095 BDINFO_FLAGS_DISABLED);
10096
10097 /* Program the jumbo buffer descriptor ring control
10098 * blocks on those devices that have them.
10099 */
10100 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10101 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10102
10103 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10104 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10105 ((u64) tpr->rx_jmb_mapping >> 32));
10106 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10107 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10108 val = TG3_RX_JMB_RING_SIZE(tp) <<
10109 BDINFO_FLAGS_MAXLEN_SHIFT;
10110 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10111 val | BDINFO_FLAGS_USE_EXT_RECV);
10112 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10113 tg3_flag(tp, 57765_CLASS) ||
10114 tg3_asic_rev(tp) == ASIC_REV_5762)
10115 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10116 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10117 } else {
10118 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10119 BDINFO_FLAGS_DISABLED);
10120 }
10121
10122 if (tg3_flag(tp, 57765_PLUS)) {
10123 val = TG3_RX_STD_RING_SIZE(tp);
10124 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10125 val |= (TG3_RX_STD_DMA_SZ << 2);
10126 } else
10127 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10128 } else
10129 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10130
10131 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10132
10133 tpr->rx_std_prod_idx = tp->rx_pending;
10134 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10135
10136 tpr->rx_jmb_prod_idx =
10137 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10138 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10139
10140 tg3_rings_reset(tp);
10141
10142 /* Initialize MAC address and backoff seed. */
10143 __tg3_set_mac_addr(tp, false);
10144
10145 /* MTU + ethernet header + FCS + optional VLAN tag */
10146 tw32(MAC_RX_MTU_SIZE,
10147 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10148
10149 /* The slot time is changed by tg3_setup_phy if we
10150 * run at gigabit with half duplex.
10151 */
10152 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10153 (6 << TX_LENGTHS_IPG_SHIFT) |
10154 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10155
10156 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10157 tg3_asic_rev(tp) == ASIC_REV_5762)
10158 val |= tr32(MAC_TX_LENGTHS) &
10159 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10160 TX_LENGTHS_CNT_DWN_VAL_MSK);
10161
10162 tw32(MAC_TX_LENGTHS, val);
10163
10164 /* Receive rules. */
10165 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10166 tw32(RCVLPC_CONFIG, 0x0181);
10167
10168 /* Calculate RDMAC_MODE setting early, we need it to determine
10169 * the RCVLPC_STATE_ENABLE mask.
10170 */
10171 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10172 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10173 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10174 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10175 RDMAC_MODE_LNGREAD_ENAB);
10176
10177 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10178 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10179
10180 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10181 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10182 tg3_asic_rev(tp) == ASIC_REV_57780)
10183 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10184 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10185 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10186
10187 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10188 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10189 if (tg3_flag(tp, TSO_CAPABLE) &&
10190 tg3_asic_rev(tp) == ASIC_REV_5705) {
10191 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10192 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10193 !tg3_flag(tp, IS_5788)) {
10194 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10195 }
10196 }
10197
10198 if (tg3_flag(tp, PCI_EXPRESS))
10199 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10200
10201 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10202 tp->dma_limit = 0;
10203 if (tp->dev->mtu <= ETH_DATA_LEN) {
10204 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10205 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10206 }
10207 }
10208
10209 if (tg3_flag(tp, HW_TSO_1) ||
10210 tg3_flag(tp, HW_TSO_2) ||
10211 tg3_flag(tp, HW_TSO_3))
10212 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10213
10214 if (tg3_flag(tp, 57765_PLUS) ||
10215 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10216 tg3_asic_rev(tp) == ASIC_REV_57780)
10217 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10218
10219 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10220 tg3_asic_rev(tp) == ASIC_REV_5762)
10221 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10222
10223 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10224 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10225 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10226 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10227 tg3_flag(tp, 57765_PLUS)) {
10228 u32 tgtreg;
10229
10230 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10231 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10232 else
10233 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10234
10235 val = tr32(tgtreg);
10236 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10237 tg3_asic_rev(tp) == ASIC_REV_5762) {
10238 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10239 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10240 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10241 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10242 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10243 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10244 }
10245 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10246 }
10247
10248 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10249 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10250 tg3_asic_rev(tp) == ASIC_REV_5762) {
10251 u32 tgtreg;
10252
10253 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10254 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10255 else
10256 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10257
10258 val = tr32(tgtreg);
10259 tw32(tgtreg, val |
10260 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10261 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10262 }
10263
10264 /* Receive/send statistics. */
10265 if (tg3_flag(tp, 5750_PLUS)) {
10266 val = tr32(RCVLPC_STATS_ENABLE);
10267 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10268 tw32(RCVLPC_STATS_ENABLE, val);
10269 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10270 tg3_flag(tp, TSO_CAPABLE)) {
10271 val = tr32(RCVLPC_STATS_ENABLE);
10272 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10273 tw32(RCVLPC_STATS_ENABLE, val);
10274 } else {
10275 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10276 }
10277 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10278 tw32(SNDDATAI_STATSENAB, 0xffffff);
10279 tw32(SNDDATAI_STATSCTRL,
10280 (SNDDATAI_SCTRL_ENABLE |
10281 SNDDATAI_SCTRL_FASTUPD));
10282
10283 /* Setup host coalescing engine. */
10284 tw32(HOSTCC_MODE, 0);
10285 for (i = 0; i < 2000; i++) {
10286 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10287 break;
10288 udelay(10);
10289 }
10290
10291 __tg3_set_coalesce(tp, &tp->coal);
10292
10293 if (!tg3_flag(tp, 5705_PLUS)) {
10294 /* Status/statistics block address. See tg3_timer,
10295 * the tg3_periodic_fetch_stats call there, and
10296 * tg3_get_stats to see how this works for 5705/5750 chips.
10297 */
10298 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10299 ((u64) tp->stats_mapping >> 32));
10300 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10301 ((u64) tp->stats_mapping & 0xffffffff));
10302 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10303
10304 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10305
10306 /* Clear statistics and status block memory areas */
10307 for (i = NIC_SRAM_STATS_BLK;
10308 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10309 i += sizeof(u32)) {
10310 tg3_write_mem(tp, i, 0);
10311 udelay(40);
10312 }
10313 }
10314
10315 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10316
10317 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10318 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10319 if (!tg3_flag(tp, 5705_PLUS))
10320 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10321
10322 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10323 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10324 /* reset to prevent losing 1st rx packet intermittently */
10325 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10326 udelay(10);
10327 }
10328
10329 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10330 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10331 MAC_MODE_FHDE_ENABLE;
10332 if (tg3_flag(tp, ENABLE_APE))
10333 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10334 if (!tg3_flag(tp, 5705_PLUS) &&
10335 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10336 tg3_asic_rev(tp) != ASIC_REV_5700)
10337 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10338 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10339 udelay(40);
10340
10341 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10342 * If TG3_FLAG_IS_NIC is zero, we should read the
10343 * register to preserve the GPIO settings for LOMs. The GPIOs,
10344 * whether used as inputs or outputs, are set by boot code after
10345 * reset.
10346 */
10347 if (!tg3_flag(tp, IS_NIC)) {
10348 u32 gpio_mask;
10349
10350 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10351 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10352 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10353
10354 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10355 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10356 GRC_LCLCTRL_GPIO_OUTPUT3;
10357
10358 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10359 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10360
10361 tp->grc_local_ctrl &= ~gpio_mask;
10362 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10363
10364 /* GPIO1 must be driven high for eeprom write protect */
10365 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10366 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10367 GRC_LCLCTRL_GPIO_OUTPUT1);
10368 }
10369 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10370 udelay(100);
10371
10372 if (tg3_flag(tp, USING_MSIX)) {
10373 val = tr32(MSGINT_MODE);
10374 val |= MSGINT_MODE_ENABLE;
10375 if (tp->irq_cnt > 1)
10376 val |= MSGINT_MODE_MULTIVEC_EN;
10377 if (!tg3_flag(tp, 1SHOT_MSI))
10378 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10379 tw32(MSGINT_MODE, val);
10380 }
10381
10382 if (!tg3_flag(tp, 5705_PLUS)) {
10383 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10384 udelay(40);
10385 }
10386
10387 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10388 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10389 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10390 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10391 WDMAC_MODE_LNGREAD_ENAB);
10392
10393 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10394 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10395 if (tg3_flag(tp, TSO_CAPABLE) &&
10396 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10397 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10398 /* nothing */
10399 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10400 !tg3_flag(tp, IS_5788)) {
10401 val |= WDMAC_MODE_RX_ACCEL;
10402 }
10403 }
10404
10405 /* Enable host coalescing bug fix */
10406 if (tg3_flag(tp, 5755_PLUS))
10407 val |= WDMAC_MODE_STATUS_TAG_FIX;
10408
10409 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10410 val |= WDMAC_MODE_BURST_ALL_DATA;
10411
10412 tw32_f(WDMAC_MODE, val);
10413 udelay(40);
10414
10415 if (tg3_flag(tp, PCIX_MODE)) {
10416 u16 pcix_cmd;
10417
10418 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10419 &pcix_cmd);
10420 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10421 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10422 pcix_cmd |= PCI_X_CMD_READ_2K;
10423 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10424 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10425 pcix_cmd |= PCI_X_CMD_READ_2K;
10426 }
10427 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10428 pcix_cmd);
10429 }
10430
10431 tw32_f(RDMAC_MODE, rdmac_mode);
10432 udelay(40);
10433
10434 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10435 tg3_asic_rev(tp) == ASIC_REV_5720) {
10436 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10437 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10438 break;
10439 }
10440 if (i < TG3_NUM_RDMA_CHANNELS) {
10441 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10442 val |= tg3_lso_rd_dma_workaround_bit(tp);
10443 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10444 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10445 }
10446 }
10447
10448 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10449 if (!tg3_flag(tp, 5705_PLUS))
10450 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10451
10452 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10453 tw32(SNDDATAC_MODE,
10454 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10455 else
10456 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10457
10458 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10459 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10460 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10461 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10462 val |= RCVDBDI_MODE_LRG_RING_SZ;
10463 tw32(RCVDBDI_MODE, val);
10464 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10465 if (tg3_flag(tp, HW_TSO_1) ||
10466 tg3_flag(tp, HW_TSO_2) ||
10467 tg3_flag(tp, HW_TSO_3))
10468 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10469 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10470 if (tg3_flag(tp, ENABLE_TSS))
10471 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10472 tw32(SNDBDI_MODE, val);
10473 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10474
10475 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10476 err = tg3_load_5701_a0_firmware_fix(tp);
10477 if (err)
10478 return err;
10479 }
10480
10481 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10482 /* Ignore any errors from the firmware download. If the download
10483 * fails, the device will operate with EEE disabled.
10484 */
10485 tg3_load_57766_firmware(tp);
10486 }
10487
10488 if (tg3_flag(tp, TSO_CAPABLE)) {
10489 err = tg3_load_tso_firmware(tp);
10490 if (err)
10491 return err;
10492 }
10493
10494 tp->tx_mode = TX_MODE_ENABLE;
10495
10496 if (tg3_flag(tp, 5755_PLUS) ||
10497 tg3_asic_rev(tp) == ASIC_REV_5906)
10498 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10499
10500 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10501 tg3_asic_rev(tp) == ASIC_REV_5762) {
10502 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10503 tp->tx_mode &= ~val;
10504 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10505 }
10506
10507 tw32_f(MAC_TX_MODE, tp->tx_mode);
10508 udelay(100);
10509
10510 if (tg3_flag(tp, ENABLE_RSS)) {
10511 tg3_rss_write_indir_tbl(tp);
10512
10513 /* Setup the "secret" hash key. */
10514 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10515 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10516 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10517 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10518 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10519 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10520 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10521 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10522 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10523 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10524 }
10525
10526 tp->rx_mode = RX_MODE_ENABLE;
10527 if (tg3_flag(tp, 5755_PLUS))
10528 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10529
10530 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10531 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10532
10533 if (tg3_flag(tp, ENABLE_RSS))
10534 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10535 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10536 RX_MODE_RSS_IPV6_HASH_EN |
10537 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10538 RX_MODE_RSS_IPV4_HASH_EN |
10539 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10540
10541 tw32_f(MAC_RX_MODE, tp->rx_mode);
10542 udelay(10);
10543
10544 tw32(MAC_LED_CTRL, tp->led_ctrl);
10545
10546 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10547 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10548 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10549 udelay(10);
10550 }
10551 tw32_f(MAC_RX_MODE, tp->rx_mode);
10552 udelay(10);
10553
10554 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10555 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10556 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10557 /* Set the drive transmission level to 1.2V, but only
10558 * if the signal pre-emphasis bit is not set. */
10559 val = tr32(MAC_SERDES_CFG);
10560 val &= 0xfffff000;
10561 val |= 0x880;
10562 tw32(MAC_SERDES_CFG, val);
10563 }
10564 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10565 tw32(MAC_SERDES_CFG, 0x616000);
10566 }
10567
10568 /* Prevent chip from dropping frames when flow control
10569 * is enabled.
10570 */
10571 if (tg3_flag(tp, 57765_CLASS))
10572 val = 1;
10573 else
10574 val = 2;
10575 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10576
10577 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10578 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10579 /* Use hardware link auto-negotiation */
10580 tg3_flag_set(tp, HW_AUTONEG);
10581 }
10582
10583 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10584 tg3_asic_rev(tp) == ASIC_REV_5714) {
10585 u32 tmp;
10586
10587 tmp = tr32(SERDES_RX_CTRL);
10588 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10589 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10590 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10591 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10592 }
10593
10594 if (!tg3_flag(tp, USE_PHYLIB)) {
10595 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10596 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10597
10598 err = tg3_setup_phy(tp, false);
10599 if (err)
10600 return err;
10601
10602 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10603 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10604 u32 tmp;
10605
10606 /* Clear CRC stats. */
10607 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10608 tg3_writephy(tp, MII_TG3_TEST1,
10609 tmp | MII_TG3_TEST1_CRC_EN);
10610 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10611 }
10612 }
10613 }
10614
10615 __tg3_set_rx_mode(tp->dev);
10616
10617 /* Initialize receive rules. */
10618 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10619 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10620 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10621 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10622
10623 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10624 limit = 8;
10625 else
10626 limit = 16;
10627 if (tg3_flag(tp, ENABLE_ASF))
10628 limit -= 4;
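/* The cases below fall through deliberately: starting at the case
 * selected by "limit", every rule/value pair from rule (limit - 1)
 * down to rule 4 is cleared. Rules 3 and 2 are intentionally left
 * alone (see the commented-out cases), and rules 1 and 0 were just
 * programmed above.
 */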
10629 switch (limit) {
10630 case 16:
10631 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10632 case 15:
10633 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10634 case 14:
10635 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10636 case 13:
10637 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10638 case 12:
10639 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10640 case 11:
10641 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10642 case 10:
10643 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10644 case 9:
10645 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10646 case 8:
10647 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10648 case 7:
10649 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10650 case 6:
10651 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10652 case 5:
10653 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10654 case 4:
10655 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10656 case 3:
10657 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10658 case 2:
10659 case 1:
10660
10661 default:
10662 break;
10663 }
10664
10665 if (tg3_flag(tp, ENABLE_APE))
10666 /* Write our heartbeat update interval to APE. */
10667 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10668 APE_HOST_HEARTBEAT_INT_DISABLE);
10669
10670 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10671
10672 return 0;
10673 }
10674
10675 /* Called at device open time to get the chip ready for
10676 * packet processing. Invoked with tp->lock held.
10677 */
10678 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10679 {
10680 /* The chip may have just been powered on. If so, the boot code may still
10681 * be running initialization. Wait for it to finish to avoid races in
10682 * accessing the hardware.
10683 */
10684 tg3_enable_register_access(tp);
10685 tg3_poll_fw(tp);
10686
10687 tg3_switch_clocks(tp);
10688
10689 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10690
10691 return tg3_reset_hw(tp, reset_phy);
10692 }
10693
10694 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10695 {
10696 int i;
10697
10698 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10699 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10700
10701 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10702 off += len;
10703
10704 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10705 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10706 memset(ocir, 0, TG3_OCIR_LEN);
10707 }
10708 }
10709
10710 /* sysfs attributes for hwmon */
10711 static ssize_t tg3_show_temp(struct device *dev,
10712 struct device_attribute *devattr, char *buf)
10713 {
10714 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10715 struct tg3 *tp = dev_get_drvdata(dev);
10716 u32 temperature;
10717
10718 spin_lock_bh(&tp->lock);
10719 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10720 sizeof(temperature));
10721 spin_unlock_bh(&tp->lock);
10722 return sprintf(buf, "%u\n", temperature);
10723 }
10724
10725
10726 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10727 TG3_TEMP_SENSOR_OFFSET);
10728 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10729 TG3_TEMP_CAUTION_OFFSET);
10730 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10731 TG3_TEMP_MAX_OFFSET);
10732
10733 static struct attribute *tg3_attrs[] = {
10734 &sensor_dev_attr_temp1_input.dev_attr.attr,
10735 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10736 &sensor_dev_attr_temp1_max.dev_attr.attr,
10737 NULL
10738 };
10739 ATTRIBUTE_GROUPS(tg3);
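/* ATTRIBUTE_GROUPS(tg3) generates the tg3_groups array that
 * hwmon_device_register_with_groups() consumes below. Once the hwmon
 * device is registered, the sensors can be read from user space; a
 * sketch of what that looks like (the hwmon index and the value shown
 * are hypothetical):
 *
 *	$ cat /sys/class/hwmon/hwmon0/temp1_input
 *	45
 *
 * The value is whatever tg3_show_temp() reads from the APE scratchpad
 * at the attribute's offset; no unit conversion is applied here.
 */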
10740
10741 static void tg3_hwmon_close(struct tg3 *tp)
10742 {
10743 if (tp->hwmon_dev) {
10744 hwmon_device_unregister(tp->hwmon_dev);
10745 tp->hwmon_dev = NULL;
10746 }
10747 }
10748
10749 static void tg3_hwmon_open(struct tg3 *tp)
10750 {
10751 int i;
10752 u32 size = 0;
10753 struct pci_dev *pdev = tp->pdev;
10754 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10755
10756 tg3_sd_scan_scratchpad(tp, ocirs);
10757
10758 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10759 if (!ocirs[i].src_data_length)
10760 continue;
10761
10762 size += ocirs[i].src_hdr_length;
10763 size += ocirs[i].src_data_length;
10764 }
10765
10766 if (!size)
10767 return;
10768
10769 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10770 tp, tg3_groups);
10771 if (IS_ERR(tp->hwmon_dev)) {
10772 tp->hwmon_dev = NULL;
10773 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10774 }
10775 }
10776
10777
10778 #define TG3_STAT_ADD32(PSTAT, REG) \
10779 do { u32 __val = tr32(REG); \
10780 (PSTAT)->low += __val; \
10781 if ((PSTAT)->low < __val) \
10782 (PSTAT)->high += 1; \
10783 } while (0)
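/* A sketch of how TG3_STAT_ADD32 expands for one counter, with
 * PSTAT = &sp->tx_octets and REG = MAC_TX_STATS_OCTETS:
 *
 *	u32 __val = tr32(MAC_TX_STATS_OCTETS);
 *	sp->tx_octets.low += __val;
 *	if (sp->tx_octets.low < __val)	// the unsigned 32-bit add wrapped
 *		sp->tx_octets.high += 1;	// carry into the high word
 *
 * This accumulates a 64-bit total out of repeated 32-bit samples;
 * get_stat64() later reassembles the two halves.
 */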
10784
10785 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10786 {
10787 struct tg3_hw_stats *sp = tp->hw_stats;
10788
10789 if (!tp->link_up)
10790 return;
10791
10792 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10793 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10794 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10795 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10796 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10797 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10798 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10799 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10800 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10801 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10802 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10803 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10804 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10805 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10806 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10807 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10808 u32 val;
10809
10810 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10811 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10812 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10813 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10814 }
10815
10816 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10817 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10818 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10819 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10820 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10821 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10822 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10823 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10824 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10825 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10826 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10827 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10828 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10829 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10830
10831 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10832 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10833 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10834 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10835 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10836 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10837 } else {
10838 u32 val = tr32(HOSTCC_FLOW_ATTN);
10839 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10840 if (val) {
10841 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10842 sp->rx_discards.low += val;
10843 if (sp->rx_discards.low < val)
10844 sp->rx_discards.high += 1;
10845 }
10846 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10847 }
10848 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10849 }
10850
10851 static void tg3_chk_missed_msi(struct tg3 *tp)
10852 {
10853 u32 i;
10854
10855 for (i = 0; i < tp->irq_cnt; i++) {
10856 struct tg3_napi *tnapi = &tp->napi[i];
10857
10858 if (tg3_has_work(tnapi)) {
10859 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10860 tnapi->last_tx_cons == tnapi->tx_cons) {
10861 if (tnapi->chk_msi_cnt < 1) {
10862 tnapi->chk_msi_cnt++;
10863 return;
10864 }
10865 tg3_msi(0, tnapi);
10866 }
10867 }
10868 tnapi->chk_msi_cnt = 0;
10869 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10870 tnapi->last_tx_cons = tnapi->tx_cons;
10871 }
10872 }
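/* The check above is the missed-MSI workaround: if a vector reports
 * pending work but its rx/tx consumer indices have not moved since the
 * previous timer tick, the interrupt is assumed lost and tg3_msi() is
 * invoked by hand to restart processing. chk_msi_cnt gives each vector
 * one tick of grace before the handler is faked.
 */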
10873
10874 static void tg3_timer(unsigned long __opaque)
10875 {
10876 struct tg3 *tp = (struct tg3 *) __opaque;
10877
10878 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10879 goto restart_timer;
10880
10881 spin_lock(&tp->lock);
10882
10883 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10884 tg3_flag(tp, 57765_CLASS))
10885 tg3_chk_missed_msi(tp);
10886
10887 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10888 /* BCM4785: Flush posted writes from GbE to host memory. */
10889 tr32(HOSTCC_MODE);
10890 }
10891
10892 if (!tg3_flag(tp, TAGGED_STATUS)) {
10893 /* All of this garbage is because, when using non-tagged
10894 * IRQ status, the mailbox/status_block protocol the chip
10895 * uses with the CPU is race prone.
10896 */
10897 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10898 tw32(GRC_LOCAL_CTRL,
10899 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10900 } else {
10901 tw32(HOSTCC_MODE, tp->coalesce_mode |
10902 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10903 }
10904
10905 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10906 spin_unlock(&tp->lock);
10907 tg3_reset_task_schedule(tp);
10908 goto restart_timer;
10909 }
10910 }
10911
10912 /* This part only runs once per second. */
10913 if (!--tp->timer_counter) {
10914 if (tg3_flag(tp, 5705_PLUS))
10915 tg3_periodic_fetch_stats(tp);
10916
10917 if (tp->setlpicnt && !--tp->setlpicnt)
10918 tg3_phy_eee_enable(tp);
10919
10920 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10921 u32 mac_stat;
10922 int phy_event;
10923
10924 mac_stat = tr32(MAC_STATUS);
10925
10926 phy_event = 0;
10927 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10928 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10929 phy_event = 1;
10930 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10931 phy_event = 1;
10932
10933 if (phy_event)
10934 tg3_setup_phy(tp, false);
10935 } else if (tg3_flag(tp, POLL_SERDES)) {
10936 u32 mac_stat = tr32(MAC_STATUS);
10937 int need_setup = 0;
10938
10939 if (tp->link_up &&
10940 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10941 need_setup = 1;
10942 }
10943 if (!tp->link_up &&
10944 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10945 MAC_STATUS_SIGNAL_DET))) {
10946 need_setup = 1;
10947 }
10948 if (need_setup) {
10949 if (!tp->serdes_counter) {
10950 tw32_f(MAC_MODE,
10951 (tp->mac_mode &
10952 ~MAC_MODE_PORT_MODE_MASK));
10953 udelay(40);
10954 tw32_f(MAC_MODE, tp->mac_mode);
10955 udelay(40);
10956 }
10957 tg3_setup_phy(tp, false);
10958 }
10959 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10960 tg3_flag(tp, 5780_CLASS)) {
10961 tg3_serdes_parallel_detect(tp);
10962 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
10963 u32 cpmu = tr32(TG3_CPMU_STATUS);
10964 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
10965 TG3_CPMU_STATUS_LINK_MASK);
10966
10967 if (link_up != tp->link_up)
10968 tg3_setup_phy(tp, false);
10969 }
10970
10971 tp->timer_counter = tp->timer_multiplier;
10972 }
10973
10974 /* The heartbeat is only sent once every 2 seconds.
10975 *
10976 * The heartbeat is to tell the ASF firmware that the host
10977 * driver is still alive. In the event that the OS crashes,
10978 * ASF needs to reset the hardware to free up the FIFO space
10979 * that may be filled with rx packets destined for the host.
10980 * If the FIFO is full, ASF will no longer function properly.
10981 *
10982 * Unintended resets have been reported on real-time kernels,
10983 * where the timer doesn't run on time. Netpoll will also have
10984 * the same problem.
10985 *
10986 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10987 * to check the ring condition when the heartbeat is expiring
10988 * before doing the reset. This will prevent most unintended
10989 * resets.
10990 */
10991 if (!--tp->asf_counter) {
10992 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10993 tg3_wait_for_event_ack(tp);
10994
10995 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10996 FWCMD_NICDRV_ALIVE3);
10997 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10998 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10999 TG3_FW_UPDATE_TIMEOUT_SEC);
11000
11001 tg3_generate_fw_event(tp);
11002 }
11003 tp->asf_counter = tp->asf_multiplier;
11004 }
11005
11006 spin_unlock(&tp->lock);
11007
11008 restart_timer:
11009 tp->timer.expires = jiffies + tp->timer_offset;
11010 add_timer(&tp->timer);
11011 }
11012
11013 static void tg3_timer_init(struct tg3 *tp)
11014 {
11015 if (tg3_flag(tp, TAGGED_STATUS) &&
11016 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11017 !tg3_flag(tp, 57765_CLASS))
11018 tp->timer_offset = HZ;
11019 else
11020 tp->timer_offset = HZ / 10;
11021
11022 BUG_ON(tp->timer_offset > HZ);
11023
11024 tp->timer_multiplier = (HZ / tp->timer_offset);
11025 tp->asf_multiplier = (HZ / tp->timer_offset) *
11026 TG3_FW_UPDATE_FREQ_SEC;
11027
11028 init_timer(&tp->timer);
11029 tp->timer.data = (unsigned long) tp;
11030 tp->timer.function = tg3_timer;
11031 }
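/* Worked example of the arithmetic above, assuming HZ == 1000 on a
 * chip without tagged status: timer_offset = HZ / 10 = 100 jiffies, so
 * tg3_timer() runs ten times per second; timer_multiplier = 10, so the
 * "once per second" block in tg3_timer() executes every tenth tick;
 * and with TG3_FW_UPDATE_FREQ_SEC == 2 (the two-second heartbeat
 * described in tg3_timer()), asf_multiplier = 20 ticks between ASF
 * heartbeats.
 */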
11032
11033 static void tg3_timer_start(struct tg3 *tp)
11034 {
11035 tp->asf_counter = tp->asf_multiplier;
11036 tp->timer_counter = tp->timer_multiplier;
11037
11038 tp->timer.expires = jiffies + tp->timer_offset;
11039 add_timer(&tp->timer);
11040 }
11041
11042 static void tg3_timer_stop(struct tg3 *tp)
11043 {
11044 del_timer_sync(&tp->timer);
11045 }
11046
11047 /* Restart hardware after configuration changes, self-test, etc.
11048 * Invoked with tp->lock held.
11049 */
11050 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11051 __releases(tp->lock)
11052 __acquires(tp->lock)
11053 {
11054 int err;
11055
11056 err = tg3_init_hw(tp, reset_phy);
11057 if (err) {
11058 netdev_err(tp->dev,
11059 "Failed to re-initialize device, aborting\n");
11060 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11061 tg3_full_unlock(tp);
11062 tg3_timer_stop(tp);
11063 tp->irq_sync = 0;
11064 tg3_napi_enable(tp);
11065 dev_close(tp->dev);
11066 tg3_full_lock(tp, 0);
11067 }
11068 return err;
11069 }
11070
11071 static void tg3_reset_task(struct work_struct *work)
11072 {
11073 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11074 int err;
11075
11076 tg3_full_lock(tp, 0);
11077
11078 if (!netif_running(tp->dev)) {
11079 tg3_flag_clear(tp, RESET_TASK_PENDING);
11080 tg3_full_unlock(tp);
11081 return;
11082 }
11083
11084 tg3_full_unlock(tp);
11085
11086 tg3_phy_stop(tp);
11087
11088 tg3_netif_stop(tp);
11089
11090 tg3_full_lock(tp, 1);
11091
11092 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11093 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11094 tp->write32_rx_mbox = tg3_write_flush_reg32;
11095 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11096 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11097 }
11098
11099 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11100 err = tg3_init_hw(tp, true);
11101 if (err)
11102 goto out;
11103
11104 tg3_netif_start(tp);
11105
11106 out:
11107 tg3_full_unlock(tp);
11108
11109 if (!err)
11110 tg3_phy_start(tp);
11111
11112 tg3_flag_clear(tp, RESET_TASK_PENDING);
11113 }
11114
11115 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11116 {
11117 irq_handler_t fn;
11118 unsigned long flags;
11119 char *name;
11120 struct tg3_napi *tnapi = &tp->napi[irq_num];
11121
11122 if (tp->irq_cnt == 1)
11123 name = tp->dev->name;
11124 else {
11125 name = &tnapi->irq_lbl[0];
11126 if (tnapi->tx_buffers && tnapi->rx_rcb)
11127 snprintf(name, IFNAMSIZ,
11128 "%s-txrx-%d", tp->dev->name, irq_num);
11129 else if (tnapi->tx_buffers)
11130 snprintf(name, IFNAMSIZ,
11131 "%s-tx-%d", tp->dev->name, irq_num);
11132 else if (tnapi->rx_rcb)
11133 snprintf(name, IFNAMSIZ,
11134 "%s-rx-%d", tp->dev->name, irq_num);
11135 else
11136 snprintf(name, IFNAMSIZ,
11137 "%s-%d", tp->dev->name, irq_num);
11138 name[IFNAMSIZ-1] = 0;
11139 }
11140
11141 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11142 fn = tg3_msi;
11143 if (tg3_flag(tp, 1SHOT_MSI))
11144 fn = tg3_msi_1shot;
11145 flags = 0;
11146 } else {
11147 fn = tg3_interrupt;
11148 if (tg3_flag(tp, TAGGED_STATUS))
11149 fn = tg3_interrupt_tagged;
11150 flags = IRQF_SHARED;
11151 }
11152
11153 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11154 }
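/* Illustration of the IRQ labels built above for a device named "eth0"
 * with more than one vector: a vector serving both rings shows up as
 * "eth0-txrx-1" in /proc/interrupts, a TX-only vector as "eth0-tx-1",
 * an RX-only vector as "eth0-rx-1", and a vector serving neither
 * (e.g. the link-only vector 0 in multiqueue MSI-X mode) as "eth0-0".
 * A single-vector setup keeps the plain "eth0" name.
 */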
11155
11156 static int tg3_test_interrupt(struct tg3 *tp)
11157 {
11158 struct tg3_napi *tnapi = &tp->napi[0];
11159 struct net_device *dev = tp->dev;
11160 int err, i, intr_ok = 0;
11161 u32 val;
11162
11163 if (!netif_running(dev))
11164 return -ENODEV;
11165
11166 tg3_disable_ints(tp);
11167
11168 free_irq(tnapi->irq_vec, tnapi);
11169
11170 /*
11171 * Turn off MSI one-shot mode. Otherwise this test has no
11172 * way to observe whether the interrupt was delivered.
11173 */
11174 if (tg3_flag(tp, 57765_PLUS)) {
11175 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11176 tw32(MSGINT_MODE, val);
11177 }
11178
11179 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11180 IRQF_SHARED, dev->name, tnapi);
11181 if (err)
11182 return err;
11183
11184 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11185 tg3_enable_ints(tp);
11186
11187 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11188 tnapi->coal_now);
11189
11190 for (i = 0; i < 5; i++) {
11191 u32 int_mbox, misc_host_ctrl;
11192
11193 int_mbox = tr32_mailbox(tnapi->int_mbox);
11194 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11195
11196 if ((int_mbox != 0) ||
11197 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11198 intr_ok = 1;
11199 break;
11200 }
11201
11202 if (tg3_flag(tp, 57765_PLUS) &&
11203 tnapi->hw_status->status_tag != tnapi->last_tag)
11204 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11205
11206 msleep(10);
11207 }
11208
11209 tg3_disable_ints(tp);
11210
11211 free_irq(tnapi->irq_vec, tnapi);
11212
11213 err = tg3_request_irq(tp, 0);
11214
11215 if (err)
11216 return err;
11217
11218 if (intr_ok) {
11219 /* Re-enable MSI one-shot mode. */
11220 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11221 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11222 tw32(MSGINT_MODE, val);
11223 }
11224 return 0;
11225 }
11226
11227 return -EIO;
11228 }
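/* Summary of the test above: tg3_test_isr is swapped in, the
 * coalescing engine is forced to raise an interrupt via
 * HOSTCC_MODE_NOW, and the loop then polls for up to roughly 50 ms
 * (5 x msleep(10)) for either a non-zero interrupt mailbox or the
 * MISC_HOST_CTRL_MASK_PCI_INT bit -- either observation means the
 * interrupt was actually delivered.
 */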
11229
11230 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
11231 * mode is successfully restored.
11232 */
11233 static int tg3_test_msi(struct tg3 *tp)
11234 {
11235 int err;
11236 u16 pci_cmd;
11237
11238 if (!tg3_flag(tp, USING_MSI))
11239 return 0;
11240
11241 /* Turn off SERR reporting in case MSI terminates with Master
11242 * Abort.
11243 */
11244 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11245 pci_write_config_word(tp->pdev, PCI_COMMAND,
11246 pci_cmd & ~PCI_COMMAND_SERR);
11247
11248 err = tg3_test_interrupt(tp);
11249
11250 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11251
11252 if (!err)
11253 return 0;
11254
11255 /* other failures */
11256 if (err != -EIO)
11257 return err;
11258
11259 /* MSI test failed, go back to INTx mode */
11260 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11261 "to INTx mode. Please report this failure to the PCI "
11262 "maintainer and include system chipset information\n");
11263
11264 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11265
11266 pci_disable_msi(tp->pdev);
11267
11268 tg3_flag_clear(tp, USING_MSI);
11269 tp->napi[0].irq_vec = tp->pdev->irq;
11270
11271 err = tg3_request_irq(tp, 0);
11272 if (err)
11273 return err;
11274
11275 /* Need to reset the chip because the MSI cycle may have terminated
11276 * with Master Abort.
11277 */
11278 tg3_full_lock(tp, 1);
11279
11280 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11281 err = tg3_init_hw(tp, true);
11282
11283 tg3_full_unlock(tp);
11284
11285 if (err)
11286 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11287
11288 return err;
11289 }
11290
11291 static int tg3_request_firmware(struct tg3 *tp)
11292 {
11293 const struct tg3_firmware_hdr *fw_hdr;
11294
11295 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11296 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11297 tp->fw_needed);
11298 return -ENOENT;
11299 }
11300
11301 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11302
11303 /* The firmware blob starts with version numbers, followed by the
11304 * start address and the _full_ length including BSS sections
11305 * (which must be longer than the actual data, of course).
11306 */
11307
11308 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11309 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11310 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11311 tp->fw_len, tp->fw_needed);
11312 release_firmware(tp->fw);
11313 tp->fw = NULL;
11314 return -EINVAL;
11315 }
11316
11317 /* We no longer need firmware; we have it. */
11318 tp->fw_needed = NULL;
11319 return 0;
11320 }
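/* A sketch of the header this function parses. Only the ->len access
 * above is taken from the code here; the other field names and the
 * exact layout are assumptions:
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;		// firmware version numbers
 *		__be32 base_addr;	// load address on the NIC
 *		__be32 len;		// full length, BSS included
 *	};
 *
 * Since tp->fw_len comes straight from the header, the sanity check
 * above only rejects blobs that declare a length smaller than the
 * payload actually present (tp->fw->size - TG3_FW_HDR_LEN).
 */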
11321
11322 static u32 tg3_irq_count(struct tg3 *tp)
11323 {
11324 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11325
11326 if (irq_cnt > 1) {
11327 /* We want as many rx rings enabled as there are CPUs.
11328 * In multiqueue MSI-X mode, the first MSI-X vector
11329 * only deals with link interrupts, etc, so we add
11330 * one to the number of vectors we are requesting.
11331 */
11332 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11333 }
11334
11335 return irq_cnt;
11336 }
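/* Worked example: with four rx queues and one tx queue requested,
 * irq_cnt starts at max(4, 1) = 4 and becomes min(4 + 1, tp->irq_max)
 * -- the extra vector is the link/misc vector described in the comment
 * above.
 */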
11337
11338 static bool tg3_enable_msix(struct tg3 *tp)
11339 {
11340 int i, rc;
11341 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11342
11343 tp->txq_cnt = tp->txq_req;
11344 tp->rxq_cnt = tp->rxq_req;
11345 if (!tp->rxq_cnt)
11346 tp->rxq_cnt = netif_get_num_default_rss_queues();
11347 if (tp->rxq_cnt > tp->rxq_max)
11348 tp->rxq_cnt = tp->rxq_max;
11349
11350 /* Disable multiple TX rings by default. Simple round-robin hardware
11351 * scheduling of the TX rings can cause starvation of rings with
11352 * small packets when other rings have TSO or jumbo packets.
11353 */
11354 if (!tp->txq_req)
11355 tp->txq_cnt = 1;
11356
11357 tp->irq_cnt = tg3_irq_count(tp);
11358
11359 for (i = 0; i < tp->irq_max; i++) {
11360 msix_ent[i].entry = i;
11361 msix_ent[i].vector = 0;
11362 }
11363
11364 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11365 if (rc < 0) {
11366 return false;
11367 } else if (rc != 0) {
11368 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11369 return false;
11370 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11371 tp->irq_cnt, rc);
11372 tp->irq_cnt = rc;
11373 tp->rxq_cnt = max(rc - 1, 1);
11374 if (tp->txq_cnt)
11375 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11376 }
11377
11378 for (i = 0; i < tp->irq_max; i++)
11379 tp->napi[i].irq_vec = msix_ent[i].vector;
11380
11381 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11382 pci_disable_msix(tp->pdev);
11383 return false;
11384 }
11385
11386 if (tp->irq_cnt == 1)
11387 return true;
11388
11389 tg3_flag_set(tp, ENABLE_RSS);
11390
11391 if (tp->txq_cnt > 1)
11392 tg3_flag_set(tp, ENABLE_TSS);
11393
11394 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11395
11396 return true;
11397 }
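/* Note on the retry above: with this kernel's pci_enable_msix(), a
 * positive return value means the request could not be granted but
 * that many vectors are available, so the code immediately retries
 * with the advertised count and then trims rxq_cnt/txq_cnt to match
 * whatever was actually allocated.
 */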
11398
11399 static void tg3_ints_init(struct tg3 *tp)
11400 {
11401 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11402 !tg3_flag(tp, TAGGED_STATUS)) {
11403 /* All MSI-supporting chips should support tagged
11404 * status. Assert that this is the case.
11405 */
11406 netdev_warn(tp->dev,
11407 "MSI without TAGGED_STATUS? Not using MSI\n");
11408 goto defcfg;
11409 }
11410
11411 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11412 tg3_flag_set(tp, USING_MSIX);
11413 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11414 tg3_flag_set(tp, USING_MSI);
11415
11416 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11417 u32 msi_mode = tr32(MSGINT_MODE);
11418 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11419 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11420 if (!tg3_flag(tp, 1SHOT_MSI))
11421 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11422 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11423 }
11424 defcfg:
11425 if (!tg3_flag(tp, USING_MSIX)) {
11426 tp->irq_cnt = 1;
11427 tp->napi[0].irq_vec = tp->pdev->irq;
11428 }
11429
11430 if (tp->irq_cnt == 1) {
11431 tp->txq_cnt = 1;
11432 tp->rxq_cnt = 1;
11433 netif_set_real_num_tx_queues(tp->dev, 1);
11434 netif_set_real_num_rx_queues(tp->dev, 1);
11435 }
11436 }
11437
11438 static void tg3_ints_fini(struct tg3 *tp)
11439 {
11440 if (tg3_flag(tp, USING_MSIX))
11441 pci_disable_msix(tp->pdev);
11442 else if (tg3_flag(tp, USING_MSI))
11443 pci_disable_msi(tp->pdev);
11444 tg3_flag_clear(tp, USING_MSI);
11445 tg3_flag_clear(tp, USING_MSIX);
11446 tg3_flag_clear(tp, ENABLE_RSS);
11447 tg3_flag_clear(tp, ENABLE_TSS);
11448 }
11449
11450 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11451 bool init)
11452 {
11453 struct net_device *dev = tp->dev;
11454 int i, err;
11455
11456 /*
11457 * Set up interrupts first so we know how
11458 * many NAPI resources to allocate.
11459 */
11460 tg3_ints_init(tp);
11461
11462 tg3_rss_check_indir_tbl(tp);
11463
11464 /* The placement of this call is tied
11465 * to the setup and use of Host TX descriptors.
11466 */
11467 err = tg3_alloc_consistent(tp);
11468 if (err)
11469 goto out_ints_fini;
11470
11471 tg3_napi_init(tp);
11472
11473 tg3_napi_enable(tp);
11474
11475 for (i = 0; i < tp->irq_cnt; i++) {
11476 struct tg3_napi *tnapi = &tp->napi[i];
11477 err = tg3_request_irq(tp, i);
11478 if (err) {
11479 for (i--; i >= 0; i--) {
11480 tnapi = &tp->napi[i];
11481 free_irq(tnapi->irq_vec, tnapi);
11482 }
11483 goto out_napi_fini;
11484 }
11485 }
11486
11487 tg3_full_lock(tp, 0);
11488
11489 if (init)
11490 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11491
11492 err = tg3_init_hw(tp, reset_phy);
11493 if (err) {
11494 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11495 tg3_free_rings(tp);
11496 }
11497
11498 tg3_full_unlock(tp);
11499
11500 if (err)
11501 goto out_free_irq;
11502
11503 if (test_irq && tg3_flag(tp, USING_MSI)) {
11504 err = tg3_test_msi(tp);
11505
11506 if (err) {
11507 tg3_full_lock(tp, 0);
11508 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11509 tg3_free_rings(tp);
11510 tg3_full_unlock(tp);
11511
11512 goto out_napi_fini;
11513 }
11514
11515 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11516 u32 val = tr32(PCIE_TRANSACTION_CFG);
11517
11518 tw32(PCIE_TRANSACTION_CFG,
11519 val | PCIE_TRANS_CFG_1SHOT_MSI);
11520 }
11521 }
11522
11523 tg3_phy_start(tp);
11524
11525 tg3_hwmon_open(tp);
11526
11527 tg3_full_lock(tp, 0);
11528
11529 tg3_timer_start(tp);
11530 tg3_flag_set(tp, INIT_COMPLETE);
11531 tg3_enable_ints(tp);
11532
11533 if (init)
11534 tg3_ptp_init(tp);
11535 else
11536 tg3_ptp_resume(tp);
11537
11538
11539 tg3_full_unlock(tp);
11540
11541 netif_tx_start_all_queues(dev);
11542
11543 /*
11544 * Reset the loopback feature if it was turned on while the device was
11545 * down, and make sure that it's configured properly now.
11546 */
11547 if (dev->features & NETIF_F_LOOPBACK)
11548 tg3_set_loopback(dev, dev->features);
11549
11550 return 0;
11551
11552 out_free_irq:
11553 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11554 struct tg3_napi *tnapi = &tp->napi[i];
11555 free_irq(tnapi->irq_vec, tnapi);
11556 }
11557
11558 out_napi_fini:
11559 tg3_napi_disable(tp);
11560 tg3_napi_fini(tp);
11561 tg3_free_consistent(tp);
11562
11563 out_ints_fini:
11564 tg3_ints_fini(tp);
11565
11566 return err;
11567 }
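/* The error unwinding above runs in reverse order of setup: IRQs are
 * freed first, then the NAPI contexts are disabled and torn down along
 * with the DMA-consistent memory, and finally the MSI/MSI-X state is
 * released in tg3_ints_fini(). tg3_stop() below releases the same
 * resources on the normal shutdown path.
 */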
11568
11569 static void tg3_stop(struct tg3 *tp)
11570 {
11571 int i;
11572
11573 tg3_reset_task_cancel(tp);
11574 tg3_netif_stop(tp);
11575
11576 tg3_timer_stop(tp);
11577
11578 tg3_hwmon_close(tp);
11579
11580 tg3_phy_stop(tp);
11581
11582 tg3_full_lock(tp, 1);
11583
11584 tg3_disable_ints(tp);
11585
11586 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11587 tg3_free_rings(tp);
11588 tg3_flag_clear(tp, INIT_COMPLETE);
11589
11590 tg3_full_unlock(tp);
11591
11592 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11593 struct tg3_napi *tnapi = &tp->napi[i];
11594 free_irq(tnapi->irq_vec, tnapi);
11595 }
11596
11597 tg3_ints_fini(tp);
11598
11599 tg3_napi_fini(tp);
11600
11601 tg3_free_consistent(tp);
11602 }
11603
11604 static int tg3_open(struct net_device *dev)
11605 {
11606 struct tg3 *tp = netdev_priv(dev);
11607 int err;
11608
11609 if (tp->fw_needed) {
11610 err = tg3_request_firmware(tp);
11611 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11612 if (err) {
11613 netdev_warn(tp->dev, "EEE capability disabled\n");
11614 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11615 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11616 netdev_warn(tp->dev, "EEE capability restored\n");
11617 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11618 }
11619 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11620 if (err)
11621 return err;
11622 } else if (err) {
11623 netdev_warn(tp->dev, "TSO capability disabled\n");
11624 tg3_flag_clear(tp, TSO_CAPABLE);
11625 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11626 netdev_notice(tp->dev, "TSO capability restored\n");
11627 tg3_flag_set(tp, TSO_CAPABLE);
11628 }
11629 }
11630
11631 tg3_carrier_off(tp);
11632
11633 err = tg3_power_up(tp);
11634 if (err)
11635 return err;
11636
11637 tg3_full_lock(tp, 0);
11638
11639 tg3_disable_ints(tp);
11640 tg3_flag_clear(tp, INIT_COMPLETE);
11641
11642 tg3_full_unlock(tp);
11643
11644 err = tg3_start(tp,
11645 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11646 true, true);
11647 if (err) {
11648 tg3_frob_aux_power(tp, false);
11649 pci_set_power_state(tp->pdev, PCI_D3hot);
11650 }
11651
11652 if (tg3_flag(tp, PTP_CAPABLE)) {
11653 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11654 &tp->pdev->dev);
11655 if (IS_ERR(tp->ptp_clock))
11656 tp->ptp_clock = NULL;
11657 }
11658
11659 return err;
11660 }
11661
11662 static int tg3_close(struct net_device *dev)
11663 {
11664 struct tg3 *tp = netdev_priv(dev);
11665
11666 tg3_ptp_fini(tp);
11667
11668 tg3_stop(tp);
11669
11670 /* Clear stats across close / open calls */
11671 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11672 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11673
11674 if (pci_device_is_present(tp->pdev)) {
11675 tg3_power_down_prepare(tp);
11676
11677 tg3_carrier_off(tp);
11678 }
11679 return 0;
11680 }
11681
11682 static inline u64 get_stat64(tg3_stat64_t *val)
11683 {
11684 return ((u64)val->high << 32) | ((u64)val->low);
11685 }
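/* Example: a counter left as { .high = 0x1, .low = 0x80000000 } by
 * TG3_STAT_ADD32() yields ((u64)0x1 << 32) | 0x80000000 = 0x180000000,
 * the 64-bit total reassembled from the two 32-bit halves.
 */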
11686
11687 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11688 {
11689 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11690
11691 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11692 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11693 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11694 u32 val;
11695
11696 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11697 tg3_writephy(tp, MII_TG3_TEST1,
11698 val | MII_TG3_TEST1_CRC_EN);
11699 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11700 } else
11701 val = 0;
11702
11703 tp->phy_crc_errors += val;
11704
11705 return tp->phy_crc_errors;
11706 }
11707
11708 return get_stat64(&hw_stats->rx_fcs_errors);
11709 }
11710
11711 #define ESTAT_ADD(member) \
11712 estats->member = old_estats->member + \
11713 get_stat64(&hw_stats->member)
11714
11715 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11716 {
11717 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11718 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11719
11720 ESTAT_ADD(rx_octets);
11721 ESTAT_ADD(rx_fragments);
11722 ESTAT_ADD(rx_ucast_packets);
11723 ESTAT_ADD(rx_mcast_packets);
11724 ESTAT_ADD(rx_bcast_packets);
11725 ESTAT_ADD(rx_fcs_errors);
11726 ESTAT_ADD(rx_align_errors);
11727 ESTAT_ADD(rx_xon_pause_rcvd);
11728 ESTAT_ADD(rx_xoff_pause_rcvd);
11729 ESTAT_ADD(rx_mac_ctrl_rcvd);
11730 ESTAT_ADD(rx_xoff_entered);
11731 ESTAT_ADD(rx_frame_too_long_errors);
11732 ESTAT_ADD(rx_jabbers);
11733 ESTAT_ADD(rx_undersize_packets);
11734 ESTAT_ADD(rx_in_length_errors);
11735 ESTAT_ADD(rx_out_length_errors);
11736 ESTAT_ADD(rx_64_or_less_octet_packets);
11737 ESTAT_ADD(rx_65_to_127_octet_packets);
11738 ESTAT_ADD(rx_128_to_255_octet_packets);
11739 ESTAT_ADD(rx_256_to_511_octet_packets);
11740 ESTAT_ADD(rx_512_to_1023_octet_packets);
11741 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11742 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11743 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11744 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11745 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11746
11747 ESTAT_ADD(tx_octets);
11748 ESTAT_ADD(tx_collisions);
11749 ESTAT_ADD(tx_xon_sent);
11750 ESTAT_ADD(tx_xoff_sent);
11751 ESTAT_ADD(tx_flow_control);
11752 ESTAT_ADD(tx_mac_errors);
11753 ESTAT_ADD(tx_single_collisions);
11754 ESTAT_ADD(tx_mult_collisions);
11755 ESTAT_ADD(tx_deferred);
11756 ESTAT_ADD(tx_excessive_collisions);
11757 ESTAT_ADD(tx_late_collisions);
11758 ESTAT_ADD(tx_collide_2times);
11759 ESTAT_ADD(tx_collide_3times);
11760 ESTAT_ADD(tx_collide_4times);
11761 ESTAT_ADD(tx_collide_5times);
11762 ESTAT_ADD(tx_collide_6times);
11763 ESTAT_ADD(tx_collide_7times);
11764 ESTAT_ADD(tx_collide_8times);
11765 ESTAT_ADD(tx_collide_9times);
11766 ESTAT_ADD(tx_collide_10times);
11767 ESTAT_ADD(tx_collide_11times);
11768 ESTAT_ADD(tx_collide_12times);
11769 ESTAT_ADD(tx_collide_13times);
11770 ESTAT_ADD(tx_collide_14times);
11771 ESTAT_ADD(tx_collide_15times);
11772 ESTAT_ADD(tx_ucast_packets);
11773 ESTAT_ADD(tx_mcast_packets);
11774 ESTAT_ADD(tx_bcast_packets);
11775 ESTAT_ADD(tx_carrier_sense_errors);
11776 ESTAT_ADD(tx_discards);
11777 ESTAT_ADD(tx_errors);
11778
11779 ESTAT_ADD(dma_writeq_full);
11780 ESTAT_ADD(dma_write_prioq_full);
11781 ESTAT_ADD(rxbds_empty);
11782 ESTAT_ADD(rx_discards);
11783 ESTAT_ADD(rx_errors);
11784 ESTAT_ADD(rx_threshold_hit);
11785
11786 ESTAT_ADD(dma_readq_full);
11787 ESTAT_ADD(dma_read_prioq_full);
11788 ESTAT_ADD(tx_comp_queue_full);
11789
11790 ESTAT_ADD(ring_set_send_prod_index);
11791 ESTAT_ADD(ring_status_update);
11792 ESTAT_ADD(nic_irqs);
11793 ESTAT_ADD(nic_avoided_irqs);
11794 ESTAT_ADD(nic_tx_threshold_hit);
11795
11796 ESTAT_ADD(mbuf_lwm_thresh_hit);
11797 }
11798
11799 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11800 {
11801 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11802 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11803
11804 stats->rx_packets = old_stats->rx_packets +
11805 get_stat64(&hw_stats->rx_ucast_packets) +
11806 get_stat64(&hw_stats->rx_mcast_packets) +
11807 get_stat64(&hw_stats->rx_bcast_packets);
11808
11809 stats->tx_packets = old_stats->tx_packets +
11810 get_stat64(&hw_stats->tx_ucast_packets) +
11811 get_stat64(&hw_stats->tx_mcast_packets) +
11812 get_stat64(&hw_stats->tx_bcast_packets);
11813
11814 stats->rx_bytes = old_stats->rx_bytes +
11815 get_stat64(&hw_stats->rx_octets);
11816 stats->tx_bytes = old_stats->tx_bytes +
11817 get_stat64(&hw_stats->tx_octets);
11818
11819 stats->rx_errors = old_stats->rx_errors +
11820 get_stat64(&hw_stats->rx_errors);
11821 stats->tx_errors = old_stats->tx_errors +
11822 get_stat64(&hw_stats->tx_errors) +
11823 get_stat64(&hw_stats->tx_mac_errors) +
11824 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11825 get_stat64(&hw_stats->tx_discards);
11826
11827 stats->multicast = old_stats->multicast +
11828 get_stat64(&hw_stats->rx_mcast_packets);
11829 stats->collisions = old_stats->collisions +
11830 get_stat64(&hw_stats->tx_collisions);
11831
11832 stats->rx_length_errors = old_stats->rx_length_errors +
11833 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11834 get_stat64(&hw_stats->rx_undersize_packets);
11835
11836 stats->rx_frame_errors = old_stats->rx_frame_errors +
11837 get_stat64(&hw_stats->rx_align_errors);
11838 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11839 get_stat64(&hw_stats->tx_discards);
11840 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11841 get_stat64(&hw_stats->tx_carrier_sense_errors);
11842
11843 stats->rx_crc_errors = old_stats->rx_crc_errors +
11844 tg3_calc_crc_errors(tp);
11845
11846 stats->rx_missed_errors = old_stats->rx_missed_errors +
11847 get_stat64(&hw_stats->rx_discards);
11848
11849 stats->rx_dropped = tp->rx_dropped;
11850 stats->tx_dropped = tp->tx_dropped;
11851 }
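/* Editorial note: rx_dropped / tx_dropped come straight from the
 * driver's own soft counters rather than the hardware statistics
 * block, so they are reported directly instead of being added to a
 * snapshot like the counters above.
 */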
11852
11853 static int tg3_get_regs_len(struct net_device *dev)
11854 {
11855 return TG3_REG_BLK_SIZE;
11856 }
11857
11858 static void tg3_get_regs(struct net_device *dev,
11859 struct ethtool_regs *regs, void *_p)
11860 {
11861 struct tg3 *tp = netdev_priv(dev);
11862
11863 regs->version = 0;
11864
11865 memset(_p, 0, TG3_REG_BLK_SIZE);
11866
11867 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11868 return;
11869
11870 tg3_full_lock(tp, 0);
11871
11872 tg3_dump_legacy_regs(tp, (u32 *)_p);
11873
11874 tg3_full_unlock(tp);
11875 }
11876
11877 static int tg3_get_eeprom_len(struct net_device *dev)
11878 {
11879 struct tg3 *tp = netdev_priv(dev);
11880
11881 return tp->nvram_size;
11882 }
11883
11884 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11885 {
11886 struct tg3 *tp = netdev_priv(dev);
11887 int ret;
11888 u8 *pd;
11889 u32 i, offset, len, b_offset, b_count;
11890 __be32 val;
11891
11892 if (tg3_flag(tp, NO_NVRAM))
11893 return -EINVAL;
11894
11895 offset = eeprom->offset;
11896 len = eeprom->len;
11897 eeprom->len = 0;
11898
11899 eeprom->magic = TG3_EEPROM_MAGIC;
11900
11901 if (offset & 3) {
11902 /* adjust to start on the required 4-byte boundary */
11903 b_offset = offset & 3;
11904 b_count = 4 - b_offset;
11905 if (b_count > len) {
11906 /* i.e. offset=1 len=2 */
11907 b_count = len;
11908 }
11909 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11910 if (ret)
11911 return ret;
11912 memcpy(data, ((char *)&val) + b_offset, b_count);
11913 len -= b_count;
11914 offset += b_count;
11915 eeprom->len += b_count;
11916 }
11917
11918 /* read bytes up to the last 4-byte boundary */
11919 pd = &data[eeprom->len];
11920 for (i = 0; i < (len - (len & 3)); i += 4) {
11921 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11922 if (ret) {
11923 eeprom->len += i;
11924 return ret;
11925 }
11926 memcpy(pd + i, &val, 4);
11927 }
11928 eeprom->len += i;
11929
11930 if (len & 3) {
11931 /* read the last bytes, which do not end on a 4-byte boundary */
11932 pd = &data[eeprom->len];
11933 b_count = len & 3;
11934 b_offset = offset + len - b_count;
11935 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11936 if (ret)
11937 return ret;
11938 memcpy(pd, &val, b_count);
11939 eeprom->len += b_count;
11940 }
11941 return 0;
11942 }
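/* Editorial worked example for the three-phase read above, assuming
 * offset = 5 and len = 10: the head phase reads the word at 4 and
 * copies 3 bytes, the aligned loop reads one word at 8 and copies 4
 * bytes, and the tail phase reads the word at 12 and copies the final
 * 3 bytes -- 10 bytes total, all via 4-byte NVRAM reads.
 */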
11943
11944 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11945 {
11946 struct tg3 *tp = netdev_priv(dev);
11947 int ret;
11948 u32 offset, len, b_offset, odd_len;
11949 u8 *buf;
11950 __be32 start, end;
11951
11952 if (tg3_flag(tp, NO_NVRAM) ||
11953 eeprom->magic != TG3_EEPROM_MAGIC)
11954 return -EINVAL;
11955
11956 offset = eeprom->offset;
11957 len = eeprom->len;
11958
11959 if ((b_offset = (offset & 3))) {
11960 /* adjust to start on the required 4-byte boundary */
11961 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11962 if (ret)
11963 return ret;
11964 len += b_offset;
11965 offset &= ~3;
11966 if (len < 4)
11967 len = 4;
11968 }
11969
11970 odd_len = 0;
11971 if (len & 3) {
11972 /* adjust to end on the required 4-byte boundary */
11973 odd_len = 1;
11974 len = (len + 3) & ~3;
11975 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11976 if (ret)
11977 return ret;
11978 }
11979
11980 buf = data;
11981 if (b_offset || odd_len) {
11982 buf = kmalloc(len, GFP_KERNEL);
11983 if (!buf)
11984 return -ENOMEM;
11985 if (b_offset)
11986 memcpy(buf, &start, 4);
11987 if (odd_len)
11988 memcpy(buf+len-4, &end, 4);
11989 memcpy(buf + b_offset, data, eeprom->len);
11990 }
11991
11992 ret = tg3_nvram_write_block(tp, offset, len, buf);
11993
11994 if (buf != data)
11995 kfree(buf);
11996
11997 return ret;
11998 }
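/* Editorial note: unaligned writes are handled read-modify-write --
 * the bounding NVRAM words are read into 'start' and 'end', spliced
 * around the caller's data in an aligned bounce buffer, and the whole
 * 4-byte-aligned block is written back in one call.
 */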
11999
12000 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12001 {
12002 struct tg3 *tp = netdev_priv(dev);
12003
12004 if (tg3_flag(tp, USE_PHYLIB)) {
12005 struct phy_device *phydev;
12006 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12007 return -EAGAIN;
12008 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12009 return phy_ethtool_gset(phydev, cmd);
12010 }
12011
12012 cmd->supported = (SUPPORTED_Autoneg);
12013
12014 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12015 cmd->supported |= (SUPPORTED_1000baseT_Half |
12016 SUPPORTED_1000baseT_Full);
12017
12018 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12019 cmd->supported |= (SUPPORTED_100baseT_Half |
12020 SUPPORTED_100baseT_Full |
12021 SUPPORTED_10baseT_Half |
12022 SUPPORTED_10baseT_Full |
12023 SUPPORTED_TP);
12024 cmd->port = PORT_TP;
12025 } else {
12026 cmd->supported |= SUPPORTED_FIBRE;
12027 cmd->port = PORT_FIBRE;
12028 }
12029
12030 cmd->advertising = tp->link_config.advertising;
12031 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12032 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12033 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12034 cmd->advertising |= ADVERTISED_Pause;
12035 } else {
12036 cmd->advertising |= ADVERTISED_Pause |
12037 ADVERTISED_Asym_Pause;
12038 }
12039 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12040 cmd->advertising |= ADVERTISED_Asym_Pause;
12041 }
12042 }
12043 if (netif_running(dev) && tp->link_up) {
12044 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
12045 cmd->duplex = tp->link_config.active_duplex;
12046 cmd->lp_advertising = tp->link_config.rmt_adv;
12047 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12048 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12049 cmd->eth_tp_mdix = ETH_TP_MDI_X;
12050 else
12051 cmd->eth_tp_mdix = ETH_TP_MDI;
12052 }
12053 } else {
12054 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
12055 cmd->duplex = DUPLEX_UNKNOWN;
12056 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
12057 }
12058 cmd->phy_address = tp->phy_addr;
12059 cmd->transceiver = XCVR_INTERNAL;
12060 cmd->autoneg = tp->link_config.autoneg;
12061 cmd->maxtxpkt = 0;
12062 cmd->maxrxpkt = 0;
12063 return 0;
12064 }
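/* Editorial note: the pause advertisement above follows the standard
 * PAUSE/ASM_DIR encoding -- symmetric flow control advertises Pause
 * alone, rx-only advertises Pause | Asym_Pause, and tx-only advertises
 * Asym_Pause alone.
 */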
12065
12066 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12067 {
12068 struct tg3 *tp = netdev_priv(dev);
12069 u32 speed = ethtool_cmd_speed(cmd);
12070
12071 if (tg3_flag(tp, USE_PHYLIB)) {
12072 struct phy_device *phydev;
12073 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12074 return -EAGAIN;
12075 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12076 return phy_ethtool_sset(phydev, cmd);
12077 }
12078
12079 if (cmd->autoneg != AUTONEG_ENABLE &&
12080 cmd->autoneg != AUTONEG_DISABLE)
12081 return -EINVAL;
12082
12083 if (cmd->autoneg == AUTONEG_DISABLE &&
12084 cmd->duplex != DUPLEX_FULL &&
12085 cmd->duplex != DUPLEX_HALF)
12086 return -EINVAL;
12087
12088 if (cmd->autoneg == AUTONEG_ENABLE) {
12089 u32 mask = ADVERTISED_Autoneg |
12090 ADVERTISED_Pause |
12091 ADVERTISED_Asym_Pause;
12092
12093 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12094 mask |= ADVERTISED_1000baseT_Half |
12095 ADVERTISED_1000baseT_Full;
12096
12097 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12098 mask |= ADVERTISED_100baseT_Half |
12099 ADVERTISED_100baseT_Full |
12100 ADVERTISED_10baseT_Half |
12101 ADVERTISED_10baseT_Full |
12102 ADVERTISED_TP;
12103 else
12104 mask |= ADVERTISED_FIBRE;
12105
12106 if (cmd->advertising & ~mask)
12107 return -EINVAL;
12108
12109 mask &= (ADVERTISED_1000baseT_Half |
12110 ADVERTISED_1000baseT_Full |
12111 ADVERTISED_100baseT_Half |
12112 ADVERTISED_100baseT_Full |
12113 ADVERTISED_10baseT_Half |
12114 ADVERTISED_10baseT_Full);
12115
12116 cmd->advertising &= mask;
12117 } else {
12118 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12119 if (speed != SPEED_1000)
12120 return -EINVAL;
12121
12122 if (cmd->duplex != DUPLEX_FULL)
12123 return -EINVAL;
12124 } else {
12125 if (speed != SPEED_100 &&
12126 speed != SPEED_10)
12127 return -EINVAL;
12128 }
12129 }
12130
12131 tg3_full_lock(tp, 0);
12132
12133 tp->link_config.autoneg = cmd->autoneg;
12134 if (cmd->autoneg == AUTONEG_ENABLE) {
12135 tp->link_config.advertising = (cmd->advertising |
12136 ADVERTISED_Autoneg);
12137 tp->link_config.speed = SPEED_UNKNOWN;
12138 tp->link_config.duplex = DUPLEX_UNKNOWN;
12139 } else {
12140 tp->link_config.advertising = 0;
12141 tp->link_config.speed = speed;
12142 tp->link_config.duplex = cmd->duplex;
12143 }
12144
12145 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12146
12147 tg3_warn_mgmt_link_flap(tp);
12148
12149 if (netif_running(dev))
12150 tg3_setup_phy(tp, true);
12151
12152 tg3_full_unlock(tp);
12153
12154 return 0;
12155 }
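/* Editorial usage sketch (interface name assumed): this path is
 * reached from userspace via the legacy ethtool set command, e.g.
 *
 *	ethtool -s eth0 autoneg off speed 100 duplex full
 *	ethtool -s eth0 autoneg on
 */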
12156
12157 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12158 {
12159 struct tg3 *tp = netdev_priv(dev);
12160
12161 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12162 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12163 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12164 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12165 }
12166
12167 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12168 {
12169 struct tg3 *tp = netdev_priv(dev);
12170
12171 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12172 wol->supported = WAKE_MAGIC;
12173 else
12174 wol->supported = 0;
12175 wol->wolopts = 0;
12176 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12177 wol->wolopts = WAKE_MAGIC;
12178 memset(&wol->sopass, 0, sizeof(wol->sopass));
12179 }
12180
12181 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12182 {
12183 struct tg3 *tp = netdev_priv(dev);
12184 struct device *dp = &tp->pdev->dev;
12185
12186 if (wol->wolopts & ~WAKE_MAGIC)
12187 return -EINVAL;
12188 if ((wol->wolopts & WAKE_MAGIC) &&
12189 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12190 return -EINVAL;
12191
12192 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12193
12194 if (device_may_wakeup(dp))
12195 tg3_flag_set(tp, WOL_ENABLE);
12196 else
12197 tg3_flag_clear(tp, WOL_ENABLE);
12198
12199 return 0;
12200 }
12201
12202 static u32 tg3_get_msglevel(struct net_device *dev)
12203 {
12204 struct tg3 *tp = netdev_priv(dev);
12205 return tp->msg_enable;
12206 }
12207
12208 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12209 {
12210 struct tg3 *tp = netdev_priv(dev);
12211 tp->msg_enable = value;
12212 }
12213
12214 static int tg3_nway_reset(struct net_device *dev)
12215 {
12216 struct tg3 *tp = netdev_priv(dev);
12217 int r;
12218
12219 if (!netif_running(dev))
12220 return -EAGAIN;
12221
12222 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12223 return -EINVAL;
12224
12225 tg3_warn_mgmt_link_flap(tp);
12226
12227 if (tg3_flag(tp, USE_PHYLIB)) {
12228 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12229 return -EAGAIN;
12230 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12231 } else {
12232 u32 bmcr;
12233
12234 spin_lock_bh(&tp->lock);
12235 r = -EINVAL;
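/* Editorial note: the first read below is discarded and only the
 * second result is checked -- the double read is preserved from the
 * original code, presumably as a dummy read to refresh the register.
 */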
12236 tg3_readphy(tp, MII_BMCR, &bmcr);
12237 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12238 ((bmcr & BMCR_ANENABLE) ||
12239 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12240 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12241 BMCR_ANENABLE);
12242 r = 0;
12243 }
12244 spin_unlock_bh(&tp->lock);
12245 }
12246
12247 return r;
12248 }
12249
12250 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12251 {
12252 struct tg3 *tp = netdev_priv(dev);
12253
12254 ering->rx_max_pending = tp->rx_std_ring_mask;
12255 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12256 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12257 else
12258 ering->rx_jumbo_max_pending = 0;
12259
12260 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12261
12262 ering->rx_pending = tp->rx_pending;
12263 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12264 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12265 else
12266 ering->rx_jumbo_pending = 0;
12267
12268 ering->tx_pending = tp->napi[0].tx_pending;
12269 }
12270
12271 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12272 {
12273 struct tg3 *tp = netdev_priv(dev);
12274 int i, irq_sync = 0, err = 0;
12275
12276 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12277 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12278 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12279 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12280 (tg3_flag(tp, TSO_BUG) &&
12281 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12282 return -EINVAL;
12283
12284 if (netif_running(dev)) {
12285 tg3_phy_stop(tp);
12286 tg3_netif_stop(tp);
12287 irq_sync = 1;
12288 }
12289
12290 tg3_full_lock(tp, irq_sync);
12291
12292 tp->rx_pending = ering->rx_pending;
12293
12294 if (tg3_flag(tp, MAX_RXPEND_64) &&
12295 tp->rx_pending > 63)
12296 tp->rx_pending = 63;
12297 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12298
12299 for (i = 0; i < tp->irq_max; i++)
12300 tp->napi[i].tx_pending = ering->tx_pending;
12301
12302 if (netif_running(dev)) {
12303 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12304 err = tg3_restart_hw(tp, false);
12305 if (!err)
12306 tg3_netif_start(tp);
12307 }
12308
12309 tg3_full_unlock(tp);
12310
12311 if (irq_sync && !err)
12312 tg3_phy_start(tp);
12313
12314 return err;
12315 }
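/* Editorial note: the tx_pending floor above exists because a
 * maximally fragmented skb needs one descriptor per fragment; the
 * TSO_BUG case appears to demand roughly triple that headroom since
 * the driver may have to segment such packets itself.
 */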
12316
12317 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12318 {
12319 struct tg3 *tp = netdev_priv(dev);
12320
12321 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12322
12323 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12324 epause->rx_pause = 1;
12325 else
12326 epause->rx_pause = 0;
12327
12328 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12329 epause->tx_pause = 1;
12330 else
12331 epause->tx_pause = 0;
12332 }
12333
12334 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12335 {
12336 struct tg3 *tp = netdev_priv(dev);
12337 int err = 0;
12338
12339 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12340 tg3_warn_mgmt_link_flap(tp);
12341
12342 if (tg3_flag(tp, USE_PHYLIB)) {
12343 u32 newadv;
12344 struct phy_device *phydev;
12345
12346 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12347
12348 if (!(phydev->supported & SUPPORTED_Pause) ||
12349 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12350 (epause->rx_pause != epause->tx_pause)))
12351 return -EINVAL;
12352
12353 tp->link_config.flowctrl = 0;
12354 if (epause->rx_pause) {
12355 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12356
12357 if (epause->tx_pause) {
12358 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12359 newadv = ADVERTISED_Pause;
12360 } else
12361 newadv = ADVERTISED_Pause |
12362 ADVERTISED_Asym_Pause;
12363 } else if (epause->tx_pause) {
12364 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12365 newadv = ADVERTISED_Asym_Pause;
12366 } else
12367 newadv = 0;
12368
12369 if (epause->autoneg)
12370 tg3_flag_set(tp, PAUSE_AUTONEG);
12371 else
12372 tg3_flag_clear(tp, PAUSE_AUTONEG);
12373
12374 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12375 u32 oldadv = phydev->advertising &
12376 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12377 if (oldadv != newadv) {
12378 phydev->advertising &=
12379 ~(ADVERTISED_Pause |
12380 ADVERTISED_Asym_Pause);
12381 phydev->advertising |= newadv;
12382 if (phydev->autoneg) {
12383 /*
12384 * Always renegotiate the link to
12385 * inform our link partner of our
12386 * flow control settings, even if the
12387 * flow control is forced. Let
12388 * tg3_adjust_link() do the final
12389 * flow control setup.
12390 */
12391 return phy_start_aneg(phydev);
12392 }
12393 }
12394
12395 if (!epause->autoneg)
12396 tg3_setup_flow_control(tp, 0, 0);
12397 } else {
12398 tp->link_config.advertising &=
12399 ~(ADVERTISED_Pause |
12400 ADVERTISED_Asym_Pause);
12401 tp->link_config.advertising |= newadv;
12402 }
12403 } else {
12404 int irq_sync = 0;
12405
12406 if (netif_running(dev)) {
12407 tg3_netif_stop(tp);
12408 irq_sync = 1;
12409 }
12410
12411 tg3_full_lock(tp, irq_sync);
12412
12413 if (epause->autoneg)
12414 tg3_flag_set(tp, PAUSE_AUTONEG);
12415 else
12416 tg3_flag_clear(tp, PAUSE_AUTONEG);
12417 if (epause->rx_pause)
12418 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12419 else
12420 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12421 if (epause->tx_pause)
12422 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12423 else
12424 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12425
12426 if (netif_running(dev)) {
12427 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12428 err = tg3_restart_hw(tp, false);
12429 if (!err)
12430 tg3_netif_start(tp);
12431 }
12432
12433 tg3_full_unlock(tp);
12434 }
12435
12436 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12437
12438 return err;
12439 }
12440
12441 static int tg3_get_sset_count(struct net_device *dev, int sset)
12442 {
12443 switch (sset) {
12444 case ETH_SS_TEST:
12445 return TG3_NUM_TEST;
12446 case ETH_SS_STATS:
12447 return TG3_NUM_STATS;
12448 default:
12449 return -EOPNOTSUPP;
12450 }
12451 }
12452
12453 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12454 u32 *rules __always_unused)
12455 {
12456 struct tg3 *tp = netdev_priv(dev);
12457
12458 if (!tg3_flag(tp, SUPPORT_MSIX))
12459 return -EOPNOTSUPP;
12460
12461 switch (info->cmd) {
12462 case ETHTOOL_GRXRINGS:
12463 if (netif_running(tp->dev))
12464 info->data = tp->rxq_cnt;
12465 else {
12466 info->data = num_online_cpus();
12467 if (info->data > TG3_RSS_MAX_NUM_QS)
12468 info->data = TG3_RSS_MAX_NUM_QS;
12469 }
12470
12471 /* The first interrupt vector only
12472 * handles link interrupts.
12473 */
12474 info->data -= 1;
12475 return 0;
12476
12477 default:
12478 return -EOPNOTSUPP;
12479 }
12480 }
12481
12482 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12483 {
12484 u32 size = 0;
12485 struct tg3 *tp = netdev_priv(dev);
12486
12487 if (tg3_flag(tp, SUPPORT_MSIX))
12488 size = TG3_RSS_INDIR_TBL_SIZE;
12489
12490 return size;
12491 }
12492
12493 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12494 {
12495 struct tg3 *tp = netdev_priv(dev);
12496 int i;
12497
12498 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12499 indir[i] = tp->rss_ind_tbl[i];
12500
12501 return 0;
12502 }
12503
12504 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12505 {
12506 struct tg3 *tp = netdev_priv(dev);
12507 size_t i;
12508
12509 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12510 tp->rss_ind_tbl[i] = indir[i];
12511
12512 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12513 return 0;
12514
12515 /* It is legal to write the indirection
12516 * table while the device is running.
12517 */
12518 tg3_full_lock(tp, 0);
12519 tg3_rss_write_indir_tbl(tp);
12520 tg3_full_unlock(tp);
12521
12522 return 0;
12523 }
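/* Editorial usage sketch (interface name assumed): the RSS indirection
 * table maps hash buckets to rx queues and can be rewritten at
 * runtime, e.g.
 *
 *	ethtool -X eth0 equal 4		# spread across 4 rx queues
 */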
12524
12525 static void tg3_get_channels(struct net_device *dev,
12526 struct ethtool_channels *channel)
12527 {
12528 struct tg3 *tp = netdev_priv(dev);
12529 u32 deflt_qs = netif_get_num_default_rss_queues();
12530
12531 channel->max_rx = tp->rxq_max;
12532 channel->max_tx = tp->txq_max;
12533
12534 if (netif_running(dev)) {
12535 channel->rx_count = tp->rxq_cnt;
12536 channel->tx_count = tp->txq_cnt;
12537 } else {
12538 if (tp->rxq_req)
12539 channel->rx_count = tp->rxq_req;
12540 else
12541 channel->rx_count = min(deflt_qs, tp->rxq_max);
12542
12543 if (tp->txq_req)
12544 channel->tx_count = tp->txq_req;
12545 else
12546 channel->tx_count = min(deflt_qs, tp->txq_max);
12547 }
12548 }
12549
12550 static int tg3_set_channels(struct net_device *dev,
12551 struct ethtool_channels *channel)
12552 {
12553 struct tg3 *tp = netdev_priv(dev);
12554
12555 if (!tg3_flag(tp, SUPPORT_MSIX))
12556 return -EOPNOTSUPP;
12557
12558 if (channel->rx_count > tp->rxq_max ||
12559 channel->tx_count > tp->txq_max)
12560 return -EINVAL;
12561
12562 tp->rxq_req = channel->rx_count;
12563 tp->txq_req = channel->tx_count;
12564
12565 if (!netif_running(dev))
12566 return 0;
12567
12568 tg3_stop(tp);
12569
12570 tg3_carrier_off(tp);
12571
12572 tg3_start(tp, true, false, false);
12573
12574 return 0;
12575 }
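/* Editorial usage sketch (interface name assumed): queue counts are
 * changed with the channels command; note the full stop/start cycle
 * above when the device is running, e.g.
 *
 *	ethtool -L eth0 rx 4 tx 4
 */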
12576
12577 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12578 {
12579 switch (stringset) {
12580 case ETH_SS_STATS:
12581 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12582 break;
12583 case ETH_SS_TEST:
12584 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12585 break;
12586 default:
12587 WARN_ON(1); /* unhandled stringset */
12588 break;
12589 }
12590 }
12591
12592 static int tg3_set_phys_id(struct net_device *dev,
12593 enum ethtool_phys_id_state state)
12594 {
12595 struct tg3 *tp = netdev_priv(dev);
12596
12597 if (!netif_running(tp->dev))
12598 return -EAGAIN;
12599
12600 switch (state) {
12601 case ETHTOOL_ID_ACTIVE:
12602 return 1; /* cycle on/off once per second */
12603
12604 case ETHTOOL_ID_ON:
12605 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12606 LED_CTRL_1000MBPS_ON |
12607 LED_CTRL_100MBPS_ON |
12608 LED_CTRL_10MBPS_ON |
12609 LED_CTRL_TRAFFIC_OVERRIDE |
12610 LED_CTRL_TRAFFIC_BLINK |
12611 LED_CTRL_TRAFFIC_LED);
12612 break;
12613
12614 case ETHTOOL_ID_OFF:
12615 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12616 LED_CTRL_TRAFFIC_OVERRIDE);
12617 break;
12618
12619 case ETHTOOL_ID_INACTIVE:
12620 tw32(MAC_LED_CTRL, tp->led_ctrl);
12621 break;
12622 }
12623
12624 return 0;
12625 }
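/* Editorial usage sketch (interface name assumed): returning 1 from
 * ETHTOOL_ID_ACTIVE asks the ethtool core to drive the on/off cycle
 * at 1 Hz, so port identification is simply
 *
 *	ethtool -p eth0 10	# blink the port LED for 10 seconds
 */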
12626
12627 static void tg3_get_ethtool_stats(struct net_device *dev,
12628 struct ethtool_stats *estats, u64 *tmp_stats)
12629 {
12630 struct tg3 *tp = netdev_priv(dev);
12631
12632 if (tp->hw_stats)
12633 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12634 else
12635 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12636 }
12637
12638 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12639 {
12640 int i;
12641 __be32 *buf;
12642 u32 offset = 0, len = 0;
12643 u32 magic, val;
12644
12645 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12646 return NULL;
12647
12648 if (magic == TG3_EEPROM_MAGIC) {
12649 for (offset = TG3_NVM_DIR_START;
12650 offset < TG3_NVM_DIR_END;
12651 offset += TG3_NVM_DIRENT_SIZE) {
12652 if (tg3_nvram_read(tp, offset, &val))
12653 return NULL;
12654
12655 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12656 TG3_NVM_DIRTYPE_EXTVPD)
12657 break;
12658 }
12659
12660 if (offset != TG3_NVM_DIR_END) {
12661 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12662 if (tg3_nvram_read(tp, offset + 4, &offset))
12663 return NULL;
12664
12665 offset = tg3_nvram_logical_addr(tp, offset);
12666 }
12667 }
12668
12669 if (!offset || !len) {
12670 offset = TG3_NVM_VPD_OFF;
12671 len = TG3_NVM_VPD_LEN;
12672 }
12673
12674 buf = kmalloc(len, GFP_KERNEL);
12675 if (buf == NULL)
12676 return NULL;
12677
12678 if (magic == TG3_EEPROM_MAGIC) {
12679 for (i = 0; i < len; i += 4) {
12680 /* The data is in little-endian format in NVRAM.
12681 * Use the big-endian read routines to preserve
12682 * the byte order as it exists in NVRAM.
12683 */
12684 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12685 goto error;
12686 }
12687 } else {
12688 u8 *ptr;
12689 ssize_t cnt;
12690 unsigned int pos = 0;
12691
12692 ptr = (u8 *)&buf[0];
12693 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12694 cnt = pci_read_vpd(tp->pdev, pos,
12695 len - pos, ptr);
12696 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12697 cnt = 0;
12698 else if (cnt < 0)
12699 goto error;
12700 }
12701 if (pos != len)
12702 goto error;
12703 }
12704
12705 *vpdlen = len;
12706
12707 return buf;
12708
12709 error:
12710 kfree(buf);
12711 return NULL;
12712 }
12713
12714 #define NVRAM_TEST_SIZE 0x100
12715 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12716 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12717 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12718 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12719 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12720 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12721 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12722 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12723
12724 static int tg3_test_nvram(struct tg3 *tp)
12725 {
12726 u32 csum, magic, len;
12727 __be32 *buf;
12728 int i, j, k, err = 0, size;
12729
12730 if (tg3_flag(tp, NO_NVRAM))
12731 return 0;
12732
12733 if (tg3_nvram_read(tp, 0, &magic) != 0)
12734 return -EIO;
12735
12736 if (magic == TG3_EEPROM_MAGIC)
12737 size = NVRAM_TEST_SIZE;
12738 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12739 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12740 TG3_EEPROM_SB_FORMAT_1) {
12741 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12742 case TG3_EEPROM_SB_REVISION_0:
12743 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12744 break;
12745 case TG3_EEPROM_SB_REVISION_2:
12746 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12747 break;
12748 case TG3_EEPROM_SB_REVISION_3:
12749 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12750 break;
12751 case TG3_EEPROM_SB_REVISION_4:
12752 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12753 break;
12754 case TG3_EEPROM_SB_REVISION_5:
12755 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12756 break;
12757 case TG3_EEPROM_SB_REVISION_6:
12758 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12759 break;
12760 default:
12761 return -EIO;
12762 }
12763 } else
12764 return 0;
12765 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12766 size = NVRAM_SELFBOOT_HW_SIZE;
12767 else
12768 return -EIO;
12769
12770 buf = kmalloc(size, GFP_KERNEL);
12771 if (buf == NULL)
12772 return -ENOMEM;
12773
12774 err = -EIO;
12775 for (i = 0, j = 0; i < size; i += 4, j++) {
12776 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12777 if (err)
12778 break;
12779 }
12780 if (i < size)
12781 goto out;
12782
12783 /* Selfboot format */
12784 magic = be32_to_cpu(buf[0]);
12785 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12786 TG3_EEPROM_MAGIC_FW) {
12787 u8 *buf8 = (u8 *) buf, csum8 = 0;
12788
12789 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12790 TG3_EEPROM_SB_REVISION_2) {
12791 /* For rev 2, the csum doesn't include the MBA. */
12792 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12793 csum8 += buf8[i];
12794 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12795 csum8 += buf8[i];
12796 } else {
12797 for (i = 0; i < size; i++)
12798 csum8 += buf8[i];
12799 }
12800
12801 if (csum8 == 0) {
12802 err = 0;
12803 goto out;
12804 }
12805
12806 err = -EIO;
12807 goto out;
12808 }
12809
12810 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12811 TG3_EEPROM_MAGIC_HW) {
12812 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12813 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12814 u8 *buf8 = (u8 *) buf;
12815
12816 /* Separate the parity bits from the data bytes.  Bytes 0 and 8
 * each hold 7 parity bits, and byte 16 holds 6 more plus all 8 bits
 * of byte 17; each data byte combined with its parity bit must have
 * odd parity.
 */
12817 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12818 if ((i == 0) || (i == 8)) {
12819 int l;
12820 u8 msk;
12821
12822 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12823 parity[k++] = buf8[i] & msk;
12824 i++;
12825 } else if (i == 16) {
12826 int l;
12827 u8 msk;
12828
12829 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12830 parity[k++] = buf8[i] & msk;
12831 i++;
12832
12833 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12834 parity[k++] = buf8[i] & msk;
12835 i++;
12836 }
12837 data[j++] = buf8[i];
12838 }
12839
12840 err = -EIO;
12841 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12842 u8 hw8 = hweight8(data[i]);
12843
12844 if ((hw8 & 0x1) && parity[i])
12845 goto out;
12846 else if (!(hw8 & 0x1) && !parity[i])
12847 goto out;
12848 }
12849 err = 0;
12850 goto out;
12851 }
12852
12853 err = -EIO;
12854
12855 /* Bootstrap checksum at offset 0x10 */
12856 csum = calc_crc((unsigned char *) buf, 0x10);
12857 if (csum != le32_to_cpu(buf[0x10/4]))
12858 goto out;
12859
12860 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12861 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12862 if (csum != le32_to_cpu(buf[0xfc/4]))
12863 goto out;
12864
12865 kfree(buf);
12866
12867 buf = tg3_vpd_readblock(tp, &len);
12868 if (!buf)
12869 return -ENOMEM;
12870
12871 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12872 if (i > 0) {
12873 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12874 if (j < 0)
12875 goto out;
12876
12877 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12878 goto out;
12879
12880 i += PCI_VPD_LRDT_TAG_SIZE;
12881 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12882 PCI_VPD_RO_KEYWORD_CHKSUM);
12883 if (j > 0) {
12884 u8 csum8 = 0;
12885
12886 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12887
12888 for (i = 0; i <= j; i++)
12889 csum8 += ((u8 *)buf)[i];
12890
12891 if (csum8)
12892 goto out;
12893 }
12894 }
12895
12896 err = 0;
12897
12898 out:
12899 kfree(buf);
12900 return err;
12901 }
12902
12903 #define TG3_SERDES_TIMEOUT_SEC 2
12904 #define TG3_COPPER_TIMEOUT_SEC 6
12905
12906 static int tg3_test_link(struct tg3 *tp)
12907 {
12908 int i, max;
12909
12910 if (!netif_running(tp->dev))
12911 return -ENODEV;
12912
12913 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12914 max = TG3_SERDES_TIMEOUT_SEC;
12915 else
12916 max = TG3_COPPER_TIMEOUT_SEC;
12917
12918 for (i = 0; i < max; i++) {
12919 if (tp->link_up)
12920 return 0;
12921
12922 if (msleep_interruptible(1000))
12923 break;
12924 }
12925
12926 return -EIO;
12927 }
12928
12929 /* Only test the commonly used registers */
12930 static int tg3_test_registers(struct tg3 *tp)
12931 {
12932 int i, is_5705, is_5750;
12933 u32 offset, read_mask, write_mask, val, save_val, read_val;
12934 static struct {
12935 u16 offset;
12936 u16 flags;
12937 #define TG3_FL_5705 0x1
12938 #define TG3_FL_NOT_5705 0x2
12939 #define TG3_FL_NOT_5788 0x4
12940 #define TG3_FL_NOT_5750 0x8
12941 u32 read_mask;
12942 u32 write_mask;
12943 } reg_tbl[] = {
12944 /* MAC Control Registers */
12945 { MAC_MODE, TG3_FL_NOT_5705,
12946 0x00000000, 0x00ef6f8c },
12947 { MAC_MODE, TG3_FL_5705,
12948 0x00000000, 0x01ef6b8c },
12949 { MAC_STATUS, TG3_FL_NOT_5705,
12950 0x03800107, 0x00000000 },
12951 { MAC_STATUS, TG3_FL_5705,
12952 0x03800100, 0x00000000 },
12953 { MAC_ADDR_0_HIGH, 0x0000,
12954 0x00000000, 0x0000ffff },
12955 { MAC_ADDR_0_LOW, 0x0000,
12956 0x00000000, 0xffffffff },
12957 { MAC_RX_MTU_SIZE, 0x0000,
12958 0x00000000, 0x0000ffff },
12959 { MAC_TX_MODE, 0x0000,
12960 0x00000000, 0x00000070 },
12961 { MAC_TX_LENGTHS, 0x0000,
12962 0x00000000, 0x00003fff },
12963 { MAC_RX_MODE, TG3_FL_NOT_5705,
12964 0x00000000, 0x000007fc },
12965 { MAC_RX_MODE, TG3_FL_5705,
12966 0x00000000, 0x000007dc },
12967 { MAC_HASH_REG_0, 0x0000,
12968 0x00000000, 0xffffffff },
12969 { MAC_HASH_REG_1, 0x0000,
12970 0x00000000, 0xffffffff },
12971 { MAC_HASH_REG_2, 0x0000,
12972 0x00000000, 0xffffffff },
12973 { MAC_HASH_REG_3, 0x0000,
12974 0x00000000, 0xffffffff },
12975
12976 /* Receive Data and Receive BD Initiator Control Registers. */
12977 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12978 0x00000000, 0xffffffff },
12979 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12980 0x00000000, 0xffffffff },
12981 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12982 0x00000000, 0x00000003 },
12983 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12984 0x00000000, 0xffffffff },
12985 { RCVDBDI_STD_BD+0, 0x0000,
12986 0x00000000, 0xffffffff },
12987 { RCVDBDI_STD_BD+4, 0x0000,
12988 0x00000000, 0xffffffff },
12989 { RCVDBDI_STD_BD+8, 0x0000,
12990 0x00000000, 0xffff0002 },
12991 { RCVDBDI_STD_BD+0xc, 0x0000,
12992 0x00000000, 0xffffffff },
12993
12994 /* Receive BD Initiator Control Registers. */
12995 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12996 0x00000000, 0xffffffff },
12997 { RCVBDI_STD_THRESH, TG3_FL_5705,
12998 0x00000000, 0x000003ff },
12999 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13000 0x00000000, 0xffffffff },
13001
13002 /* Host Coalescing Control Registers. */
13003 { HOSTCC_MODE, TG3_FL_NOT_5705,
13004 0x00000000, 0x00000004 },
13005 { HOSTCC_MODE, TG3_FL_5705,
13006 0x00000000, 0x000000f6 },
13007 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13008 0x00000000, 0xffffffff },
13009 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13010 0x00000000, 0x000003ff },
13011 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13012 0x00000000, 0xffffffff },
13013 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13014 0x00000000, 0x000003ff },
13015 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13016 0x00000000, 0xffffffff },
13017 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13018 0x00000000, 0x000000ff },
13019 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13020 0x00000000, 0xffffffff },
13021 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13022 0x00000000, 0x000000ff },
13023 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13024 0x00000000, 0xffffffff },
13025 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13026 0x00000000, 0xffffffff },
13027 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13028 0x00000000, 0xffffffff },
13029 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13030 0x00000000, 0x000000ff },
13031 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13032 0x00000000, 0xffffffff },
13033 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13034 0x00000000, 0x000000ff },
13035 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13036 0x00000000, 0xffffffff },
13037 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13038 0x00000000, 0xffffffff },
13039 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13040 0x00000000, 0xffffffff },
13041 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13042 0x00000000, 0xffffffff },
13043 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13044 0x00000000, 0xffffffff },
13045 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13046 0xffffffff, 0x00000000 },
13047 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13048 0xffffffff, 0x00000000 },
13049
13050 /* Buffer Manager Control Registers. */
13051 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13052 0x00000000, 0x007fff80 },
13053 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13054 0x00000000, 0x007fffff },
13055 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13056 0x00000000, 0x0000003f },
13057 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13058 0x00000000, 0x000001ff },
13059 { BUFMGR_MB_HIGH_WATER, 0x0000,
13060 0x00000000, 0x000001ff },
13061 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13062 0xffffffff, 0x00000000 },
13063 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13064 0xffffffff, 0x00000000 },
13065
13066 /* Mailbox Registers */
13067 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13068 0x00000000, 0x000001ff },
13069 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13070 0x00000000, 0x000001ff },
13071 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13072 0x00000000, 0x000007ff },
13073 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13074 0x00000000, 0x000001ff },
13075
13076 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13077 };
13078
13079 is_5705 = is_5750 = 0;
13080 if (tg3_flag(tp, 5705_PLUS)) {
13081 is_5705 = 1;
13082 if (tg3_flag(tp, 5750_PLUS))
13083 is_5750 = 1;
13084 }
13085
13086 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13087 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13088 continue;
13089
13090 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13091 continue;
13092
13093 if (tg3_flag(tp, IS_5788) &&
13094 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13095 continue;
13096
13097 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13098 continue;
13099
13100 offset = (u32) reg_tbl[i].offset;
13101 read_mask = reg_tbl[i].read_mask;
13102 write_mask = reg_tbl[i].write_mask;
13103
13104 /* Save the original register content */
13105 save_val = tr32(offset);
13106
13107 /* Determine the read-only value. */
13108 read_val = save_val & read_mask;
13109
13110 /* Write zero to the register, then make sure the read-only bits
13111 * are not changed and the read/write bits are all zeros.
13112 */
13113 tw32(offset, 0);
13114
13115 val = tr32(offset);
13116
13117 /* Test the read-only and read/write bits. */
13118 if (((val & read_mask) != read_val) || (val & write_mask))
13119 goto out;
13120
13121 /* Write ones to all the bits defined by RdMask and WrMask, then
13122 * make sure the read-only bits are not changed and the
13123 * read/write bits are all ones.
13124 */
13125 tw32(offset, read_mask | write_mask);
13126
13127 val = tr32(offset);
13128
13129 /* Test the read-only bits. */
13130 if ((val & read_mask) != read_val)
13131 goto out;
13132
13133 /* Test the read/write bits. */
13134 if ((val & write_mask) != write_mask)
13135 goto out;
13136
13137 tw32(offset, save_val);
13138 }
13139
13140 return 0;
13141
13142 out:
13143 if (netif_msg_hw(tp))
13144 netdev_err(tp->dev,
13145 "Register test failed at offset %x\n", offset);
13146 tw32(offset, save_val);
13147 return -EIO;
13148 }
13149
13150 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13151 {
13152 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13153 int i;
13154 u32 j;
13155
13156 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13157 for (j = 0; j < len; j += 4) {
13158 u32 val;
13159
13160 tg3_write_mem(tp, offset + j, test_pattern[i]);
13161 tg3_read_mem(tp, offset + j, &val);
13162 if (val != test_pattern[i])
13163 return -EIO;
13164 }
13165 }
13166 return 0;
13167 }
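/* Editorial note: all-zeros, all-ones and the alternating 0xaa55a55a
 * word are classic memory-test patterns -- together they catch
 * stuck-at bits and simple coupling faults in the on-chip SRAM.
 */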
13168
13169 static int tg3_test_memory(struct tg3 *tp)
13170 {
13171 static struct mem_entry {
13172 u32 offset;
13173 u32 len;
13174 } mem_tbl_570x[] = {
13175 { 0x00000000, 0x00b50},
13176 { 0x00002000, 0x1c000},
13177 { 0xffffffff, 0x00000}
13178 }, mem_tbl_5705[] = {
13179 { 0x00000100, 0x0000c},
13180 { 0x00000200, 0x00008},
13181 { 0x00004000, 0x00800},
13182 { 0x00006000, 0x01000},
13183 { 0x00008000, 0x02000},
13184 { 0x00010000, 0x0e000},
13185 { 0xffffffff, 0x00000}
13186 }, mem_tbl_5755[] = {
13187 { 0x00000200, 0x00008},
13188 { 0x00004000, 0x00800},
13189 { 0x00006000, 0x00800},
13190 { 0x00008000, 0x02000},
13191 { 0x00010000, 0x0c000},
13192 { 0xffffffff, 0x00000}
13193 }, mem_tbl_5906[] = {
13194 { 0x00000200, 0x00008},
13195 { 0x00004000, 0x00400},
13196 { 0x00006000, 0x00400},
13197 { 0x00008000, 0x01000},
13198 { 0x00010000, 0x01000},
13199 { 0xffffffff, 0x00000}
13200 }, mem_tbl_5717[] = {
13201 { 0x00000200, 0x00008},
13202 { 0x00010000, 0x0a000},
13203 { 0x00020000, 0x13c00},
13204 { 0xffffffff, 0x00000}
13205 }, mem_tbl_57765[] = {
13206 { 0x00000200, 0x00008},
13207 { 0x00004000, 0x00800},
13208 { 0x00006000, 0x09800},
13209 { 0x00010000, 0x0a000},
13210 { 0xffffffff, 0x00000}
13211 };
13212 struct mem_entry *mem_tbl;
13213 int err = 0;
13214 int i;
13215
13216 if (tg3_flag(tp, 5717_PLUS))
13217 mem_tbl = mem_tbl_5717;
13218 else if (tg3_flag(tp, 57765_CLASS) ||
13219 tg3_asic_rev(tp) == ASIC_REV_5762)
13220 mem_tbl = mem_tbl_57765;
13221 else if (tg3_flag(tp, 5755_PLUS))
13222 mem_tbl = mem_tbl_5755;
13223 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13224 mem_tbl = mem_tbl_5906;
13225 else if (tg3_flag(tp, 5705_PLUS))
13226 mem_tbl = mem_tbl_5705;
13227 else
13228 mem_tbl = mem_tbl_570x;
13229
13230 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13231 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13232 if (err)
13233 break;
13234 }
13235
13236 return err;
13237 }
13238
13239 #define TG3_TSO_MSS 500
13240
13241 #define TG3_TSO_IP_HDR_LEN 20
13242 #define TG3_TSO_TCP_HDR_LEN 20
13243 #define TG3_TSO_TCP_OPT_LEN 12
13244
13245 static const u8 tg3_tso_header[] = {
13246 0x08, 0x00,
13247 0x45, 0x00, 0x00, 0x00,
13248 0x00, 0x00, 0x40, 0x00,
13249 0x40, 0x06, 0x00, 0x00,
13250 0x0a, 0x00, 0x00, 0x01,
13251 0x0a, 0x00, 0x00, 0x02,
13252 0x0d, 0x00, 0xe0, 0x00,
13253 0x00, 0x00, 0x01, 0x00,
13254 0x00, 0x00, 0x02, 0x00,
13255 0x80, 0x10, 0x10, 0x00,
13256 0x14, 0x09, 0x00, 0x00,
13257 0x01, 0x01, 0x08, 0x0a,
13258 0x11, 0x11, 0x11, 0x11,
13259 0x11, 0x11, 0x11, 0x11,
13260 };
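/* Editorial decode of the canned header above: ethertype 0x0800,
 * then an IPv4 header (DF set, TTL 64, proto TCP, 10.0.0.1 ->
 * 10.0.0.2) and a TCP header with data offset 8 (20 bytes plus a
 * 12-byte timestamp option), ACK set; tot_len and the checksums are
 * patched at run time by tg3_run_loopback().
 */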
13261
13262 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13263 {
13264 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13265 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13266 u32 budget;
13267 struct sk_buff *skb;
13268 u8 *tx_data, *rx_data;
13269 dma_addr_t map;
13270 int num_pkts, tx_len, rx_len, i, err;
13271 struct tg3_rx_buffer_desc *desc;
13272 struct tg3_napi *tnapi, *rnapi;
13273 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13274
13275 tnapi = &tp->napi[0];
13276 rnapi = &tp->napi[0];
13277 if (tp->irq_cnt > 1) {
13278 if (tg3_flag(tp, ENABLE_RSS))
13279 rnapi = &tp->napi[1];
13280 if (tg3_flag(tp, ENABLE_TSS))
13281 tnapi = &tp->napi[1];
13282 }
13283 coal_now = tnapi->coal_now | rnapi->coal_now;
13284
13285 err = -EIO;
13286
13287 tx_len = pktsz;
13288 skb = netdev_alloc_skb(tp->dev, tx_len);
13289 if (!skb)
13290 return -ENOMEM;
13291
13292 tx_data = skb_put(skb, tx_len);
13293 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13294 memset(tx_data + ETH_ALEN, 0x0, 8);
13295
13296 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13297
13298 if (tso_loopback) {
13299 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13300
13301 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13302 TG3_TSO_TCP_OPT_LEN;
13303
13304 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13305 sizeof(tg3_tso_header));
13306 mss = TG3_TSO_MSS;
13307
13308 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13309 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13310
13311 /* Set the total length field in the IP header */
13312 iph->tot_len = htons((u16)(mss + hdr_len));
13313
13314 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13315 TXD_FLAG_CPU_POST_DMA);
13316
13317 if (tg3_flag(tp, HW_TSO_1) ||
13318 tg3_flag(tp, HW_TSO_2) ||
13319 tg3_flag(tp, HW_TSO_3)) {
13320 struct tcphdr *th;
13321 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13322 th = (struct tcphdr *)&tx_data[val];
13323 th->check = 0;
13324 } else
13325 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13326
13327 if (tg3_flag(tp, HW_TSO_3)) {
13328 mss |= (hdr_len & 0xc) << 12;
13329 if (hdr_len & 0x10)
13330 base_flags |= 0x00000010;
13331 base_flags |= (hdr_len & 0x3e0) << 5;
13332 } else if (tg3_flag(tp, HW_TSO_2))
13333 mss |= hdr_len << 9;
13334 else if (tg3_flag(tp, HW_TSO_1) ||
13335 tg3_asic_rev(tp) == ASIC_REV_5705) {
13336 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13337 } else {
13338 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13339 }
13340
13341 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13342 } else {
13343 num_pkts = 1;
13344 data_off = ETH_HLEN;
13345
13346 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13347 tx_len > VLAN_ETH_FRAME_LEN)
13348 base_flags |= TXD_FLAG_JMB_PKT;
13349 }
13350
13351 for (i = data_off; i < tx_len; i++)
13352 tx_data[i] = (u8) (i & 0xff);
13353
13354 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13355 if (pci_dma_mapping_error(tp->pdev, map)) {
13356 dev_kfree_skb(skb);
13357 return -EIO;
13358 }
13359
13360 val = tnapi->tx_prod;
13361 tnapi->tx_buffers[val].skb = skb;
13362 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13363
13364 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13365 rnapi->coal_now);
13366
13367 udelay(10);
13368
13369 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13370
13371 budget = tg3_tx_avail(tnapi);
13372 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13373 base_flags | TXD_FLAG_END, mss, 0)) {
13374 tnapi->tx_buffers[val].skb = NULL;
13375 dev_kfree_skb(skb);
13376 return -EIO;
13377 }
13378
13379 tnapi->tx_prod++;
13380
13381 /* Sync BD data before updating mailbox */
13382 wmb();
13383
13384 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13385 tr32_mailbox(tnapi->prodmbox);
13386
13387 udelay(10);
13388
13389 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13390 for (i = 0; i < 35; i++) {
13391 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13392 coal_now);
13393
13394 udelay(10);
13395
13396 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13397 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13398 if ((tx_idx == tnapi->tx_prod) &&
13399 (rx_idx == (rx_start_idx + num_pkts)))
13400 break;
13401 }
13402
13403 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13404 dev_kfree_skb(skb);
13405
13406 if (tx_idx != tnapi->tx_prod)
13407 goto out;
13408
13409 if (rx_idx != rx_start_idx + num_pkts)
13410 goto out;
13411
13412 val = data_off;
13413 while (rx_idx != rx_start_idx) {
13414 desc = &rnapi->rx_rcb[rx_start_idx++];
13415 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13416 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13417
13418 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13419 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13420 goto out;
13421
13422 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13423 - ETH_FCS_LEN;
13424
13425 if (!tso_loopback) {
13426 if (rx_len != tx_len)
13427 goto out;
13428
13429 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13430 if (opaque_key != RXD_OPAQUE_RING_STD)
13431 goto out;
13432 } else {
13433 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13434 goto out;
13435 }
13436 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13437 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13438 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13439 goto out;
13440 }
13441
13442 if (opaque_key == RXD_OPAQUE_RING_STD) {
13443 rx_data = tpr->rx_std_buffers[desc_idx].data;
13444 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13445 mapping);
13446 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13447 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13448 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13449 mapping);
13450 } else
13451 goto out;
13452
13453 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13454 PCI_DMA_FROMDEVICE);
13455
13456 rx_data += TG3_RX_OFFSET(tp);
13457 for (i = data_off; i < rx_len; i++, val++) {
13458 if (*(rx_data + i) != (u8) (val & 0xff))
13459 goto out;
13460 }
13461 }
13462
13463 err = 0;
13464
13465 /* tg3_free_rings will unmap and free the rx_data */
13466 out:
13467 return err;
13468 }
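/* Editorial summary of the loopback test above: build one frame whose
 * payload is the incrementing pattern (i & 0xff), DMA-map it, post a
 * single BD and kick the mailbox, poll the status block for up to
 * ~350 usec, then walk the rx return ring checking length, ring type
 * and every payload byte.
 */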
13469
13470 #define TG3_STD_LOOPBACK_FAILED 1
13471 #define TG3_JMB_LOOPBACK_FAILED 2
13472 #define TG3_TSO_LOOPBACK_FAILED 4
13473 #define TG3_LOOPBACK_FAILED \
13474 (TG3_STD_LOOPBACK_FAILED | \
13475 TG3_JMB_LOOPBACK_FAILED | \
13476 TG3_TSO_LOOPBACK_FAILED)
13477
13478 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13479 {
13480 int err = -EIO;
13481 u32 eee_cap;
13482 u32 jmb_pkt_sz = 9000;
13483
13484 if (tp->dma_limit)
13485 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13486
13487 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13488 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13489
13490 if (!netif_running(tp->dev)) {
13491 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13492 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13493 if (do_extlpbk)
13494 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13495 goto done;
13496 }
13497
13498 err = tg3_reset_hw(tp, true);
13499 if (err) {
13500 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13501 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13502 if (do_extlpbk)
13503 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13504 goto done;
13505 }
13506
13507 if (tg3_flag(tp, ENABLE_RSS)) {
13508 int i;
13509
13510 /* Reroute all rx packets to the 1st queue */
13511 for (i = MAC_RSS_INDIR_TBL_0;
13512 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13513 tw32(i, 0x0);
13514 }
13515
13516 /* HW errata - MAC loopback fails in some cases on 5780.
13517 * Normal traffic and PHY loopback are not affected by the
13518 * errata. Also, the MAC loopback test is deprecated for
13519 * all newer ASIC revisions.
13520 */
13521 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13522 !tg3_flag(tp, CPMU_PRESENT)) {
13523 tg3_mac_loopback(tp, true);
13524
13525 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13526 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13527
13528 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13529 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13530 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13531
13532 tg3_mac_loopback(tp, false);
13533 }
13534
13535 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13536 !tg3_flag(tp, USE_PHYLIB)) {
13537 int i;
13538
13539 tg3_phy_lpbk_set(tp, 0, false);
13540
13541 /* Wait for link */
13542 for (i = 0; i < 100; i++) {
13543 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13544 break;
13545 mdelay(1);
13546 }
13547
13548 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13549 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13550 if (tg3_flag(tp, TSO_CAPABLE) &&
13551 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13552 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13553 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13554 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13555 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13556
13557 if (do_extlpbk) {
13558 tg3_phy_lpbk_set(tp, 0, true);
13559
13560 /* All link indications report up, but the hardware
13561 * isn't really ready for about 20 msec. Double it
13562 * to be sure.
13563 */
13564 mdelay(40);
13565
13566 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13567 data[TG3_EXT_LOOPB_TEST] |=
13568 TG3_STD_LOOPBACK_FAILED;
13569 if (tg3_flag(tp, TSO_CAPABLE) &&
13570 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13571 data[TG3_EXT_LOOPB_TEST] |=
13572 TG3_TSO_LOOPBACK_FAILED;
13573 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13574 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13575 data[TG3_EXT_LOOPB_TEST] |=
13576 TG3_JMB_LOOPBACK_FAILED;
13577 }
13578
13579 /* Re-enable gphy autopowerdown. */
13580 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13581 tg3_phy_toggle_apd(tp, true);
13582 }
13583
13584 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13585 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13586
13587 done:
13588 tp->phy_flags |= eee_cap;
13589
13590 return err;
13591 }
13592
13593 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13594 u64 *data)
13595 {
13596 struct tg3 *tp = netdev_priv(dev);
13597 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13598
13599 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13600 if (tg3_power_up(tp)) {
13601 etest->flags |= ETH_TEST_FL_FAILED;
13602 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13603 return;
13604 }
13605 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13606 }
13607
13608 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13609
13610 if (tg3_test_nvram(tp) != 0) {
13611 etest->flags |= ETH_TEST_FL_FAILED;
13612 data[TG3_NVRAM_TEST] = 1;
13613 }
13614 if (!doextlpbk && tg3_test_link(tp)) {
13615 etest->flags |= ETH_TEST_FL_FAILED;
13616 data[TG3_LINK_TEST] = 1;
13617 }
13618 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13619 int err, err2 = 0, irq_sync = 0;
13620
13621 if (netif_running(dev)) {
13622 tg3_phy_stop(tp);
13623 tg3_netif_stop(tp);
13624 irq_sync = 1;
13625 }
13626
13627 tg3_full_lock(tp, irq_sync);
13628 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13629 err = tg3_nvram_lock(tp);
13630 tg3_halt_cpu(tp, RX_CPU_BASE);
13631 if (!tg3_flag(tp, 5705_PLUS))
13632 tg3_halt_cpu(tp, TX_CPU_BASE);
13633 if (!err)
13634 tg3_nvram_unlock(tp);
13635
13636 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13637 tg3_phy_reset(tp);
13638
13639 if (tg3_test_registers(tp) != 0) {
13640 etest->flags |= ETH_TEST_FL_FAILED;
13641 data[TG3_REGISTER_TEST] = 1;
13642 }
13643
13644 if (tg3_test_memory(tp) != 0) {
13645 etest->flags |= ETH_TEST_FL_FAILED;
13646 data[TG3_MEMORY_TEST] = 1;
13647 }
13648
13649 if (doextlpbk)
13650 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13651
13652 if (tg3_test_loopback(tp, data, doextlpbk))
13653 etest->flags |= ETH_TEST_FL_FAILED;
13654
13655 tg3_full_unlock(tp);
13656
13657 if (tg3_test_interrupt(tp) != 0) {
13658 etest->flags |= ETH_TEST_FL_FAILED;
13659 data[TG3_INTERRUPT_TEST] = 1;
13660 }
13661
13662 tg3_full_lock(tp, 0);
13663
13664 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13665 if (netif_running(dev)) {
13666 tg3_flag_set(tp, INIT_COMPLETE);
13667 err2 = tg3_restart_hw(tp, true);
13668 if (!err2)
13669 tg3_netif_start(tp);
13670 }
13671
13672 tg3_full_unlock(tp);
13673
13674 if (irq_sync && !err2)
13675 tg3_phy_start(tp);
13676 }
13677 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13678 tg3_power_down_prepare(tp);
13679
13680 }
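/* Editorial usage sketch (interface name assumed): the self test is
 * driven from userspace with
 *
 *	ethtool -t eth0 offline		# adds register/memory/loopback tests
 *	ethtool -t eth0 online		# nvram and link checks only
 */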
13681
13682 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13683 {
13684 struct tg3 *tp = netdev_priv(dev);
13685 struct hwtstamp_config stmpconf;
13686
13687 if (!tg3_flag(tp, PTP_CAPABLE))
13688 return -EOPNOTSUPP;
13689
13690 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13691 return -EFAULT;
13692
13693 if (stmpconf.flags)
13694 return -EINVAL;
13695
13696 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13697 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13698 return -ERANGE;
13699
13700 switch (stmpconf.rx_filter) {
13701 case HWTSTAMP_FILTER_NONE:
13702 tp->rxptpctl = 0;
13703 break;
13704 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13705 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13706 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13707 break;
13708 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13709 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13710 TG3_RX_PTP_CTL_SYNC_EVNT;
13711 break;
13712 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13713 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13714 TG3_RX_PTP_CTL_DELAY_REQ;
13715 break;
13716 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13717 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13718 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13719 break;
13720 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13721 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13722 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13723 break;
13724 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13725 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13726 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13727 break;
13728 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13729 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13730 TG3_RX_PTP_CTL_SYNC_EVNT;
13731 break;
13732 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13733 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13734 TG3_RX_PTP_CTL_SYNC_EVNT;
13735 break;
13736 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13737 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13738 TG3_RX_PTP_CTL_SYNC_EVNT;
13739 break;
13740 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13741 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13742 TG3_RX_PTP_CTL_DELAY_REQ;
13743 break;
13744 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13745 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13746 TG3_RX_PTP_CTL_DELAY_REQ;
13747 break;
13748 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13749 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13750 TG3_RX_PTP_CTL_DELAY_REQ;
13751 break;
13752 default:
13753 return -ERANGE;
13754 }
13755
13756 if (netif_running(dev) && tp->rxptpctl)
13757 tw32(TG3_RX_PTP_CTL,
13758 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13759
13760 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13761 tg3_flag_set(tp, TX_TSTAMP_EN);
13762 else
13763 tg3_flag_clear(tp, TX_TSTAMP_EN);
13764
13765 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13766 -EFAULT : 0;
13767 }
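/* Editorial usage sketch (userspace side; device name and socket fd
 * are assumed): hardware timestamping is configured with the standard
 * SIOCSHWTSTAMP ioctl, which ends up in tg3_hwtstamp_set() above:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */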
13768
13769 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13770 {
13771 struct tg3 *tp = netdev_priv(dev);
13772 struct hwtstamp_config stmpconf;
13773
13774 if (!tg3_flag(tp, PTP_CAPABLE))
13775 return -EOPNOTSUPP;
13776
13777 stmpconf.flags = 0;
13778 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13779 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13780
13781 switch (tp->rxptpctl) {
13782 case 0:
13783 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13784 break;
13785 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13786 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13787 break;
13788 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13789 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13790 break;
13791 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13792 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13793 break;
13794 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13795 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13796 break;
13797 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13798 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13799 break;
13800 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13801 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13802 break;
13803 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13804 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13805 break;
13806 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13807 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13808 break;
13809 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13810 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13811 break;
13812 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13813 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13814 break;
13815 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13816 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13817 break;
13818 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13819 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13820 break;
13821 default:
13822 WARN_ON_ONCE(1);
13823 return -ERANGE;
13824 }
13825
13826 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13827 -EFAULT : 0;
13828 }
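/* A minimal userspace sketch of exercising the two handlers above
 * (assumptions: interface name "eth0", fd is an AF_INET datagram socket):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);		-> tg3_hwtstamp_set()
 *	ioctl(fd, SIOCGHWTSTAMP, &ifr);		-> tg3_hwtstamp_get()
 *
 * The config is copied back to userspace in both directions, so the
 * caller can see the settings the driver actually applied.
 */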
13829
13830 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13831 {
13832 struct mii_ioctl_data *data = if_mii(ifr);
13833 struct tg3 *tp = netdev_priv(dev);
13834 int err;
13835
13836 if (tg3_flag(tp, USE_PHYLIB)) {
13837 struct phy_device *phydev;
13838 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13839 return -EAGAIN;
13840 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13841 return phy_mii_ioctl(phydev, ifr, cmd);
13842 }
13843
13844 switch (cmd) {
13845 case SIOCGMIIPHY:
13846 data->phy_id = tp->phy_addr;
13847
13848 /* fallthru */
13849 case SIOCGMIIREG: {
13850 u32 mii_regval;
13851
13852 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13853 break; /* We have no PHY */
13854
13855 if (!netif_running(dev))
13856 return -EAGAIN;
13857
13858 spin_lock_bh(&tp->lock);
13859 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13860 data->reg_num & 0x1f, &mii_regval);
13861 spin_unlock_bh(&tp->lock);
13862
13863 data->val_out = mii_regval;
13864
13865 return err;
13866 }
13867
13868 case SIOCSMIIREG:
13869 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13870 break; /* We have no PHY */
13871
13872 if (!netif_running(dev))
13873 return -EAGAIN;
13874
13875 spin_lock_bh(&tp->lock);
13876 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13877 data->reg_num & 0x1f, data->val_in);
13878 spin_unlock_bh(&tp->lock);
13879
13880 return err;
13881
13882 case SIOCSHWTSTAMP:
13883 return tg3_hwtstamp_set(dev, ifr);
13884
13885 case SIOCGHWTSTAMP:
13886 return tg3_hwtstamp_get(dev, ifr);
13887
13888 default:
13889 /* do nothing */
13890 break;
13891 }
13892 return -EOPNOTSUPP;
13893 }
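/* A minimal userspace sketch of the MII register read handled above
 * (assumptions: interface name "eth0", fd is an AF_INET datagram socket;
 * MII_BMSR is register 1, the basic mode status register):
 *
 *	struct ifreq ifr = {};
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	-> fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	-> result lands in mii->val_out
 *
 * Note the deliberate fall-through above: SIOCGMIIPHY both reports the
 * PHY address and performs a register read with whatever reg_num the
 * caller supplied.
 */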
13894
13895 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13896 {
13897 struct tg3 *tp = netdev_priv(dev);
13898
13899 memcpy(ec, &tp->coal, sizeof(*ec));
13900 return 0;
13901 }
13902
13903 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13904 {
13905 struct tg3 *tp = netdev_priv(dev);
13906 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13907 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13908
13909 if (!tg3_flag(tp, 5705_PLUS)) {
13910 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13911 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13912 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13913 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13914 }
13915
13916 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13917 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13918 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13919 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13920 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13921 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13922 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13923 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13924 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13925 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13926 return -EINVAL;
13927
13928 /* No rx interrupts will be generated if both are zero */
13929 if ((ec->rx_coalesce_usecs == 0) &&
13930 (ec->rx_max_coalesced_frames == 0))
13931 return -EINVAL;
13932
13933 /* No tx interrupts will be generated if both are zero */
13934 if ((ec->tx_coalesce_usecs == 0) &&
13935 (ec->tx_max_coalesced_frames == 0))
13936 return -EINVAL;
13937
13938 /* Only copy relevant parameters, ignore all others. */
13939 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13940 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13941 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13942 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13943 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13944 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13945 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13946 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13947 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13948
13949 if (netif_running(dev)) {
13950 tg3_full_lock(tp, 0);
13951 __tg3_set_coalesce(tp, &tp->coal);
13952 tg3_full_unlock(tp);
13953 }
13954 return 0;
13955 }
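/* For reference, the bounds checked above correspond one-to-one to the
 * standard ethtool coalescing knobs, e.g. (interface name and values
 * assumed):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * arrives here as ec->rx_coalesce_usecs == 20,
 * ec->rx_max_coalesced_frames == 5, and so on. Zeroing both rx-usecs and
 * rx-frames (or both tx counterparts) is rejected because the chip would
 * then never raise the corresponding interrupt.
 */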
13956
13957 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13958 {
13959 struct tg3 *tp = netdev_priv(dev);
13960
13961 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13962 netdev_warn(tp->dev, "Board does not support EEE!\n");
13963 return -EOPNOTSUPP;
13964 }
13965
13966 if (edata->advertised != tp->eee.advertised) {
13967 netdev_warn(tp->dev,
13968 "Direct manipulation of EEE advertisement is not supported\n");
13969 return -EINVAL;
13970 }
13971
13972 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13973 netdev_warn(tp->dev,
13974 "Maximal Tx Lpi timer supported is %#x(u)\n",
13975 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13976 return -EINVAL;
13977 }
13978
13979 tp->eee = *edata;
13980
13981 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13982 tg3_warn_mgmt_link_flap(tp);
13983
13984 if (netif_running(tp->dev)) {
13985 tg3_full_lock(tp, 0);
13986 tg3_setup_eee(tp);
13987 tg3_phy_reset(tp);
13988 tg3_full_unlock(tp);
13989 }
13990
13991 return 0;
13992 }
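/* For reference, this handler is reached through the standard ethtool EEE
 * interface, e.g. (interface name and timer value assumed):
 *
 *	ethtool --set-eee eth0 eee on tx-timer 2047
 *
 * ethtool performs a get-modify-set cycle, so the advertisement mask it
 * passes back normally matches tp->eee.advertised and clears the -EINVAL
 * check above.
 */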
13993
13994 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13995 {
13996 struct tg3 *tp = netdev_priv(dev);
13997
13998 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13999 netdev_warn(tp->dev,
14000 "Board does not support EEE!\n");
14001 return -EOPNOTSUPP;
14002 }
14003
14004 *edata = tp->eee;
14005 return 0;
14006 }
14007
14008 static const struct ethtool_ops tg3_ethtool_ops = {
14009 .get_settings = tg3_get_settings,
14010 .set_settings = tg3_set_settings,
14011 .get_drvinfo = tg3_get_drvinfo,
14012 .get_regs_len = tg3_get_regs_len,
14013 .get_regs = tg3_get_regs,
14014 .get_wol = tg3_get_wol,
14015 .set_wol = tg3_set_wol,
14016 .get_msglevel = tg3_get_msglevel,
14017 .set_msglevel = tg3_set_msglevel,
14018 .nway_reset = tg3_nway_reset,
14019 .get_link = ethtool_op_get_link,
14020 .get_eeprom_len = tg3_get_eeprom_len,
14021 .get_eeprom = tg3_get_eeprom,
14022 .set_eeprom = tg3_set_eeprom,
14023 .get_ringparam = tg3_get_ringparam,
14024 .set_ringparam = tg3_set_ringparam,
14025 .get_pauseparam = tg3_get_pauseparam,
14026 .set_pauseparam = tg3_set_pauseparam,
14027 .self_test = tg3_self_test,
14028 .get_strings = tg3_get_strings,
14029 .set_phys_id = tg3_set_phys_id,
14030 .get_ethtool_stats = tg3_get_ethtool_stats,
14031 .get_coalesce = tg3_get_coalesce,
14032 .set_coalesce = tg3_set_coalesce,
14033 .get_sset_count = tg3_get_sset_count,
14034 .get_rxnfc = tg3_get_rxnfc,
14035 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14036 .get_rxfh_indir = tg3_get_rxfh_indir,
14037 .set_rxfh_indir = tg3_set_rxfh_indir,
14038 .get_channels = tg3_get_channels,
14039 .set_channels = tg3_set_channels,
14040 .get_ts_info = tg3_get_ts_info,
14041 .get_eee = tg3_get_eee,
14042 .set_eee = tg3_set_eee,
14043 };
14044
14045 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14046 struct rtnl_link_stats64 *stats)
14047 {
14048 struct tg3 *tp = netdev_priv(dev);
14049
14050 spin_lock_bh(&tp->lock);
14051 if (!tp->hw_stats) {
14052 spin_unlock_bh(&tp->lock);
14053 return &tp->net_stats_prev;
14054 }
14055
14056 tg3_get_nstats(tp, stats);
14057 spin_unlock_bh(&tp->lock);
14058
14059 return stats;
14060 }
14061
14062 static void tg3_set_rx_mode(struct net_device *dev)
14063 {
14064 struct tg3 *tp = netdev_priv(dev);
14065
14066 if (!netif_running(dev))
14067 return;
14068
14069 tg3_full_lock(tp, 0);
14070 __tg3_set_rx_mode(dev);
14071 tg3_full_unlock(tp);
14072 }
14073
14074 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14075 int new_mtu)
14076 {
14077 dev->mtu = new_mtu;
14078
14079 if (new_mtu > ETH_DATA_LEN) {
14080 if (tg3_flag(tp, 5780_CLASS)) {
14081 netdev_update_features(dev);
14082 tg3_flag_clear(tp, TSO_CAPABLE);
14083 } else {
14084 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14085 }
14086 } else {
14087 if (tg3_flag(tp, 5780_CLASS)) {
14088 tg3_flag_set(tp, TSO_CAPABLE);
14089 netdev_update_features(dev);
14090 }
14091 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14092 }
14093 }
14094
14095 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14096 {
14097 struct tg3 *tp = netdev_priv(dev);
14098 int err;
14099 bool reset_phy = false;
14100
14101 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14102 return -EINVAL;
14103
14104 if (!netif_running(dev)) {
14105 /* The new MTU will simply take effect the next
14106 * time the device is brought up.
14107 */
14108 tg3_set_mtu(dev, tp, new_mtu);
14109 return 0;
14110 }
14111
14112 tg3_phy_stop(tp);
14113
14114 tg3_netif_stop(tp);
14115
14116 tg3_set_mtu(dev, tp, new_mtu);
14117
14118 tg3_full_lock(tp, 1);
14119
14120 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14121
14122 /* Reset the PHY, otherwise the read DMA engine will be left in a
14123 * mode that limits all requests to 256 bytes.
14124 */
14125 if (tg3_asic_rev(tp) == ASIC_REV_57766)
14126 reset_phy = true;
14127
14128 err = tg3_restart_hw(tp, reset_phy);
14129
14130 if (!err)
14131 tg3_netif_start(tp);
14132
14133 tg3_full_unlock(tp);
14134
14135 if (!err)
14136 tg3_phy_start(tp);
14137
14138 return err;
14139 }
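/* For reference, tg3_change_mtu() is the ndo_change_mtu hook and runs
 * when userspace issues e.g. "ip link set dev eth0 mtu 9000" (interface
 * name assumed). On a running device the sequence above is: stop the PHY
 * and NAPI, apply the MTU (possibly toggling the jumbo ring), halt and
 * restart the hardware, then bring NAPI and the PHY back up.
 */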
14140
14141 static const struct net_device_ops tg3_netdev_ops = {
14142 .ndo_open = tg3_open,
14143 .ndo_stop = tg3_close,
14144 .ndo_start_xmit = tg3_start_xmit,
14145 .ndo_get_stats64 = tg3_get_stats64,
14146 .ndo_validate_addr = eth_validate_addr,
14147 .ndo_set_rx_mode = tg3_set_rx_mode,
14148 .ndo_set_mac_address = tg3_set_mac_addr,
14149 .ndo_do_ioctl = tg3_ioctl,
14150 .ndo_tx_timeout = tg3_tx_timeout,
14151 .ndo_change_mtu = tg3_change_mtu,
14152 .ndo_fix_features = tg3_fix_features,
14153 .ndo_set_features = tg3_set_features,
14154 #ifdef CONFIG_NET_POLL_CONTROLLER
14155 .ndo_poll_controller = tg3_poll_controller,
14156 #endif
14157 };
14158
14159 static void tg3_get_eeprom_size(struct tg3 *tp)
14160 {
14161 u32 cursize, val, magic;
14162
14163 tp->nvram_size = EEPROM_CHIP_SIZE;
14164
14165 if (tg3_nvram_read(tp, 0, &magic) != 0)
14166 return;
14167
14168 if ((magic != TG3_EEPROM_MAGIC) &&
14169 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14170 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14171 return;
14172
14173 /*
14174 * Size the chip by reading offsets at increasing powers of two.
14175 * When we encounter our validation signature, we know the addressing
14176 * has wrapped around, and thus have our chip size.
14177 */
14178 cursize = 0x10;
14179
14180 while (cursize < tp->nvram_size) {
14181 if (tg3_nvram_read(tp, cursize, &val) != 0)
14182 return;
14183
14184 if (val == magic)
14185 break;
14186
14187 cursize <<= 1;
14188 }
14189
14190 tp->nvram_size = cursize;
14191 }
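/* A worked example of the sizing loop above (chip size assumed): on a
 * 1 KB part the address lines wrap, so a read at offset 0x400 returns
 * the same word as offset 0, i.e. the validation magic:
 *
 *	0x010 -> data	0x080 -> data
 *	0x020 -> data	0x100 -> data
 *	0x040 -> data	0x200 -> data
 *			0x400 -> magic	=> nvram_size = 0x400
 */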
14192
14193 static void tg3_get_nvram_size(struct tg3 *tp)
14194 {
14195 u32 val;
14196
14197 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14198 return;
14199
14200 /* Selfboot format */
14201 if (val != TG3_EEPROM_MAGIC) {
14202 tg3_get_eeprom_size(tp);
14203 return;
14204 }
14205
14206 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14207 if (val != 0) {
14208 /* We want the 16-bit size value at offset 0xf2.
14209 * tg3_nvram_read() reads from NVRAM and byteswaps
14210 * the data according to the byteswapping settings
14211 * used for all other register accesses, which
14212 * guarantees the value we want always lands in the
14213 * lower 16 bits. However, NVRAM stores data in LE
14214 * format, so the word we read back always has the
14215 * opposite endianness of the CPU. The 16-bit
14216 * byteswap below then brings the value back to CPU
14217 * endianness.
14218 */
14219 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14220 return;
14221 }
14222 }
14223 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14224 }
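/* A worked example of the swab16() above (stored value assumed): if the
 * device size field at 0xf2 holds 512 (KB), the register-style byteswap
 * in tg3_nvram_read() leaves the low 16 bits of val opposite the CPU's
 * byte order, i.e. 0x0002; swab16() restores 0x0200 == 512, giving
 * tp->nvram_size = 512 * 1024.
 */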
14225
14226 static void tg3_get_nvram_info(struct tg3 *tp)
14227 {
14228 u32 nvcfg1;
14229
14230 nvcfg1 = tr32(NVRAM_CFG1);
14231 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14232 tg3_flag_set(tp, FLASH);
14233 } else {
14234 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14235 tw32(NVRAM_CFG1, nvcfg1);
14236 }
14237
14238 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14239 tg3_flag(tp, 5780_CLASS)) {
14240 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14241 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14242 tp->nvram_jedecnum = JEDEC_ATMEL;
14243 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14244 tg3_flag_set(tp, NVRAM_BUFFERED);
14245 break;
14246 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14247 tp->nvram_jedecnum = JEDEC_ATMEL;
14248 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14249 break;
14250 case FLASH_VENDOR_ATMEL_EEPROM:
14251 tp->nvram_jedecnum = JEDEC_ATMEL;
14252 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14253 tg3_flag_set(tp, NVRAM_BUFFERED);
14254 break;
14255 case FLASH_VENDOR_ST:
14256 tp->nvram_jedecnum = JEDEC_ST;
14257 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14258 tg3_flag_set(tp, NVRAM_BUFFERED);
14259 break;
14260 case FLASH_VENDOR_SAIFUN:
14261 tp->nvram_jedecnum = JEDEC_SAIFUN;
14262 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14263 break;
14264 case FLASH_VENDOR_SST_SMALL:
14265 case FLASH_VENDOR_SST_LARGE:
14266 tp->nvram_jedecnum = JEDEC_SST;
14267 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14268 break;
14269 }
14270 } else {
14271 tp->nvram_jedecnum = JEDEC_ATMEL;
14272 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14273 tg3_flag_set(tp, NVRAM_BUFFERED);
14274 }
14275 }
14276
14277 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14278 {
14279 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14280 case FLASH_5752PAGE_SIZE_256:
14281 tp->nvram_pagesize = 256;
14282 break;
14283 case FLASH_5752PAGE_SIZE_512:
14284 tp->nvram_pagesize = 512;
14285 break;
14286 case FLASH_5752PAGE_SIZE_1K:
14287 tp->nvram_pagesize = 1024;
14288 break;
14289 case FLASH_5752PAGE_SIZE_2K:
14290 tp->nvram_pagesize = 2048;
14291 break;
14292 case FLASH_5752PAGE_SIZE_4K:
14293 tp->nvram_pagesize = 4096;
14294 break;
14295 case FLASH_5752PAGE_SIZE_264:
14296 tp->nvram_pagesize = 264;
14297 break;
14298 case FLASH_5752PAGE_SIZE_528:
14299 tp->nvram_pagesize = 528;
14300 break;
14301 }
14302 }
14303
14304 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14305 {
14306 u32 nvcfg1;
14307
14308 nvcfg1 = tr32(NVRAM_CFG1);
14309
14310 /* NVRAM protection for TPM */
14311 if (nvcfg1 & (1 << 27))
14312 tg3_flag_set(tp, PROTECTED_NVRAM);
14313
14314 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14315 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14316 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14317 tp->nvram_jedecnum = JEDEC_ATMEL;
14318 tg3_flag_set(tp, NVRAM_BUFFERED);
14319 break;
14320 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14321 tp->nvram_jedecnum = JEDEC_ATMEL;
14322 tg3_flag_set(tp, NVRAM_BUFFERED);
14323 tg3_flag_set(tp, FLASH);
14324 break;
14325 case FLASH_5752VENDOR_ST_M45PE10:
14326 case FLASH_5752VENDOR_ST_M45PE20:
14327 case FLASH_5752VENDOR_ST_M45PE40:
14328 tp->nvram_jedecnum = JEDEC_ST;
14329 tg3_flag_set(tp, NVRAM_BUFFERED);
14330 tg3_flag_set(tp, FLASH);
14331 break;
14332 }
14333
14334 if (tg3_flag(tp, FLASH)) {
14335 tg3_nvram_get_pagesize(tp, nvcfg1);
14336 } else {
14337 /* For eeprom, set pagesize to maximum eeprom size */
14338 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14339
14340 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14341 tw32(NVRAM_CFG1, nvcfg1);
14342 }
14343 }
14344
14345 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14346 {
14347 u32 nvcfg1, protect = 0;
14348
14349 nvcfg1 = tr32(NVRAM_CFG1);
14350
14351 /* NVRAM protection for TPM */
14352 if (nvcfg1 & (1 << 27)) {
14353 tg3_flag_set(tp, PROTECTED_NVRAM);
14354 protect = 1;
14355 }
14356
14357 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14358 switch (nvcfg1) {
14359 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14360 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14361 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14362 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14363 tp->nvram_jedecnum = JEDEC_ATMEL;
14364 tg3_flag_set(tp, NVRAM_BUFFERED);
14365 tg3_flag_set(tp, FLASH);
14366 tp->nvram_pagesize = 264;
14367 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14368 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14369 tp->nvram_size = (protect ? 0x3e200 :
14370 TG3_NVRAM_SIZE_512KB);
14371 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14372 tp->nvram_size = (protect ? 0x1f200 :
14373 TG3_NVRAM_SIZE_256KB);
14374 else
14375 tp->nvram_size = (protect ? 0x1f200 :
14376 TG3_NVRAM_SIZE_128KB);
14377 break;
14378 case FLASH_5752VENDOR_ST_M45PE10:
14379 case FLASH_5752VENDOR_ST_M45PE20:
14380 case FLASH_5752VENDOR_ST_M45PE40:
14381 tp->nvram_jedecnum = JEDEC_ST;
14382 tg3_flag_set(tp, NVRAM_BUFFERED);
14383 tg3_flag_set(tp, FLASH);
14384 tp->nvram_pagesize = 256;
14385 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14386 tp->nvram_size = (protect ?
14387 TG3_NVRAM_SIZE_64KB :
14388 TG3_NVRAM_SIZE_128KB);
14389 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14390 tp->nvram_size = (protect ?
14391 TG3_NVRAM_SIZE_64KB :
14392 TG3_NVRAM_SIZE_256KB);
14393 else
14394 tp->nvram_size = (protect ?
14395 TG3_NVRAM_SIZE_128KB :
14396 TG3_NVRAM_SIZE_512KB);
14397 break;
14398 }
14399 }
14400
14401 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14402 {
14403 u32 nvcfg1;
14404
14405 nvcfg1 = tr32(NVRAM_CFG1);
14406
14407 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14408 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14409 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14410 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14411 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14412 tp->nvram_jedecnum = JEDEC_ATMEL;
14413 tg3_flag_set(tp, NVRAM_BUFFERED);
14414 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14415
14416 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14417 tw32(NVRAM_CFG1, nvcfg1);
14418 break;
14419 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14420 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14421 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14422 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14423 tp->nvram_jedecnum = JEDEC_ATMEL;
14424 tg3_flag_set(tp, NVRAM_BUFFERED);
14425 tg3_flag_set(tp, FLASH);
14426 tp->nvram_pagesize = 264;
14427 break;
14428 case FLASH_5752VENDOR_ST_M45PE10:
14429 case FLASH_5752VENDOR_ST_M45PE20:
14430 case FLASH_5752VENDOR_ST_M45PE40:
14431 tp->nvram_jedecnum = JEDEC_ST;
14432 tg3_flag_set(tp, NVRAM_BUFFERED);
14433 tg3_flag_set(tp, FLASH);
14434 tp->nvram_pagesize = 256;
14435 break;
14436 }
14437 }
14438
14439 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14440 {
14441 u32 nvcfg1, protect = 0;
14442
14443 nvcfg1 = tr32(NVRAM_CFG1);
14444
14445 /* NVRAM protection for TPM */
14446 if (nvcfg1 & (1 << 27)) {
14447 tg3_flag_set(tp, PROTECTED_NVRAM);
14448 protect = 1;
14449 }
14450
14451 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14452 switch (nvcfg1) {
14453 case FLASH_5761VENDOR_ATMEL_ADB021D:
14454 case FLASH_5761VENDOR_ATMEL_ADB041D:
14455 case FLASH_5761VENDOR_ATMEL_ADB081D:
14456 case FLASH_5761VENDOR_ATMEL_ADB161D:
14457 case FLASH_5761VENDOR_ATMEL_MDB021D:
14458 case FLASH_5761VENDOR_ATMEL_MDB041D:
14459 case FLASH_5761VENDOR_ATMEL_MDB081D:
14460 case FLASH_5761VENDOR_ATMEL_MDB161D:
14461 tp->nvram_jedecnum = JEDEC_ATMEL;
14462 tg3_flag_set(tp, NVRAM_BUFFERED);
14463 tg3_flag_set(tp, FLASH);
14464 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14465 tp->nvram_pagesize = 256;
14466 break;
14467 case FLASH_5761VENDOR_ST_A_M45PE20:
14468 case FLASH_5761VENDOR_ST_A_M45PE40:
14469 case FLASH_5761VENDOR_ST_A_M45PE80:
14470 case FLASH_5761VENDOR_ST_A_M45PE16:
14471 case FLASH_5761VENDOR_ST_M_M45PE20:
14472 case FLASH_5761VENDOR_ST_M_M45PE40:
14473 case FLASH_5761VENDOR_ST_M_M45PE80:
14474 case FLASH_5761VENDOR_ST_M_M45PE16:
14475 tp->nvram_jedecnum = JEDEC_ST;
14476 tg3_flag_set(tp, NVRAM_BUFFERED);
14477 tg3_flag_set(tp, FLASH);
14478 tp->nvram_pagesize = 256;
14479 break;
14480 }
14481
14482 if (protect) {
14483 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14484 } else {
14485 switch (nvcfg1) {
14486 case FLASH_5761VENDOR_ATMEL_ADB161D:
14487 case FLASH_5761VENDOR_ATMEL_MDB161D:
14488 case FLASH_5761VENDOR_ST_A_M45PE16:
14489 case FLASH_5761VENDOR_ST_M_M45PE16:
14490 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14491 break;
14492 case FLASH_5761VENDOR_ATMEL_ADB081D:
14493 case FLASH_5761VENDOR_ATMEL_MDB081D:
14494 case FLASH_5761VENDOR_ST_A_M45PE80:
14495 case FLASH_5761VENDOR_ST_M_M45PE80:
14496 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14497 break;
14498 case FLASH_5761VENDOR_ATMEL_ADB041D:
14499 case FLASH_5761VENDOR_ATMEL_MDB041D:
14500 case FLASH_5761VENDOR_ST_A_M45PE40:
14501 case FLASH_5761VENDOR_ST_M_M45PE40:
14502 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14503 break;
14504 case FLASH_5761VENDOR_ATMEL_ADB021D:
14505 case FLASH_5761VENDOR_ATMEL_MDB021D:
14506 case FLASH_5761VENDOR_ST_A_M45PE20:
14507 case FLASH_5761VENDOR_ST_M_M45PE20:
14508 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14509 break;
14510 }
14511 }
14512 }
14513
14514 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14515 {
14516 tp->nvram_jedecnum = JEDEC_ATMEL;
14517 tg3_flag_set(tp, NVRAM_BUFFERED);
14518 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14519 }
14520
14521 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14522 {
14523 u32 nvcfg1;
14524
14525 nvcfg1 = tr32(NVRAM_CFG1);
14526
14527 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14528 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14529 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14530 tp->nvram_jedecnum = JEDEC_ATMEL;
14531 tg3_flag_set(tp, NVRAM_BUFFERED);
14532 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14533
14534 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14535 tw32(NVRAM_CFG1, nvcfg1);
14536 return;
14537 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14538 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14539 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14540 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14541 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14542 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14543 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14544 tp->nvram_jedecnum = JEDEC_ATMEL;
14545 tg3_flag_set(tp, NVRAM_BUFFERED);
14546 tg3_flag_set(tp, FLASH);
14547
14548 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14549 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14550 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14551 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14552 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14553 break;
14554 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14555 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14556 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14557 break;
14558 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14559 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14560 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14561 break;
14562 }
14563 break;
14564 case FLASH_5752VENDOR_ST_M45PE10:
14565 case FLASH_5752VENDOR_ST_M45PE20:
14566 case FLASH_5752VENDOR_ST_M45PE40:
14567 tp->nvram_jedecnum = JEDEC_ST;
14568 tg3_flag_set(tp, NVRAM_BUFFERED);
14569 tg3_flag_set(tp, FLASH);
14570
14571 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14572 case FLASH_5752VENDOR_ST_M45PE10:
14573 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14574 break;
14575 case FLASH_5752VENDOR_ST_M45PE20:
14576 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14577 break;
14578 case FLASH_5752VENDOR_ST_M45PE40:
14579 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14580 break;
14581 }
14582 break;
14583 default:
14584 tg3_flag_set(tp, NO_NVRAM);
14585 return;
14586 }
14587
14588 tg3_nvram_get_pagesize(tp, nvcfg1);
14589 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14590 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14591 }
14592
14594 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14595 {
14596 u32 nvcfg1;
14597
14598 nvcfg1 = tr32(NVRAM_CFG1);
14599
14600 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14601 case FLASH_5717VENDOR_ATMEL_EEPROM:
14602 case FLASH_5717VENDOR_MICRO_EEPROM:
14603 tp->nvram_jedecnum = JEDEC_ATMEL;
14604 tg3_flag_set(tp, NVRAM_BUFFERED);
14605 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14606
14607 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14608 tw32(NVRAM_CFG1, nvcfg1);
14609 return;
14610 case FLASH_5717VENDOR_ATMEL_MDB011D:
14611 case FLASH_5717VENDOR_ATMEL_ADB011B:
14612 case FLASH_5717VENDOR_ATMEL_ADB011D:
14613 case FLASH_5717VENDOR_ATMEL_MDB021D:
14614 case FLASH_5717VENDOR_ATMEL_ADB021B:
14615 case FLASH_5717VENDOR_ATMEL_ADB021D:
14616 case FLASH_5717VENDOR_ATMEL_45USPT:
14617 tp->nvram_jedecnum = JEDEC_ATMEL;
14618 tg3_flag_set(tp, NVRAM_BUFFERED);
14619 tg3_flag_set(tp, FLASH);
14620
14621 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14622 case FLASH_5717VENDOR_ATMEL_MDB021D:
14623 /* Detect size with tg3_get_nvram_size() */
14624 break;
14625 case FLASH_5717VENDOR_ATMEL_ADB021B:
14626 case FLASH_5717VENDOR_ATMEL_ADB021D:
14627 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14628 break;
14629 default:
14630 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14631 break;
14632 }
14633 break;
14634 case FLASH_5717VENDOR_ST_M_M25PE10:
14635 case FLASH_5717VENDOR_ST_A_M25PE10:
14636 case FLASH_5717VENDOR_ST_M_M45PE10:
14637 case FLASH_5717VENDOR_ST_A_M45PE10:
14638 case FLASH_5717VENDOR_ST_M_M25PE20:
14639 case FLASH_5717VENDOR_ST_A_M25PE20:
14640 case FLASH_5717VENDOR_ST_M_M45PE20:
14641 case FLASH_5717VENDOR_ST_A_M45PE20:
14642 case FLASH_5717VENDOR_ST_25USPT:
14643 case FLASH_5717VENDOR_ST_45USPT:
14644 tp->nvram_jedecnum = JEDEC_ST;
14645 tg3_flag_set(tp, NVRAM_BUFFERED);
14646 tg3_flag_set(tp, FLASH);
14647
14648 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14649 case FLASH_5717VENDOR_ST_M_M25PE20:
14650 case FLASH_5717VENDOR_ST_M_M45PE20:
14651 /* Detect size with tg3_get_nvram_size() */
14652 break;
14653 case FLASH_5717VENDOR_ST_A_M25PE20:
14654 case FLASH_5717VENDOR_ST_A_M45PE20:
14655 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14656 break;
14657 default:
14658 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14659 break;
14660 }
14661 break;
14662 default:
14663 tg3_flag_set(tp, NO_NVRAM);
14664 return;
14665 }
14666
14667 tg3_nvram_get_pagesize(tp, nvcfg1);
14668 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14669 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14670 }
14671
14672 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14673 {
14674 u32 nvcfg1, nvmpinstrp;
14675
14676 nvcfg1 = tr32(NVRAM_CFG1);
14677 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14678
14679 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14680 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14681 tg3_flag_set(tp, NO_NVRAM);
14682 return;
14683 }
14684
14685 switch (nvmpinstrp) {
14686 case FLASH_5762_EEPROM_HD:
14687 nvmpinstrp = FLASH_5720_EEPROM_HD;
14688 break;
14689 case FLASH_5762_EEPROM_LD:
14690 nvmpinstrp = FLASH_5720_EEPROM_LD;
14691 break;
14692 case FLASH_5720VENDOR_M_ST_M45PE20:
14693 /* This pinstrap supports multiple sizes, so force it
14694 * to read the actual size from location 0xf0.
14695 */
14696 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14697 break;
14698 }
14699 }
14700
14701 switch (nvmpinstrp) {
14702 case FLASH_5720_EEPROM_HD:
14703 case FLASH_5720_EEPROM_LD:
14704 tp->nvram_jedecnum = JEDEC_ATMEL;
14705 tg3_flag_set(tp, NVRAM_BUFFERED);
14706
14707 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14708 tw32(NVRAM_CFG1, nvcfg1);
14709 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14710 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14711 else
14712 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14713 return;
14714 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14715 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14716 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14717 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14718 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14719 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14720 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14721 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14722 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14723 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14724 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14725 case FLASH_5720VENDOR_ATMEL_45USPT:
14726 tp->nvram_jedecnum = JEDEC_ATMEL;
14727 tg3_flag_set(tp, NVRAM_BUFFERED);
14728 tg3_flag_set(tp, FLASH);
14729
14730 switch (nvmpinstrp) {
14731 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14732 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14733 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14734 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14735 break;
14736 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14737 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14738 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14739 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14740 break;
14741 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14742 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14743 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14744 break;
14745 default:
14746 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14747 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14748 break;
14749 }
14750 break;
14751 case FLASH_5720VENDOR_M_ST_M25PE10:
14752 case FLASH_5720VENDOR_M_ST_M45PE10:
14753 case FLASH_5720VENDOR_A_ST_M25PE10:
14754 case FLASH_5720VENDOR_A_ST_M45PE10:
14755 case FLASH_5720VENDOR_M_ST_M25PE20:
14756 case FLASH_5720VENDOR_M_ST_M45PE20:
14757 case FLASH_5720VENDOR_A_ST_M25PE20:
14758 case FLASH_5720VENDOR_A_ST_M45PE20:
14759 case FLASH_5720VENDOR_M_ST_M25PE40:
14760 case FLASH_5720VENDOR_M_ST_M45PE40:
14761 case FLASH_5720VENDOR_A_ST_M25PE40:
14762 case FLASH_5720VENDOR_A_ST_M45PE40:
14763 case FLASH_5720VENDOR_M_ST_M25PE80:
14764 case FLASH_5720VENDOR_M_ST_M45PE80:
14765 case FLASH_5720VENDOR_A_ST_M25PE80:
14766 case FLASH_5720VENDOR_A_ST_M45PE80:
14767 case FLASH_5720VENDOR_ST_25USPT:
14768 case FLASH_5720VENDOR_ST_45USPT:
14769 tp->nvram_jedecnum = JEDEC_ST;
14770 tg3_flag_set(tp, NVRAM_BUFFERED);
14771 tg3_flag_set(tp, FLASH);
14772
14773 switch (nvmpinstrp) {
14774 case FLASH_5720VENDOR_M_ST_M25PE20:
14775 case FLASH_5720VENDOR_M_ST_M45PE20:
14776 case FLASH_5720VENDOR_A_ST_M25PE20:
14777 case FLASH_5720VENDOR_A_ST_M45PE20:
14778 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14779 break;
14780 case FLASH_5720VENDOR_M_ST_M25PE40:
14781 case FLASH_5720VENDOR_M_ST_M45PE40:
14782 case FLASH_5720VENDOR_A_ST_M25PE40:
14783 case FLASH_5720VENDOR_A_ST_M45PE40:
14784 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14785 break;
14786 case FLASH_5720VENDOR_M_ST_M25PE80:
14787 case FLASH_5720VENDOR_M_ST_M45PE80:
14788 case FLASH_5720VENDOR_A_ST_M25PE80:
14789 case FLASH_5720VENDOR_A_ST_M45PE80:
14790 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14791 break;
14792 default:
14793 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14794 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14795 break;
14796 }
14797 break;
14798 default:
14799 tg3_flag_set(tp, NO_NVRAM);
14800 return;
14801 }
14802
14803 tg3_nvram_get_pagesize(tp, nvcfg1);
14804 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14805 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14806
14807 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14808 u32 val;
14809
14810 if (tg3_nvram_read(tp, 0, &val))
14811 return;
14812
14813 if (val != TG3_EEPROM_MAGIC &&
14814 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14815 tg3_flag_set(tp, NO_NVRAM);
14816 }
14817 }
14818
14819 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14820 static void tg3_nvram_init(struct tg3 *tp)
14821 {
14822 if (tg3_flag(tp, IS_SSB_CORE)) {
14823 /* There is no NVRAM or EEPROM on the SSB Broadcom GigE core. */
14824 tg3_flag_clear(tp, NVRAM);
14825 tg3_flag_clear(tp, NVRAM_BUFFERED);
14826 tg3_flag_set(tp, NO_NVRAM);
14827 return;
14828 }
14829
14830 tw32_f(GRC_EEPROM_ADDR,
14831 (EEPROM_ADDR_FSM_RESET |
14832 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14833 EEPROM_ADDR_CLKPERD_SHIFT)));
14834
14835 msleep(1);
14836
14837 /* Enable SEEPROM (serial EEPROM) accesses. */
14838 tw32_f(GRC_LOCAL_CTRL,
14839 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14840 udelay(100);
14841
14842 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14843 tg3_asic_rev(tp) != ASIC_REV_5701) {
14844 tg3_flag_set(tp, NVRAM);
14845
14846 if (tg3_nvram_lock(tp)) {
14847 netdev_warn(tp->dev,
14848 "Cannot get nvram lock, %s failed\n",
14849 __func__);
14850 return;
14851 }
14852 tg3_enable_nvram_access(tp);
14853
14854 tp->nvram_size = 0;
14855
14856 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14857 tg3_get_5752_nvram_info(tp);
14858 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14859 tg3_get_5755_nvram_info(tp);
14860 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14861 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14862 tg3_asic_rev(tp) == ASIC_REV_5785)
14863 tg3_get_5787_nvram_info(tp);
14864 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14865 tg3_get_5761_nvram_info(tp);
14866 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14867 tg3_get_5906_nvram_info(tp);
14868 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14869 tg3_flag(tp, 57765_CLASS))
14870 tg3_get_57780_nvram_info(tp);
14871 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14872 tg3_asic_rev(tp) == ASIC_REV_5719)
14873 tg3_get_5717_nvram_info(tp);
14874 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14875 tg3_asic_rev(tp) == ASIC_REV_5762)
14876 tg3_get_5720_nvram_info(tp);
14877 else
14878 tg3_get_nvram_info(tp);
14879
14880 if (tp->nvram_size == 0)
14881 tg3_get_nvram_size(tp);
14882
14883 tg3_disable_nvram_access(tp);
14884 tg3_nvram_unlock(tp);
14885
14886 } else {
14887 tg3_flag_clear(tp, NVRAM);
14888 tg3_flag_clear(tp, NVRAM_BUFFERED);
14889
14890 tg3_get_eeprom_size(tp);
14891 }
14892 }
14893
14894 struct subsys_tbl_ent {
14895 u16 subsys_vendor, subsys_devid;
14896 u32 phy_id;
14897 };
14898
14899 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14900 /* Broadcom boards. */
14901 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14902 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14903 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14904 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14905 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14906 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14907 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14908 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14909 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14910 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14911 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14912 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14913 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14914 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14915 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14916 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14917 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14918 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14919 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14920 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14921 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14922 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14923
14924 /* 3com boards. */
14925 { TG3PCI_SUBVENDOR_ID_3COM,
14926 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14927 { TG3PCI_SUBVENDOR_ID_3COM,
14928 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14929 { TG3PCI_SUBVENDOR_ID_3COM,
14930 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14931 { TG3PCI_SUBVENDOR_ID_3COM,
14932 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14933 { TG3PCI_SUBVENDOR_ID_3COM,
14934 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14935
14936 /* DELL boards. */
14937 { TG3PCI_SUBVENDOR_ID_DELL,
14938 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14939 { TG3PCI_SUBVENDOR_ID_DELL,
14940 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14941 { TG3PCI_SUBVENDOR_ID_DELL,
14942 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14943 { TG3PCI_SUBVENDOR_ID_DELL,
14944 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14945
14946 /* Compaq boards. */
14947 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14948 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14949 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14950 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14951 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14952 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14953 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14954 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14955 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14956 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14957
14958 /* IBM boards. */
14959 { TG3PCI_SUBVENDOR_ID_IBM,
14960 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14961 };
14962
14963 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14964 {
14965 int i;
14966
14967 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14968 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14969 tp->pdev->subsystem_vendor) &&
14970 (subsys_id_to_phy_id[i].subsys_devid ==
14971 tp->pdev->subsystem_device))
14972 return &subsys_id_to_phy_id[i];
14973 }
14974 return NULL;
14975 }
14976
14977 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14978 {
14979 u32 val;
14980
14981 tp->phy_id = TG3_PHY_ID_INVALID;
14982 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14983
14984 /* Assume an onboard, WOL-capable device by default. */
14985 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14986 tg3_flag_set(tp, WOL_CAP);
14987
14988 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14989 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14990 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14991 tg3_flag_set(tp, IS_NIC);
14992 }
14993 val = tr32(VCPU_CFGSHDW);
14994 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14995 tg3_flag_set(tp, ASPM_WORKAROUND);
14996 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14997 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14998 tg3_flag_set(tp, WOL_ENABLE);
14999 device_set_wakeup_enable(&tp->pdev->dev, true);
15000 }
15001 goto done;
15002 }
15003
15004 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15005 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15006 u32 nic_cfg, led_cfg;
15007 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15008 u32 nic_phy_id, ver, eeprom_phy_id;
15009 int eeprom_phy_serdes = 0;
15010
15011 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15012 tp->nic_sram_data_cfg = nic_cfg;
15013
15014 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15015 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15016 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15017 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15018 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15019 (ver > 0) && (ver < 0x100))
15020 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15021
15022 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15023 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15024
15025 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15026 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15027 tg3_asic_rev(tp) == ASIC_REV_5720)
15028 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15029
15030 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15031 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15032 eeprom_phy_serdes = 1;
15033
15034 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15035 if (nic_phy_id != 0) {
15036 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15037 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15038
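/* Repack the MII PHYSID1/PHYSID2 layout into the driver's
 * internal TG3_PHY_ID_* encoding: PHYSID1 (the high OUI bits)
 * shifted left by 10, PHYSID2's top six bits (the remaining
 * OUI bits) moved to the top of the word, and PHYSID2's low
 * ten model/revision bits kept in place. tg3_phy_probe()
 * assembles hw_phy_id from the live MII registers in exactly
 * the same way.
 */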
15039 eeprom_phy_id = (id1 >> 16) << 10;
15040 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15041 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15042 } else
15043 eeprom_phy_id = 0;
15044
15045 tp->phy_id = eeprom_phy_id;
15046 if (eeprom_phy_serdes) {
15047 if (!tg3_flag(tp, 5705_PLUS))
15048 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15049 else
15050 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15051 }
15052
15053 if (tg3_flag(tp, 5750_PLUS))
15054 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15055 SHASTA_EXT_LED_MODE_MASK);
15056 else
15057 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15058
15059 switch (led_cfg) {
15060 default:
15061 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15062 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15063 break;
15064
15065 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15066 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15067 break;
15068
15069 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15070 tp->led_ctrl = LED_CTRL_MODE_MAC;
15071
15072 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15073 * read, as with some older 5700/5701 bootcode.
15074 */
15075 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15076 tg3_asic_rev(tp) == ASIC_REV_5701)
15077 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15078
15079 break;
15080
15081 case SHASTA_EXT_LED_SHARED:
15082 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15083 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15084 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15085 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15086 LED_CTRL_MODE_PHY_2);
15087
15088 if (tg3_flag(tp, 5717_PLUS) ||
15089 tg3_asic_rev(tp) == ASIC_REV_5762)
15090 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15091 LED_CTRL_BLINK_RATE_MASK;
15092
15093 break;
15094
15095 case SHASTA_EXT_LED_MAC:
15096 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15097 break;
15098
15099 case SHASTA_EXT_LED_COMBO:
15100 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15101 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15102 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15103 LED_CTRL_MODE_PHY_2);
15104 break;
15105
15106 }
15107
15108 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15109 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15110 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15111 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15112
15113 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15114 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15115
15116 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15117 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15118 if ((tp->pdev->subsystem_vendor ==
15119 PCI_VENDOR_ID_ARIMA) &&
15120 (tp->pdev->subsystem_device == 0x205a ||
15121 tp->pdev->subsystem_device == 0x2063))
15122 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15123 } else {
15124 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15125 tg3_flag_set(tp, IS_NIC);
15126 }
15127
15128 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15129 tg3_flag_set(tp, ENABLE_ASF);
15130 if (tg3_flag(tp, 5750_PLUS))
15131 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15132 }
15133
15134 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15135 tg3_flag(tp, 5750_PLUS))
15136 tg3_flag_set(tp, ENABLE_APE);
15137
15138 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15139 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15140 tg3_flag_clear(tp, WOL_CAP);
15141
15142 if (tg3_flag(tp, WOL_CAP) &&
15143 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15144 tg3_flag_set(tp, WOL_ENABLE);
15145 device_set_wakeup_enable(&tp->pdev->dev, true);
15146 }
15147
15148 if (cfg2 & (1 << 17))
15149 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15150
15151 /* SerDes signal pre-emphasis in register 0x590 is set
15152 * by the bootcode if bit 18 is set. */
15153 if (cfg2 & (1 << 18))
15154 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15155
15156 if ((tg3_flag(tp, 57765_PLUS) ||
15157 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15158 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15159 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15160 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15161
15162 if (tg3_flag(tp, PCI_EXPRESS)) {
15163 u32 cfg3;
15164
15165 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15166 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15167 !tg3_flag(tp, 57765_PLUS) &&
15168 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15169 tg3_flag_set(tp, ASPM_WORKAROUND);
15170 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15171 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15172 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15173 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15174 }
15175
15176 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15177 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15178 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15179 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15180 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15181 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15182
15183 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15184 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15185 }
15186 done:
15187 if (tg3_flag(tp, WOL_CAP))
15188 device_set_wakeup_enable(&tp->pdev->dev,
15189 tg3_flag(tp, WOL_ENABLE));
15190 else
15191 device_set_wakeup_capable(&tp->pdev->dev, false);
15192 }
15193
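/* Read one 32-bit word from the APE's OTP region. The OTP appears to be
 * addressed in 8-byte granules (hence offset * 8). The read command is
 * kicked off through TG3_APE_OTP_CTRL and then polled for completion for
 * up to roughly 1 ms (100 iterations x 10 us); the NVRAM lock serializes
 * this against other NVRAM/OTP users.
 */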
15194 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15195 {
15196 int i, err;
15197 u32 val2, off = offset * 8;
15198
15199 err = tg3_nvram_lock(tp);
15200 if (err)
15201 return err;
15202
15203 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15204 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15205 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15206 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15207 udelay(10);
15208
15209 for (i = 0; i < 100; i++) {
15210 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15211 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15212 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15213 break;
15214 }
15215 udelay(10);
15216 }
15217
15218 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15219
15220 tg3_nvram_unlock(tp);
15221 if (val2 & APE_OTP_STATUS_CMD_DONE)
15222 return 0;
15223
15224 return -EBUSY;
15225 }
15226
15227 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15228 {
15229 int i;
15230 u32 val;
15231
15232 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15233 tw32(OTP_CTRL, cmd);
15234
15235 /* Wait for up to 1 ms for command to execute. */
15236 for (i = 0; i < 100; i++) {
15237 val = tr32(OTP_STATUS);
15238 if (val & OTP_STATUS_CMD_DONE)
15239 break;
15240 udelay(10);
15241 }
15242
15243 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15244 }
15245
15246 /* Read the gphy configuration from the OTP region of the chip. The gphy
15247 * configuration is a 32-bit value that straddles the alignment boundary.
15248 * We do two 32-bit reads and then shift and merge the results.
15249 */
15250 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15251 {
15252 u32 bhalf_otp, thalf_otp;
15253
15254 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15255
15256 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15257 return 0;
15258
15259 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15260
15261 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15262 return 0;
15263
15264 thalf_otp = tr32(OTP_READ_DATA);
15265
15266 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15267
15268 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15269 return 0;
15270
15271 bhalf_otp = tr32(OTP_READ_DATA);
15272
15273 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15274 }
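/* A worked example of the merge above (word values assumed): with
 * thalf_otp == 0xAAAABBBB and bhalf_otp == 0xCCCCDDDD, the gphy config
 * word straddling the two reads is
 *
 *	((0xAAAABBBB & 0x0000ffff) << 16) | (0xCCCCDDDD >> 16) == 0xBBBBCCCC
 */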
15275
15276 static void tg3_phy_init_link_config(struct tg3 *tp)
15277 {
15278 u32 adv = ADVERTISED_Autoneg;
15279
15280 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15281 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15282 adv |= ADVERTISED_1000baseT_Half;
15283 adv |= ADVERTISED_1000baseT_Full;
15284 }
15285
15286 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15287 adv |= ADVERTISED_100baseT_Half |
15288 ADVERTISED_100baseT_Full |
15289 ADVERTISED_10baseT_Half |
15290 ADVERTISED_10baseT_Full |
15291 ADVERTISED_TP;
15292 else
15293 adv |= ADVERTISED_FIBRE;
15294
15295 tp->link_config.advertising = adv;
15296 tp->link_config.speed = SPEED_UNKNOWN;
15297 tp->link_config.duplex = DUPLEX_UNKNOWN;
15298 tp->link_config.autoneg = AUTONEG_ENABLE;
15299 tp->link_config.active_speed = SPEED_UNKNOWN;
15300 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15301
15302 tp->old_link = -1;
15303 }
15304
15305 static int tg3_phy_probe(struct tg3 *tp)
15306 {
15307 u32 hw_phy_id_1, hw_phy_id_2;
15308 u32 hw_phy_id, hw_phy_id_masked;
15309 int err;
15310
15311 /* flow control autonegotiation is default behavior */
15312 tg3_flag_set(tp, PAUSE_AUTONEG);
15313 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15314
15315 if (tg3_flag(tp, ENABLE_APE)) {
15316 switch (tp->pci_fn) {
15317 case 0:
15318 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15319 break;
15320 case 1:
15321 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15322 break;
15323 case 2:
15324 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15325 break;
15326 case 3:
15327 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15328 break;
15329 }
15330 }
15331
15332 if (!tg3_flag(tp, ENABLE_ASF) &&
15333 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15334 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15335 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15336 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15337
15338 if (tg3_flag(tp, USE_PHYLIB))
15339 return tg3_phy_init(tp);
15340
15341 /* Reading the PHY ID register can conflict with ASF
15342 * firmware access to the PHY hardware.
15343 */
15344 err = 0;
15345 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15346 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15347 } else {
15348 /* Now read the physical PHY_ID from the chip and verify
15349 * that it is sane. If it doesn't look good, we fall back
15350 * first to the PHY ID found in the EEPROM area, and
15351 * failing that to the hard-coded subsystem-ID table.
15352 */
15353 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15354 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15355
15356 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15357 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15358 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15359
15360 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15361 }
15362
15363 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15364 tp->phy_id = hw_phy_id;
15365 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15366 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15367 else
15368 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15369 } else {
15370 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15371 /* Do nothing, phy ID already set up in
15372 * tg3_get_eeprom_hw_cfg().
15373 */
15374 } else {
15375 struct subsys_tbl_ent *p;
15376
15377 /* No eeprom signature? Try the hardcoded
15378 * subsys device table.
15379 */
15380 p = tg3_lookup_by_subsys(tp);
15381 if (p) {
15382 tp->phy_id = p->phy_id;
15383 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15384 /* So far we have seen the IDs 0xbc050cd0,
15385 * 0xbc050f80 and 0xbc050c30 on devices
15386 * connected to a BCM4785, and there are
15387 * probably more. For now, just assume the
15388 * PHY is supported whenever it is connected
15389 * to an SSB core.
15390 */
15391 return -ENODEV;
15392 }
15393
15394 if (!tp->phy_id ||
15395 tp->phy_id == TG3_PHY_ID_BCM8002)
15396 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15397 }
15398 }
15399
15400 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15401 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15402 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15403 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15404 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15405 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15406 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15407 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15408 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15409 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15410
15411 tp->eee.supported = SUPPORTED_100baseT_Full |
15412 SUPPORTED_1000baseT_Full;
15413 tp->eee.advertised = ADVERTISED_100baseT_Full |
15414 ADVERTISED_1000baseT_Full;
15415 tp->eee.eee_enabled = 1;
15416 tp->eee.tx_lpi_enabled = 1;
15417 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15418 }
15419
15420 tg3_phy_init_link_config(tp);
15421
15422 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15423 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15424 !tg3_flag(tp, ENABLE_APE) &&
15425 !tg3_flag(tp, ENABLE_ASF)) {
15426 u32 bmsr, dummy;
15427
15428 tg3_readphy(tp, MII_BMSR, &bmsr);
15429 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15430 (bmsr & BMSR_LSTATUS))
15431 goto skip_phy_reset;
15432
15433 err = tg3_phy_reset(tp);
15434 if (err)
15435 return err;
15436
15437 tg3_phy_set_wirespeed(tp);
15438
15439 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15440 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15441 tp->link_config.flowctrl);
15442
15443 tg3_writephy(tp, MII_BMCR,
15444 BMCR_ANENABLE | BMCR_ANRESTART);
15445 }
15446 }
15447
15448 skip_phy_reset:
15449 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15450 err = tg3_init_5401phy_dsp(tp);
15451 if (err)
15452 return err;
15453
15454 err = tg3_init_5401phy_dsp(tp);
15455 }
15456
15457 return err;
15458 }
15459
15460 static void tg3_read_vpd(struct tg3 *tp)
15461 {
15462 u8 *vpd_data;
15463 unsigned int block_end, rosize, len;
15464 u32 vpdlen;
15465 int j, i = 0;
15466
15467 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15468 if (!vpd_data)
15469 goto out_no_vpd;
15470
15471 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15472 if (i < 0)
15473 goto out_not_found;
15474
15475 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15476 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15477 i += PCI_VPD_LRDT_TAG_SIZE;
15478
15479 if (block_end > vpdlen)
15480 goto out_not_found;
15481
15482 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15483 PCI_VPD_RO_KEYWORD_MFR_ID);
15484 if (j > 0) {
15485 len = pci_vpd_info_field_size(&vpd_data[j]);
15486
15487 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15488 if (j + len > block_end || len != 4 ||
15489 memcmp(&vpd_data[j], "1028", 4))
15490 goto partno;
15491
15492 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15493 PCI_VPD_RO_KEYWORD_VENDOR0);
15494 if (j < 0)
15495 goto partno;
15496
15497 len = pci_vpd_info_field_size(&vpd_data[j]);
15498
15499 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15500 if (j + len > block_end)
15501 goto partno;
15502
15503 if (len >= sizeof(tp->fw_ver))
15504 len = sizeof(tp->fw_ver) - 1;
15505 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15506 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15507 &vpd_data[j]);
15508 }
15509
15510 partno:
15511 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15512 PCI_VPD_RO_KEYWORD_PARTNO);
15513 if (i < 0)
15514 goto out_not_found;
15515
15516 len = pci_vpd_info_field_size(&vpd_data[i]);
15517
15518 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15519 if (len > TG3_BPN_SIZE ||
15520 (len + i) > vpdlen)
15521 goto out_not_found;
15522
15523 memcpy(tp->board_part_number, &vpd_data[i], len);
15524
15525 out_not_found:
15526 kfree(vpd_data);
15527 if (tp->board_part_number[0])
15528 return;
15529
15530 out_no_vpd:
15531 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15532 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15533 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15534 strcpy(tp->board_part_number, "BCM5717");
15535 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15536 strcpy(tp->board_part_number, "BCM5718");
15537 else
15538 goto nomatch;
15539 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15540 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15541 strcpy(tp->board_part_number, "BCM57780");
15542 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15543 strcpy(tp->board_part_number, "BCM57760");
15544 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15545 strcpy(tp->board_part_number, "BCM57790");
15546 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15547 strcpy(tp->board_part_number, "BCM57788");
15548 else
15549 goto nomatch;
15550 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15551 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15552 strcpy(tp->board_part_number, "BCM57761");
15553 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15554 strcpy(tp->board_part_number, "BCM57765");
15555 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15556 strcpy(tp->board_part_number, "BCM57781");
15557 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15558 strcpy(tp->board_part_number, "BCM57785");
15559 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15560 strcpy(tp->board_part_number, "BCM57791");
15561 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15562 strcpy(tp->board_part_number, "BCM57795");
15563 else
15564 goto nomatch;
15565 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15566 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15567 strcpy(tp->board_part_number, "BCM57762");
15568 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15569 strcpy(tp->board_part_number, "BCM57766");
15570 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15571 strcpy(tp->board_part_number, "BCM57782");
15572 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15573 strcpy(tp->board_part_number, "BCM57786");
15574 else
15575 goto nomatch;
15576 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15577 strcpy(tp->board_part_number, "BCM95906");
15578 } else {
15579 nomatch:
15580 strcpy(tp->board_part_number, "none");
15581 }
15582 }
15583
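/* Sanity-check a firmware image header in NVRAM: per the checks
 * below, the first word must carry the 0x0c000000 signature bits and
 * the second word must be zero.
 */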
15584 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15585 {
15586 u32 val;
15587
15588 if (tg3_nvram_read(tp, offset, &val) ||
15589 (val & 0xfc000000) != 0x0c000000 ||
15590 tg3_nvram_read(tp, offset + 4, &val) ||
15591 val != 0)
15592 return 0;
15593
15594 return 1;
15595 }
15596
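/* Append the NVRAM bootcode version to tp->fw_ver. Newer images
 * provide a 16-byte version string through a pointer at image
 * offset 8; older images encode major.minor in a fixed directory
 * word.
 */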
15597 static void tg3_read_bc_ver(struct tg3 *tp)
15598 {
15599 u32 val, offset, start, ver_offset;
15600 int i, dst_off;
15601 bool newver = false;
15602
15603 if (tg3_nvram_read(tp, 0xc, &offset) ||
15604 tg3_nvram_read(tp, 0x4, &start))
15605 return;
15606
15607 offset = tg3_nvram_logical_addr(tp, offset);
15608
15609 if (tg3_nvram_read(tp, offset, &val))
15610 return;
15611
15612 if ((val & 0xfc000000) == 0x0c000000) {
15613 if (tg3_nvram_read(tp, offset + 4, &val))
15614 return;
15615
15616 if (val == 0)
15617 newver = true;
15618 }
15619
15620 dst_off = strlen(tp->fw_ver);
15621
15622 if (newver) {
15623 if (TG3_VER_SIZE - dst_off < 16 ||
15624 tg3_nvram_read(tp, offset + 8, &ver_offset))
15625 return;
15626
15627 offset = offset + ver_offset - start;
15628 for (i = 0; i < 16; i += 4) {
15629 __be32 v;
15630 if (tg3_nvram_read_be32(tp, offset + i, &v))
15631 return;
15632
15633 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15634 }
15635 } else {
15636 u32 major, minor;
15637
15638 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15639 return;
15640
15641 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15642 TG3_NVM_BCVER_MAJSFT;
15643 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15644 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15645 "v%d.%02d", major, minor);
15646 }
15647 }
15648
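/* Format the hardware self-boot version from NVRAM config word 1
 * into tp->fw_ver.
 */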
15649 static void tg3_read_hwsb_ver(struct tg3 *tp)
15650 {
15651 u32 val, major, minor;
15652
15653 /* Use native endian representation */
15654 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15655 return;
15656
15657 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15658 TG3_NVM_HWSB_CFG1_MAJSFT;
15659 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15660 TG3_NVM_HWSB_CFG1_MINSFT;
15661
15662 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15663 }
15664
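/* Decode a self-boot (EEPROM) firmware version from the format/
 * revision word. A nonzero build number is appended as a letter
 * suffix ('a' for build 1).
 */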
15665 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15666 {
15667 u32 offset, major, minor, build;
15668
15669 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15670
15671 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15672 return;
15673
15674 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15675 case TG3_EEPROM_SB_REVISION_0:
15676 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15677 break;
15678 case TG3_EEPROM_SB_REVISION_2:
15679 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15680 break;
15681 case TG3_EEPROM_SB_REVISION_3:
15682 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15683 break;
15684 case TG3_EEPROM_SB_REVISION_4:
15685 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15686 break;
15687 case TG3_EEPROM_SB_REVISION_5:
15688 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15689 break;
15690 case TG3_EEPROM_SB_REVISION_6:
15691 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15692 break;
15693 default:
15694 return;
15695 }
15696
15697 if (tg3_nvram_read(tp, offset, &val))
15698 return;
15699
15700 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15701 TG3_EEPROM_SB_EDH_BLD_SHFT;
15702 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15703 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15704 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15705
15706 if (minor > 99 || build > 26)
15707 return;
15708
15709 offset = strlen(tp->fw_ver);
15710 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15711 " v%d.%02d", major, minor);
15712
15713 if (build > 0) {
15714 offset = strlen(tp->fw_ver);
15715 if (offset < TG3_VER_SIZE - 1)
15716 tp->fw_ver[offset] = 'a' + build - 1;
15717 }
15718 }
15719
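/* Find the ASF management firmware image through the NVRAM
 * directory and append its 16-byte version string to tp->fw_ver.
 */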
15720 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15721 {
15722 u32 val, offset, start;
15723 int i, vlen;
15724
15725 for (offset = TG3_NVM_DIR_START;
15726 offset < TG3_NVM_DIR_END;
15727 offset += TG3_NVM_DIRENT_SIZE) {
15728 if (tg3_nvram_read(tp, offset, &val))
15729 return;
15730
15731 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15732 break;
15733 }
15734
15735 if (offset == TG3_NVM_DIR_END)
15736 return;
15737
15738 if (!tg3_flag(tp, 5705_PLUS))
15739 start = 0x08000000;
15740 else if (tg3_nvram_read(tp, offset - 4, &start))
15741 return;
15742
15743 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15744 !tg3_fw_img_is_valid(tp, offset) ||
15745 tg3_nvram_read(tp, offset + 8, &val))
15746 return;
15747
15748 offset += val - start;
15749
15750 vlen = strlen(tp->fw_ver);
15751
15752 tp->fw_ver[vlen++] = ',';
15753 tp->fw_ver[vlen++] = ' ';
15754
15755 for (i = 0; i < 4; i++) {
15756 __be32 v;
15757 if (tg3_nvram_read_be32(tp, offset, &v))
15758 return;
15759
15760 offset += sizeof(v);
15761
15762 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15763 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15764 break;
15765 }
15766
15767 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15768 vlen += sizeof(v);
15769 }
15770 }
15771
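/* Set APE_HAS_NCSI if the APE firmware is present, ready, and
 * advertises the NC-SI feature bit.
 */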
15772 static void tg3_probe_ncsi(struct tg3 *tp)
15773 {
15774 u32 apedata;
15775
15776 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15777 if (apedata != APE_SEG_SIG_MAGIC)
15778 return;
15779
15780 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15781 if (!(apedata & APE_FW_STATUS_READY))
15782 return;
15783
15784 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15785 tg3_flag_set(tp, APE_HAS_NCSI);
15786 }
15787
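/* Append the APE (NCSI/SMASH/DASH) firmware version to tp->fw_ver. */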
15788 static void tg3_read_dash_ver(struct tg3 *tp)
15789 {
15790 int vlen;
15791 u32 apedata;
15792 char *fwtype;
15793
15794 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15795
15796 if (tg3_flag(tp, APE_HAS_NCSI))
15797 fwtype = "NCSI";
15798 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15799 fwtype = "SMASH";
15800 else
15801 fwtype = "DASH";
15802
15803 vlen = strlen(tp->fw_ver);
15804
15805 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15806 fwtype,
15807 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15808 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15809 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15810 (apedata & APE_FW_VERSION_BLDMSK));
15811 }
15812
15813 static void tg3_read_otp_ver(struct tg3 *tp)
15814 {
15815 u32 val, val2;
15816
15817 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15818 return;
15819
15820 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15821 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15822 TG3_OTP_MAGIC0_VALID(val)) {
15823 u64 val64 = (u64) val << 32 | val2;
15824 u32 ver = 0;
15825 int i, vlen;
15826
15827 for (i = 0; i < 7; i++) {
15828 if ((val64 & 0xff) == 0)
15829 break;
15830 ver = val64 & 0xff;
15831 val64 >>= 8;
15832 }
15833 vlen = strlen(tp->fw_ver);
15834 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15835 }
15836 }
15837
15838 static void tg3_read_fw_ver(struct tg3 *tp)
15839 {
15840 u32 val;
15841 bool vpd_vers = false;
15842
15843 if (tp->fw_ver[0] != 0)
15844 vpd_vers = true;
15845
15846 if (tg3_flag(tp, NO_NVRAM)) {
15847 strcat(tp->fw_ver, "sb");
15848 tg3_read_otp_ver(tp);
15849 return;
15850 }
15851
15852 if (tg3_nvram_read(tp, 0, &val))
15853 return;
15854
15855 if (val == TG3_EEPROM_MAGIC)
15856 tg3_read_bc_ver(tp);
15857 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15858 tg3_read_sb_ver(tp, val);
15859 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15860 tg3_read_hwsb_ver(tp);
15861
15862 if (tg3_flag(tp, ENABLE_ASF)) {
15863 if (tg3_flag(tp, ENABLE_APE)) {
15864 tg3_probe_ncsi(tp);
15865 if (!vpd_vers)
15866 tg3_read_dash_ver(tp);
15867 } else if (!vpd_vers) {
15868 tg3_read_mgmtfw_ver(tp);
15869 }
15870 }
15871
15872 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15873 }
15874
15875 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15876 {
15877 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15878 return TG3_RX_RET_MAX_SIZE_5717;
15879 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15880 return TG3_RX_RET_MAX_SIZE_5700;
15881 else
15882 return TG3_RX_RET_MAX_SIZE_5705;
15883 }
15884
15885 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15886 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15887 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15888 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15889 { },
15890 };
15891
15892 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15893 {
15894 struct pci_dev *peer;
15895 unsigned int func, devnr = tp->pdev->devfn & ~7;
15896
15897 for (func = 0; func < 8; func++) {
15898 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15899 if (peer && peer != tp->pdev)
15900 break;
15901 pci_dev_put(peer);
15902 }
15903 /* 5704 can be configured in single-port mode; set peer to
15904 * tp->pdev in that case.
15905 */
15906 if (!peer) {
15907 peer = tp->pdev;
15908 return peer;
15909 }
15910
15911 /*
15912 * We don't need to keep the refcount elevated; there's no way
15913 * to remove one half of this device without removing the other.
15914 */
15915 pci_dev_put(peer);
15916
15917 return peer;
15918 }
15919
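/* Derive tp->pci_chip_rev_id from the misc host control register,
 * or from a product ID register on devices that use the alternate
 * ASIC REV location, then set the chip-family flags (5717_PLUS,
 * 57765_CLASS, ...) that the rest of the driver keys off.
 */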
15920 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15921 {
15922 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15923 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15924 u32 reg;
15925
15926 /* All devices that use the alternate
15927 * ASIC REV location have a CPMU.
15928 */
15929 tg3_flag_set(tp, CPMU_PRESENT);
15930
15931 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15932 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15933 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15934 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15935 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15936 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15937 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15938 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15939 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15940 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15941 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15942 reg = TG3PCI_GEN2_PRODID_ASICREV;
15943 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15944 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15945 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15946 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15947 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15948 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15949 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15950 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15951 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15952 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15953 reg = TG3PCI_GEN15_PRODID_ASICREV;
15954 else
15955 reg = TG3PCI_PRODID_ASICREV;
15956
15957 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15958 }
15959
15960 /* Wrong chip ID in 5752 A0. This code can be removed later
15961 * as A0 is not in production.
15962 */
15963 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15964 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15965
15966 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15967 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15968
15969 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15970 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15971 tg3_asic_rev(tp) == ASIC_REV_5720)
15972 tg3_flag_set(tp, 5717_PLUS);
15973
15974 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15975 tg3_asic_rev(tp) == ASIC_REV_57766)
15976 tg3_flag_set(tp, 57765_CLASS);
15977
15978 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15979 tg3_asic_rev(tp) == ASIC_REV_5762)
15980 tg3_flag_set(tp, 57765_PLUS);
15981
15982 /* Intentionally exclude ASIC_REV_5906 */
15983 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15984 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15985 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15986 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15987 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15988 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15989 tg3_flag(tp, 57765_PLUS))
15990 tg3_flag_set(tp, 5755_PLUS);
15991
15992 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15993 tg3_asic_rev(tp) == ASIC_REV_5714)
15994 tg3_flag_set(tp, 5780_CLASS);
15995
15996 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15997 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15998 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15999 tg3_flag(tp, 5755_PLUS) ||
16000 tg3_flag(tp, 5780_CLASS))
16001 tg3_flag_set(tp, 5750_PLUS);
16002
16003 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16004 tg3_flag(tp, 5750_PLUS))
16005 tg3_flag_set(tp, 5705_PLUS);
16006 }
16007
16008 static bool tg3_10_100_only_device(struct tg3 *tp,
16009 const struct pci_device_id *ent)
16010 {
16011 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16012
16013 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16014 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16015 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16016 return true;
16017
16018 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16019 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16020 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16021 return true;
16022 } else {
16023 return true;
16024 }
16025 }
16026
16027 return false;
16028 }
16029
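/* One-time probe-path discovery of chip capabilities, bus type, and
 * workaround flags. Among other things this selects the register
 * access methods, so it must run before most MMIO accesses.
 */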
16030 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16031 {
16032 u32 misc_ctrl_reg;
16033 u32 pci_state_reg, grc_misc_cfg;
16034 u32 val;
16035 u16 pci_cmd;
16036 int err;
16037
16038 /* Force memory write invalidate off. If we leave it on,
16039 * then on 5700_BX chips we have to enable a workaround.
16040 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16041 * to match the cacheline size. The Broadcom driver has this
16042 * workaround but turns MWI off at all times and so never uses
16043 * it. This seems to suggest that the workaround is insufficient.
16044 */
16045 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16046 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16047 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16048
16049 /* Important! -- Make sure register accesses are byteswapped
16050 * correctly. Also, for those chips that require it, make
16051 * sure that indirect register accesses are enabled before
16052 * the first operation.
16053 */
16054 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16055 &misc_ctrl_reg);
16056 tp->misc_host_ctrl |= (misc_ctrl_reg &
16057 MISC_HOST_CTRL_CHIPREV);
16058 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16059 tp->misc_host_ctrl);
16060
16061 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16062
16063 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16064 * we need to disable memory and use config. cycles
16065 * only to access all registers. The 5702/03 chips
16066 * can mistakenly decode the special cycles from the
16067 * ICH chipsets as memory write cycles, causing corruption
16068 * of register and memory space. Only certain ICH bridges
16069 * will drive special cycles with non-zero data during the
16070 * address phase which can fall within the 5703's address
16071 * range. This is not an ICH bug as the PCI spec allows
16072 * non-zero address during special cycles. However, only
16073 * these ICH bridges are known to drive non-zero addresses
16074 * during special cycles.
16075 *
16076 * Since special cycles do not cross PCI bridges, we only
16077 * enable this workaround if the 5703 is on the secondary
16078 * bus of these ICH bridges.
16079 */
16080 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16081 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16082 static struct tg3_dev_id {
16083 u32 vendor;
16084 u32 device;
16085 u32 rev;
16086 } ich_chipsets[] = {
16087 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16088 PCI_ANY_ID },
16089 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16090 PCI_ANY_ID },
16091 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16092 0xa },
16093 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16094 PCI_ANY_ID },
16095 { },
16096 };
16097 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16098 struct pci_dev *bridge = NULL;
16099
16100 while (pci_id->vendor != 0) {
16101 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16102 bridge);
16103 if (!bridge) {
16104 pci_id++;
16105 continue;
16106 }
16107 if (pci_id->rev != PCI_ANY_ID) {
16108 if (bridge->revision > pci_id->rev)
16109 continue;
16110 }
16111 if (bridge->subordinate &&
16112 (bridge->subordinate->number ==
16113 tp->pdev->bus->number)) {
16114 tg3_flag_set(tp, ICH_WORKAROUND);
16115 pci_dev_put(bridge);
16116 break;
16117 }
16118 }
16119 }
16120
16121 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16122 static struct tg3_dev_id {
16123 u32 vendor;
16124 u32 device;
16125 } bridge_chipsets[] = {
16126 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16127 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16128 { },
16129 };
16130 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16131 struct pci_dev *bridge = NULL;
16132
16133 while (pci_id->vendor != 0) {
16134 bridge = pci_get_device(pci_id->vendor,
16135 pci_id->device,
16136 bridge);
16137 if (!bridge) {
16138 pci_id++;
16139 continue;
16140 }
16141 if (bridge->subordinate &&
16142 (bridge->subordinate->number <=
16143 tp->pdev->bus->number) &&
16144 (bridge->subordinate->busn_res.end >=
16145 tp->pdev->bus->number)) {
16146 tg3_flag_set(tp, 5701_DMA_BUG);
16147 pci_dev_put(bridge);
16148 break;
16149 }
16150 }
16151 }
16152
16153 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16154 * DMA addresses > 40-bit. This bridge may have additional
16155 * 57xx devices behind it in some 4-port NIC designs, for example.
16156 * Any tg3 device found behind the bridge will also need the 40-bit
16157 * DMA workaround.
16158 */
16159 if (tg3_flag(tp, 5780_CLASS)) {
16160 tg3_flag_set(tp, 40BIT_DMA_BUG);
16161 tp->msi_cap = tp->pdev->msi_cap;
16162 } else {
16163 struct pci_dev *bridge = NULL;
16164
16165 do {
16166 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16167 PCI_DEVICE_ID_SERVERWORKS_EPB,
16168 bridge);
16169 if (bridge && bridge->subordinate &&
16170 (bridge->subordinate->number <=
16171 tp->pdev->bus->number) &&
16172 (bridge->subordinate->busn_res.end >=
16173 tp->pdev->bus->number)) {
16174 tg3_flag_set(tp, 40BIT_DMA_BUG);
16175 pci_dev_put(bridge);
16176 break;
16177 }
16178 } while (bridge);
16179 }
16180
16181 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16182 tg3_asic_rev(tp) == ASIC_REV_5714)
16183 tp->pdev_peer = tg3_find_peer(tp);
16184
16185 /* Determine TSO capabilities */
16186 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16187 ; /* Do nothing. HW bug. */
16188 else if (tg3_flag(tp, 57765_PLUS))
16189 tg3_flag_set(tp, HW_TSO_3);
16190 else if (tg3_flag(tp, 5755_PLUS) ||
16191 tg3_asic_rev(tp) == ASIC_REV_5906)
16192 tg3_flag_set(tp, HW_TSO_2);
16193 else if (tg3_flag(tp, 5750_PLUS)) {
16194 tg3_flag_set(tp, HW_TSO_1);
16195 tg3_flag_set(tp, TSO_BUG);
16196 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16197 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16198 tg3_flag_clear(tp, TSO_BUG);
16199 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16200 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16201 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16202 tg3_flag_set(tp, FW_TSO);
16203 tg3_flag_set(tp, TSO_BUG);
16204 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16205 tp->fw_needed = FIRMWARE_TG3TSO5;
16206 else
16207 tp->fw_needed = FIRMWARE_TG3TSO;
16208 }
16209
16210 /* Selectively allow TSO based on operating conditions */
16211 if (tg3_flag(tp, HW_TSO_1) ||
16212 tg3_flag(tp, HW_TSO_2) ||
16213 tg3_flag(tp, HW_TSO_3) ||
16214 tg3_flag(tp, FW_TSO)) {
16215 /* For firmware TSO, assume ASF is disabled.
16216 * We'll disable TSO later if we discover ASF
16217 * is enabled in tg3_get_eeprom_hw_cfg().
16218 */
16219 tg3_flag_set(tp, TSO_CAPABLE);
16220 } else {
16221 tg3_flag_clear(tp, TSO_CAPABLE);
16222 tg3_flag_clear(tp, TSO_BUG);
16223 tp->fw_needed = NULL;
16224 }
16225
16226 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16227 tp->fw_needed = FIRMWARE_TG3;
16228
16229 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16230 tp->fw_needed = FIRMWARE_TG357766;
16231
16232 tp->irq_max = 1;
16233
16234 if (tg3_flag(tp, 5750_PLUS)) {
16235 tg3_flag_set(tp, SUPPORT_MSI);
16236 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16237 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16238 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16239 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16240 tp->pdev_peer == tp->pdev))
16241 tg3_flag_clear(tp, SUPPORT_MSI);
16242
16243 if (tg3_flag(tp, 5755_PLUS) ||
16244 tg3_asic_rev(tp) == ASIC_REV_5906) {
16245 tg3_flag_set(tp, 1SHOT_MSI);
16246 }
16247
16248 if (tg3_flag(tp, 57765_PLUS)) {
16249 tg3_flag_set(tp, SUPPORT_MSIX);
16250 tp->irq_max = TG3_IRQ_MAX_VECS;
16251 }
16252 }
16253
16254 tp->txq_max = 1;
16255 tp->rxq_max = 1;
16256 if (tp->irq_max > 1) {
16257 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16258 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16259
16260 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16261 tg3_asic_rev(tp) == ASIC_REV_5720)
16262 tp->txq_max = tp->irq_max - 1;
16263 }
16264
16265 if (tg3_flag(tp, 5755_PLUS) ||
16266 tg3_asic_rev(tp) == ASIC_REV_5906)
16267 tg3_flag_set(tp, SHORT_DMA_BUG);
16268
16269 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16270 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16271
16272 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16273 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16274 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16275 tg3_asic_rev(tp) == ASIC_REV_5762)
16276 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16277
16278 if (tg3_flag(tp, 57765_PLUS) &&
16279 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16280 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16281
16282 if (!tg3_flag(tp, 5705_PLUS) ||
16283 tg3_flag(tp, 5780_CLASS) ||
16284 tg3_flag(tp, USE_JUMBO_BDFLAG))
16285 tg3_flag_set(tp, JUMBO_CAPABLE);
16286
16287 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16288 &pci_state_reg);
16289
16290 if (pci_is_pcie(tp->pdev)) {
16291 u16 lnkctl;
16292
16293 tg3_flag_set(tp, PCI_EXPRESS);
16294
16295 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16296 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16297 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16298 tg3_flag_clear(tp, HW_TSO_2);
16299 tg3_flag_clear(tp, TSO_CAPABLE);
16300 }
16301 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16302 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16303 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16304 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16305 tg3_flag_set(tp, CLKREQ_BUG);
16306 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16307 tg3_flag_set(tp, L1PLLPD_EN);
16308 }
16309 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16310 /* BCM5785 devices are effectively PCIe devices, and should
16311 * follow PCIe codepaths, but do not have a PCIe capabilities
16312 * section.
16313 */
16314 tg3_flag_set(tp, PCI_EXPRESS);
16315 } else if (!tg3_flag(tp, 5705_PLUS) ||
16316 tg3_flag(tp, 5780_CLASS)) {
16317 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16318 if (!tp->pcix_cap) {
16319 dev_err(&tp->pdev->dev,
16320 "Cannot find PCI-X capability, aborting\n");
16321 return -EIO;
16322 }
16323
16324 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16325 tg3_flag_set(tp, PCIX_MODE);
16326 }
16327
16328 /* If we have an AMD 762 or VIA K8T800 chipset, write
16329 * reordering to the mailbox registers done by the host
16330 * controller can cause major trouble. We read back from
16331 * every mailbox register write to force the writes to be
16332 * posted to the chip in order.
16333 */
16334 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16335 !tg3_flag(tp, PCI_EXPRESS))
16336 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16337
16338 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16339 &tp->pci_cacheline_sz);
16340 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16341 &tp->pci_lat_timer);
16342 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16343 tp->pci_lat_timer < 64) {
16344 tp->pci_lat_timer = 64;
16345 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16346 tp->pci_lat_timer);
16347 }
16348
16349 /* Important! -- It is critical that the PCI-X hw workaround
16350 * situation is decided before the first MMIO register access.
16351 */
16352 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16353 /* 5700 BX chips need to have their TX producer index
16354 * mailboxes written twice to work around a bug.
16355 */
16356 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16357
16358 /* If we are in PCI-X mode, enable register write workaround.
16359 *
16360 * The workaround is to use indirect register accesses
16361 * for all chip writes other than those to mailbox registers.
16362 */
16363 if (tg3_flag(tp, PCIX_MODE)) {
16364 u32 pm_reg;
16365
16366 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16367
16368 /* The chip can have its power management PCI config
16369 * space registers clobbered due to this bug.
16370 * So explicitly force the chip into D0 here.
16371 */
16372 pci_read_config_dword(tp->pdev,
16373 tp->pdev->pm_cap + PCI_PM_CTRL,
16374 &pm_reg);
16375 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16376 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16377 pci_write_config_dword(tp->pdev,
16378 tp->pdev->pm_cap + PCI_PM_CTRL,
16379 pm_reg);
16380
16381 /* Also, force SERR#/PERR# in PCI command. */
16382 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16383 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16384 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16385 }
16386 }
16387
16388 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16389 tg3_flag_set(tp, PCI_HIGH_SPEED);
16390 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16391 tg3_flag_set(tp, PCI_32BIT);
16392
16393 /* Chip-specific fixup from Broadcom driver */
16394 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16395 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16396 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16397 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16398 }
16399
16400 /* Default fast path register access methods */
16401 tp->read32 = tg3_read32;
16402 tp->write32 = tg3_write32;
16403 tp->read32_mbox = tg3_read32;
16404 tp->write32_mbox = tg3_write32;
16405 tp->write32_tx_mbox = tg3_write32;
16406 tp->write32_rx_mbox = tg3_write32;
16407
16408 /* Various workaround register access methods */
16409 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16410 tp->write32 = tg3_write_indirect_reg32;
16411 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16412 (tg3_flag(tp, PCI_EXPRESS) &&
16413 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16414 /*
16415 * Back-to-back register writes can cause problems on these
16416 * chips; the workaround is to read back all register writes
16417 * except those to mailbox registers.
16418 *
16419 * See tg3_write_indirect_reg32().
16420 */
16421 tp->write32 = tg3_write_flush_reg32;
16422 }
16423
16424 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16425 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16426 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16427 tp->write32_rx_mbox = tg3_write_flush_reg32;
16428 }
16429
16430 if (tg3_flag(tp, ICH_WORKAROUND)) {
16431 tp->read32 = tg3_read_indirect_reg32;
16432 tp->write32 = tg3_write_indirect_reg32;
16433 tp->read32_mbox = tg3_read_indirect_mbox;
16434 tp->write32_mbox = tg3_write_indirect_mbox;
16435 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16436 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16437
16438 iounmap(tp->regs);
16439 tp->regs = NULL;
16440
16441 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16442 pci_cmd &= ~PCI_COMMAND_MEMORY;
16443 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16444 }
16445 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16446 tp->read32_mbox = tg3_read32_mbox_5906;
16447 tp->write32_mbox = tg3_write32_mbox_5906;
16448 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16449 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16450 }
16451
16452 if (tp->write32 == tg3_write_indirect_reg32 ||
16453 (tg3_flag(tp, PCIX_MODE) &&
16454 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16455 tg3_asic_rev(tp) == ASIC_REV_5701)))
16456 tg3_flag_set(tp, SRAM_USE_CONFIG);
16457
16458 /* The memory arbiter has to be enabled in order for SRAM accesses
16459 * to succeed. Normally on powerup the tg3 chip firmware will make
16460 * sure it is enabled, but other entities such as system netboot
16461 * code might disable it.
16462 */
16463 val = tr32(MEMARB_MODE);
16464 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16465
16466 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16467 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16468 tg3_flag(tp, 5780_CLASS)) {
16469 if (tg3_flag(tp, PCIX_MODE)) {
16470 pci_read_config_dword(tp->pdev,
16471 tp->pcix_cap + PCI_X_STATUS,
16472 &val);
16473 tp->pci_fn = val & 0x7;
16474 }
16475 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16476 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16477 tg3_asic_rev(tp) == ASIC_REV_5720) {
16478 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16479 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16480 val = tr32(TG3_CPMU_STATUS);
16481
16482 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16483 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16484 else
16485 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16486 TG3_CPMU_STATUS_FSHFT_5719;
16487 }
16488
16489 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16490 tp->write32_tx_mbox = tg3_write_flush_reg32;
16491 tp->write32_rx_mbox = tg3_write_flush_reg32;
16492 }
16493
16494 /* Get eeprom hw config before calling tg3_set_power_state().
16495 * In particular, the TG3_FLAG_IS_NIC flag must be
16496 * determined before calling tg3_set_power_state() so that
16497 * we know whether or not to switch out of Vaux power.
16498 * When the flag is set, it means that GPIO1 is used for eeprom
16499 * write protect and also implies that it is a LOM where GPIOs
16500 * are not used to switch power.
16501 */
16502 tg3_get_eeprom_hw_cfg(tp);
16503
16504 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16505 tg3_flag_clear(tp, TSO_CAPABLE);
16506 tg3_flag_clear(tp, TSO_BUG);
16507 tp->fw_needed = NULL;
16508 }
16509
16510 if (tg3_flag(tp, ENABLE_APE)) {
16511 /* Allow reads and writes to the
16512 * APE register and memory space.
16513 */
16514 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16515 PCISTATE_ALLOW_APE_SHMEM_WR |
16516 PCISTATE_ALLOW_APE_PSPACE_WR;
16517 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16518 pci_state_reg);
16519
16520 tg3_ape_lock_init(tp);
16521 }
16522
16523 /* Set up tp->grc_local_ctrl before calling
16524 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16525 * will bring 5700's external PHY out of reset.
16526 * It is also used as eeprom write protect on LOMs.
16527 */
16528 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16529 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16530 tg3_flag(tp, EEPROM_WRITE_PROT))
16531 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16532 GRC_LCLCTRL_GPIO_OUTPUT1);
16533 /* Unused GPIO3 must be driven as output on 5752 because there
16534 * are no pull-up resistors on unused GPIO pins.
16535 */
16536 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16537 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16538
16539 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16540 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16541 tg3_flag(tp, 57765_CLASS))
16542 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16543
16544 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16545 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16546 /* Turn off the debug UART. */
16547 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16548 if (tg3_flag(tp, IS_NIC))
16549 /* Keep VMain power. */
16550 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16551 GRC_LCLCTRL_GPIO_OUTPUT0;
16552 }
16553
16554 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16555 tp->grc_local_ctrl |=
16556 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16557
16558 /* Switch out of Vaux if it is a NIC */
16559 tg3_pwrsrc_switch_to_vmain(tp);
16560
16561 /* Derive initial jumbo mode from MTU assigned in
16562 * ether_setup() via the alloc_etherdev() call
16563 */
16564 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16565 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16566
16567 /* Determine WakeOnLan speed to use. */
16568 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16569 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16570 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16571 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16572 tg3_flag_clear(tp, WOL_SPEED_100MB);
16573 } else {
16574 tg3_flag_set(tp, WOL_SPEED_100MB);
16575 }
16576
16577 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16578 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16579
16580 /* A few boards don't want Ethernet@WireSpeed phy feature */
16581 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16582 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16583 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16584 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16585 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16586 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16587 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16588
16589 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16590 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16591 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16592 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16593 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16594
16595 if (tg3_flag(tp, 5705_PLUS) &&
16596 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16597 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16598 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16599 !tg3_flag(tp, 57765_PLUS)) {
16600 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16601 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16602 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16603 tg3_asic_rev(tp) == ASIC_REV_5761) {
16604 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16605 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16606 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16607 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16608 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16609 } else
16610 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16611 }
16612
16613 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16614 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16615 tp->phy_otp = tg3_read_otp_phycfg(tp);
16616 if (tp->phy_otp == 0)
16617 tp->phy_otp = TG3_OTP_DEFAULT;
16618 }
16619
16620 if (tg3_flag(tp, CPMU_PRESENT))
16621 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16622 else
16623 tp->mi_mode = MAC_MI_MODE_BASE;
16624
16625 tp->coalesce_mode = 0;
16626 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16627 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16628 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16629
16630 /* Set these bits to enable statistics workaround. */
16631 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16632 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16633 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16634 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16635 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16636 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16637 }
16638
16639 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16640 tg3_asic_rev(tp) == ASIC_REV_57780)
16641 tg3_flag_set(tp, USE_PHYLIB);
16642
16643 err = tg3_mdio_init(tp);
16644 if (err)
16645 return err;
16646
16647 /* Initialize data/descriptor byte/word swapping. */
16648 val = tr32(GRC_MODE);
16649 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16650 tg3_asic_rev(tp) == ASIC_REV_5762)
16651 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16652 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16653 GRC_MODE_B2HRX_ENABLE |
16654 GRC_MODE_HTX2B_ENABLE |
16655 GRC_MODE_HOST_STACKUP);
16656 else
16657 val &= GRC_MODE_HOST_STACKUP;
16658
16659 tw32(GRC_MODE, val | tp->grc_mode);
16660
16661 tg3_switch_clocks(tp);
16662
16663 /* Clear this out for sanity. */
16664 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16665
16666 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16667 tw32(TG3PCI_REG_BASE_ADDR, 0);
16668
16669 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16670 &pci_state_reg);
16671 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16672 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16673 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16674 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16675 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16676 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16677 void __iomem *sram_base;
16678
16679 /* Write some dummy words into the SRAM status block
16680 * area and see if they read back correctly. If the return
16681 * value is bad, force enable the PCIX workaround.
16682 */
16683 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16684
16685 writel(0x00000000, sram_base);
16686 writel(0x00000000, sram_base + 4);
16687 writel(0xffffffff, sram_base + 4);
16688 if (readl(sram_base) != 0x00000000)
16689 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16690 }
16691 }
16692
16693 udelay(50);
16694 tg3_nvram_init(tp);
16695
16696 /* If the device has an NVRAM, no need to load patch firmware */
16697 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16698 !tg3_flag(tp, NO_NVRAM))
16699 tp->fw_needed = NULL;
16700
16701 grc_misc_cfg = tr32(GRC_MISC_CFG);
16702 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16703
16704 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16705 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16706 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16707 tg3_flag_set(tp, IS_5788);
16708
16709 if (!tg3_flag(tp, IS_5788) &&
16710 tg3_asic_rev(tp) != ASIC_REV_5700)
16711 tg3_flag_set(tp, TAGGED_STATUS);
16712 if (tg3_flag(tp, TAGGED_STATUS)) {
16713 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16714 HOSTCC_MODE_CLRTICK_TXBD);
16715
16716 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16717 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16718 tp->misc_host_ctrl);
16719 }
16720
16721 /* Preserve the APE MAC_MODE bits */
16722 if (tg3_flag(tp, ENABLE_APE))
16723 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16724 else
16725 tp->mac_mode = 0;
16726
16727 if (tg3_10_100_only_device(tp, ent))
16728 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16729
16730 err = tg3_phy_probe(tp);
16731 if (err) {
16732 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16733 /* ... but do not return immediately ... */
16734 tg3_mdio_fini(tp);
16735 }
16736
16737 tg3_read_vpd(tp);
16738 tg3_read_fw_ver(tp);
16739
16740 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16741 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16742 } else {
16743 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16744 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16745 else
16746 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16747 }
16748
16749 /* 5700 {AX,BX} chips have a broken status block link
16750 * change bit implementation, so we must use the
16751 * status register in those cases.
16752 */
16753 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16754 tg3_flag_set(tp, USE_LINKCHG_REG);
16755 else
16756 tg3_flag_clear(tp, USE_LINKCHG_REG);
16757
16758 /* The led_ctrl is set during tg3_phy_probe; here we might
16759 * have to force the link status polling mechanism based
16760 * upon subsystem IDs.
16761 */
16762 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16763 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16764 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16765 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16766 tg3_flag_set(tp, USE_LINKCHG_REG);
16767 }
16768
16769 /* For all SERDES we poll the MAC status register. */
16770 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16771 tg3_flag_set(tp, POLL_SERDES);
16772 else
16773 tg3_flag_clear(tp, POLL_SERDES);
16774
16775 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16776 tg3_flag_set(tp, POLL_CPMU_LINK);
16777
16778 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16779 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16780 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16781 tg3_flag(tp, PCIX_MODE)) {
16782 tp->rx_offset = NET_SKB_PAD;
16783 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16784 tp->rx_copy_thresh = ~(u16)0;
16785 #endif
16786 }
16787
16788 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16789 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16790 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16791
16792 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16793
16794 /* Increment the rx prod index on the rx std ring by at most
16795 * 8 for these chips to work around hw errata.
16796 */
16797 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16798 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16799 tg3_asic_rev(tp) == ASIC_REV_5755)
16800 tp->rx_std_max_post = 8;
16801
16802 if (tg3_flag(tp, ASPM_WORKAROUND))
16803 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16804 PCIE_PWR_MGMT_L1_THRESH_MSK;
16805
16806 return err;
16807 }
16808
16809 #ifdef CONFIG_SPARC
16810 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16811 {
16812 struct net_device *dev = tp->dev;
16813 struct pci_dev *pdev = tp->pdev;
16814 struct device_node *dp = pci_device_to_OF_node(pdev);
16815 const unsigned char *addr;
16816 int len;
16817
16818 addr = of_get_property(dp, "local-mac-address", &len);
16819 if (addr && len == ETH_ALEN) {
16820 memcpy(dev->dev_addr, addr, ETH_ALEN);
16821 return 0;
16822 }
16823 return -ENODEV;
16824 }
16825
16826 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16827 {
16828 struct net_device *dev = tp->dev;
16829
16830 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16831 return 0;
16832 }
16833 #endif
16834
16835 static int tg3_get_device_address(struct tg3 *tp)
16836 {
16837 struct net_device *dev = tp->dev;
16838 u32 hi, lo, mac_offset;
16839 int addr_ok = 0;
16840 int err;
16841
16842 #ifdef CONFIG_SPARC
16843 if (!tg3_get_macaddr_sparc(tp))
16844 return 0;
16845 #endif
16846
16847 if (tg3_flag(tp, IS_SSB_CORE)) {
16848 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16849 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16850 return 0;
16851 }
16852
16853 mac_offset = 0x7c;
16854 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16855 tg3_flag(tp, 5780_CLASS)) {
16856 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16857 mac_offset = 0xcc;
16858 if (tg3_nvram_lock(tp))
16859 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16860 else
16861 tg3_nvram_unlock(tp);
16862 } else if (tg3_flag(tp, 5717_PLUS)) {
16863 if (tp->pci_fn & 1)
16864 mac_offset = 0xcc;
16865 if (tp->pci_fn > 1)
16866 mac_offset += 0x18c;
16867 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16868 mac_offset = 0x10;
16869
16870 /* First try to get it from MAC address mailbox. */
16871 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16872 if ((hi >> 16) == 0x484b) {
16873 dev->dev_addr[0] = (hi >> 8) & 0xff;
16874 dev->dev_addr[1] = (hi >> 0) & 0xff;
16875
16876 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16877 dev->dev_addr[2] = (lo >> 24) & 0xff;
16878 dev->dev_addr[3] = (lo >> 16) & 0xff;
16879 dev->dev_addr[4] = (lo >> 8) & 0xff;
16880 dev->dev_addr[5] = (lo >> 0) & 0xff;
16881
16882 /* Some old bootcode may report a 0 MAC address in SRAM */
16883 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16884 }
16885 if (!addr_ok) {
16886 /* Next, try NVRAM. */
16887 if (!tg3_flag(tp, NO_NVRAM) &&
16888 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16889 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16890 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16891 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16892 }
16893 /* Finally just fetch it out of the MAC control regs. */
16894 else {
16895 hi = tr32(MAC_ADDR_0_HIGH);
16896 lo = tr32(MAC_ADDR_0_LOW);
16897
16898 dev->dev_addr[5] = lo & 0xff;
16899 dev->dev_addr[4] = (lo >> 8) & 0xff;
16900 dev->dev_addr[3] = (lo >> 16) & 0xff;
16901 dev->dev_addr[2] = (lo >> 24) & 0xff;
16902 dev->dev_addr[1] = hi & 0xff;
16903 dev->dev_addr[0] = (hi >> 8) & 0xff;
16904 }
16905 }
16906
16907 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16908 #ifdef CONFIG_SPARC
16909 if (!tg3_get_default_macaddr_sparc(tp))
16910 return 0;
16911 #endif
16912 return -EINVAL;
16913 }
16914 return 0;
16915 }
16916
16917 #define BOUNDARY_SINGLE_CACHELINE 1
16918 #define BOUNDARY_MULTI_CACHELINE 2
16919
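/* Choose DMA read/write boundary bits for DMA_RWCTRL based on the
 * host cacheline size. Note PCI_CACHE_LINE_SIZE is in 32-bit words,
 * so e.g. a register value of 16 means a 64-byte cacheline; a value
 * of 0 is treated as 1024 bytes.
 */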
16920 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16921 {
16922 int cacheline_size;
16923 u8 byte;
16924 int goal;
16925
16926 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16927 if (byte == 0)
16928 cacheline_size = 1024;
16929 else
16930 cacheline_size = (int) byte * 4;
16931
16932 /* Only the 5700/5701 and PCI Express devices implement the
16933 * boundary bits; on all other chips they have no effect.
16934 */
16935 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16936 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16937 !tg3_flag(tp, PCI_EXPRESS))
16938 goto out;
16939
16940 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16941 goal = BOUNDARY_MULTI_CACHELINE;
16942 #else
16943 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16944 goal = BOUNDARY_SINGLE_CACHELINE;
16945 #else
16946 goal = 0;
16947 #endif
16948 #endif
16949
16950 if (tg3_flag(tp, 57765_PLUS)) {
16951 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16952 goto out;
16953 }
16954
16955 if (!goal)
16956 goto out;
16957
16958 /* PCI controllers on most RISC systems tend to disconnect
16959 * when a device tries to burst across a cache-line boundary.
16960 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16961 *
16962 * Unfortunately, for PCI-E there are only limited
16963 * write-side controls for this, and thus for reads
16964 * we will still get the disconnects. We'll also waste
16965 * these PCI cycles for both read and write for chips
16966 * other than 5700 and 5701 which do not implement the
16967 * boundary bits.
16968 */
16969 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16970 switch (cacheline_size) {
16971 case 16:
16972 case 32:
16973 case 64:
16974 case 128:
16975 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16976 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16977 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16978 } else {
16979 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16980 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16981 }
16982 break;
16983
16984 case 256:
16985 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16986 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16987 break;
16988
16989 default:
16990 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16991 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16992 break;
16993 }
16994 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16995 switch (cacheline_size) {
16996 case 16:
16997 case 32:
16998 case 64:
16999 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17000 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17001 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17002 break;
17003 }
17004 /* fallthrough */
17005 case 128:
17006 default:
17007 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17008 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17009 break;
17010 }
17011 } else {
17012 switch (cacheline_size) {
17013 case 16:
17014 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17015 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17016 DMA_RWCTRL_WRITE_BNDRY_16);
17017 break;
17018 }
17019 /* fallthrough */
17020 case 32:
17021 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17022 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17023 DMA_RWCTRL_WRITE_BNDRY_32);
17024 break;
17025 }
17026 /* fallthrough */
17027 case 64:
17028 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17029 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17030 DMA_RWCTRL_WRITE_BNDRY_64);
17031 break;
17032 }
17033 /* fallthrough */
17034 case 128:
17035 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17036 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17037 DMA_RWCTRL_WRITE_BNDRY_128);
17038 break;
17039 }
17040 /* fallthrough */
17041 case 256:
17042 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17043 DMA_RWCTRL_WRITE_BNDRY_256);
17044 break;
17045 case 512:
17046 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17047 DMA_RWCTRL_WRITE_BNDRY_512);
17048 break;
17049 case 1024:
17050 default:
17051 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17052 DMA_RWCTRL_WRITE_BNDRY_1024);
17053 break;
17054 }
17055 }
17056
17057 out:
17058 return val;
17059 }
17060
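/* Push one buffer descriptor through the chip's internal DMA engine
 * (read or write direction) and poll the matching completion FIFO
 * for up to 4 ms (40 x 100 us). Returns 0 on completion, -ENODEV on
 * timeout.
 */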
17061 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17062 int size, bool to_device)
17063 {
17064 struct tg3_internal_buffer_desc test_desc;
17065 u32 sram_dma_descs;
17066 int i, ret;
17067
17068 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17069
17070 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17071 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17072 tw32(RDMAC_STATUS, 0);
17073 tw32(WDMAC_STATUS, 0);
17074
17075 tw32(BUFMGR_MODE, 0);
17076 tw32(FTQ_RESET, 0);
17077
17078 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17079 test_desc.addr_lo = buf_dma & 0xffffffff;
17080 test_desc.nic_mbuf = 0x00002100;
17081 test_desc.len = size;
17082
17083 /*
17084 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17085 * the *second* time the tg3 driver was loaded after an
17086 * initial scan.
17087 *
17088 * Broadcom tells me:
17089 * ...the DMA engine is connected to the GRC block and a DMA
17090 * reset may affect the GRC block in some unpredictable way...
17091 * The behavior of resets to individual blocks has not been tested.
17092 *
17093 * Broadcom noted the GRC reset will also reset all sub-components.
17094 */
17095 if (to_device) {
17096 test_desc.cqid_sqid = (13 << 8) | 2;
17097
17098 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17099 udelay(40);
17100 } else {
17101 test_desc.cqid_sqid = (16 << 8) | 7;
17102
17103 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17104 udelay(40);
17105 }
17106 test_desc.flags = 0x00000005;
17107
17108 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17109 u32 val;
17110
17111 val = *(((u32 *)&test_desc) + i);
17112 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17113 sram_dma_descs + (i * sizeof(u32)));
17114 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17115 }
17116 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17117
17118 if (to_device)
17119 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17120 else
17121 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17122
17123 ret = -ENODEV;
17124 for (i = 0; i < 40; i++) {
17125 u32 val;
17126
17127 if (to_device)
17128 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17129 else
17130 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17131 if ((val & 0xffff) == sram_dma_descs) {
17132 ret = 0;
17133 break;
17134 }
17135
17136 udelay(100);
17137 }
17138
17139 return ret;
17140 }
17141
17142 #define TEST_BUFFER_SIZE 0x2000
17143
17144 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
17145 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17146 { },
17147 };
17148
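/* Tune DMA_RWCTRL for the bus type and, on 5700/5701 only, verify
 * it empirically: fill an 8 KB buffer with an index pattern, DMA it
 * to the chip and back, and fall back to a 16-byte write boundary
 * if the data comes back corrupted.
 */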
17149 static int tg3_test_dma(struct tg3 *tp)
17150 {
17151 dma_addr_t buf_dma;
17152 u32 *buf, saved_dma_rwctrl;
17153 int ret = 0;
17154
17155 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17156 &buf_dma, GFP_KERNEL);
17157 if (!buf) {
17158 ret = -ENOMEM;
17159 goto out_nofree;
17160 }
17161
17162 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17163 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17164
17165 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17166
17167 if (tg3_flag(tp, 57765_PLUS))
17168 goto out;
17169
17170 if (tg3_flag(tp, PCI_EXPRESS)) {
17171 /* DMA read watermark not used on PCIE */
17172 tp->dma_rwctrl |= 0x00180000;
17173 } else if (!tg3_flag(tp, PCIX_MODE)) {
17174 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17175 tg3_asic_rev(tp) == ASIC_REV_5750)
17176 tp->dma_rwctrl |= 0x003f0000;
17177 else
17178 tp->dma_rwctrl |= 0x003f000f;
17179 } else {
17180 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17181 tg3_asic_rev(tp) == ASIC_REV_5704) {
17182 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17183 u32 read_water = 0x7;
17184
17185 /* If the 5704 is behind the EPB bridge, we can
17186 * do the less restrictive ONE_DMA workaround for
17187 * better performance.
17188 */
17189 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17190 tg3_asic_rev(tp) == ASIC_REV_5704)
17191 tp->dma_rwctrl |= 0x8000;
17192 else if (ccval == 0x6 || ccval == 0x7)
17193 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17194
17195 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17196 read_water = 4;
17197 /* Set bit 23 to enable PCIX hw bug fix */
17198 tp->dma_rwctrl |=
17199 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17200 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17201 (1 << 23);
17202 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17203 /* 5780 always in PCIX mode */
17204 tp->dma_rwctrl |= 0x00144000;
17205 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17206 /* 5714 always in PCIX mode */
17207 tp->dma_rwctrl |= 0x00148000;
17208 } else {
17209 tp->dma_rwctrl |= 0x001b000f;
17210 }
17211 }
17212 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17213 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17214
17215 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17216 tg3_asic_rev(tp) == ASIC_REV_5704)
17217 tp->dma_rwctrl &= 0xfffffff0;
17218
17219 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17220 tg3_asic_rev(tp) == ASIC_REV_5701) {
17221 /* Remove this if it causes problems for some boards. */
17222 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17223
17224 /* On 5700/5701 chips, we need to set this bit.
17225 * Otherwise the chip will issue cacheline transactions
17226 * to streamable DMA memory without all the byte
17227 * enables turned on. This is an error on several
17228 * RISC PCI controllers, in particular sparc64.
17229 *
17230 * On 5703/5704 chips, this bit has been reassigned
17231 * a different meaning. In particular, it is used
17232 * on those chips to enable a PCI-X workaround.
17233 */
17234 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17235 }
17236
17237 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17238
17240 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17241 tg3_asic_rev(tp) != ASIC_REV_5701)
17242 goto out;
17243
17244 /* It is best to perform DMA test with maximum write burst size
17245 * to expose the 5700/5701 write DMA bug.
17246 */
17247 saved_dma_rwctrl = tp->dma_rwctrl;
17248 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17249 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17250
17251 while (1) {
17252 u32 *p = buf, i;
17253
17254 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17255 p[i] = i;
17256
17257 /* Send the buffer to the chip. */
17258 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17259 if (ret) {
17260 dev_err(&tp->pdev->dev,
17261 "%s: Buffer write failed. err = %d\n",
17262 __func__, ret);
17263 break;
17264 }
17265
17266 /* Now read it back. */
17267 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17268 if (ret) {
17269 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17270 "err = %d\n", __func__, ret);
17271 break;
17272 }
17273
17274 /* Verify it. */
17275 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17276 if (p[i] == i)
17277 continue;
17278
17279 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17280 DMA_RWCTRL_WRITE_BNDRY_16) {
17281 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17282 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17283 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17284 break;
17285 } else {
17286 dev_err(&tp->pdev->dev,
17287 "%s: Buffer corrupted on read back! "
17288 "(%d != %d)\n", __func__, p[i], i);
17289 ret = -ENODEV;
17290 goto out;
17291 }
17292 }
17293
17294 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17295 /* Success. */
17296 ret = 0;
17297 break;
17298 }
17299 }
17300 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17301 DMA_RWCTRL_WRITE_BNDRY_16) {
17302 /* DMA test passed without adjusting DMA boundary;
17303 * now look for chipsets that are known to expose the
17304 * DMA bug without failing the test.
17305 */
17306 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17307 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17308 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17309 } else {
17310 /* Safe to use the calculated DMA boundary. */
17311 tp->dma_rwctrl = saved_dma_rwctrl;
17312 }
17313
17314 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17315 }
17316
17317 out:
17318 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17319 out_nofree:
17320 return ret;
17321 }
17322
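/* Set the default buffer manager watermarks (standard and jumbo)
 * for this chip generation.
 */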
17323 static void tg3_init_bufmgr_config(struct tg3 *tp)
17324 {
17325 if (tg3_flag(tp, 57765_PLUS)) {
17326 tp->bufmgr_config.mbuf_read_dma_low_water =
17327 DEFAULT_MB_RDMA_LOW_WATER_5705;
17328 tp->bufmgr_config.mbuf_mac_rx_low_water =
17329 DEFAULT_MB_MACRX_LOW_WATER_57765;
17330 tp->bufmgr_config.mbuf_high_water =
17331 DEFAULT_MB_HIGH_WATER_57765;
17332
17333 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17334 DEFAULT_MB_RDMA_LOW_WATER_5705;
17335 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17336 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17337 tp->bufmgr_config.mbuf_high_water_jumbo =
17338 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17339 } else if (tg3_flag(tp, 5705_PLUS)) {
17340 tp->bufmgr_config.mbuf_read_dma_low_water =
17341 DEFAULT_MB_RDMA_LOW_WATER_5705;
17342 tp->bufmgr_config.mbuf_mac_rx_low_water =
17343 DEFAULT_MB_MACRX_LOW_WATER_5705;
17344 tp->bufmgr_config.mbuf_high_water =
17345 DEFAULT_MB_HIGH_WATER_5705;
17346 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17347 tp->bufmgr_config.mbuf_mac_rx_low_water =
17348 DEFAULT_MB_MACRX_LOW_WATER_5906;
17349 tp->bufmgr_config.mbuf_high_water =
17350 DEFAULT_MB_HIGH_WATER_5906;
17351 }
17352
17353 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17354 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17355 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17356 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17357 tp->bufmgr_config.mbuf_high_water_jumbo =
17358 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17359 } else {
17360 tp->bufmgr_config.mbuf_read_dma_low_water =
17361 DEFAULT_MB_RDMA_LOW_WATER;
17362 tp->bufmgr_config.mbuf_mac_rx_low_water =
17363 DEFAULT_MB_MACRX_LOW_WATER;
17364 tp->bufmgr_config.mbuf_high_water =
17365 DEFAULT_MB_HIGH_WATER;
17366
17367 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17368 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17369 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17370 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17371 tp->bufmgr_config.mbuf_high_water_jumbo =
17372 DEFAULT_MB_HIGH_WATER_JUMBO;
17373 }
17374
17375 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17376 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17377 }
17378
17379 static char *tg3_phy_string(struct tg3 *tp)
17380 {
17381 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17382 case TG3_PHY_ID_BCM5400: return "5400";
17383 case TG3_PHY_ID_BCM5401: return "5401";
17384 case TG3_PHY_ID_BCM5411: return "5411";
17385 case TG3_PHY_ID_BCM5701: return "5701";
17386 case TG3_PHY_ID_BCM5703: return "5703";
17387 case TG3_PHY_ID_BCM5704: return "5704";
17388 case TG3_PHY_ID_BCM5705: return "5705";
17389 case TG3_PHY_ID_BCM5750: return "5750";
17390 case TG3_PHY_ID_BCM5752: return "5752";
17391 case TG3_PHY_ID_BCM5714: return "5714";
17392 case TG3_PHY_ID_BCM5780: return "5780";
17393 case TG3_PHY_ID_BCM5755: return "5755";
17394 case TG3_PHY_ID_BCM5787: return "5787";
17395 case TG3_PHY_ID_BCM5784: return "5784";
17396 case TG3_PHY_ID_BCM5756: return "5722/5756";
17397 case TG3_PHY_ID_BCM5906: return "5906";
17398 case TG3_PHY_ID_BCM5761: return "5761";
17399 case TG3_PHY_ID_BCM5718C: return "5718C";
17400 case TG3_PHY_ID_BCM5718S: return "5718S";
17401 case TG3_PHY_ID_BCM57765: return "57765";
17402 case TG3_PHY_ID_BCM5719C: return "5719C";
17403 case TG3_PHY_ID_BCM5720C: return "5720C";
17404 case TG3_PHY_ID_BCM5762: return "5762C";
17405 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17406 case 0: return "serdes";
17407 default: return "unknown";
17408 }
17409 }
17410
17411 static char *tg3_bus_string(struct tg3 *tp, char *str)
17412 {
17413 if (tg3_flag(tp, PCI_EXPRESS)) {
17414 strcpy(str, "PCI Express");
17415 return str;
17416 } else if (tg3_flag(tp, PCIX_MODE)) {
17417 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17418
17419 strcpy(str, "PCIX:");
17420
17421 if ((clock_ctrl == 7) ||
17422 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17423 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17424 strcat(str, "133MHz");
17425 else if (clock_ctrl == 0)
17426 strcat(str, "33MHz");
17427 else if (clock_ctrl == 2)
17428 strcat(str, "50MHz");
17429 else if (clock_ctrl == 4)
17430 strcat(str, "66MHz");
17431 else if (clock_ctrl == 6)
17432 strcat(str, "100MHz");
17433 } else {
17434 strcpy(str, "PCI:");
17435 if (tg3_flag(tp, PCI_HIGH_SPEED))
17436 strcat(str, "66MHz");
17437 else
17438 strcat(str, "33MHz");
17439 }
17440 if (tg3_flag(tp, PCI_32BIT))
17441 strcat(str, ":32-bit");
17442 else
17443 strcat(str, ":64-bit");
17444 return str;
17445 }
17446
17447 static void tg3_init_coal(struct tg3 *tp)
17448 {
17449 struct ethtool_coalesce *ec = &tp->coal;
17450
17451 memset(ec, 0, sizeof(*ec));
17452 ec->cmd = ETHTOOL_GCOALESCE;
17453 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17454 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17455 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17456 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17457 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17458 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17459 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17460 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17461 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17462
17463 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17464 HOSTCC_MODE_CLRTICK_TXBD)) {
17465 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17466 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17467 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17468 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17469 }
17470
17471 if (tg3_flag(tp, 5705_PLUS)) {
17472 ec->rx_coalesce_usecs_irq = 0;
17473 ec->tx_coalesce_usecs_irq = 0;
17474 ec->stats_block_coalesce_usecs = 0;
17475 }
17476 }
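/* These defaults surface through the standard ethtool coalescing API; for
 * example (device name hypothetical), "ethtool -C eth0 rx-usecs 20
 * rx-frames 5" would override the rx_coalesce_usecs and
 * rx_max_coalesced_frames values chosen above.
 */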
17477
17478 static int tg3_init_one(struct pci_dev *pdev,
17479 const struct pci_device_id *ent)
17480 {
17481 struct net_device *dev;
17482 struct tg3 *tp;
17483 int i, err;
17484 u32 sndmbx, rcvmbx, intmbx;
17485 char str[40];
17486 u64 dma_mask, persist_dma_mask;
17487 netdev_features_t features = 0;
17488
17489 printk_once(KERN_INFO "%s\n", version);
17490
17491 err = pci_enable_device(pdev);
17492 if (err) {
17493 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17494 return err;
17495 }
17496
17497 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17498 if (err) {
17499 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17500 goto err_out_disable_pdev;
17501 }
17502
17503 pci_set_master(pdev);
17504
17505 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17506 if (!dev) {
17507 err = -ENOMEM;
17508 goto err_out_free_res;
17509 }
17510
17511 SET_NETDEV_DEV(dev, &pdev->dev);
17512
17513 tp = netdev_priv(dev);
17514 tp->pdev = pdev;
17515 tp->dev = dev;
17516 tp->rx_mode = TG3_DEF_RX_MODE;
17517 tp->tx_mode = TG3_DEF_TX_MODE;
17518 tp->irq_sync = 1;
17519
17520 if (tg3_debug > 0)
17521 tp->msg_enable = tg3_debug;
17522 else
17523 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17524
17525 if (pdev_is_ssb_gige_core(pdev)) {
17526 tg3_flag_set(tp, IS_SSB_CORE);
17527 if (ssb_gige_must_flush_posted_writes(pdev))
17528 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17529 if (ssb_gige_one_dma_at_once(pdev))
17530 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17531 if (ssb_gige_have_roboswitch(pdev)) {
17532 tg3_flag_set(tp, USE_PHYLIB);
17533 tg3_flag_set(tp, ROBOSWITCH);
17534 }
17535 if (ssb_gige_is_rgmii(pdev))
17536 tg3_flag_set(tp, RGMII_MODE);
17537 }
17538
17539 /* The word/byte swap controls here control register access byte
17540 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17541 * setting below.
17542 */
17543 tp->misc_host_ctrl =
17544 MISC_HOST_CTRL_MASK_PCI_INT |
17545 MISC_HOST_CTRL_WORD_SWAP |
17546 MISC_HOST_CTRL_INDIR_ACCESS |
17547 MISC_HOST_CTRL_PCISTATE_RW;
17548
17549 /* The NONFRM (non-frame) byte/word swap controls take effect
17550 * on descriptor entries, i.e. anything which isn't packet data.
17551 *
17552 * The StrongARM chips on the board (one for tx, one for rx)
17553 * are running in big-endian mode.
17554 */
17555 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17556 GRC_MODE_WSWAP_NONFRM_DATA);
17557 #ifdef __BIG_ENDIAN
17558 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17559 #endif
17560 spin_lock_init(&tp->lock);
17561 spin_lock_init(&tp->indirect_lock);
17562 INIT_WORK(&tp->reset_task, tg3_reset_task);
17563
17564 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17565 if (!tp->regs) {
17566 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17567 err = -ENOMEM;
17568 goto err_out_free_dev;
17569 }
17570
17571 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17572 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17573 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17574 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17575 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17576 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17577 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17578 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17579 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17580 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17581 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17582 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17583 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17584 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17585 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17586 tg3_flag_set(tp, ENABLE_APE);
17587 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17588 if (!tp->aperegs) {
17589 dev_err(&pdev->dev,
17590 "Cannot map APE registers, aborting\n");
17591 err = -ENOMEM;
17592 goto err_out_iounmap;
17593 }
17594 }
17595
17596 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17597 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17598
17599 dev->ethtool_ops = &tg3_ethtool_ops;
17600 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17601 dev->netdev_ops = &tg3_netdev_ops;
17602 dev->irq = pdev->irq;
17603
17604 err = tg3_get_invariants(tp, ent);
17605 if (err) {
17606 dev_err(&pdev->dev,
17607 "Problem fetching invariants of chip, aborting\n");
17608 goto err_out_apeunmap;
17609 }
17610
17611 /* The EPB bridge inside the 5714, 5715, and 5780 chips, and any
17612 * device behind the EPB, cannot support DMA addresses above 40 bits.
17613 * On 64-bit systems with an IOMMU, use a 40-bit dma_mask.
17614 * On 64-bit systems without an IOMMU, use a 64-bit dma_mask and
17615 * do the DMA address check in tg3_start_xmit().
17616 */
17617 if (tg3_flag(tp, IS_5788))
17618 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17619 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17620 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17621 #ifdef CONFIG_HIGHMEM
17622 dma_mask = DMA_BIT_MASK(64);
17623 #endif
17624 } else
17625 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17626
17627 /* Configure DMA attributes. */
17628 if (dma_mask > DMA_BIT_MASK(32)) {
17629 err = pci_set_dma_mask(pdev, dma_mask);
17630 if (!err) {
17631 features |= NETIF_F_HIGHDMA;
17632 err = pci_set_consistent_dma_mask(pdev,
17633 persist_dma_mask);
17634 if (err < 0) {
17635 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17636 "DMA for consistent allocations\n");
17637 goto err_out_apeunmap;
17638 }
17639 }
17640 }
17641 if (err || dma_mask == DMA_BIT_MASK(32)) {
17642 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17643 if (err) {
17644 dev_err(&pdev->dev,
17645 "No usable DMA configuration, aborting\n");
17646 goto err_out_apeunmap;
17647 }
17648 }
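	/*
	 * Aside, a sketch only (not what this driver does): on later
	 * kernels the two-step fallback above is usually expressed with
	 * the generic DMA API, e.g.:
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, dma_mask))
	 *		dma_set_mask_and_coherent(&pdev->dev,
	 *					  DMA_BIT_MASK(32));
	 */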
17649
17650 tg3_init_bufmgr_config(tp);
17651
17652 /* 5700 B0 chips do not support checksumming correctly due
17653 * to hardware bugs.
17654 */
17655 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17656 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17657
17658 if (tg3_flag(tp, 5755_PLUS))
17659 features |= NETIF_F_IPV6_CSUM;
17660 }
17661
17662 /* TSO is on by default on chips that support hardware TSO.
17663 * Firmware TSO on older chips gives lower performance, so it
17664 * is off by default, but can be enabled using ethtool.
17665 */
17666 if ((tg3_flag(tp, HW_TSO_1) ||
17667 tg3_flag(tp, HW_TSO_2) ||
17668 tg3_flag(tp, HW_TSO_3)) &&
17669 (features & NETIF_F_IP_CSUM))
17670 features |= NETIF_F_TSO;
17671 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17672 if (features & NETIF_F_IPV6_CSUM)
17673 features |= NETIF_F_TSO6;
17674 if (tg3_flag(tp, HW_TSO_3) ||
17675 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17676 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17677 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17678 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17679 tg3_asic_rev(tp) == ASIC_REV_57780)
17680 features |= NETIF_F_TSO_ECN;
17681 }
17682
17683 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17684 NETIF_F_HW_VLAN_CTAG_RX;
17685 dev->vlan_features |= features;
17686
17687 /*
17688 * Add loopback capability only for a subset of devices that support
17689 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17690 * loopback for the remaining devices.
17691 */
17692 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17693 !tg3_flag(tp, CPMU_PRESENT))
17694 /* Add the loopback capability */
17695 features |= NETIF_F_LOOPBACK;
17696
17697 dev->hw_features |= features;
17698 dev->priv_flags |= IFF_UNICAST_FLT;
17699
17700 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17701 !tg3_flag(tp, TSO_CAPABLE) &&
17702 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17703 tg3_flag_set(tp, MAX_RXPEND_64);
17704 tp->rx_pending = 63;
17705 }
17706
17707 err = tg3_get_device_address(tp);
17708 if (err) {
17709 dev_err(&pdev->dev,
17710 "Could not obtain valid ethernet address, aborting\n");
17711 goto err_out_apeunmap;
17712 }
17713
17714 /*
17715 * Reset chip in case UNDI or EFI driver did not shut down
17716 * nicely; otherwise the DMA self test below will enable WDMAC
17717 * and we'll see (spurious) pending DMA on the PCI bus at that point.
17718 */
17719 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17720 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17721 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17722 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17723 }
17724
17725 err = tg3_test_dma(tp);
17726 if (err) {
17727 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17728 goto err_out_apeunmap;
17729 }
17730
17731 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17732 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17733 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17734 for (i = 0; i < tp->irq_max; i++) {
17735 struct tg3_napi *tnapi = &tp->napi[i];
17736
17737 tnapi->tp = tp;
17738 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17739
17740 tnapi->int_mbox = intmbx;
17741 if (i <= 4)
17742 intmbx += 0x8;
17743 else
17744 intmbx += 0x4;
17745
17746 tnapi->consmbox = rcvmbx;
17747 tnapi->prodmbox = sndmbx;
17748
17749 if (i)
17750 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17751 else
17752 tnapi->coal_now = HOSTCC_MODE_NOW;
17753
17754 if (!tg3_flag(tp, SUPPORT_MSIX))
17755 break;
17756
17757 /*
17758 * If we support MSIX, we'll be using RSS. If we're using
17759 * RSS, the first vector only handles link interrupts and the
17760 * remaining vectors handle rx and tx interrupts. Reuse the
17761 * mailbox values for the next iteration. The values we set up
17762 * above are still useful for the single vectored mode.
17763 */
17764 if (!i)
17765 continue;
17766
17767 rcvmbx += 0x8;
17768
17769 if (sndmbx & 0x4)
17770 sndmbx -= 0x4;
17771 else
17772 sndmbx += 0xc;
17773 }
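	/*
	 * Worked example of the arithmetic above (base value hypothetical,
	 * assuming the send mailbox base ends in 0x4): vectors 0 and 1
	 * share the initial prodmbox (the "if (!i) continue" skips the
	 * first update), then the alternating -0x4/+0xc steps yield
	 * base-0x4, base+0x8, base+0x4, base+0x10, base+0xc, ... The
	 * interrupt mailbox stride likewise suggests 8-byte registers for
	 * the first five vectors and 4-byte registers thereafter.
	 */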
17774
17775 tg3_init_coal(tp);
17776
17777 pci_set_drvdata(pdev, dev);
17778
17779 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17780 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17781 tg3_asic_rev(tp) == ASIC_REV_5762)
17782 tg3_flag_set(tp, PTP_CAPABLE);
17783
17784 tg3_timer_init(tp);
17785
17786 tg3_carrier_off(tp);
17787
17788 err = register_netdev(dev);
17789 if (err) {
17790 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17791 goto err_out_apeunmap;
17792 }
17793
17794 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17795 tp->board_part_number,
17796 tg3_chip_rev_id(tp),
17797 tg3_bus_string(tp, str),
17798 dev->dev_addr);
17799
17800 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17801 struct phy_device *phydev;
17802 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17803 netdev_info(dev,
17804 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17805 phydev->drv->name, dev_name(&phydev->dev));
17806 } else {
17807 char *ethtype;
17808
17809 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17810 ethtype = "10/100Base-TX";
17811 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17812 ethtype = "1000Base-SX";
17813 else
17814 ethtype = "10/100/1000Base-T";
17815
17816 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17817 "(WireSpeed[%d], EEE[%d])\n",
17818 tg3_phy_string(tp), ethtype,
17819 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17820 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17821 }
17822
17823 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17824 (dev->features & NETIF_F_RXCSUM) != 0,
17825 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17826 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17827 tg3_flag(tp, ENABLE_ASF) != 0,
17828 tg3_flag(tp, TSO_CAPABLE) != 0);
17829 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17830 tp->dma_rwctrl,
17831 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17832 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17833
17834 pci_save_state(pdev);
17835
17836 return 0;
17837
17838 err_out_apeunmap:
17839 if (tp->aperegs) {
17840 iounmap(tp->aperegs);
17841 tp->aperegs = NULL;
17842 }
17843
17844 err_out_iounmap:
17845 if (tp->regs) {
17846 iounmap(tp->regs);
17847 tp->regs = NULL;
17848 }
17849
17850 err_out_free_dev:
17851 free_netdev(dev);
17852
17853 err_out_free_res:
17854 pci_release_regions(pdev);
17855
17856 err_out_disable_pdev:
17857 if (pci_is_enabled(pdev))
17858 pci_disable_device(pdev);
17859 return err;
17860 }
17861
17862 static void tg3_remove_one(struct pci_dev *pdev)
17863 {
17864 struct net_device *dev = pci_get_drvdata(pdev);
17865
17866 if (dev) {
17867 struct tg3 *tp = netdev_priv(dev);
17868
17869 release_firmware(tp->fw);
17870
17871 tg3_reset_task_cancel(tp);
17872
17873 if (tg3_flag(tp, USE_PHYLIB)) {
17874 tg3_phy_fini(tp);
17875 tg3_mdio_fini(tp);
17876 }
17877
17878 unregister_netdev(dev);
17879 if (tp->aperegs) {
17880 iounmap(tp->aperegs);
17881 tp->aperegs = NULL;
17882 }
17883 if (tp->regs) {
17884 iounmap(tp->regs);
17885 tp->regs = NULL;
17886 }
17887 free_netdev(dev);
17888 pci_release_regions(pdev);
17889 pci_disable_device(pdev);
17890 }
17891 }
17892
17893 #ifdef CONFIG_PM_SLEEP
17894 static int tg3_suspend(struct device *device)
17895 {
17896 struct pci_dev *pdev = to_pci_dev(device);
17897 struct net_device *dev = pci_get_drvdata(pdev);
17898 struct tg3 *tp = netdev_priv(dev);
17899 int err = 0;
17900
17901 rtnl_lock();
17902
17903 if (!netif_running(dev))
17904 goto unlock;
17905
17906 tg3_reset_task_cancel(tp);
17907 tg3_phy_stop(tp);
17908 tg3_netif_stop(tp);
17909
17910 tg3_timer_stop(tp);
17911
17912 tg3_full_lock(tp, 1);
17913 tg3_disable_ints(tp);
17914 tg3_full_unlock(tp);
17915
17916 netif_device_detach(dev);
17917
17918 tg3_full_lock(tp, 0);
17919 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17920 tg3_flag_clear(tp, INIT_COMPLETE);
17921 tg3_full_unlock(tp);
17922
17923 err = tg3_power_down_prepare(tp);
17924 if (err) {
17925 int err2;
17926
17927 tg3_full_lock(tp, 0);
17928
17929 tg3_flag_set(tp, INIT_COMPLETE);
17930 err2 = tg3_restart_hw(tp, true);
17931 if (err2)
17932 goto out;
17933
17934 tg3_timer_start(tp);
17935
17936 netif_device_attach(dev);
17937 tg3_netif_start(tp);
17938
17939 out:
17940 tg3_full_unlock(tp);
17941
17942 if (!err2)
17943 tg3_phy_start(tp);
17944 }
17945
17946 unlock:
17947 rtnl_unlock();
17948 return err;
17949 }
17950
17951 static int tg3_resume(struct device *device)
17952 {
17953 struct pci_dev *pdev = to_pci_dev(device);
17954 struct net_device *dev = pci_get_drvdata(pdev);
17955 struct tg3 *tp = netdev_priv(dev);
17956 int err = 0;
17957
17958 rtnl_lock();
17959
17960 if (!netif_running(dev))
17961 goto unlock;
17962
17963 netif_device_attach(dev);
17964
17965 tg3_full_lock(tp, 0);
17966
17967 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17968
17969 tg3_flag_set(tp, INIT_COMPLETE);
17970 err = tg3_restart_hw(tp,
17971 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17972 if (err)
17973 goto out;
17974
17975 tg3_timer_start(tp);
17976
17977 tg3_netif_start(tp);
17978
17979 out:
17980 tg3_full_unlock(tp);
17981
17982 if (!err)
17983 tg3_phy_start(tp);
17984
17985 unlock:
17986 rtnl_unlock();
17987 return err;
17988 }
17989 #endif /* CONFIG_PM_SLEEP */
17990
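/* SIMPLE_DEV_PM_OPS() binds tg3_suspend/tg3_resume to all of the system
 * sleep transitions (suspend/resume, freeze/thaw, poweroff/restore) and
 * leaves the runtime PM callbacks unset.
 */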
17991 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17992
17993 static void tg3_shutdown(struct pci_dev *pdev)
17994 {
17995 struct net_device *dev = pci_get_drvdata(pdev);
17996 struct tg3 *tp = netdev_priv(dev);
17997
17998 rtnl_lock();
17999 netif_device_detach(dev);
18000
18001 if (netif_running(dev))
18002 dev_close(dev);
18003
18004 if (system_state == SYSTEM_POWER_OFF)
18005 tg3_power_down(tp);
18006
18007 rtnl_unlock();
18008 }
18009
18010 /**
18011 * tg3_io_error_detected - called when PCI error is detected
18012 * @pdev: Pointer to PCI device
18013 * @state: The current pci connection state
18014 *
18015 * This function is called after a PCI bus error affecting
18016 * this device has been detected.
18017 */
18018 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18019 pci_channel_state_t state)
18020 {
18021 struct net_device *netdev = pci_get_drvdata(pdev);
18022 struct tg3 *tp = netdev_priv(netdev);
18023 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18024
18025 netdev_info(netdev, "PCI I/O error detected\n");
18026
18027 rtnl_lock();
18028
18029 /* We may not have a netdev yet, or it may not be running */
18030 if (!netdev || !netif_running(netdev))
18031 goto done;
18032
18033 tg3_phy_stop(tp);
18034
18035 tg3_netif_stop(tp);
18036
18037 tg3_timer_stop(tp);
18038
18039 /* Want to make sure that the reset task doesn't run */
18040 tg3_reset_task_cancel(tp);
18041
18042 netif_device_detach(netdev);
18043
18044 /* Clean up software state, even if MMIO is blocked */
18045 tg3_full_lock(tp, 0);
18046 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18047 tg3_full_unlock(tp);
18048
18049 done:
18050 if (state == pci_channel_io_perm_failure) {
18051 if (netdev) {
18052 tg3_napi_enable(tp);
18053 dev_close(netdev);
18054 }
18055 err = PCI_ERS_RESULT_DISCONNECT;
18056 } else {
18057 pci_disable_device(pdev);
18058 }
18059
18060 rtnl_unlock();
18061
18062 return err;
18063 }
18064
18065 /**
18066 * tg3_io_slot_reset - called after the pci bus has been reset.
18067 * @pdev: Pointer to PCI device
18068 *
18069 * Restart the card from scratch, as if from a cold-boot.
18070 * At this point, the card has experienced a hard reset,
18071 * followed by fixups by BIOS, and has its config space
18072 * set up identically to what it was at cold boot.
18073 */
18074 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18075 {
18076 struct net_device *netdev = pci_get_drvdata(pdev);
18077 struct tg3 *tp = netdev_priv(netdev);
18078 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18079 int err;
18080
18081 rtnl_lock();
18082
18083 if (pci_enable_device(pdev)) {
18084 dev_err(&pdev->dev,
18085 "Cannot re-enable PCI device after reset.\n");
18086 goto done;
18087 }
18088
18089 pci_set_master(pdev);
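	/* Replay the config space captured at probe time, then snapshot it
	 * again so that a later slot reset can restore it the same way.
	 */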
18090 pci_restore_state(pdev);
18091 pci_save_state(pdev);
18092
18093 if (!netdev || !netif_running(netdev)) {
18094 rc = PCI_ERS_RESULT_RECOVERED;
18095 goto done;
18096 }
18097
18098 err = tg3_power_up(tp);
18099 if (err)
18100 goto done;
18101
18102 rc = PCI_ERS_RESULT_RECOVERED;
18103
18104 done:
18105 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18106 tg3_napi_enable(tp);
18107 dev_close(netdev);
18108 }
18109 rtnl_unlock();
18110
18111 return rc;
18112 }
18113
18114 /**
18115 * tg3_io_resume - called when traffic can start flowing again.
18116 * @pdev: Pointer to PCI device
18117 *
18118 * This callback is called when the error recovery driver tells
18119 * us that it's OK to resume normal operation.
18120 */
18121 static void tg3_io_resume(struct pci_dev *pdev)
18122 {
18123 struct net_device *netdev = pci_get_drvdata(pdev);
18124 struct tg3 *tp = netdev_priv(netdev);
18125 int err;
18126
18127 rtnl_lock();
18128
18129 if (!netif_running(netdev))
18130 goto done;
18131
18132 tg3_full_lock(tp, 0);
18133 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18134 tg3_flag_set(tp, INIT_COMPLETE);
18135 err = tg3_restart_hw(tp, true);
18136 if (err) {
18137 tg3_full_unlock(tp);
18138 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18139 goto done;
18140 }
18141
18142 netif_device_attach(netdev);
18143
18144 tg3_timer_start(tp);
18145
18146 tg3_netif_start(tp);
18147
18148 tg3_full_unlock(tp);
18149
18150 tg3_phy_start(tp);
18151
18152 done:
18153 rtnl_unlock();
18154 }
18155
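/* Recovery sequence driven by the PCI error-handling core: error_detected()
 * quiesces the device, slot_reset() re-initializes it after the link/slot
 * reset, and resume() restarts traffic. Returning PCI_ERS_RESULT_DISCONNECT
 * from either of the first two stages abandons recovery.
 */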
18156 static const struct pci_error_handlers tg3_err_handler = {
18157 .error_detected = tg3_io_error_detected,
18158 .slot_reset = tg3_io_slot_reset,
18159 .resume = tg3_io_resume
18160 };
18161
18162 static struct pci_driver tg3_driver = {
18163 .name = DRV_MODULE_NAME,
18164 .id_table = tg3_pci_tbl,
18165 .probe = tg3_init_one,
18166 .remove = tg3_remove_one,
18167 .err_handler = &tg3_err_handler,
18168 .driver.pm = &tg3_pm_ops,
18169 .shutdown = tg3_shutdown,
18170 };
18171
18172 module_pci_driver(tg3_driver);