/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#if IS_ENABLED(CONFIG_HWMON)
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#endif

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		124
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"March 21, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

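/* Write a chip register indirectly through the PCI configuration space
 * window (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA), serialized by
 * indirect_lock.
 */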
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

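/* Write a register, then read it back to flush the posted PCI write. */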
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

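/* Write a TX mailbox.  The write is doubled for chips with the TXD
 * mailbox hardware bug, and flushed with a read back on chips where
 * mailbox writes may be reordered.
 */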
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

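/* Clear any APE lock grants left over from a previous driver instance. */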
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

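/* Request an APE hardware lock and poll the grant register for up to
 * 1 millisecond.  Returns 0 on success, -EBUSY if the lock could not be
 * acquired, or -EINVAL for an unknown lock number.
 */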
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
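		/* fall through */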
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
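		/* fall through */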
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

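/* Acquire the MEM lock once no APE event is pending, polling in 10 usec
 * steps until timeout_us expires.  On success the caller holds
 * TG3_APE_LOCK_MEM and is responsible for releasing it.
 */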
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

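/* Read firmware NC-SI data out of the APE scratchpad, one message-buffer
 * sized chunk at a time, handshaking each chunk with the APE through the
 * event registers.
 */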
int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

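/* Mask the chip's PCI interrupt and write 1 to every interrupt mailbox
 * to disable interrupt delivery.
 */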
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

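/* Return nonzero if the status block indicates pending PHY, TX completion,
 * or RX work for this NAPI context.
 */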
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

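/* Read a PHY register over the MII management interface.  Autopolling is
 * temporarily disabled and the PHY APE lock is held around the access.
 */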
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

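/* Clause 45 (MMD) PHY registers are reached indirectly through the
 * clause 22 MII_TG3_MMD_CTRL / MII_TG3_MMD_ADDRESS register pair.
 */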
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

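/* Translate FLOW_CTRL_TX/FLOW_CTRL_RX settings into the 1000BASE-X pause
 * advertisement bits.
 */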
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

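/* Resolve the negotiated (or forced) flow control configuration and
 * program it into the MAC RX and TX mode registers.
 */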
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

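/* phylib link-change callback: synchronize the MAC mode, flow control,
 * and TX timings with the current PHY state and report link transitions.
 */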
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

1955 static int tg3_phy_init(struct tg3 *tp)
1956 {
1957 struct phy_device *phydev;
1958
1959 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1960 return 0;
1961
1962 /* Bring the PHY back to a known state. */
1963 tg3_bmcr_reset(tp);
1964
1965 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1966
1967 /* Attach the MAC to the PHY. */
1968 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1969 phydev->dev_flags, phydev->interface);
1970 if (IS_ERR(phydev)) {
1971 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1972 return PTR_ERR(phydev);
1973 }
1974
1975 /* Mask with MAC supported features. */
1976 switch (phydev->interface) {
1977 case PHY_INTERFACE_MODE_GMII:
1978 case PHY_INTERFACE_MODE_RGMII:
1979 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1980 phydev->supported &= (PHY_GBIT_FEATURES |
1981 SUPPORTED_Pause |
1982 SUPPORTED_Asym_Pause);
1983 break;
1984 }
1985 		/* Fall through */
1986 case PHY_INTERFACE_MODE_MII:
1987 phydev->supported &= (PHY_BASIC_FEATURES |
1988 SUPPORTED_Pause |
1989 SUPPORTED_Asym_Pause);
1990 break;
1991 default:
1992 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1993 return -EINVAL;
1994 }
1995
1996 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1997
1998 phydev->advertising = phydev->supported;
1999
2000 return 0;
2001 }
2002
2003 static void tg3_phy_start(struct tg3 *tp)
2004 {
2005 struct phy_device *phydev;
2006
2007 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2008 return;
2009
2010 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2011
2012 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2013 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2014 phydev->speed = tp->link_config.speed;
2015 phydev->duplex = tp->link_config.duplex;
2016 phydev->autoneg = tp->link_config.autoneg;
2017 phydev->advertising = tp->link_config.advertising;
2018 }
2019
2020 phy_start(phydev);
2021
2022 phy_start_aneg(phydev);
2023 }
2024
2025 static void tg3_phy_stop(struct tg3 *tp)
2026 {
2027 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2028 return;
2029
2030 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2031 }
2032
2033 static void tg3_phy_fini(struct tg3 *tp)
2034 {
2035 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2036 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2037 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2038 }
2039 }
2040
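/* Enable external loopback through the PHY's shadowed auxiliary
 * control register.  The 5401 cannot be read-modify-written, so it
 * gets the loopback bit ORed into the same 0x4c20 base value used
 * elsewhere for that PHY; all other PHYs get a proper
 * read-modify-write.
 */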
2041 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2042 {
2043 int err;
2044 u32 val;
2045
2046 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2047 return 0;
2048
2049 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2050 /* Cannot do read-modify-write on 5401 */
2051 err = tg3_phy_auxctl_write(tp,
2052 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2053 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2054 0x4c20);
2055 goto done;
2056 }
2057
2058 err = tg3_phy_auxctl_read(tp,
2059 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2060 if (err)
2061 return err;
2062
2063 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2064 err = tg3_phy_auxctl_write(tp,
2065 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2066
2067 done:
2068 return err;
2069 }
2070
2071 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2072 {
2073 u32 phytest;
2074
2075 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2076 u32 phy;
2077
2078 tg3_writephy(tp, MII_TG3_FET_TEST,
2079 phytest | MII_TG3_FET_SHADOW_EN);
2080 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2081 if (enable)
2082 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2083 else
2084 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2085 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2086 }
2087 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2088 }
2089 }
2090
2091 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2092 {
2093 u32 reg;
2094
2095 if (!tg3_flag(tp, 5705_PLUS) ||
2096 (tg3_flag(tp, 5717_PLUS) &&
2097 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2098 return;
2099
2100 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2101 tg3_phy_fet_toggle_apd(tp, enable);
2102 return;
2103 }
2104
2105 reg = MII_TG3_MISC_SHDW_WREN |
2106 MII_TG3_MISC_SHDW_SCR5_SEL |
2107 MII_TG3_MISC_SHDW_SCR5_LPED |
2108 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2109 MII_TG3_MISC_SHDW_SCR5_SDTL |
2110 MII_TG3_MISC_SHDW_SCR5_C125OE;
2111 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2112 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2113
2114 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2115
2116
2117 reg = MII_TG3_MISC_SHDW_WREN |
2118 MII_TG3_MISC_SHDW_APD_SEL |
2119 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2120 if (enable)
2121 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2122
2123 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2124 }
2125
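/* Enable or disable automatic MDI/MDI-X crossover.  FET-style PHYs
 * keep the control in a shadowed misc-control register behind
 * MII_TG3_FET_TEST; other PHYs expose it through the auxiliary
 * control shadow (MII_TG3_AUXCTL_SHDWSEL_MISC).  Serdes interfaces
 * have no copper pairs to swap, so they are skipped entirely.
 */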
2126 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2127 {
2128 u32 phy;
2129
2130 if (!tg3_flag(tp, 5705_PLUS) ||
2131 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2132 return;
2133
2134 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2135 u32 ephy;
2136
2137 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2138 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2139
2140 tg3_writephy(tp, MII_TG3_FET_TEST,
2141 ephy | MII_TG3_FET_SHADOW_EN);
2142 if (!tg3_readphy(tp, reg, &phy)) {
2143 if (enable)
2144 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2145 else
2146 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2147 tg3_writephy(tp, reg, phy);
2148 }
2149 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2150 }
2151 } else {
2152 int ret;
2153
2154 ret = tg3_phy_auxctl_read(tp,
2155 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2156 if (!ret) {
2157 if (enable)
2158 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2159 else
2160 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2161 tg3_phy_auxctl_write(tp,
2162 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2163 }
2164 }
2165 }
2166
2167 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2168 {
2169 int ret;
2170 u32 val;
2171
2172 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2173 return;
2174
2175 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2176 if (!ret)
2177 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2178 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2179 }
2180
2181 static void tg3_phy_apply_otp(struct tg3 *tp)
2182 {
2183 u32 otp, phy;
2184
2185 if (!tp->phy_otp)
2186 return;
2187
2188 otp = tp->phy_otp;
2189
2190 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2191 return;
2192
2193 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2194 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2195 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2196
2197 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2198 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2199 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2200
2201 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2202 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2203 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2204
2205 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2206 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2207
2208 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2209 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2210
2211 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2212 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2213 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2214
2215 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2216 }
2217
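/* Re-evaluate Energy Efficient Ethernet after a link change.  LPI
 * is only armed (setlpicnt = 2, consumed later by the driver timer)
 * when the link autonegotiated to 100 or 1000 full duplex and the
 * link partner resolved EEE; the CPMU exit latency is chosen to
 * match the speed.  Otherwise LPI is disabled in the CPMU.
 */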
2218 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2219 {
2220 u32 val;
2221
2222 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2223 return;
2224
2225 tp->setlpicnt = 0;
2226
2227 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2228 current_link_up == 1 &&
2229 tp->link_config.active_duplex == DUPLEX_FULL &&
2230 (tp->link_config.active_speed == SPEED_100 ||
2231 tp->link_config.active_speed == SPEED_1000)) {
2232 u32 eeectl;
2233
2234 if (tp->link_config.active_speed == SPEED_1000)
2235 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2236 else
2237 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2238
2239 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2240
2241 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2242 TG3_CL45_D7_EEERES_STAT, &val);
2243
2244 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2245 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2246 tp->setlpicnt = 2;
2247 }
2248
2249 if (!tp->setlpicnt) {
2250 if (current_link_up == 1 &&
2251 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2252 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2253 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2254 }
2255
2256 val = tr32(TG3_CPMU_EEE_MODE);
2257 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2258 }
2259 }
2260
2261 static void tg3_phy_eee_enable(struct tg3 *tp)
2262 {
2263 u32 val;
2264
2265 if (tp->link_config.active_speed == SPEED_1000 &&
2266 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2268 tg3_flag(tp, 57765_CLASS)) &&
2269 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2270 val = MII_TG3_DSP_TAP26_ALNOKO |
2271 MII_TG3_DSP_TAP26_RMRXSTO;
2272 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2273 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2274 }
2275
2276 val = tr32(TG3_CPMU_EEE_MODE);
2277 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2278 }
2279
2280 static int tg3_wait_macro_done(struct tg3 *tp)
2281 {
2282 int limit = 100;
2283
2284 while (limit--) {
2285 u32 tmp32;
2286
2287 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2288 if ((tmp32 & 0x1000) == 0)
2289 break;
2290 }
2291 }
2292 if (limit < 0)
2293 return -EBUSY;
2294
2295 return 0;
2296 }
2297
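/* Write a known test pattern into each of the four DSP channel
 * blocks and read it back.  A macro-done timeout requests another
 * PHY reset pass through *resetp; a readback mismatch fails with
 * -EBUSY.  The caller, tg3_phy_reset_5703_4_5(), retries the whole
 * sequence up to ten times.
 */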
2298 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2299 {
2300 static const u32 test_pat[4][6] = {
2301 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2302 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2303 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2304 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2305 };
2306 int chan;
2307
2308 for (chan = 0; chan < 4; chan++) {
2309 int i;
2310
2311 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2312 (chan * 0x2000) | 0x0200);
2313 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2314
2315 for (i = 0; i < 6; i++)
2316 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2317 test_pat[chan][i]);
2318
2319 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2320 if (tg3_wait_macro_done(tp)) {
2321 *resetp = 1;
2322 return -EBUSY;
2323 }
2324
2325 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2326 (chan * 0x2000) | 0x0200);
2327 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2328 if (tg3_wait_macro_done(tp)) {
2329 *resetp = 1;
2330 return -EBUSY;
2331 }
2332
2333 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2334 if (tg3_wait_macro_done(tp)) {
2335 *resetp = 1;
2336 return -EBUSY;
2337 }
2338
2339 for (i = 0; i < 6; i += 2) {
2340 u32 low, high;
2341
2342 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2343 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2344 tg3_wait_macro_done(tp)) {
2345 *resetp = 1;
2346 return -EBUSY;
2347 }
2348 low &= 0x7fff;
2349 high &= 0x000f;
2350 if (low != test_pat[chan][i] ||
2351 high != test_pat[chan][i+1]) {
2352 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2353 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2354 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2355
2356 return -EBUSY;
2357 }
2358 }
2359 }
2360
2361 return 0;
2362 }
2363
2364 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2365 {
2366 int chan;
2367
2368 for (chan = 0; chan < 4; chan++) {
2369 int i;
2370
2371 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2372 (chan * 0x2000) | 0x0200);
2373 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2374 for (i = 0; i < 6; i++)
2375 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2376 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2377 if (tg3_wait_macro_done(tp))
2378 return -EBUSY;
2379 }
2380
2381 return 0;
2382 }
2383
2384 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2385 {
2386 u32 reg32, phy9_orig;
2387 int retries, do_phy_reset, err;
2388
2389 retries = 10;
2390 do_phy_reset = 1;
2391 do {
2392 if (do_phy_reset) {
2393 err = tg3_bmcr_reset(tp);
2394 if (err)
2395 return err;
2396 do_phy_reset = 0;
2397 }
2398
2399 /* Disable transmitter and interrupt. */
2400 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2401 continue;
2402
2403 reg32 |= 0x3000;
2404 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2405
2406 /* Set full-duplex, 1000 mbps. */
2407 tg3_writephy(tp, MII_BMCR,
2408 BMCR_FULLDPLX | BMCR_SPEED1000);
2409
2410 /* Set to master mode. */
2411 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2412 continue;
2413
2414 tg3_writephy(tp, MII_CTRL1000,
2415 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2416
2417 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2418 if (err)
2419 return err;
2420
2421 /* Block the PHY control access. */
2422 tg3_phydsp_write(tp, 0x8005, 0x0800);
2423
2424 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2425 if (!err)
2426 break;
2427 } while (--retries);
2428
2429 err = tg3_phy_reset_chanpat(tp);
2430 if (err)
2431 return err;
2432
2433 tg3_phydsp_write(tp, 0x8005, 0x0000);
2434
2435 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2436 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2437
2438 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439
2440 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2441
2442 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2443 reg32 &= ~0x3000;
2444 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2445 } else if (!err)
2446 err = -EBUSY;
2447
2448 return err;
2449 }
2450
2451 /* This will reset the tigon3 PHY unconditionally and reapply
2452  * whatever workarounds the chip revision requires.
2453  */
2454 static int tg3_phy_reset(struct tg3 *tp)
2455 {
2456 u32 val, cpmuctrl;
2457 int err;
2458
2459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2460 val = tr32(GRC_MISC_CFG);
2461 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2462 udelay(40);
2463 }
2464 err = tg3_readphy(tp, MII_BMSR, &val);
2465 err |= tg3_readphy(tp, MII_BMSR, &val);
2466 if (err != 0)
2467 return -EBUSY;
2468
2469 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2470 netif_carrier_off(tp->dev);
2471 tg3_link_report(tp);
2472 }
2473
2474 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2477 err = tg3_phy_reset_5703_4_5(tp);
2478 if (err)
2479 return err;
2480 goto out;
2481 }
2482
2483 cpmuctrl = 0;
2484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2485 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2486 cpmuctrl = tr32(TG3_CPMU_CTRL);
2487 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2488 tw32(TG3_CPMU_CTRL,
2489 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2490 }
2491
2492 err = tg3_bmcr_reset(tp);
2493 if (err)
2494 return err;
2495
2496 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2497 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2498 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2499
2500 tw32(TG3_CPMU_CTRL, cpmuctrl);
2501 }
2502
2503 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2504 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2505 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2506 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2507 CPMU_LSPD_1000MB_MACCLK_12_5) {
2508 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2509 udelay(40);
2510 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2511 }
2512 }
2513
2514 if (tg3_flag(tp, 5717_PLUS) &&
2515 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2516 return 0;
2517
2518 tg3_phy_apply_otp(tp);
2519
2520 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2521 tg3_phy_toggle_apd(tp, true);
2522 else
2523 tg3_phy_toggle_apd(tp, false);
2524
2525 out:
2526 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2527 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2528 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2529 tg3_phydsp_write(tp, 0x000a, 0x0323);
2530 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2531 }
2532
2533 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2534 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2535 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2536 }
2537
2538 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2539 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2540 tg3_phydsp_write(tp, 0x000a, 0x310b);
2541 tg3_phydsp_write(tp, 0x201f, 0x9506);
2542 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2543 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2544 }
2545 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2546 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2547 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2548 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2549 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2550 tg3_writephy(tp, MII_TG3_TEST1,
2551 MII_TG3_TEST1_TRIM_EN | 0x4);
2552 } else
2553 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2554
2555 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2556 }
2557 }
2558
2559 	/* Set Extended packet length bit (bit 14) on all chips that
2560 	 * support jumbo frames. */
2561 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2562 /* Cannot do read-modify-write on 5401 */
2563 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2564 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2565 /* Set bit 14 with read-modify-write to preserve other bits */
2566 err = tg3_phy_auxctl_read(tp,
2567 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2568 if (!err)
2569 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2570 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2571 }
2572
2573 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2574 * jumbo frames transmission.
2575 */
2576 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2577 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2578 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2579 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2580 }
2581
2582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2583 /* adjust output voltage */
2584 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2585 }
2586
2587 tg3_phy_toggle_automdix(tp, 1);
2588 tg3_phy_set_wirespeed(tp);
2589 return 0;
2590 }
2591
2592 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2593 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2594 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2595 TG3_GPIO_MSG_NEED_VAUX)
2596 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2597 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2598 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2599 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2600 (TG3_GPIO_MSG_DRVR_PRES << 12))
2601
2602 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2603 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2604 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2605 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2606 (TG3_GPIO_MSG_NEED_VAUX << 12))
2607
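/* Publish this PCI function's power status bits (driver present /
 * needs Vaux) in the word shared by all four functions.  On
 * 5717/5719 the shared word lives in the APE's GPIO message
 * register, otherwise in TG3_CPMU_DRV_STATUS; each function owns a
 * 4-bit slot at (4 * pci_fn).  Returns the combined status of all
 * functions.
 */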
2608 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2609 {
2610 u32 status, shift;
2611
2612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2614 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2615 else
2616 status = tr32(TG3_CPMU_DRV_STATUS);
2617
2618 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2619 status &= ~(TG3_GPIO_MSG_MASK << shift);
2620 status |= (newstat << shift);
2621
2622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2624 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2625 else
2626 tw32(TG3_CPMU_DRV_STATUS, status);
2627
2628 return status >> TG3_APE_GPIO_MSG_SHIFT;
2629 }
2630
2631 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2632 {
2633 if (!tg3_flag(tp, IS_NIC))
2634 return 0;
2635
2636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2639 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2640 return -EIO;
2641
2642 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2643
2644 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2645 TG3_GRC_LCLCTL_PWRSW_DELAY);
2646
2647 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2648 } else {
2649 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2650 TG3_GRC_LCLCTL_PWRSW_DELAY);
2651 }
2652
2653 return 0;
2654 }
2655
2656 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2657 {
2658 u32 grc_local_ctrl;
2659
2660 if (!tg3_flag(tp, IS_NIC) ||
2661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2663 return;
2664
2665 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2666
2667 tw32_wait_f(GRC_LOCAL_CTRL,
2668 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2669 TG3_GRC_LCLCTL_PWRSW_DELAY);
2670
2671 tw32_wait_f(GRC_LOCAL_CTRL,
2672 grc_local_ctrl,
2673 TG3_GRC_LCLCTL_PWRSW_DELAY);
2674
2675 tw32_wait_f(GRC_LOCAL_CTRL,
2676 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2677 TG3_GRC_LCLCTL_PWRSW_DELAY);
2678 }
2679
2680 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2681 {
2682 if (!tg3_flag(tp, IS_NIC))
2683 return;
2684
2685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2687 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2688 (GRC_LCLCTRL_GPIO_OE0 |
2689 GRC_LCLCTRL_GPIO_OE1 |
2690 GRC_LCLCTRL_GPIO_OE2 |
2691 GRC_LCLCTRL_GPIO_OUTPUT0 |
2692 GRC_LCLCTRL_GPIO_OUTPUT1),
2693 TG3_GRC_LCLCTL_PWRSW_DELAY);
2694 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2695 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2696 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2697 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2698 GRC_LCLCTRL_GPIO_OE1 |
2699 GRC_LCLCTRL_GPIO_OE2 |
2700 GRC_LCLCTRL_GPIO_OUTPUT0 |
2701 GRC_LCLCTRL_GPIO_OUTPUT1 |
2702 tp->grc_local_ctrl;
2703 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2704 TG3_GRC_LCLCTL_PWRSW_DELAY);
2705
2706 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2707 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2708 TG3_GRC_LCLCTL_PWRSW_DELAY);
2709
2710 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2711 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2712 TG3_GRC_LCLCTL_PWRSW_DELAY);
2713 } else {
2714 u32 no_gpio2;
2715 u32 grc_local_ctrl = 0;
2716
2717 		/* Workaround to prevent the board from overdrawing current. */
2718 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2719 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2720 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2721 grc_local_ctrl,
2722 TG3_GRC_LCLCTL_PWRSW_DELAY);
2723 }
2724
2725 /* On 5753 and variants, GPIO2 cannot be used. */
2726 no_gpio2 = tp->nic_sram_data_cfg &
2727 NIC_SRAM_DATA_CFG_NO_GPIO2;
2728
2729 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2730 GRC_LCLCTRL_GPIO_OE1 |
2731 GRC_LCLCTRL_GPIO_OE2 |
2732 GRC_LCLCTRL_GPIO_OUTPUT1 |
2733 GRC_LCLCTRL_GPIO_OUTPUT2;
2734 if (no_gpio2) {
2735 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2736 GRC_LCLCTRL_GPIO_OUTPUT2);
2737 }
2738 tw32_wait_f(GRC_LOCAL_CTRL,
2739 tp->grc_local_ctrl | grc_local_ctrl,
2740 TG3_GRC_LCLCTL_PWRSW_DELAY);
2741
2742 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2743
2744 tw32_wait_f(GRC_LOCAL_CTRL,
2745 tp->grc_local_ctrl | grc_local_ctrl,
2746 TG3_GRC_LCLCTL_PWRSW_DELAY);
2747
2748 if (!no_gpio2) {
2749 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2750 tw32_wait_f(GRC_LOCAL_CTRL,
2751 tp->grc_local_ctrl | grc_local_ctrl,
2752 TG3_GRC_LCLCTL_PWRSW_DELAY);
2753 }
2754 }
2755 }
2756
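/* 5717-class power source arbitration: under the GPIO APE lock,
 * publish whether this function needs Vaux (ASF or APE active, or
 * WOL armed).  Only when no other function still has a driver
 * present does this function actually switch the power source.
 */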
2757 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2758 {
2759 u32 msg = 0;
2760
2761 /* Serialize power state transitions */
2762 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763 return;
2764
2765 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2766 msg = TG3_GPIO_MSG_NEED_VAUX;
2767
2768 msg = tg3_set_function_status(tp, msg);
2769
2770 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2771 goto done;
2772
2773 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2774 tg3_pwrsrc_switch_to_vaux(tp);
2775 else
2776 tg3_pwrsrc_die_with_vmain(tp);
2777
2778 done:
2779 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2780 }
2781
2782 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2783 {
2784 bool need_vaux = false;
2785
2786 /* The GPIOs do something completely different on 57765. */
2787 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2788 return;
2789
2790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2793 tg3_frob_aux_power_5717(tp, include_wol ?
2794 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2795 return;
2796 }
2797
2798 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2799 struct net_device *dev_peer;
2800
2801 dev_peer = pci_get_drvdata(tp->pdev_peer);
2802
2803 /* remove_one() may have been run on the peer. */
2804 if (dev_peer) {
2805 struct tg3 *tp_peer = netdev_priv(dev_peer);
2806
2807 if (tg3_flag(tp_peer, INIT_COMPLETE))
2808 return;
2809
2810 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2811 tg3_flag(tp_peer, ENABLE_ASF))
2812 need_vaux = true;
2813 }
2814 }
2815
2816 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2817 tg3_flag(tp, ENABLE_ASF))
2818 need_vaux = true;
2819
2820 if (need_vaux)
2821 tg3_pwrsrc_switch_to_vaux(tp);
2822 else
2823 tg3_pwrsrc_die_with_vmain(tp);
2824 }
2825
2826 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2827 {
2828 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2829 return 1;
2830 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2831 if (speed != SPEED_10)
2832 return 1;
2833 } else if (speed == SPEED_10)
2834 return 1;
2835
2836 return 0;
2837 }
2838
2839 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2840 {
2841 u32 val;
2842
2843 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2845 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2846 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2847
2848 sg_dig_ctrl |=
2849 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2850 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2851 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2852 }
2853 return;
2854 }
2855
2856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2857 tg3_bmcr_reset(tp);
2858 val = tr32(GRC_MISC_CFG);
2859 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2860 udelay(40);
2861 return;
2862 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2863 u32 phytest;
2864 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2865 u32 phy;
2866
2867 tg3_writephy(tp, MII_ADVERTISE, 0);
2868 tg3_writephy(tp, MII_BMCR,
2869 BMCR_ANENABLE | BMCR_ANRESTART);
2870
2871 tg3_writephy(tp, MII_TG3_FET_TEST,
2872 phytest | MII_TG3_FET_SHADOW_EN);
2873 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2874 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2875 tg3_writephy(tp,
2876 MII_TG3_FET_SHDW_AUXMODE4,
2877 phy);
2878 }
2879 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2880 }
2881 return;
2882 } else if (do_low_power) {
2883 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2884 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2885
2886 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2887 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2888 MII_TG3_AUXCTL_PCTL_VREG_11V;
2889 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2890 }
2891
2892 /* The PHY should not be powered down on some chips because
2893 * of bugs.
2894 */
2895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2896 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2897 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2898 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2899 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2900 !tp->pci_fn))
2901 return;
2902
2903 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2904 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2905 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2906 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2907 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2908 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2909 }
2910
2911 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2912 }
2913
2914 /* tp->lock is held. */
2915 static int tg3_nvram_lock(struct tg3 *tp)
2916 {
2917 if (tg3_flag(tp, NVRAM)) {
2918 int i;
2919
2920 if (tp->nvram_lock_cnt == 0) {
2921 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2922 for (i = 0; i < 8000; i++) {
2923 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2924 break;
2925 udelay(20);
2926 }
2927 if (i == 8000) {
2928 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2929 return -ENODEV;
2930 }
2931 }
2932 tp->nvram_lock_cnt++;
2933 }
2934 return 0;
2935 }
2936
2937 /* tp->lock is held. */
2938 static void tg3_nvram_unlock(struct tg3 *tp)
2939 {
2940 if (tg3_flag(tp, NVRAM)) {
2941 if (tp->nvram_lock_cnt > 0)
2942 tp->nvram_lock_cnt--;
2943 if (tp->nvram_lock_cnt == 0)
2944 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2945 }
2946 }
2947
2948 /* tp->lock is held. */
2949 static void tg3_enable_nvram_access(struct tg3 *tp)
2950 {
2951 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2952 u32 nvaccess = tr32(NVRAM_ACCESS);
2953
2954 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2955 }
2956 }
2957
2958 /* tp->lock is held. */
2959 static void tg3_disable_nvram_access(struct tg3 *tp)
2960 {
2961 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2962 u32 nvaccess = tr32(NVRAM_ACCESS);
2963
2964 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2965 }
2966 }
2967
2968 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2969 u32 offset, u32 *val)
2970 {
2971 u32 tmp;
2972 int i;
2973
2974 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2975 return -EINVAL;
2976
2977 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2978 EEPROM_ADDR_DEVID_MASK |
2979 EEPROM_ADDR_READ);
2980 tw32(GRC_EEPROM_ADDR,
2981 tmp |
2982 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2983 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2984 EEPROM_ADDR_ADDR_MASK) |
2985 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2986
2987 for (i = 0; i < 1000; i++) {
2988 tmp = tr32(GRC_EEPROM_ADDR);
2989
2990 if (tmp & EEPROM_ADDR_COMPLETE)
2991 break;
2992 msleep(1);
2993 }
2994 if (!(tmp & EEPROM_ADDR_COMPLETE))
2995 return -EBUSY;
2996
2997 tmp = tr32(GRC_EEPROM_DATA);
2998
2999 /*
3000 * The data will always be opposite the native endian
3001 * format. Perform a blind byteswap to compensate.
3002 */
3003 *val = swab32(tmp);
3004
3005 return 0;
3006 }
3007
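/* tg3_nvram_exec_cmd() below polls NVRAM_CMD_DONE in 10 usec steps,
 * so a command is bounded at roughly
 *
 *	NVRAM_CMD_TIMEOUT * 10 usec = 100 ms
 *
 * before it gives up with -EBUSY.
 */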
3008 #define NVRAM_CMD_TIMEOUT 10000
3009
3010 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3011 {
3012 int i;
3013
3014 tw32(NVRAM_CMD, nvram_cmd);
3015 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3016 udelay(10);
3017 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3018 udelay(10);
3019 break;
3020 }
3021 }
3022
3023 if (i == NVRAM_CMD_TIMEOUT)
3024 return -EBUSY;
3025
3026 return 0;
3027 }
3028
3029 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3030 {
3031 if (tg3_flag(tp, NVRAM) &&
3032 tg3_flag(tp, NVRAM_BUFFERED) &&
3033 tg3_flag(tp, FLASH) &&
3034 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3035 (tp->nvram_jedecnum == JEDEC_ATMEL))
3036
3037 addr = ((addr / tp->nvram_pagesize) <<
3038 ATMEL_AT45DB0X1B_PAGE_POS) +
3039 (addr % tp->nvram_pagesize);
3040
3041 return addr;
3042 }
3043
3044 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3045 {
3046 if (tg3_flag(tp, NVRAM) &&
3047 tg3_flag(tp, NVRAM_BUFFERED) &&
3048 tg3_flag(tp, FLASH) &&
3049 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3050 (tp->nvram_jedecnum == JEDEC_ATMEL))
3051
3052 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3053 tp->nvram_pagesize) +
3054 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3055
3056 return addr;
3057 }
3058
3059 /* NOTE: Data read in from NVRAM is byteswapped according to
3060 * the byteswapping settings for all other register accesses.
3061 * tg3 devices are BE devices, so on a BE machine, the data
3062 * returned will be exactly as it is seen in NVRAM. On a LE
3063 * machine, the 32-bit value will be byteswapped.
3064 */
3065 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3066 {
3067 int ret;
3068
3069 if (!tg3_flag(tp, NVRAM))
3070 return tg3_nvram_read_using_eeprom(tp, offset, val);
3071
3072 offset = tg3_nvram_phys_addr(tp, offset);
3073
3074 if (offset > NVRAM_ADDR_MSK)
3075 return -EINVAL;
3076
3077 ret = tg3_nvram_lock(tp);
3078 if (ret)
3079 return ret;
3080
3081 tg3_enable_nvram_access(tp);
3082
3083 tw32(NVRAM_ADDR, offset);
3084 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3085 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3086
3087 if (ret == 0)
3088 *val = tr32(NVRAM_RDDATA);
3089
3090 tg3_disable_nvram_access(tp);
3091
3092 tg3_nvram_unlock(tp);
3093
3094 return ret;
3095 }
3096
3097 /* Ensures NVRAM data is in bytestream format. */
3098 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3099 {
3100 u32 v;
3101 int res = tg3_nvram_read(tp, offset, &v);
3102 if (!res)
3103 *val = cpu_to_be32(v);
3104 return res;
3105 }
3106
3107 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3108 u32 offset, u32 len, u8 *buf)
3109 {
3110 int i, j, rc = 0;
3111 u32 val;
3112
3113 for (i = 0; i < len; i += 4) {
3114 u32 addr;
3115 __be32 data;
3116
3117 addr = offset + i;
3118
3119 memcpy(&data, buf + i, 4);
3120
3121 /*
3122 * The SEEPROM interface expects the data to always be opposite
3123 * the native endian format. We accomplish this by reversing
3124 * all the operations that would have been performed on the
3125 * data from a call to tg3_nvram_read_be32().
3126 */
3127 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3128
3129 val = tr32(GRC_EEPROM_ADDR);
3130 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3131
3132 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3133 EEPROM_ADDR_READ);
3134 tw32(GRC_EEPROM_ADDR, val |
3135 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3136 (addr & EEPROM_ADDR_ADDR_MASK) |
3137 EEPROM_ADDR_START |
3138 EEPROM_ADDR_WRITE);
3139
3140 for (j = 0; j < 1000; j++) {
3141 val = tr32(GRC_EEPROM_ADDR);
3142
3143 if (val & EEPROM_ADDR_COMPLETE)
3144 break;
3145 msleep(1);
3146 }
3147 if (!(val & EEPROM_ADDR_COMPLETE)) {
3148 rc = -EBUSY;
3149 break;
3150 }
3151 }
3152
3153 return rc;
3154 }
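/* Unbuffered flash parts can only be programmed a page at a time,
 * so writes are done read-modify-write per page: read the whole
 * page into a scratch buffer, merge in the caller's data, then
 * write-enable, erase, and reprogram the page dword by dword with
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing.  An interrupted write
 * therefore risks the entire page, not just the bytes changed.
 */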
3155
3156 /* offset and length are dword aligned */
3157 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3158 u8 *buf)
3159 {
3160 int ret = 0;
3161 u32 pagesize = tp->nvram_pagesize;
3162 u32 pagemask = pagesize - 1;
3163 u32 nvram_cmd;
3164 u8 *tmp;
3165
3166 tmp = kmalloc(pagesize, GFP_KERNEL);
3167 if (tmp == NULL)
3168 return -ENOMEM;
3169
3170 while (len) {
3171 int j;
3172 u32 phy_addr, page_off, size;
3173
3174 phy_addr = offset & ~pagemask;
3175
3176 for (j = 0; j < pagesize; j += 4) {
3177 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3178 (__be32 *) (tmp + j));
3179 if (ret)
3180 break;
3181 }
3182 if (ret)
3183 break;
3184
3185 page_off = offset & pagemask;
3186 size = pagesize;
3187 if (len < size)
3188 size = len;
3189
3190 len -= size;
3191
3192 memcpy(tmp + page_off, buf, size);
3193
3194 offset = offset + (pagesize - page_off);
3195
3196 tg3_enable_nvram_access(tp);
3197
3198 /*
3199 * Before we can erase the flash page, we need
3200 * to issue a special "write enable" command.
3201 */
3202 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3203
3204 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3205 break;
3206
3207 /* Erase the target page */
3208 tw32(NVRAM_ADDR, phy_addr);
3209
3210 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3211 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3212
3213 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3214 break;
3215
3216 /* Issue another write enable to start the write. */
3217 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3218
3219 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3220 break;
3221
3222 for (j = 0; j < pagesize; j += 4) {
3223 __be32 data;
3224
3225 data = *((__be32 *) (tmp + j));
3226
3227 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3228
3229 tw32(NVRAM_ADDR, phy_addr + j);
3230
3231 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3232 NVRAM_CMD_WR;
3233
3234 if (j == 0)
3235 nvram_cmd |= NVRAM_CMD_FIRST;
3236 else if (j == (pagesize - 4))
3237 nvram_cmd |= NVRAM_CMD_LAST;
3238
3239 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3240 if (ret)
3241 break;
3242 }
3243 if (ret)
3244 break;
3245 }
3246
3247 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3248 tg3_nvram_exec_cmd(tp, nvram_cmd);
3249
3250 kfree(tmp);
3251
3252 return ret;
3253 }
3254
3255 /* offset and length are dword aligned */
3256 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3257 u8 *buf)
3258 {
3259 int i, ret = 0;
3260
3261 for (i = 0; i < len; i += 4, offset += 4) {
3262 u32 page_off, phy_addr, nvram_cmd;
3263 __be32 data;
3264
3265 memcpy(&data, buf + i, 4);
3266 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3267
3268 page_off = offset % tp->nvram_pagesize;
3269
3270 phy_addr = tg3_nvram_phys_addr(tp, offset);
3271
3272 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3273
3274 if (page_off == 0 || i == 0)
3275 nvram_cmd |= NVRAM_CMD_FIRST;
3276 if (page_off == (tp->nvram_pagesize - 4))
3277 nvram_cmd |= NVRAM_CMD_LAST;
3278
3279 if (i == (len - 4))
3280 nvram_cmd |= NVRAM_CMD_LAST;
3281
3282 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3283 !tg3_flag(tp, FLASH) ||
3284 !tg3_flag(tp, 57765_PLUS))
3285 tw32(NVRAM_ADDR, phy_addr);
3286
3287 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3288 !tg3_flag(tp, 5755_PLUS) &&
3289 (tp->nvram_jedecnum == JEDEC_ST) &&
3290 (nvram_cmd & NVRAM_CMD_FIRST)) {
3291 u32 cmd;
3292
3293 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3294 ret = tg3_nvram_exec_cmd(tp, cmd);
3295 if (ret)
3296 break;
3297 }
3298 if (!tg3_flag(tp, FLASH)) {
3299 /* We always do complete word writes to eeprom. */
3300 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3301 }
3302
3303 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3304 if (ret)
3305 break;
3306 }
3307 return ret;
3308 }
3309
3310 /* offset and length are dword aligned */
3311 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3312 {
3313 int ret;
3314
3315 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3316 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3317 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3318 udelay(40);
3319 }
3320
3321 if (!tg3_flag(tp, NVRAM)) {
3322 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3323 } else {
3324 u32 grc_mode;
3325
3326 ret = tg3_nvram_lock(tp);
3327 if (ret)
3328 return ret;
3329
3330 tg3_enable_nvram_access(tp);
3331 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3332 tw32(NVRAM_WRITE1, 0x406);
3333
3334 grc_mode = tr32(GRC_MODE);
3335 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3336
3337 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3338 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3339 buf);
3340 } else {
3341 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3342 buf);
3343 }
3344
3345 grc_mode = tr32(GRC_MODE);
3346 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3347
3348 tg3_disable_nvram_access(tp);
3349 tg3_nvram_unlock(tp);
3350 }
3351
3352 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3353 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3354 udelay(40);
3355 }
3356
3357 return ret;
3358 }
3359
3360 #define RX_CPU_SCRATCH_BASE 0x30000
3361 #define RX_CPU_SCRATCH_SIZE 0x04000
3362 #define TX_CPU_SCRATCH_BASE 0x34000
3363 #define TX_CPU_SCRATCH_SIZE 0x04000
3364
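/* Halt the on-chip RX or TX CPU.  The 5906 has neither; it is
 * halted through GRC_VCPU_EXT_CTRL instead.  A halt request can
 * take several attempts to stick, hence the generous polling loop.
 * Any NVRAM arbitration the firmware held is released afterwards.
 */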
3365 /* tp->lock is held. */
3366 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3367 {
3368 int i;
3369
3370 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3371
3372 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3373 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3374
3375 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3376 return 0;
3377 }
3378 if (offset == RX_CPU_BASE) {
3379 for (i = 0; i < 10000; i++) {
3380 tw32(offset + CPU_STATE, 0xffffffff);
3381 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3382 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3383 break;
3384 }
3385
3386 tw32(offset + CPU_STATE, 0xffffffff);
3387 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3388 udelay(10);
3389 } else {
3390 for (i = 0; i < 10000; i++) {
3391 tw32(offset + CPU_STATE, 0xffffffff);
3392 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3393 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3394 break;
3395 }
3396 }
3397
3398 if (i >= 10000) {
3399 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3400 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3401 return -ENODEV;
3402 }
3403
3404 /* Clear firmware's nvram arbitration. */
3405 if (tg3_flag(tp, NVRAM))
3406 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3407 return 0;
3408 }
3409
3410 struct fw_info {
3411 unsigned int fw_base;
3412 unsigned int fw_len;
3413 const __be32 *fw_data;
3414 };
3415
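/* Copy a firmware image into a CPU's scratch memory.  The NVRAM
 * lock is taken first (bootcode may still be using NVRAM), the CPU
 * is halted, its scratch area is zeroed, and the image is written
 * word by word at fw_base's offset within the scratch window.
 * Starting the CPU is left to the caller.
 */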
3416 /* tp->lock is held. */
3417 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3418 u32 cpu_scratch_base, int cpu_scratch_size,
3419 struct fw_info *info)
3420 {
3421 int err, lock_err, i;
3422 void (*write_op)(struct tg3 *, u32, u32);
3423
3424 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3425 netdev_err(tp->dev,
3426 "%s: Trying to load TX cpu firmware which is 5705\n",
3427 __func__);
3428 return -EINVAL;
3429 }
3430
3431 if (tg3_flag(tp, 5705_PLUS))
3432 write_op = tg3_write_mem;
3433 else
3434 write_op = tg3_write_indirect_reg32;
3435
3436 /* It is possible that bootcode is still loading at this point.
3437 * Get the nvram lock first before halting the cpu.
3438 */
3439 lock_err = tg3_nvram_lock(tp);
3440 err = tg3_halt_cpu(tp, cpu_base);
3441 if (!lock_err)
3442 tg3_nvram_unlock(tp);
3443 if (err)
3444 goto out;
3445
3446 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3447 write_op(tp, cpu_scratch_base + i, 0);
3448 tw32(cpu_base + CPU_STATE, 0xffffffff);
3449 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3450 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3451 write_op(tp, (cpu_scratch_base +
3452 (info->fw_base & 0xffff) +
3453 (i * sizeof(u32))),
3454 be32_to_cpu(info->fw_data[i]));
3455
3456 err = 0;
3457
3458 out:
3459 return err;
3460 }
3461
3462 /* tp->lock is held. */
3463 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3464 {
3465 struct fw_info info;
3466 const __be32 *fw_data;
3467 int err, i;
3468
3469 fw_data = (void *)tp->fw->data;
3470
3471 	/* The firmware blob starts with the version numbers, followed by
3472 	 * the start address and length.  We set the complete length:
3473 	 * length = end_address_of_bss - start_address_of_text.  The
3474 	 * remainder is the blob, to be loaded contiguously from the
3475 	 * start address. */
3476
3477 info.fw_base = be32_to_cpu(fw_data[1]);
3478 info.fw_len = tp->fw->size - 12;
3479 info.fw_data = &fw_data[3];
3480
3481 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3482 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3483 &info);
3484 if (err)
3485 return err;
3486
3487 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3488 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3489 &info);
3490 if (err)
3491 return err;
3492
3493 /* Now startup only the RX cpu. */
3494 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3495 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3496
3497 for (i = 0; i < 5; i++) {
3498 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3499 break;
3500 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3501 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3502 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3503 udelay(1000);
3504 }
3505 if (i >= 5) {
3506 		netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3507 			   "should be %08x\n", __func__,
3508 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3509 return -ENODEV;
3510 }
3511 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3512 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3513
3514 return 0;
3515 }
3516
3517 /* tp->lock is held. */
3518 static int tg3_load_tso_firmware(struct tg3 *tp)
3519 {
3520 struct fw_info info;
3521 const __be32 *fw_data;
3522 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3523 int err, i;
3524
3525 if (tg3_flag(tp, HW_TSO_1) ||
3526 tg3_flag(tp, HW_TSO_2) ||
3527 tg3_flag(tp, HW_TSO_3))
3528 return 0;
3529
3530 fw_data = (void *)tp->fw->data;
3531
3532 	/* The firmware blob starts with the version numbers, followed by
3533 	 * the start address and length.  We set the complete length:
3534 	 * length = end_address_of_bss - start_address_of_text.  The
3535 	 * remainder is the blob, to be loaded contiguously from the
3536 	 * start address. */
3537
3538 info.fw_base = be32_to_cpu(fw_data[1]);
3539 cpu_scratch_size = tp->fw_len;
3540 info.fw_len = tp->fw->size - 12;
3541 info.fw_data = &fw_data[3];
3542
3543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3544 cpu_base = RX_CPU_BASE;
3545 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3546 } else {
3547 cpu_base = TX_CPU_BASE;
3548 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3549 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3550 }
3551
3552 err = tg3_load_firmware_cpu(tp, cpu_base,
3553 cpu_scratch_base, cpu_scratch_size,
3554 &info);
3555 if (err)
3556 return err;
3557
3558 /* Now startup the cpu. */
3559 tw32(cpu_base + CPU_STATE, 0xffffffff);
3560 tw32_f(cpu_base + CPU_PC, info.fw_base);
3561
3562 for (i = 0; i < 5; i++) {
3563 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3564 break;
3565 tw32(cpu_base + CPU_STATE, 0xffffffff);
3566 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3567 tw32_f(cpu_base + CPU_PC, info.fw_base);
3568 udelay(1000);
3569 }
3570 if (i >= 5) {
3571 netdev_err(tp->dev,
3572 "%s fails to set CPU PC, is %08x should be %08x\n",
3573 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3574 return -ENODEV;
3575 }
3576 tw32(cpu_base + CPU_STATE, 0xffffffff);
3577 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3578 return 0;
3579 }
3580
3581
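/* Program the station address into all four MAC_ADDR_{0..3} slots
 * (slot 1 may be skipped when ASF firmware owns it), plus the
 * twelve MAC_EXTADDR slots on 5703/5704.  The register layout is:
 *
 *	HIGH = addr[0] << 8  | addr[1]
 *	LOW  = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5]
 *
 * The TX backoff seed is the byte sum masked by
 * TX_BACKOFF_SEED_MASK.
 */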
3582 /* tp->lock is held. */
3583 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3584 {
3585 u32 addr_high, addr_low;
3586 int i;
3587
3588 addr_high = ((tp->dev->dev_addr[0] << 8) |
3589 tp->dev->dev_addr[1]);
3590 addr_low = ((tp->dev->dev_addr[2] << 24) |
3591 (tp->dev->dev_addr[3] << 16) |
3592 (tp->dev->dev_addr[4] << 8) |
3593 (tp->dev->dev_addr[5] << 0));
3594 for (i = 0; i < 4; i++) {
3595 if (i == 1 && skip_mac_1)
3596 continue;
3597 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3598 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3599 }
3600
3601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3602 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3603 for (i = 0; i < 12; i++) {
3604 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3605 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3606 }
3607 }
3608
3609 addr_high = (tp->dev->dev_addr[0] +
3610 tp->dev->dev_addr[1] +
3611 tp->dev->dev_addr[2] +
3612 tp->dev->dev_addr[3] +
3613 tp->dev->dev_addr[4] +
3614 tp->dev->dev_addr[5]) &
3615 TX_BACKOFF_SEED_MASK;
3616 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3617 }
3618
3619 static void tg3_enable_register_access(struct tg3 *tp)
3620 {
3621 /*
3622 * Make sure register accesses (indirect or otherwise) will function
3623 * correctly.
3624 */
3625 pci_write_config_dword(tp->pdev,
3626 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3627 }
3628
3629 static int tg3_power_up(struct tg3 *tp)
3630 {
3631 int err;
3632
3633 tg3_enable_register_access(tp);
3634
3635 err = pci_set_power_state(tp->pdev, PCI_D0);
3636 if (!err) {
3637 /* Switch out of Vaux if it is a NIC */
3638 tg3_pwrsrc_switch_to_vmain(tp);
3639 } else {
3640 netdev_err(tp->dev, "Transition to D0 failed\n");
3641 }
3642
3643 return err;
3644 }
3645
3646 static int tg3_setup_phy(struct tg3 *, int);
3647
3648 static int tg3_power_down_prepare(struct tg3 *tp)
3649 {
3650 u32 misc_host_ctrl;
3651 bool device_should_wake, do_low_power;
3652
3653 tg3_enable_register_access(tp);
3654
3655 /* Restore the CLKREQ setting. */
3656 if (tg3_flag(tp, CLKREQ_BUG))
3657 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3658 PCI_EXP_LNKCTL_CLKREQ_EN);
3659
3660 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3661 tw32(TG3PCI_MISC_HOST_CTRL,
3662 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3663
3664 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3665 tg3_flag(tp, WOL_ENABLE);
3666
3667 if (tg3_flag(tp, USE_PHYLIB)) {
3668 do_low_power = false;
3669 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3670 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3671 struct phy_device *phydev;
3672 u32 phyid, advertising;
3673
3674 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3675
3676 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3677
3678 tp->link_config.speed = phydev->speed;
3679 tp->link_config.duplex = phydev->duplex;
3680 tp->link_config.autoneg = phydev->autoneg;
3681 tp->link_config.advertising = phydev->advertising;
3682
3683 advertising = ADVERTISED_TP |
3684 ADVERTISED_Pause |
3685 ADVERTISED_Autoneg |
3686 ADVERTISED_10baseT_Half;
3687
3688 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3689 if (tg3_flag(tp, WOL_SPEED_100MB))
3690 advertising |=
3691 ADVERTISED_100baseT_Half |
3692 ADVERTISED_100baseT_Full |
3693 ADVERTISED_10baseT_Full;
3694 else
3695 advertising |= ADVERTISED_10baseT_Full;
3696 }
3697
3698 phydev->advertising = advertising;
3699
3700 phy_start_aneg(phydev);
3701
3702 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3703 if (phyid != PHY_ID_BCMAC131) {
3704 phyid &= PHY_BCM_OUI_MASK;
3705 if (phyid == PHY_BCM_OUI_1 ||
3706 phyid == PHY_BCM_OUI_2 ||
3707 phyid == PHY_BCM_OUI_3)
3708 do_low_power = true;
3709 }
3710 }
3711 } else {
3712 do_low_power = true;
3713
3714 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3715 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3716
3717 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3718 tg3_setup_phy(tp, 0);
3719 }
3720
3721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3722 u32 val;
3723
3724 val = tr32(GRC_VCPU_EXT_CTRL);
3725 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3726 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3727 int i;
3728 u32 val;
3729
3730 for (i = 0; i < 200; i++) {
3731 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3732 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3733 break;
3734 msleep(1);
3735 }
3736 }
3737 if (tg3_flag(tp, WOL_CAP))
3738 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3739 WOL_DRV_STATE_SHUTDOWN |
3740 WOL_DRV_WOL |
3741 WOL_SET_MAGIC_PKT);
3742
3743 if (device_should_wake) {
3744 u32 mac_mode;
3745
3746 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3747 if (do_low_power &&
3748 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3749 tg3_phy_auxctl_write(tp,
3750 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3751 MII_TG3_AUXCTL_PCTL_WOL_EN |
3752 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3753 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3754 udelay(40);
3755 }
3756
3757 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3758 mac_mode = MAC_MODE_PORT_MODE_GMII;
3759 else
3760 mac_mode = MAC_MODE_PORT_MODE_MII;
3761
3762 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3763 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3764 ASIC_REV_5700) {
3765 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3766 SPEED_100 : SPEED_10;
3767 if (tg3_5700_link_polarity(tp, speed))
3768 mac_mode |= MAC_MODE_LINK_POLARITY;
3769 else
3770 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3771 }
3772 } else {
3773 mac_mode = MAC_MODE_PORT_MODE_TBI;
3774 }
3775
3776 if (!tg3_flag(tp, 5750_PLUS))
3777 tw32(MAC_LED_CTRL, tp->led_ctrl);
3778
3779 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3780 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3781 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3782 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3783
3784 if (tg3_flag(tp, ENABLE_APE))
3785 mac_mode |= MAC_MODE_APE_TX_EN |
3786 MAC_MODE_APE_RX_EN |
3787 MAC_MODE_TDE_ENABLE;
3788
3789 tw32_f(MAC_MODE, mac_mode);
3790 udelay(100);
3791
3792 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3793 udelay(10);
3794 }
3795
3796 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3797 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3798 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3799 u32 base_val;
3800
3801 base_val = tp->pci_clock_ctrl;
3802 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3803 CLOCK_CTRL_TXCLK_DISABLE);
3804
3805 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3806 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3807 } else if (tg3_flag(tp, 5780_CLASS) ||
3808 tg3_flag(tp, CPMU_PRESENT) ||
3809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3810 /* do nothing */
3811 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3812 u32 newbits1, newbits2;
3813
3814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3816 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3817 CLOCK_CTRL_TXCLK_DISABLE |
3818 CLOCK_CTRL_ALTCLK);
3819 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3820 } else if (tg3_flag(tp, 5705_PLUS)) {
3821 newbits1 = CLOCK_CTRL_625_CORE;
3822 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3823 } else {
3824 newbits1 = CLOCK_CTRL_ALTCLK;
3825 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3826 }
3827
3828 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3829 40);
3830
3831 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3832 40);
3833
3834 if (!tg3_flag(tp, 5705_PLUS)) {
3835 u32 newbits3;
3836
3837 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3838 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3839 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3840 CLOCK_CTRL_TXCLK_DISABLE |
3841 CLOCK_CTRL_44MHZ_CORE);
3842 } else {
3843 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3844 }
3845
3846 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3847 tp->pci_clock_ctrl | newbits3, 40);
3848 }
3849 }
3850
3851 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3852 tg3_power_down_phy(tp, do_low_power);
3853
3854 tg3_frob_aux_power(tp, true);
3855
3856 /* Workaround for unstable PLL clock */
3857 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3858 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3859 u32 val = tr32(0x7d00);
3860
3861 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3862 tw32(0x7d00, val);
3863 if (!tg3_flag(tp, ENABLE_ASF)) {
3864 int err;
3865
3866 err = tg3_nvram_lock(tp);
3867 tg3_halt_cpu(tp, RX_CPU_BASE);
3868 if (!err)
3869 tg3_nvram_unlock(tp);
3870 }
3871 }
3872
3873 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3874
3875 return 0;
3876 }
3877
3878 static void tg3_power_down(struct tg3 *tp)
3879 {
3880 tg3_power_down_prepare(tp);
3881
3882 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3883 pci_set_power_state(tp->pdev, PCI_D3hot);
3884 }
3885
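/* Decode the speed/duplex field of the MII_TG3_AUX_STAT register.
 * FET-style PHYs encode these differently, so unrecognized codes
 * fall back to the FET bit layout before giving up with
 * SPEED_UNKNOWN/DUPLEX_UNKNOWN.
 */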
3886 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3887 {
3888 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3889 case MII_TG3_AUX_STAT_10HALF:
3890 *speed = SPEED_10;
3891 *duplex = DUPLEX_HALF;
3892 break;
3893
3894 case MII_TG3_AUX_STAT_10FULL:
3895 *speed = SPEED_10;
3896 *duplex = DUPLEX_FULL;
3897 break;
3898
3899 case MII_TG3_AUX_STAT_100HALF:
3900 *speed = SPEED_100;
3901 *duplex = DUPLEX_HALF;
3902 break;
3903
3904 case MII_TG3_AUX_STAT_100FULL:
3905 *speed = SPEED_100;
3906 *duplex = DUPLEX_FULL;
3907 break;
3908
3909 case MII_TG3_AUX_STAT_1000HALF:
3910 *speed = SPEED_1000;
3911 *duplex = DUPLEX_HALF;
3912 break;
3913
3914 case MII_TG3_AUX_STAT_1000FULL:
3915 *speed = SPEED_1000;
3916 *duplex = DUPLEX_FULL;
3917 break;
3918
3919 default:
3920 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3921 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3922 SPEED_10;
3923 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3924 DUPLEX_HALF;
3925 break;
3926 }
3927 *speed = SPEED_UNKNOWN;
3928 *duplex = DUPLEX_UNKNOWN;
3929 break;
3930 }
3931 }
3932
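/* Build and program the copper autoneg advertisements: the base
 * page (MII_ADVERTISE) including flow control, the 1000BASE-T
 * control word (MII_CTRL1000, with the 5701 A0/B0 forced-master
 * workaround) and, for EEE-capable PHYs, the clause-45 EEE
 * advertisement plus the per-ASIC DSP fixups.
 */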
3933 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3934 {
3935 int err = 0;
3936 u32 val, new_adv;
3937
3938 new_adv = ADVERTISE_CSMA;
3939 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3940 new_adv |= mii_advertise_flowctrl(flowctrl);
3941
3942 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3943 if (err)
3944 goto done;
3945
3946 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3947 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3948
3949 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3950 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3951 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3952
3953 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3954 if (err)
3955 goto done;
3956 }
3957
3958 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3959 goto done;
3960
3961 tw32(TG3_CPMU_EEE_MODE,
3962 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3963
3964 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3965 if (!err) {
3966 u32 err2;
3967
3968 val = 0;
3969 /* Advertise 100-BaseTX EEE ability */
3970 if (advertise & ADVERTISED_100baseT_Full)
3971 val |= MDIO_AN_EEE_ADV_100TX;
3972 /* Advertise 1000-BaseT EEE ability */
3973 if (advertise & ADVERTISED_1000baseT_Full)
3974 val |= MDIO_AN_EEE_ADV_1000T;
3975 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3976 if (err)
3977 val = 0;
3978
3979 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3980 case ASIC_REV_5717:
3981 case ASIC_REV_57765:
3982 case ASIC_REV_57766:
3983 case ASIC_REV_5719:
3984 /* If we advertised any EEE abilities above... */
3985 if (val)
3986 val = MII_TG3_DSP_TAP26_ALNOKO |
3987 MII_TG3_DSP_TAP26_RMRXSTO |
3988 MII_TG3_DSP_TAP26_OPCSINPT;
3989 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3990 /* Fall through */
3991 case ASIC_REV_5720:
3992 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3993 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3994 MII_TG3_DSP_CH34TP2_HIBW01);
3995 }
3996
3997 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3998 if (!err)
3999 err = err2;
4000 }
4001
4002 done:
4003 return err;
4004 }
4005
4006 static void tg3_phy_copper_begin(struct tg3 *tp)
4007 {
4008 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4009 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4010 u32 adv, fc;
4011
4012 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4013 adv = ADVERTISED_10baseT_Half |
4014 ADVERTISED_10baseT_Full;
4015 if (tg3_flag(tp, WOL_SPEED_100MB))
4016 adv |= ADVERTISED_100baseT_Half |
4017 ADVERTISED_100baseT_Full;
4018
4019 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4020 } else {
4021 adv = tp->link_config.advertising;
4022 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4023 adv &= ~(ADVERTISED_1000baseT_Half |
4024 ADVERTISED_1000baseT_Full);
4025
4026 fc = tp->link_config.flowctrl;
4027 }
4028
4029 tg3_phy_autoneg_cfg(tp, adv, fc);
4030
4031 tg3_writephy(tp, MII_BMCR,
4032 BMCR_ANENABLE | BMCR_ANRESTART);
4033 } else {
4034 int i;
4035 u32 bmcr, orig_bmcr;
4036
4037 tp->link_config.active_speed = tp->link_config.speed;
4038 tp->link_config.active_duplex = tp->link_config.duplex;
4039
4040 bmcr = 0;
4041 switch (tp->link_config.speed) {
4042 default:
4043 case SPEED_10:
4044 break;
4045
4046 case SPEED_100:
4047 bmcr |= BMCR_SPEED100;
4048 break;
4049
4050 case SPEED_1000:
4051 bmcr |= BMCR_SPEED1000;
4052 break;
4053 }
4054
4055 if (tp->link_config.duplex == DUPLEX_FULL)
4056 bmcr |= BMCR_FULLDPLX;
4057
4058 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4059 (bmcr != orig_bmcr)) {
4060 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4061 for (i = 0; i < 1500; i++) {
4062 u32 tmp;
4063
4064 udelay(10);
4065 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4066 tg3_readphy(tp, MII_BMSR, &tmp))
4067 continue;
4068 if (!(tmp & BMSR_LSTATUS)) {
4069 udelay(40);
4070 break;
4071 }
4072 }
4073 tg3_writephy(tp, MII_BMCR, bmcr);
4074 udelay(40);
4075 }
4076 }
4077 }
4078
4079 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4080 {
4081 int err;
4082
4083 /* Turn off tap power management. */
4084 /* Set Extended packet length bit */
4085 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4086
4087 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4088 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4089 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4090 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4091 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4092
4093 udelay(40);
4094
4095 return err;
4096 }
4097
4098 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4099 {
4100 u32 advmsk, tgtadv, advertising;
4101
4102 advertising = tp->link_config.advertising;
4103 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4104
4105 advmsk = ADVERTISE_ALL;
4106 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4107 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4108 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4109 }
4110
4111 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4112 return false;
4113
4114 if ((*lcladv & advmsk) != tgtadv)
4115 return false;
4116
4117 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4118 u32 tg3_ctrl;
4119
4120 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4121
4122 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4123 return false;
4124
4125 if (tgtadv &&
4126 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4127 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4128 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4129 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4130 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4131 } else {
4132 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4133 }
4134
4135 if (tg3_ctrl != tgtadv)
4136 return false;
4137 }
4138
4139 return true;
4140 }
4141
4142 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4143 {
4144 u32 lpeth = 0;
4145
4146 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4147 u32 val;
4148
4149 if (tg3_readphy(tp, MII_STAT1000, &val))
4150 return false;
4151
4152 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4153 }
4154
4155 if (tg3_readphy(tp, MII_LPA, rmtadv))
4156 return false;
4157
4158 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4159 tp->link_config.rmt_adv = lpeth;
4160
4161 return true;
4162 }
4163
4164 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4165 {
4166 int current_link_up;
4167 u32 bmsr, val;
4168 u32 lcl_adv, rmt_adv;
4169 u16 current_speed;
4170 u8 current_duplex;
4171 int i, err;
4172
4173 tw32(MAC_EVENT, 0);
4174
4175 tw32_f(MAC_STATUS,
4176 (MAC_STATUS_SYNC_CHANGED |
4177 MAC_STATUS_CFG_CHANGED |
4178 MAC_STATUS_MI_COMPLETION |
4179 MAC_STATUS_LNKSTATE_CHANGED));
4180 udelay(40);
4181
4182 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4183 tw32_f(MAC_MI_MODE,
4184 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4185 udelay(80);
4186 }
4187
4188 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4189
4190 /* Some third-party PHYs need to be reset on link going
4191 * down.
4192 */
4193 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4194 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4195 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4196 netif_carrier_ok(tp->dev)) {
4197 tg3_readphy(tp, MII_BMSR, &bmsr);
4198 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4199 !(bmsr & BMSR_LSTATUS))
4200 force_reset = 1;
4201 }
4202 if (force_reset)
4203 tg3_phy_reset(tp);
4204
4205 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4206 tg3_readphy(tp, MII_BMSR, &bmsr);
4207 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4208 !tg3_flag(tp, INIT_COMPLETE))
4209 bmsr = 0;
4210
4211 if (!(bmsr & BMSR_LSTATUS)) {
4212 err = tg3_init_5401phy_dsp(tp);
4213 if (err)
4214 return err;
4215
4216 tg3_readphy(tp, MII_BMSR, &bmsr);
4217 for (i = 0; i < 1000; i++) {
4218 udelay(10);
4219 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4220 (bmsr & BMSR_LSTATUS)) {
4221 udelay(40);
4222 break;
4223 }
4224 }
4225
4226 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4227 TG3_PHY_REV_BCM5401_B0 &&
4228 !(bmsr & BMSR_LSTATUS) &&
4229 tp->link_config.active_speed == SPEED_1000) {
4230 err = tg3_phy_reset(tp);
4231 if (!err)
4232 err = tg3_init_5401phy_dsp(tp);
4233 if (err)
4234 return err;
4235 }
4236 }
4237 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4238 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4239 /* 5701 {A0,B0} CRC bug workaround */
4240 tg3_writephy(tp, 0x15, 0x0a75);
4241 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4242 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4243 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4244 }
4245
4246 /* Clear pending interrupts... */
4247 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4248 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4249
4250 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4251 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4252 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4253 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4254
4255 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4257 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4258 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4259 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4260 else
4261 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4262 }
4263
4264 current_link_up = 0;
4265 current_speed = SPEED_UNKNOWN;
4266 current_duplex = DUPLEX_UNKNOWN;
4267 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4268 tp->link_config.rmt_adv = 0;
4269
4270 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4271 err = tg3_phy_auxctl_read(tp,
4272 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4273 &val);
4274 if (!err && !(val & (1 << 10))) {
4275 tg3_phy_auxctl_write(tp,
4276 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4277 val | (1 << 10));
4278 goto relink;
4279 }
4280 }
4281
4282 bmsr = 0;
4283 for (i = 0; i < 100; i++) {
4284 tg3_readphy(tp, MII_BMSR, &bmsr);
4285 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4286 (bmsr & BMSR_LSTATUS))
4287 break;
4288 udelay(40);
4289 }
4290
4291 if (bmsr & BMSR_LSTATUS) {
4292 u32 aux_stat, bmcr;
4293
4294 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4295 for (i = 0; i < 2000; i++) {
4296 udelay(10);
4297 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4298 aux_stat)
4299 break;
4300 }
4301
4302 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4303 &current_speed,
4304 &current_duplex);
4305
4306 bmcr = 0;
4307 for (i = 0; i < 200; i++) {
4308 tg3_readphy(tp, MII_BMCR, &bmcr);
4309 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4310 continue;
4311 if (bmcr && bmcr != 0x7fff)
4312 break;
4313 udelay(10);
4314 }
4315
4316 lcl_adv = 0;
4317 rmt_adv = 0;
4318
4319 tp->link_config.active_speed = current_speed;
4320 tp->link_config.active_duplex = current_duplex;
4321
4322 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4323 if ((bmcr & BMCR_ANENABLE) &&
4324 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4325 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4326 current_link_up = 1;
4327 } else {
4328 if (!(bmcr & BMCR_ANENABLE) &&
4329 tp->link_config.speed == current_speed &&
4330 tp->link_config.duplex == current_duplex &&
4331 tp->link_config.flowctrl ==
4332 tp->link_config.active_flowctrl) {
4333 current_link_up = 1;
4334 }
4335 }
4336
4337 if (current_link_up == 1 &&
4338 tp->link_config.active_duplex == DUPLEX_FULL) {
4339 u32 reg, bit;
4340
4341 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4342 reg = MII_TG3_FET_GEN_STAT;
4343 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4344 } else {
4345 reg = MII_TG3_EXT_STAT;
4346 bit = MII_TG3_EXT_STAT_MDIX;
4347 }
4348
4349 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4350 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4351
4352 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4353 }
4354 }
4355
4356 relink:
4357 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4358 tg3_phy_copper_begin(tp);
4359
4360 tg3_readphy(tp, MII_BMSR, &bmsr);
4361 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4362 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4363 current_link_up = 1;
4364 }
4365
4366 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4367 if (current_link_up == 1) {
4368 if (tp->link_config.active_speed == SPEED_100 ||
4369 tp->link_config.active_speed == SPEED_10)
4370 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4371 else
4372 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4373 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4374 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4375 else
4376 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4377
4378 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4379 if (tp->link_config.active_duplex == DUPLEX_HALF)
4380 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4381
4382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4383 if (current_link_up == 1 &&
4384 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4385 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4386 else
4387 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4388 }
4389
4390 /* ??? Without this setting Netgear GA302T PHY does not
4391 * ??? send/receive packets...
4392 */
4393 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4394 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4395 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4396 tw32_f(MAC_MI_MODE, tp->mi_mode);
4397 udelay(80);
4398 }
4399
4400 tw32_f(MAC_MODE, tp->mac_mode);
4401 udelay(40);
4402
4403 tg3_phy_eee_adjust(tp, current_link_up);
4404
4405 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4406 /* Polled via timer. */
4407 tw32_f(MAC_EVENT, 0);
4408 } else {
4409 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4410 }
4411 udelay(40);
4412
4413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4414 current_link_up == 1 &&
4415 tp->link_config.active_speed == SPEED_1000 &&
4416 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4417 udelay(120);
4418 tw32_f(MAC_STATUS,
4419 (MAC_STATUS_SYNC_CHANGED |
4420 MAC_STATUS_CFG_CHANGED));
4421 udelay(40);
4422 tg3_write_mem(tp,
4423 NIC_SRAM_FIRMWARE_MBOX,
4424 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4425 }
4426
4427 /* Prevent send BD corruption. */
4428 if (tg3_flag(tp, CLKREQ_BUG)) {
4429 if (tp->link_config.active_speed == SPEED_100 ||
4430 tp->link_config.active_speed == SPEED_10)
4431 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4432 PCI_EXP_LNKCTL_CLKREQ_EN);
4433 else
4434 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4435 PCI_EXP_LNKCTL_CLKREQ_EN);
4436 }
4437
4438 if (current_link_up != netif_carrier_ok(tp->dev)) {
4439 if (current_link_up)
4440 netif_carrier_on(tp->dev);
4441 else
4442 netif_carrier_off(tp->dev);
4443 tg3_link_report(tp);
4444 }
4445
4446 return 0;
4447 }
4448
4449 struct tg3_fiber_aneginfo {
4450 int state;
4451 #define ANEG_STATE_UNKNOWN 0
4452 #define ANEG_STATE_AN_ENABLE 1
4453 #define ANEG_STATE_RESTART_INIT 2
4454 #define ANEG_STATE_RESTART 3
4455 #define ANEG_STATE_DISABLE_LINK_OK 4
4456 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4457 #define ANEG_STATE_ABILITY_DETECT 6
4458 #define ANEG_STATE_ACK_DETECT_INIT 7
4459 #define ANEG_STATE_ACK_DETECT 8
4460 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4461 #define ANEG_STATE_COMPLETE_ACK 10
4462 #define ANEG_STATE_IDLE_DETECT_INIT 11
4463 #define ANEG_STATE_IDLE_DETECT 12
4464 #define ANEG_STATE_LINK_OK 13
4465 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4466 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4467
4468 u32 flags;
4469 #define MR_AN_ENABLE 0x00000001
4470 #define MR_RESTART_AN 0x00000002
4471 #define MR_AN_COMPLETE 0x00000004
4472 #define MR_PAGE_RX 0x00000008
4473 #define MR_NP_LOADED 0x00000010
4474 #define MR_TOGGLE_TX 0x00000020
4475 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4476 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4477 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4478 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4479 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4480 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4481 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4482 #define MR_TOGGLE_RX 0x00002000
4483 #define MR_NP_RX 0x00004000
4484
4485 #define MR_LINK_OK 0x80000000
4486
4487 unsigned long link_time, cur_time;
4488
4489 u32 ability_match_cfg;
4490 int ability_match_count;
4491
4492 char ability_match, idle_match, ack_match;
4493
4494 u32 txconfig, rxconfig;
4495 #define ANEG_CFG_NP 0x00000080
4496 #define ANEG_CFG_ACK 0x00000040
4497 #define ANEG_CFG_RF2 0x00000020
4498 #define ANEG_CFG_RF1 0x00000010
4499 #define ANEG_CFG_PS2 0x00000001
4500 #define ANEG_CFG_PS1 0x00008000
4501 #define ANEG_CFG_HD 0x00004000
4502 #define ANEG_CFG_FD 0x00002000
4503 #define ANEG_CFG_INVAL 0x00001f06
4504
4505 };
4506 #define ANEG_OK 0
4507 #define ANEG_DONE 1
4508 #define ANEG_TIMER_ENAB 2
4509 #define ANEG_FAILED -1
4510
4511 #define ANEG_STATE_SETTLE_TIME 10000
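
/* The settle time above is measured in state-machine ticks, not in a
 * fixed time unit: tg3_fiber_aneg_smachine() bumps ap->cur_time once
 * per invocation, and fiber_autoneg() below ticks the machine with one
 * udelay(1) per loop iteration, so 10000 ticks works out to roughly
 * 10 ms of link settle time between state transitions.
 */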
4512
4513 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4514 struct tg3_fiber_aneginfo *ap)
4515 {
4516 u16 flowctrl;
4517 unsigned long delta;
4518 u32 rx_cfg_reg;
4519 int ret;
4520
4521 if (ap->state == ANEG_STATE_UNKNOWN) {
4522 ap->rxconfig = 0;
4523 ap->link_time = 0;
4524 ap->cur_time = 0;
4525 ap->ability_match_cfg = 0;
4526 ap->ability_match_count = 0;
4527 ap->ability_match = 0;
4528 ap->idle_match = 0;
4529 ap->ack_match = 0;
4530 }
4531 ap->cur_time++;
4532
4533 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4534 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4535
4536 if (rx_cfg_reg != ap->ability_match_cfg) {
4537 ap->ability_match_cfg = rx_cfg_reg;
4538 ap->ability_match = 0;
4539 ap->ability_match_count = 0;
4540 } else {
4541 if (++ap->ability_match_count > 1) {
4542 ap->ability_match = 1;
4543 ap->ability_match_cfg = rx_cfg_reg;
4544 }
4545 }
4546 if (rx_cfg_reg & ANEG_CFG_ACK)
4547 ap->ack_match = 1;
4548 else
4549 ap->ack_match = 0;
4550
4551 ap->idle_match = 0;
4552 } else {
4553 ap->idle_match = 1;
4554 ap->ability_match_cfg = 0;
4555 ap->ability_match_count = 0;
4556 ap->ability_match = 0;
4557 ap->ack_match = 0;
4558
4559 rx_cfg_reg = 0;
4560 }
4561
4562 ap->rxconfig = rx_cfg_reg;
4563 ret = ANEG_OK;
4564
4565 switch (ap->state) {
4566 case ANEG_STATE_UNKNOWN:
4567 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4568 ap->state = ANEG_STATE_AN_ENABLE;
4569
4570 /* fallthru */
4571 case ANEG_STATE_AN_ENABLE:
4572 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4573 if (ap->flags & MR_AN_ENABLE) {
4574 ap->link_time = 0;
4575 ap->cur_time = 0;
4576 ap->ability_match_cfg = 0;
4577 ap->ability_match_count = 0;
4578 ap->ability_match = 0;
4579 ap->idle_match = 0;
4580 ap->ack_match = 0;
4581
4582 ap->state = ANEG_STATE_RESTART_INIT;
4583 } else {
4584 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4585 }
4586 break;
4587
4588 case ANEG_STATE_RESTART_INIT:
4589 ap->link_time = ap->cur_time;
4590 ap->flags &= ~(MR_NP_LOADED);
4591 ap->txconfig = 0;
4592 tw32(MAC_TX_AUTO_NEG, 0);
4593 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4594 tw32_f(MAC_MODE, tp->mac_mode);
4595 udelay(40);
4596
4597 ret = ANEG_TIMER_ENAB;
4598 ap->state = ANEG_STATE_RESTART;
4599
4600 /* fallthru */
4601 case ANEG_STATE_RESTART:
4602 delta = ap->cur_time - ap->link_time;
4603 if (delta > ANEG_STATE_SETTLE_TIME)
4604 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4605 else
4606 ret = ANEG_TIMER_ENAB;
4607 break;
4608
4609 case ANEG_STATE_DISABLE_LINK_OK:
4610 ret = ANEG_DONE;
4611 break;
4612
4613 case ANEG_STATE_ABILITY_DETECT_INIT:
4614 ap->flags &= ~(MR_TOGGLE_TX);
4615 ap->txconfig = ANEG_CFG_FD;
4616 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4617 if (flowctrl & ADVERTISE_1000XPAUSE)
4618 ap->txconfig |= ANEG_CFG_PS1;
4619 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4620 ap->txconfig |= ANEG_CFG_PS2;
4621 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4622 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4623 tw32_f(MAC_MODE, tp->mac_mode);
4624 udelay(40);
4625
4626 ap->state = ANEG_STATE_ABILITY_DETECT;
4627 break;
4628
4629 case ANEG_STATE_ABILITY_DETECT:
4630 if (ap->ability_match != 0 && ap->rxconfig != 0)
4631 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4632 break;
4633
4634 case ANEG_STATE_ACK_DETECT_INIT:
4635 ap->txconfig |= ANEG_CFG_ACK;
4636 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4637 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4638 tw32_f(MAC_MODE, tp->mac_mode);
4639 udelay(40);
4640
4641 ap->state = ANEG_STATE_ACK_DETECT;
4642
4643 /* fallthru */
4644 case ANEG_STATE_ACK_DETECT:
4645 if (ap->ack_match != 0) {
4646 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4647 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4648 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4649 } else {
4650 ap->state = ANEG_STATE_AN_ENABLE;
4651 }
4652 } else if (ap->ability_match != 0 &&
4653 ap->rxconfig == 0) {
4654 ap->state = ANEG_STATE_AN_ENABLE;
4655 }
4656 break;
4657
4658 case ANEG_STATE_COMPLETE_ACK_INIT:
4659 if (ap->rxconfig & ANEG_CFG_INVAL) {
4660 ret = ANEG_FAILED;
4661 break;
4662 }
4663 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4664 MR_LP_ADV_HALF_DUPLEX |
4665 MR_LP_ADV_SYM_PAUSE |
4666 MR_LP_ADV_ASYM_PAUSE |
4667 MR_LP_ADV_REMOTE_FAULT1 |
4668 MR_LP_ADV_REMOTE_FAULT2 |
4669 MR_LP_ADV_NEXT_PAGE |
4670 MR_TOGGLE_RX |
4671 MR_NP_RX);
4672 if (ap->rxconfig & ANEG_CFG_FD)
4673 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4674 if (ap->rxconfig & ANEG_CFG_HD)
4675 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4676 if (ap->rxconfig & ANEG_CFG_PS1)
4677 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4678 if (ap->rxconfig & ANEG_CFG_PS2)
4679 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4680 if (ap->rxconfig & ANEG_CFG_RF1)
4681 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4682 if (ap->rxconfig & ANEG_CFG_RF2)
4683 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4684 if (ap->rxconfig & ANEG_CFG_NP)
4685 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4686
4687 ap->link_time = ap->cur_time;
4688
4689 ap->flags ^= (MR_TOGGLE_TX);
4690 if (ap->rxconfig & 0x0008)
4691 ap->flags |= MR_TOGGLE_RX;
4692 if (ap->rxconfig & ANEG_CFG_NP)
4693 ap->flags |= MR_NP_RX;
4694 ap->flags |= MR_PAGE_RX;
4695
4696 ap->state = ANEG_STATE_COMPLETE_ACK;
4697 ret = ANEG_TIMER_ENAB;
4698 break;
4699
4700 case ANEG_STATE_COMPLETE_ACK:
4701 if (ap->ability_match != 0 &&
4702 ap->rxconfig == 0) {
4703 ap->state = ANEG_STATE_AN_ENABLE;
4704 break;
4705 }
4706 delta = ap->cur_time - ap->link_time;
4707 if (delta > ANEG_STATE_SETTLE_TIME) {
4708 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4709 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4710 } else {
4711 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4712 !(ap->flags & MR_NP_RX)) {
4713 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4714 } else {
4715 ret = ANEG_FAILED;
4716 }
4717 }
4718 }
4719 break;
4720
4721 case ANEG_STATE_IDLE_DETECT_INIT:
4722 ap->link_time = ap->cur_time;
4723 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4724 tw32_f(MAC_MODE, tp->mac_mode);
4725 udelay(40);
4726
4727 ap->state = ANEG_STATE_IDLE_DETECT;
4728 ret = ANEG_TIMER_ENAB;
4729 break;
4730
4731 case ANEG_STATE_IDLE_DETECT:
4732 if (ap->ability_match != 0 &&
4733 ap->rxconfig == 0) {
4734 ap->state = ANEG_STATE_AN_ENABLE;
4735 break;
4736 }
4737 delta = ap->cur_time - ap->link_time;
4738 if (delta > ANEG_STATE_SETTLE_TIME) {
4739 /* XXX another gem from the Broadcom driver :( */
4740 ap->state = ANEG_STATE_LINK_OK;
4741 }
4742 break;
4743
4744 case ANEG_STATE_LINK_OK:
4745 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4746 ret = ANEG_DONE;
4747 break;
4748
4749 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4750 /* ??? unimplemented */
4751 break;
4752
4753 case ANEG_STATE_NEXT_PAGE_WAIT:
4754 /* ??? unimplemented */
4755 break;
4756
4757 default:
4758 ret = ANEG_FAILED;
4759 break;
4760 }
4761
4762 return ret;
4763 }
4764
4765 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4766 {
4767 int res = 0;
4768 struct tg3_fiber_aneginfo aninfo;
4769 int status = ANEG_FAILED;
4770 unsigned int tick;
4771 u32 tmp;
4772
4773 tw32_f(MAC_TX_AUTO_NEG, 0);
4774
4775 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4776 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4777 udelay(40);
4778
4779 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4780 udelay(40);
4781
4782 memset(&aninfo, 0, sizeof(aninfo));
4783 aninfo.flags |= MR_AN_ENABLE;
4784 aninfo.state = ANEG_STATE_UNKNOWN;
4785 aninfo.cur_time = 0;
4786 tick = 0;
4787 while (++tick < 195000) {
4788 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4789 if (status == ANEG_DONE || status == ANEG_FAILED)
4790 break;
4791
4792 udelay(1);
4793 }
4794
4795 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4796 tw32_f(MAC_MODE, tp->mac_mode);
4797 udelay(40);
4798
4799 *txflags = aninfo.txconfig;
4800 *rxflags = aninfo.flags;
4801
4802 if (status == ANEG_DONE &&
4803 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4804 MR_LP_ADV_FULL_DUPLEX)))
4805 res = 1;
4806
4807 return res;
4808 }
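
/* The 195000-iteration loop above (one udelay(1) per tick) gives
 * software autoneg a total budget of roughly 195 ms before the result
 * is treated as a failure, unless the state machine reports ANEG_DONE
 * or ANEG_FAILED earlier.
 */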
4809
4810 static void tg3_init_bcm8002(struct tg3 *tp)
4811 {
4812 u32 mac_status = tr32(MAC_STATUS);
4813 int i;
4814
4815 /* Reset when initializing for the first time, or when we have a link. */
4816 if (tg3_flag(tp, INIT_COMPLETE) &&
4817 !(mac_status & MAC_STATUS_PCS_SYNCED))
4818 return;
4819
4820 /* Set PLL lock range. */
4821 tg3_writephy(tp, 0x16, 0x8007);
4822
4823 /* SW reset */
4824 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4825
4826 /* Wait for reset to complete. */
4827 /* XXX schedule_timeout() ... */
4828 for (i = 0; i < 500; i++)
4829 udelay(10);
4830
4831 /* Config mode; select PMA/Ch 1 regs. */
4832 tg3_writephy(tp, 0x10, 0x8411);
4833
4834 /* Enable auto-lock and comdet, select txclk for tx. */
4835 tg3_writephy(tp, 0x11, 0x0a10);
4836
4837 tg3_writephy(tp, 0x18, 0x00a0);
4838 tg3_writephy(tp, 0x16, 0x41ff);
4839
4840 /* Assert and deassert POR. */
4841 tg3_writephy(tp, 0x13, 0x0400);
4842 udelay(40);
4843 tg3_writephy(tp, 0x13, 0x0000);
4844
4845 tg3_writephy(tp, 0x11, 0x0a50);
4846 udelay(40);
4847 tg3_writephy(tp, 0x11, 0x0a10);
4848
4849 /* Wait for signal to stabilize */
4850 /* XXX schedule_timeout() ... */
4851 for (i = 0; i < 15000; i++)
4852 udelay(10);
4853
4854 /* Deselect the channel register so we can read the PHYID
4855 * later.
4856 */
4857 tg3_writephy(tp, 0x10, 0x8011);
4858 }
4859
4860 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4861 {
4862 u16 flowctrl;
4863 u32 sg_dig_ctrl, sg_dig_status;
4864 u32 serdes_cfg, expected_sg_dig_ctrl;
4865 int workaround, port_a;
4866 int current_link_up;
4867
4868 serdes_cfg = 0;
4869 expected_sg_dig_ctrl = 0;
4870 workaround = 0;
4871 port_a = 1;
4872 current_link_up = 0;
4873
4874 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4875 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4876 workaround = 1;
4877 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4878 port_a = 0;
4879
4880 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4881 /* preserve bits 20-23 for voltage regulator */
4882 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4883 }
4884
4885 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4886
4887 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4888 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4889 if (workaround) {
4890 u32 val = serdes_cfg;
4891
4892 if (port_a)
4893 val |= 0xc010000;
4894 else
4895 val |= 0x4010000;
4896 tw32_f(MAC_SERDES_CFG, val);
4897 }
4898
4899 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4900 }
4901 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4902 tg3_setup_flow_control(tp, 0, 0);
4903 current_link_up = 1;
4904 }
4905 goto out;
4906 }
4907
4908 /* Want auto-negotiation. */
4909 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4910
4911 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4912 if (flowctrl & ADVERTISE_1000XPAUSE)
4913 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4914 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4915 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4916
4917 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4918 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4919 tp->serdes_counter &&
4920 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4921 MAC_STATUS_RCVD_CFG)) ==
4922 MAC_STATUS_PCS_SYNCED)) {
4923 tp->serdes_counter--;
4924 current_link_up = 1;
4925 goto out;
4926 }
4927 restart_autoneg:
4928 if (workaround)
4929 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4930 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4931 udelay(5);
4932 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4933
4934 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4935 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4936 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4937 MAC_STATUS_SIGNAL_DET)) {
4938 sg_dig_status = tr32(SG_DIG_STATUS);
4939 mac_status = tr32(MAC_STATUS);
4940
4941 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4942 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4943 u32 local_adv = 0, remote_adv = 0;
4944
4945 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4946 local_adv |= ADVERTISE_1000XPAUSE;
4947 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4948 local_adv |= ADVERTISE_1000XPSE_ASYM;
4949
4950 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4951 remote_adv |= LPA_1000XPAUSE;
4952 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4953 remote_adv |= LPA_1000XPAUSE_ASYM;
4954
4955 tp->link_config.rmt_adv =
4956 mii_adv_to_ethtool_adv_x(remote_adv);
4957
4958 tg3_setup_flow_control(tp, local_adv, remote_adv);
4959 current_link_up = 1;
4960 tp->serdes_counter = 0;
4961 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4962 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4963 if (tp->serdes_counter)
4964 tp->serdes_counter--;
4965 else {
4966 if (workaround) {
4967 u32 val = serdes_cfg;
4968
4969 if (port_a)
4970 val |= 0xc010000;
4971 else
4972 val |= 0x4010000;
4973
4974 tw32_f(MAC_SERDES_CFG, val);
4975 }
4976
4977 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4978 udelay(40);
4979
4980 /* Link parallel detection - link is up only if
4981  * we have PCS_SYNC and are not receiving
4982  * config code words. */
4983 mac_status = tr32(MAC_STATUS);
4984 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4985 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4986 tg3_setup_flow_control(tp, 0, 0);
4987 current_link_up = 1;
4988 tp->phy_flags |=
4989 TG3_PHYFLG_PARALLEL_DETECT;
4990 tp->serdes_counter =
4991 SERDES_PARALLEL_DET_TIMEOUT;
4992 } else
4993 goto restart_autoneg;
4994 }
4995 }
4996 } else {
4997 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4998 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4999 }
5000
5001 out:
5002 return current_link_up;
5003 }
5004
5005 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5006 {
5007 int current_link_up = 0;
5008
5009 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5010 goto out;
5011
5012 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5013 u32 txflags, rxflags;
5014 int i;
5015
5016 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5017 u32 local_adv = 0, remote_adv = 0;
5018
5019 if (txflags & ANEG_CFG_PS1)
5020 local_adv |= ADVERTISE_1000XPAUSE;
5021 if (txflags & ANEG_CFG_PS2)
5022 local_adv |= ADVERTISE_1000XPSE_ASYM;
5023
5024 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5025 remote_adv |= LPA_1000XPAUSE;
5026 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5027 remote_adv |= LPA_1000XPAUSE_ASYM;
5028
5029 tp->link_config.rmt_adv =
5030 mii_adv_to_ethtool_adv_x(remote_adv);
5031
5032 tg3_setup_flow_control(tp, local_adv, remote_adv);
5033
5034 current_link_up = 1;
5035 }
5036 for (i = 0; i < 30; i++) {
5037 udelay(20);
5038 tw32_f(MAC_STATUS,
5039 (MAC_STATUS_SYNC_CHANGED |
5040 MAC_STATUS_CFG_CHANGED));
5041 udelay(40);
5042 if ((tr32(MAC_STATUS) &
5043 (MAC_STATUS_SYNC_CHANGED |
5044 MAC_STATUS_CFG_CHANGED)) == 0)
5045 break;
5046 }
5047
5048 mac_status = tr32(MAC_STATUS);
5049 if (current_link_up == 0 &&
5050 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5051 !(mac_status & MAC_STATUS_RCVD_CFG))
5052 current_link_up = 1;
5053 } else {
5054 tg3_setup_flow_control(tp, 0, 0);
5055
5056 /* Forcing 1000FD link up. */
5057 current_link_up = 1;
5058
5059 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5060 udelay(40);
5061
5062 tw32_f(MAC_MODE, tp->mac_mode);
5063 udelay(40);
5064 }
5065
5066 out:
5067 return current_link_up;
5068 }
5069
5070 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5071 {
5072 u32 orig_pause_cfg;
5073 u16 orig_active_speed;
5074 u8 orig_active_duplex;
5075 u32 mac_status;
5076 int current_link_up;
5077 int i;
5078
5079 orig_pause_cfg = tp->link_config.active_flowctrl;
5080 orig_active_speed = tp->link_config.active_speed;
5081 orig_active_duplex = tp->link_config.active_duplex;
5082
5083 if (!tg3_flag(tp, HW_AUTONEG) &&
5084 netif_carrier_ok(tp->dev) &&
5085 tg3_flag(tp, INIT_COMPLETE)) {
5086 mac_status = tr32(MAC_STATUS);
5087 mac_status &= (MAC_STATUS_PCS_SYNCED |
5088 MAC_STATUS_SIGNAL_DET |
5089 MAC_STATUS_CFG_CHANGED |
5090 MAC_STATUS_RCVD_CFG);
5091 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5092 MAC_STATUS_SIGNAL_DET)) {
5093 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5094 MAC_STATUS_CFG_CHANGED));
5095 return 0;
5096 }
5097 }
5098
5099 tw32_f(MAC_TX_AUTO_NEG, 0);
5100
5101 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5102 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5103 tw32_f(MAC_MODE, tp->mac_mode);
5104 udelay(40);
5105
5106 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5107 tg3_init_bcm8002(tp);
5108
5109 /* Enable link change event even when serdes polling. */
5110 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5111 udelay(40);
5112
5113 current_link_up = 0;
5114 tp->link_config.rmt_adv = 0;
5115 mac_status = tr32(MAC_STATUS);
5116
5117 if (tg3_flag(tp, HW_AUTONEG))
5118 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5119 else
5120 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5121
5122 tp->napi[0].hw_status->status =
5123 (SD_STATUS_UPDATED |
5124 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5125
5126 for (i = 0; i < 100; i++) {
5127 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5128 MAC_STATUS_CFG_CHANGED));
5129 udelay(5);
5130 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5131 MAC_STATUS_CFG_CHANGED |
5132 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5133 break;
5134 }
5135
5136 mac_status = tr32(MAC_STATUS);
5137 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5138 current_link_up = 0;
5139 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5140 tp->serdes_counter == 0) {
5141 tw32_f(MAC_MODE, (tp->mac_mode |
5142 MAC_MODE_SEND_CONFIGS));
5143 udelay(1);
5144 tw32_f(MAC_MODE, tp->mac_mode);
5145 }
5146 }
5147
5148 if (current_link_up == 1) {
5149 tp->link_config.active_speed = SPEED_1000;
5150 tp->link_config.active_duplex = DUPLEX_FULL;
5151 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5152 LED_CTRL_LNKLED_OVERRIDE |
5153 LED_CTRL_1000MBPS_ON));
5154 } else {
5155 tp->link_config.active_speed = SPEED_UNKNOWN;
5156 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5157 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5158 LED_CTRL_LNKLED_OVERRIDE |
5159 LED_CTRL_TRAFFIC_OVERRIDE));
5160 }
5161
5162 if (current_link_up != netif_carrier_ok(tp->dev)) {
5163 if (current_link_up)
5164 netif_carrier_on(tp->dev);
5165 else
5166 netif_carrier_off(tp->dev);
5167 tg3_link_report(tp);
5168 } else {
5169 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5170 if (orig_pause_cfg != now_pause_cfg ||
5171 orig_active_speed != tp->link_config.active_speed ||
5172 orig_active_duplex != tp->link_config.active_duplex)
5173 tg3_link_report(tp);
5174 }
5175
5176 return 0;
5177 }
5178
5179 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5180 {
5181 int current_link_up, err = 0;
5182 u32 bmsr, bmcr;
5183 u16 current_speed;
5184 u8 current_duplex;
5185 u32 local_adv, remote_adv;
5186
5187 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5188 tw32_f(MAC_MODE, tp->mac_mode);
5189 udelay(40);
5190
5191 tw32(MAC_EVENT, 0);
5192
5193 tw32_f(MAC_STATUS,
5194 (MAC_STATUS_SYNC_CHANGED |
5195 MAC_STATUS_CFG_CHANGED |
5196 MAC_STATUS_MI_COMPLETION |
5197 MAC_STATUS_LNKSTATE_CHANGED));
5198 udelay(40);
5199
5200 if (force_reset)
5201 tg3_phy_reset(tp);
5202
5203 current_link_up = 0;
5204 current_speed = SPEED_UNKNOWN;
5205 current_duplex = DUPLEX_UNKNOWN;
5206 tp->link_config.rmt_adv = 0;
5207
5208 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5209 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5211 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5212 bmsr |= BMSR_LSTATUS;
5213 else
5214 bmsr &= ~BMSR_LSTATUS;
5215 }
5216
5217 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5218
5219 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5220 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5221 /* do nothing, just check for link up at the end */
5222 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5223 u32 adv, newadv;
5224
5225 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5226 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5227 ADVERTISE_1000XPAUSE |
5228 ADVERTISE_1000XPSE_ASYM |
5229 ADVERTISE_SLCT);
5230
5231 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5232 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5233
5234 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5235 tg3_writephy(tp, MII_ADVERTISE, newadv);
5236 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5237 tg3_writephy(tp, MII_BMCR, bmcr);
5238
5239 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5240 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5241 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5242
5243 return err;
5244 }
5245 } else {
5246 u32 new_bmcr;
5247
5248 bmcr &= ~BMCR_SPEED1000;
5249 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5250
5251 if (tp->link_config.duplex == DUPLEX_FULL)
5252 new_bmcr |= BMCR_FULLDPLX;
5253
5254 if (new_bmcr != bmcr) {
5255 /* BMCR_SPEED1000 is a reserved bit that needs
5256 * to be set on write.
5257 */
5258 new_bmcr |= BMCR_SPEED1000;
5259
5260 /* Force a linkdown */
5261 if (netif_carrier_ok(tp->dev)) {
5262 u32 adv;
5263
5264 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5265 adv &= ~(ADVERTISE_1000XFULL |
5266 ADVERTISE_1000XHALF |
5267 ADVERTISE_SLCT);
5268 tg3_writephy(tp, MII_ADVERTISE, adv);
5269 tg3_writephy(tp, MII_BMCR, bmcr |
5270 BMCR_ANRESTART |
5271 BMCR_ANENABLE);
5272 udelay(10);
5273 netif_carrier_off(tp->dev);
5274 }
5275 tg3_writephy(tp, MII_BMCR, new_bmcr);
5276 bmcr = new_bmcr;
5277 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5278 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5279 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5280 ASIC_REV_5714) {
5281 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5282 bmsr |= BMSR_LSTATUS;
5283 else
5284 bmsr &= ~BMSR_LSTATUS;
5285 }
5286 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5287 }
5288 }
5289
5290 if (bmsr & BMSR_LSTATUS) {
5291 current_speed = SPEED_1000;
5292 current_link_up = 1;
5293 if (bmcr & BMCR_FULLDPLX)
5294 current_duplex = DUPLEX_FULL;
5295 else
5296 current_duplex = DUPLEX_HALF;
5297
5298 local_adv = 0;
5299 remote_adv = 0;
5300
5301 if (bmcr & BMCR_ANENABLE) {
5302 u32 common;
5303
5304 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5305 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5306 common = local_adv & remote_adv;
5307 if (common & (ADVERTISE_1000XHALF |
5308 ADVERTISE_1000XFULL)) {
5309 if (common & ADVERTISE_1000XFULL)
5310 current_duplex = DUPLEX_FULL;
5311 else
5312 current_duplex = DUPLEX_HALF;
5313
5314 tp->link_config.rmt_adv =
5315 mii_adv_to_ethtool_adv_x(remote_adv);
5316 } else if (!tg3_flag(tp, 5780_CLASS)) {
5317 /* Link is up via parallel detect */
5318 } else {
5319 current_link_up = 0;
5320 }
5321 }
5322 }
5323
5324 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5325 tg3_setup_flow_control(tp, local_adv, remote_adv);
5326
5327 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5328 if (tp->link_config.active_duplex == DUPLEX_HALF)
5329 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5330
5331 tw32_f(MAC_MODE, tp->mac_mode);
5332 udelay(40);
5333
5334 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5335
5336 tp->link_config.active_speed = current_speed;
5337 tp->link_config.active_duplex = current_duplex;
5338
5339 if (current_link_up != netif_carrier_ok(tp->dev)) {
5340 if (current_link_up)
5341 netif_carrier_on(tp->dev);
5342 else {
5343 netif_carrier_off(tp->dev);
5344 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5345 }
5346 tg3_link_report(tp);
5347 }
5348 return err;
5349 }
5350
5351 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5352 {
5353 if (tp->serdes_counter) {
5354 /* Give autoneg time to complete. */
5355 tp->serdes_counter--;
5356 return;
5357 }
5358
5359 if (!netif_carrier_ok(tp->dev) &&
5360 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5361 u32 bmcr;
5362
5363 tg3_readphy(tp, MII_BMCR, &bmcr);
5364 if (bmcr & BMCR_ANENABLE) {
5365 u32 phy1, phy2;
5366
5367 /* Select shadow register 0x1f */
5368 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5369 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5370
5371 /* Select expansion interrupt status register */
5372 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5373 MII_TG3_DSP_EXP1_INT_STAT);
5374 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5375 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5376
5377 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5378 /* We have signal detect and not receiving
5379 * config code words, link is up by parallel
5380 * detection.
5381 */
5382
5383 bmcr &= ~BMCR_ANENABLE;
5384 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5385 tg3_writephy(tp, MII_BMCR, bmcr);
5386 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5387 }
5388 }
5389 } else if (netif_carrier_ok(tp->dev) &&
5390 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5391 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5392 u32 phy2;
5393
5394 /* Select expansion interrupt status register */
5395 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5396 MII_TG3_DSP_EXP1_INT_STAT);
5397 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5398 if (phy2 & 0x20) {
5399 u32 bmcr;
5400
5401 /* Config code words received, turn on autoneg. */
5402 tg3_readphy(tp, MII_BMCR, &bmcr);
5403 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5404
5405 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5406
5407 }
5408 }
5409 }
5410
5411 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5412 {
5413 u32 val;
5414 int err;
5415
5416 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5417 err = tg3_setup_fiber_phy(tp, force_reset);
5418 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5419 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5420 else
5421 err = tg3_setup_copper_phy(tp, force_reset);
5422
5423 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5424 u32 scale;
5425
5426 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5427 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5428 scale = 65;
5429 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5430 scale = 6;
5431 else
5432 scale = 12;
5433
5434 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5435 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5436 tw32(GRC_MISC_CFG, val);
5437 }
5438
5439 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5440 (6 << TX_LENGTHS_IPG_SHIFT);
5441 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5442 val |= tr32(MAC_TX_LENGTHS) &
5443 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5444 TX_LENGTHS_CNT_DWN_VAL_MSK);
5445
5446 if (tp->link_config.active_speed == SPEED_1000 &&
5447 tp->link_config.active_duplex == DUPLEX_HALF)
5448 tw32(MAC_TX_LENGTHS, val |
5449 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5450 else
5451 tw32(MAC_TX_LENGTHS, val |
5452 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5453
5454 if (!tg3_flag(tp, 5705_PLUS)) {
5455 if (netif_carrier_ok(tp->dev)) {
5456 tw32(HOSTCC_STAT_COAL_TICKS,
5457 tp->coal.stats_block_coalesce_usecs);
5458 } else {
5459 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5460 }
5461 }
5462
5463 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5464 val = tr32(PCIE_PWR_MGMT_THRESH);
5465 if (!netif_carrier_ok(tp->dev))
5466 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5467 tp->pwrmgmt_thresh;
5468 else
5469 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5470 tw32(PCIE_PWR_MGMT_THRESH, val);
5471 }
5472
5473 return err;
5474 }
5475
5476 static inline int tg3_irq_sync(struct tg3 *tp)
5477 {
5478 return tp->irq_sync;
5479 }
5480
5481 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5482 {
5483 int i;
5484
5485 dst = (u32 *)((u8 *)dst + off);
5486 for (i = 0; i < len; i += sizeof(u32))
5487 *dst++ = tr32(off + i);
5488 }
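
/* Because dst is advanced by 'off' before the copy, each register
 * lands in the dump buffer at its own hardware offset.  The buffer
 * built by tg3_dump_legacy_regs() below is therefore a sparse image
 * of register space that can be indexed just like the registers.
 */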
5489
5490 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5491 {
5492 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5493 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5494 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5495 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5496 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5497 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5498 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5499 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5500 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5501 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5502 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5503 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5504 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5505 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5506 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5507 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5508 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5509 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5510 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5511
5512 if (tg3_flag(tp, SUPPORT_MSIX))
5513 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5514
5515 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5516 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5517 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5518 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5519 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5520 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5521 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5522 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5523
5524 if (!tg3_flag(tp, 5705_PLUS)) {
5525 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5526 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5527 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5528 }
5529
5530 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5531 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5532 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5533 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5534 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5535
5536 if (tg3_flag(tp, NVRAM))
5537 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5538 }
5539
5540 static void tg3_dump_state(struct tg3 *tp)
5541 {
5542 int i;
5543 u32 *regs;
5544
5545 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5546 if (!regs) {
5547 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5548 return;
5549 }
5550
5551 if (tg3_flag(tp, PCI_EXPRESS)) {
5552 /* Read up to but not including private PCI registers */
5553 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5554 regs[i / sizeof(u32)] = tr32(i);
5555 } else
5556 tg3_dump_legacy_regs(tp, regs);
5557
5558 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5559 if (!regs[i + 0] && !regs[i + 1] &&
5560 !regs[i + 2] && !regs[i + 3])
5561 continue;
5562
5563 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5564 i * 4,
5565 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5566 }
5567
5568 kfree(regs);
5569
5570 for (i = 0; i < tp->irq_cnt; i++) {
5571 struct tg3_napi *tnapi = &tp->napi[i];
5572
5573 /* SW status block */
5574 netdev_err(tp->dev,
5575 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5576 i,
5577 tnapi->hw_status->status,
5578 tnapi->hw_status->status_tag,
5579 tnapi->hw_status->rx_jumbo_consumer,
5580 tnapi->hw_status->rx_consumer,
5581 tnapi->hw_status->rx_mini_consumer,
5582 tnapi->hw_status->idx[0].rx_producer,
5583 tnapi->hw_status->idx[0].tx_consumer);
5584
5585 netdev_err(tp->dev,
5586 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5587 i,
5588 tnapi->last_tag, tnapi->last_irq_tag,
5589 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5590 tnapi->rx_rcb_ptr,
5591 tnapi->prodring.rx_std_prod_idx,
5592 tnapi->prodring.rx_std_cons_idx,
5593 tnapi->prodring.rx_jmb_prod_idx,
5594 tnapi->prodring.rx_jmb_cons_idx);
5595 }
5596 }
5597
5598 /* This is called whenever we suspect that the system chipset is re-
5599 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5600 * is bogus tx completions. We try to recover by setting the
5601 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5602 * in the workqueue.
5603 */
5604 static void tg3_tx_recover(struct tg3 *tp)
5605 {
5606 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5607 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5608
5609 netdev_warn(tp->dev,
5610 "The system may be re-ordering memory-mapped I/O "
5611 "cycles to the network device, attempting to recover. "
5612 "Please report the problem to the driver maintainer "
5613 "and include system chipset information.\n");
5614
5615 spin_lock(&tp->lock);
5616 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5617 spin_unlock(&tp->lock);
5618 }
5619
5620 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5621 {
5622 /* Tell compiler to fetch tx indices from memory. */
5623 barrier();
5624 return tnapi->tx_pending -
5625 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5626 }
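
/* A worked example of the arithmetic above, assuming the usual
 * 512-entry ring (TG3_TX_RING_SIZE == 512, so the mask is 511):
 *
 *	tx_prod = 5, tx_cons = 510	(the producer has wrapped)
 *	in flight = (5 - 510) & 511 = 7
 *	available = tx_pending - 7
 *
 * The masking is only correct because the ring size is a power of two.
 */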
5627
5628 /* Tigon3 never reports partial packet sends. So we do not
5629 * need special logic to handle SKBs that have not had all
5630 * of their frags sent yet, like SunGEM does.
5631 */
5632 static void tg3_tx(struct tg3_napi *tnapi)
5633 {
5634 struct tg3 *tp = tnapi->tp;
5635 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5636 u32 sw_idx = tnapi->tx_cons;
5637 struct netdev_queue *txq;
5638 int index = tnapi - tp->napi;
5639 unsigned int pkts_compl = 0, bytes_compl = 0;
5640
5641 if (tg3_flag(tp, ENABLE_TSS))
5642 index--;
5643
5644 txq = netdev_get_tx_queue(tp->dev, index);
5645
5646 while (sw_idx != hw_idx) {
5647 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5648 struct sk_buff *skb = ri->skb;
5649 int i, tx_bug = 0;
5650
5651 if (unlikely(skb == NULL)) {
5652 tg3_tx_recover(tp);
5653 return;
5654 }
5655
5656 pci_unmap_single(tp->pdev,
5657 dma_unmap_addr(ri, mapping),
5658 skb_headlen(skb),
5659 PCI_DMA_TODEVICE);
5660
5661 ri->skb = NULL;
5662
5663 while (ri->fragmented) {
5664 ri->fragmented = false;
5665 sw_idx = NEXT_TX(sw_idx);
5666 ri = &tnapi->tx_buffers[sw_idx];
5667 }
5668
5669 sw_idx = NEXT_TX(sw_idx);
5670
5671 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5672 ri = &tnapi->tx_buffers[sw_idx];
5673 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5674 tx_bug = 1;
5675
5676 pci_unmap_page(tp->pdev,
5677 dma_unmap_addr(ri, mapping),
5678 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5679 PCI_DMA_TODEVICE);
5680
5681 while (ri->fragmented) {
5682 ri->fragmented = false;
5683 sw_idx = NEXT_TX(sw_idx);
5684 ri = &tnapi->tx_buffers[sw_idx];
5685 }
5686
5687 sw_idx = NEXT_TX(sw_idx);
5688 }
5689
5690 pkts_compl++;
5691 bytes_compl += skb->len;
5692
5693 dev_kfree_skb(skb);
5694
5695 if (unlikely(tx_bug)) {
5696 tg3_tx_recover(tp);
5697 return;
5698 }
5699 }
5700
5701 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5702
5703 tnapi->tx_cons = sw_idx;
5704
5705 /* Need to make the tx_cons update visible to tg3_start_xmit()
5706 * before checking for netif_queue_stopped(). Without the
5707 * memory barrier, there is a small possibility that tg3_start_xmit()
5708 * will miss it and cause the queue to be stopped forever.
5709 */
5710 smp_mb();
5711
5712 if (unlikely(netif_tx_queue_stopped(txq) &&
5713 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5714 __netif_tx_lock(txq, smp_processor_id());
5715 if (netif_tx_queue_stopped(txq) &&
5716 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5717 netif_tx_wake_queue(txq);
5718 __netif_tx_unlock(txq);
5719 }
5720 }
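
/* A sketch of the handshake above: the smp_mb() makes the tx_cons
 * update visible before the stopped-queue test, so a concurrent
 * tg3_start_xmit() that is about to stop the queue cannot miss the
 * newly freed descriptors (which would leave the queue stopped
 * forever).  The second check under the tx queue lock then keeps us
 * from waking a queue that a racing transmit has just legitimately
 * stopped again.
 */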
5721
5722 static void tg3_frag_free(bool is_frag, void *data)
5723 {
5724 if (is_frag)
5725 put_page(virt_to_head_page(data));
5726 else
5727 kfree(data);
5728 }
5729
5730 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5731 {
5732 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5733 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5734
5735 if (!ri->data)
5736 return;
5737
5738 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5739 map_sz, PCI_DMA_FROMDEVICE);
5740 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5741 ri->data = NULL;
5742 }
5743
5745 /* Returns size of skb allocated or < 0 on error.
5746 *
5747 * We only need to fill in the address because the other members
5748 * of the RX descriptor are invariant, see tg3_init_rings.
5749 *
5750 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5751 * posting buffers we only dirty the first cache line of the RX
5752 * descriptor (containing the address). Whereas for the RX status
5753 * buffers the cpu only reads the last cache line of the RX descriptor
5754 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5755 */
5756 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5757 u32 opaque_key, u32 dest_idx_unmasked,
5758 unsigned int *frag_size)
5759 {
5760 struct tg3_rx_buffer_desc *desc;
5761 struct ring_info *map;
5762 u8 *data;
5763 dma_addr_t mapping;
5764 int skb_size, data_size, dest_idx;
5765
5766 switch (opaque_key) {
5767 case RXD_OPAQUE_RING_STD:
5768 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5769 desc = &tpr->rx_std[dest_idx];
5770 map = &tpr->rx_std_buffers[dest_idx];
5771 data_size = tp->rx_pkt_map_sz;
5772 break;
5773
5774 case RXD_OPAQUE_RING_JUMBO:
5775 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5776 desc = &tpr->rx_jmb[dest_idx].std;
5777 map = &tpr->rx_jmb_buffers[dest_idx];
5778 data_size = TG3_RX_JMB_MAP_SZ;
5779 break;
5780
5781 default:
5782 return -EINVAL;
5783 }
5784
5785 /* Do not overwrite any of the map or rp information
5786 * until we are sure we can commit to a new buffer.
5787 *
5788 * Callers depend upon this behavior and assume that
5789 * we leave everything unchanged if we fail.
5790 */
5791 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5792 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5793 if (skb_size <= PAGE_SIZE) {
5794 data = netdev_alloc_frag(skb_size);
5795 *frag_size = skb_size;
5796 } else {
5797 data = kmalloc(skb_size, GFP_ATOMIC);
5798 *frag_size = 0;
5799 }
5800 if (!data)
5801 return -ENOMEM;
5802
5803 mapping = pci_map_single(tp->pdev,
5804 data + TG3_RX_OFFSET(tp),
5805 data_size,
5806 PCI_DMA_FROMDEVICE);
5807 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5808 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5809 return -EIO;
5810 }
5811
5812 map->data = data;
5813 dma_unmap_addr_set(map, mapping, mapping);
5814
5815 desc->addr_hi = ((u64)mapping >> 32);
5816 desc->addr_lo = ((u64)mapping & 0xffffffff);
5817
5818 return data_size;
5819 }
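
/* Note on the allocation strategy above: buffers that fit in a page
 * come from the page-fragment allocator, and *frag_size is set so the
 * caller can hand the fragment straight to build_skb(); larger buffers
 * fall back to kmalloc(), with *frag_size == 0 telling build_skb() and
 * tg3_frag_free() that the data was kmalloc'ed.
 */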
5820
5821 /* We only need to copy the address over because the other
5822 * members of the RX descriptor are invariant. See notes above
5823 * tg3_alloc_rx_data for full details.
5824 */
5825 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5826 struct tg3_rx_prodring_set *dpr,
5827 u32 opaque_key, int src_idx,
5828 u32 dest_idx_unmasked)
5829 {
5830 struct tg3 *tp = tnapi->tp;
5831 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5832 struct ring_info *src_map, *dest_map;
5833 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5834 int dest_idx;
5835
5836 switch (opaque_key) {
5837 case RXD_OPAQUE_RING_STD:
5838 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5839 dest_desc = &dpr->rx_std[dest_idx];
5840 dest_map = &dpr->rx_std_buffers[dest_idx];
5841 src_desc = &spr->rx_std[src_idx];
5842 src_map = &spr->rx_std_buffers[src_idx];
5843 break;
5844
5845 case RXD_OPAQUE_RING_JUMBO:
5846 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5847 dest_desc = &dpr->rx_jmb[dest_idx].std;
5848 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5849 src_desc = &spr->rx_jmb[src_idx].std;
5850 src_map = &spr->rx_jmb_buffers[src_idx];
5851 break;
5852
5853 default:
5854 return;
5855 }
5856
5857 dest_map->data = src_map->data;
5858 dma_unmap_addr_set(dest_map, mapping,
5859 dma_unmap_addr(src_map, mapping));
5860 dest_desc->addr_hi = src_desc->addr_hi;
5861 dest_desc->addr_lo = src_desc->addr_lo;
5862
5863 /* Ensure that the update to the skb happens after the physical
5864 * addresses have been transferred to the new BD location.
5865 */
5866 smp_wmb();
5867
5868 src_map->data = NULL;
5869 }
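
/* Recycling is the cheap path: on RX errors, and on the small-packet
 * copy path in tg3_rx() below, the original buffer is re-posted to the
 * producer ring as-is instead of being freed and reallocated.
 */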
5870
5871 /* The RX ring scheme is composed of multiple rings which post fresh
5872 * buffers to the chip, and one special ring the chip uses to report
5873 * status back to the host.
5874 *
5875 * The special ring reports the status of received packets to the
5876 * host. The chip does not write into the original descriptor the
5877 * RX buffer was obtained from. The chip simply takes the original
5878 * descriptor as provided by the host, updates the status and length
5879 * field, then writes this into the next status ring entry.
5880 *
5881 * Each ring the host uses to post buffers to the chip is described
5882 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5883 * it is first placed into on-chip RAM. Once the packet's length is
5884 * known, the chip walks down the TG3_BDINFO entries to select the ring:
5885 * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5886 * whose MAXLEN covers the new packet's length is chosen.
5887 *
5888 * The "separate ring for rx status" scheme may sound queer, but it makes
5889 * sense from a cache coherency perspective. If only the host writes
5890 * to the buffer post rings, and only the chip writes to the rx status
5891 * rings, then cache lines never move beyond shared-modified state.
5892 * If both the host and chip were to write into the same ring, cache line
5893 * eviction could occur since both entities want it in an exclusive state.
5894 */
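/* A rough sketch of the scheme described above, for the standard ring:
 *
 *	host				chip
 *	----				----
 *	post buffer to rx_std  ---->	DMA packet into the buffer,
 *					write status + length into the
 *	read status ring  <--------	rx status (return) ring
 *	recycle or replace buffer,
 *	bump rx_std_prod_idx
 *
 * Only the host writes the producer rings and only the chip writes the
 * status ring, which is what keeps cache lines from ping-ponging.
 */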
5895 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5896 {
5897 struct tg3 *tp = tnapi->tp;
5898 u32 work_mask, rx_std_posted = 0;
5899 u32 std_prod_idx, jmb_prod_idx;
5900 u32 sw_idx = tnapi->rx_rcb_ptr;
5901 u16 hw_idx;
5902 int received;
5903 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5904
5905 hw_idx = *(tnapi->rx_rcb_prod_idx);
5906 /*
5907 * We need to order the read of hw_idx and the read of
5908 * the opaque cookie.
5909 */
5910 rmb();
5911 work_mask = 0;
5912 received = 0;
5913 std_prod_idx = tpr->rx_std_prod_idx;
5914 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5915 while (sw_idx != hw_idx && budget > 0) {
5916 struct ring_info *ri;
5917 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5918 unsigned int len;
5919 struct sk_buff *skb;
5920 dma_addr_t dma_addr;
5921 u32 opaque_key, desc_idx, *post_ptr;
5922 u8 *data;
5923
5924 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5925 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5926 if (opaque_key == RXD_OPAQUE_RING_STD) {
5927 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5928 dma_addr = dma_unmap_addr(ri, mapping);
5929 data = ri->data;
5930 post_ptr = &std_prod_idx;
5931 rx_std_posted++;
5932 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5933 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5934 dma_addr = dma_unmap_addr(ri, mapping);
5935 data = ri->data;
5936 post_ptr = &jmb_prod_idx;
5937 } else
5938 goto next_pkt_nopost;
5939
5940 work_mask |= opaque_key;
5941
5942 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5943 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5944 drop_it:
5945 tg3_recycle_rx(tnapi, tpr, opaque_key,
5946 desc_idx, *post_ptr);
5947 drop_it_no_recycle:
5948 /* Other statistics are tracked by the card. */
5949 tp->rx_dropped++;
5950 goto next_pkt;
5951 }
5952
5953 prefetch(data + TG3_RX_OFFSET(tp));
5954 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5955 ETH_FCS_LEN;
5956
5957 if (len > TG3_RX_COPY_THRESH(tp)) {
5958 int skb_size;
5959 unsigned int frag_size;
5960
5961 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5962 *post_ptr, &frag_size);
5963 if (skb_size < 0)
5964 goto drop_it;
5965
5966 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5967 PCI_DMA_FROMDEVICE);
5968
5969 skb = build_skb(data, frag_size);
5970 if (!skb) {
5971 tg3_frag_free(frag_size != 0, data);
5972 goto drop_it_no_recycle;
5973 }
5974 skb_reserve(skb, TG3_RX_OFFSET(tp));
5975 /* Ensure that the update to the data happens
5976 * after the usage of the old DMA mapping.
5977 */
5978 smp_wmb();
5979
5980 ri->data = NULL;
5981
5982 } else {
5983 tg3_recycle_rx(tnapi, tpr, opaque_key,
5984 desc_idx, *post_ptr);
5985
5986 skb = netdev_alloc_skb(tp->dev,
5987 len + TG3_RAW_IP_ALIGN);
5988 if (skb == NULL)
5989 goto drop_it_no_recycle;
5990
5991 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5992 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5993 memcpy(skb->data,
5994 data + TG3_RX_OFFSET(tp),
5995 len);
5996 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5997 }
5998
5999 skb_put(skb, len);
6000 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6001 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6002 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6003 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6004 skb->ip_summed = CHECKSUM_UNNECESSARY;
6005 else
6006 skb_checksum_none_assert(skb);
6007
6008 skb->protocol = eth_type_trans(skb, tp->dev);
6009
6010 if (len > (tp->dev->mtu + ETH_HLEN) &&
6011 skb->protocol != htons(ETH_P_8021Q)) {
6012 dev_kfree_skb(skb);
6013 goto drop_it_no_recycle;
6014 }
6015
6016 if (desc->type_flags & RXD_FLAG_VLAN &&
6017 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6018 __vlan_hwaccel_put_tag(skb,
6019 desc->err_vlan & RXD_VLAN_MASK);
6020
6021 napi_gro_receive(&tnapi->napi, skb);
6022
6023 received++;
6024 budget--;
6025
6026 next_pkt:
6027 (*post_ptr)++;
6028
6029 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6030 tpr->rx_std_prod_idx = std_prod_idx &
6031 tp->rx_std_ring_mask;
6032 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6033 tpr->rx_std_prod_idx);
6034 work_mask &= ~RXD_OPAQUE_RING_STD;
6035 rx_std_posted = 0;
6036 }
6037 next_pkt_nopost:
6038 sw_idx++;
6039 sw_idx &= tp->rx_ret_ring_mask;
6040
6041 /* Refresh hw_idx to see if there is new work */
6042 if (sw_idx == hw_idx) {
6043 hw_idx = *(tnapi->rx_rcb_prod_idx);
6044 rmb();
6045 }
6046 }
6047
6048 /* ACK the status ring. */
6049 tnapi->rx_rcb_ptr = sw_idx;
6050 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6051
6052 /* Refill RX ring(s). */
6053 if (!tg3_flag(tp, ENABLE_RSS)) {
6054 /* Sync BD data before updating mailbox */
6055 wmb();
6056
6057 if (work_mask & RXD_OPAQUE_RING_STD) {
6058 tpr->rx_std_prod_idx = std_prod_idx &
6059 tp->rx_std_ring_mask;
6060 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6061 tpr->rx_std_prod_idx);
6062 }
6063 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6064 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6065 tp->rx_jmb_ring_mask;
6066 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6067 tpr->rx_jmb_prod_idx);
6068 }
6069 mmiowb();
6070 } else if (work_mask) {
6071 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6072 * updated before the producer indices can be updated.
6073 */
6074 smp_wmb();
6075
6076 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6077 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6078
6079 if (tnapi != &tp->napi[1]) {
6080 tp->rx_refill = true;
6081 napi_schedule(&tp->napi[1].napi);
6082 }
6083 }
6084
6085 return received;
6086 }
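/* Illustrative sketch, not part of the driver: the masked-index
 * arithmetic tg3_rx() uses above to walk its power-of-two return ring.
 * The EXAMPLE_* names are hypothetical.
 */
#if 0
#define EXAMPLE_RING_SIZE 512	/* must be a power of two */
#define EXAMPLE_RING_MASK (EXAMPLE_RING_SIZE - 1)

static u32 example_ring_advance(u32 idx)
{
	/* Same "sw_idx++; sw_idx &= mask" idiom as in tg3_rx();
	 * equivalent to (idx + 1) % EXAMPLE_RING_SIZE without a divide.
	 */
	return (idx + 1) & EXAMPLE_RING_MASK;
}
#endif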
6087
6088 static void tg3_poll_link(struct tg3 *tp)
6089 {
6090 /* handle link change and other phy events */
6091 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6092 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6093
6094 if (sblk->status & SD_STATUS_LINK_CHG) {
6095 sblk->status = SD_STATUS_UPDATED |
6096 (sblk->status & ~SD_STATUS_LINK_CHG);
6097 spin_lock(&tp->lock);
6098 if (tg3_flag(tp, USE_PHYLIB)) {
6099 tw32_f(MAC_STATUS,
6100 (MAC_STATUS_SYNC_CHANGED |
6101 MAC_STATUS_CFG_CHANGED |
6102 MAC_STATUS_MI_COMPLETION |
6103 MAC_STATUS_LNKSTATE_CHANGED));
6104 udelay(40);
6105 } else
6106 tg3_setup_phy(tp, 0);
6107 spin_unlock(&tp->lock);
6108 }
6109 }
6110 }
6111
6112 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6113 struct tg3_rx_prodring_set *dpr,
6114 struct tg3_rx_prodring_set *spr)
6115 {
6116 u32 si, di, cpycnt, src_prod_idx;
6117 int i, err = 0;
6118
6119 while (1) {
6120 src_prod_idx = spr->rx_std_prod_idx;
6121
6122 /* Make sure updates to the rx_std_buffers[] entries and the
6123 * standard producer index are seen in the correct order.
6124 */
6125 smp_rmb();
6126
6127 if (spr->rx_std_cons_idx == src_prod_idx)
6128 break;
6129
6130 if (spr->rx_std_cons_idx < src_prod_idx)
6131 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6132 else
6133 cpycnt = tp->rx_std_ring_mask + 1 -
6134 spr->rx_std_cons_idx;
6135
6136 cpycnt = min(cpycnt,
6137 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6138
6139 si = spr->rx_std_cons_idx;
6140 di = dpr->rx_std_prod_idx;
6141
6142 for (i = di; i < di + cpycnt; i++) {
6143 if (dpr->rx_std_buffers[i].data) {
6144 cpycnt = i - di;
6145 err = -ENOSPC;
6146 break;
6147 }
6148 }
6149
6150 if (!cpycnt)
6151 break;
6152
6153 /* Ensure that updates to the rx_std_buffers ring and the
6154 * shadowed hardware producer ring from tg3_recycle_skb() are
6155 * ordered correctly WRT the skb check above.
6156 */
6157 smp_rmb();
6158
6159 memcpy(&dpr->rx_std_buffers[di],
6160 &spr->rx_std_buffers[si],
6161 cpycnt * sizeof(struct ring_info));
6162
6163 for (i = 0; i < cpycnt; i++, di++, si++) {
6164 struct tg3_rx_buffer_desc *sbd, *dbd;
6165 sbd = &spr->rx_std[si];
6166 dbd = &dpr->rx_std[di];
6167 dbd->addr_hi = sbd->addr_hi;
6168 dbd->addr_lo = sbd->addr_lo;
6169 }
6170
6171 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6172 tp->rx_std_ring_mask;
6173 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6174 tp->rx_std_ring_mask;
6175 }
6176
6177 while (1) {
6178 src_prod_idx = spr->rx_jmb_prod_idx;
6179
6180 /* Make sure updates to the rx_jmb_buffers[] entries and
6181 * the jumbo producer index are seen in the correct order.
6182 */
6183 smp_rmb();
6184
6185 if (spr->rx_jmb_cons_idx == src_prod_idx)
6186 break;
6187
6188 if (spr->rx_jmb_cons_idx < src_prod_idx)
6189 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6190 else
6191 cpycnt = tp->rx_jmb_ring_mask + 1 -
6192 spr->rx_jmb_cons_idx;
6193
6194 cpycnt = min(cpycnt,
6195 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6196
6197 si = spr->rx_jmb_cons_idx;
6198 di = dpr->rx_jmb_prod_idx;
6199
6200 for (i = di; i < di + cpycnt; i++) {
6201 if (dpr->rx_jmb_buffers[i].data) {
6202 cpycnt = i - di;
6203 err = -ENOSPC;
6204 break;
6205 }
6206 }
6207
6208 if (!cpycnt)
6209 break;
6210
6211 /* Ensure that updates to the rx_jmb_buffers ring and the
6212 * shadowed hardware producer ring from tg3_recycle_skb() are
6213 * ordered correctly WRT the skb check above.
6214 */
6215 smp_rmb();
6216
6217 memcpy(&dpr->rx_jmb_buffers[di],
6218 &spr->rx_jmb_buffers[si],
6219 cpycnt * sizeof(struct ring_info));
6220
6221 for (i = 0; i < cpycnt; i++, di++, si++) {
6222 struct tg3_rx_buffer_desc *sbd, *dbd;
6223 sbd = &spr->rx_jmb[si].std;
6224 dbd = &dpr->rx_jmb[di].std;
6225 dbd->addr_hi = sbd->addr_hi;
6226 dbd->addr_lo = sbd->addr_lo;
6227 }
6228
6229 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6230 tp->rx_jmb_ring_mask;
6231 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6232 tp->rx_jmb_ring_mask;
6233 }
6234
6235 return err;
6236 }
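/* Illustrative sketch, not part of the driver: the contiguous span
 * length tg3_rx_prodring_xfer() computes above.  When the producer has
 * wrapped past the end of the ring, only the run up to the ring end is
 * copied in one memcpy(); the next loop iteration resumes at index 0.
 * The caller must have already handled the empty case (cons == prod).
 */
#if 0
static u32 example_contig_span(u32 cons, u32 prod, u32 mask)
{
	if (cons < prod)
		return prod - cons;	/* no wrap: one straight run */
	return mask + 1 - cons;		/* wrapped: copy to ring end */
}
#endif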
6237
6238 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6239 {
6240 struct tg3 *tp = tnapi->tp;
6241
6242 /* run TX completion thread */
6243 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6244 tg3_tx(tnapi);
6245 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6246 return work_done;
6247 }
6248
6249 if (!tnapi->rx_rcb_prod_idx)
6250 return work_done;
6251
6252 /* run RX thread, within the bounds set by NAPI.
6253 * All RX "locking" is done by ensuring outside
6254 * code synchronizes with tg3->napi.poll()
6255 */
6256 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6257 work_done += tg3_rx(tnapi, budget - work_done);
6258
6259 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6260 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6261 int i, err = 0;
6262 u32 std_prod_idx = dpr->rx_std_prod_idx;
6263 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6264
6265 tp->rx_refill = false;
6266 for (i = 1; i < tp->irq_cnt; i++)
6267 err |= tg3_rx_prodring_xfer(tp, dpr,
6268 &tp->napi[i].prodring);
6269
6270 wmb();
6271
6272 if (std_prod_idx != dpr->rx_std_prod_idx)
6273 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6274 dpr->rx_std_prod_idx);
6275
6276 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6277 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6278 dpr->rx_jmb_prod_idx);
6279
6280 mmiowb();
6281
6282 if (err)
6283 tw32_f(HOSTCC_MODE, tp->coal_now);
6284 }
6285
6286 return work_done;
6287 }
6288
6289 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6290 {
6291 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6292 schedule_work(&tp->reset_task);
6293 }
6294
6295 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6296 {
6297 cancel_work_sync(&tp->reset_task);
6298 tg3_flag_clear(tp, RESET_TASK_PENDING);
6299 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6300 }
6301
6302 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6303 {
6304 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6305 struct tg3 *tp = tnapi->tp;
6306 int work_done = 0;
6307 struct tg3_hw_status *sblk = tnapi->hw_status;
6308
6309 while (1) {
6310 work_done = tg3_poll_work(tnapi, work_done, budget);
6311
6312 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6313 goto tx_recovery;
6314
6315 if (unlikely(work_done >= budget))
6316 break;
6317
6318 /* tp->last_tag is used in tg3_int_reenable() below
6319 * to tell the hw how much work has been processed,
6320 * so we must read it before checking for more work.
6321 */
6322 tnapi->last_tag = sblk->status_tag;
6323 tnapi->last_irq_tag = tnapi->last_tag;
6324 rmb();
6325
6326 /* check for RX/TX work to do */
6327 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6328 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6329
6330 /* This check is not race free, but looping again
6331 * reduces the number of interrupts.
6332 */
6333 if (tnapi == &tp->napi[1] && tp->rx_refill)
6334 continue;
6335
6336 napi_complete(napi);
6337 /* Reenable interrupts. */
6338 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6339
6340 /* This check is synchronized by napi_schedule()
6341 * and napi_complete() to close the race condition.
6342 */
6343 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6344 tw32(HOSTCC_MODE, tp->coalesce_mode |
6345 HOSTCC_MODE_ENABLE |
6346 tnapi->coal_now);
6347 }
6348 mmiowb();
6349 break;
6350 }
6351 }
6352
6353 return work_done;
6354
6355 tx_recovery:
6356 /* work_done is guaranteed to be less than budget. */
6357 napi_complete(napi);
6358 tg3_reset_task_schedule(tp);
6359 return work_done;
6360 }
6361
6362 static void tg3_process_error(struct tg3 *tp)
6363 {
6364 u32 val;
6365 bool real_error = false;
6366
6367 if (tg3_flag(tp, ERROR_PROCESSED))
6368 return;
6369
6370 /* Check Flow Attention register */
6371 val = tr32(HOSTCC_FLOW_ATTN);
6372 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6373 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6374 real_error = true;
6375 }
6376
6377 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6378 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6379 real_error = true;
6380 }
6381
6382 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6383 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6384 real_error = true;
6385 }
6386
6387 if (!real_error)
6388 return;
6389
6390 tg3_dump_state(tp);
6391
6392 tg3_flag_set(tp, ERROR_PROCESSED);
6393 tg3_reset_task_schedule(tp);
6394 }
6395
6396 static int tg3_poll(struct napi_struct *napi, int budget)
6397 {
6398 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6399 struct tg3 *tp = tnapi->tp;
6400 int work_done = 0;
6401 struct tg3_hw_status *sblk = tnapi->hw_status;
6402
6403 while (1) {
6404 if (sblk->status & SD_STATUS_ERROR)
6405 tg3_process_error(tp);
6406
6407 tg3_poll_link(tp);
6408
6409 work_done = tg3_poll_work(tnapi, work_done, budget);
6410
6411 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6412 goto tx_recovery;
6413
6414 if (unlikely(work_done >= budget))
6415 break;
6416
6417 if (tg3_flag(tp, TAGGED_STATUS)) {
6418 /* tp->last_tag is used in tg3_int_reenable() below
6419 * to tell the hw how much work has been processed,
6420 * so we must read it before checking for more work.
6421 */
6422 tnapi->last_tag = sblk->status_tag;
6423 tnapi->last_irq_tag = tnapi->last_tag;
6424 rmb();
6425 } else
6426 sblk->status &= ~SD_STATUS_UPDATED;
6427
6428 if (likely(!tg3_has_work(tnapi))) {
6429 napi_complete(napi);
6430 tg3_int_reenable(tnapi);
6431 break;
6432 }
6433 }
6434
6435 return work_done;
6436
6437 tx_recovery:
6438 /* work_done is guaranteed to be less than budget. */
6439 napi_complete(napi);
6440 tg3_reset_task_schedule(tp);
6441 return work_done;
6442 }
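/* Illustrative sketch, not part of the driver: the overall shape of
 * the NAPI poll loops in tg3_poll() and tg3_poll_msix() above.
 * example_do_work() and example_has_work() are hypothetical stand-ins
 * for the device-specific processing and "more work?" checks.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	while (1) {
		work_done = example_do_work(napi, work_done, budget);

		/* Budget exhausted: return without completing so the
		 * core schedules this poll again.
		 */
		if (work_done >= budget)
			break;

		if (!example_has_work(napi)) {
			napi_complete(napi);
			/* re-enable device interrupts here */
			break;
		}
	}
	return work_done;
}
#endif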
6443
6444 static void tg3_napi_disable(struct tg3 *tp)
6445 {
6446 int i;
6447
6448 for (i = tp->irq_cnt - 1; i >= 0; i--)
6449 napi_disable(&tp->napi[i].napi);
6450 }
6451
6452 static void tg3_napi_enable(struct tg3 *tp)
6453 {
6454 int i;
6455
6456 for (i = 0; i < tp->irq_cnt; i++)
6457 napi_enable(&tp->napi[i].napi);
6458 }
6459
6460 static void tg3_napi_init(struct tg3 *tp)
6461 {
6462 int i;
6463
6464 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6465 for (i = 1; i < tp->irq_cnt; i++)
6466 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6467 }
6468
6469 static void tg3_napi_fini(struct tg3 *tp)
6470 {
6471 int i;
6472
6473 for (i = 0; i < tp->irq_cnt; i++)
6474 netif_napi_del(&tp->napi[i].napi);
6475 }
6476
6477 static inline void tg3_netif_stop(struct tg3 *tp)
6478 {
6479 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6480 tg3_napi_disable(tp);
6481 netif_tx_disable(tp->dev);
6482 }
6483
6484 static inline void tg3_netif_start(struct tg3 *tp)
6485 {
6486 /* NOTE: unconditional netif_tx_wake_all_queues is only
6487 * appropriate so long as all callers are assured to
6488 * have free tx slots (such as after tg3_init_hw)
6489 */
6490 netif_tx_wake_all_queues(tp->dev);
6491
6492 tg3_napi_enable(tp);
6493 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6494 tg3_enable_ints(tp);
6495 }
6496
6497 static void tg3_irq_quiesce(struct tg3 *tp)
6498 {
6499 int i;
6500
6501 BUG_ON(tp->irq_sync);
6502
6503 tp->irq_sync = 1;
6504 smp_mb();
6505
6506 for (i = 0; i < tp->irq_cnt; i++)
6507 synchronize_irq(tp->napi[i].irq_vec);
6508 }
6509
6510 /* Fully shut down all tg3 driver activity elsewhere in the system.
6511 * If irq_sync is non-zero, the IRQ handlers are synchronized with
6512 * as well. Most of the time this is not necessary, except when
6513 * shutting down the device.
6514 */
6515 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6516 {
6517 spin_lock_bh(&tp->lock);
6518 if (irq_sync)
6519 tg3_irq_quiesce(tp);
6520 }
6521
6522 static inline void tg3_full_unlock(struct tg3 *tp)
6523 {
6524 spin_unlock_bh(&tp->lock);
6525 }
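/* Illustrative sketch, not part of the driver: the typical caller
 * pattern for tg3_full_lock()/tg3_full_unlock().  A non-zero irq_sync
 * additionally waits for in-flight interrupt handlers, which is only
 * needed on slow paths such as device shutdown.
 */
#if 0
static void example_reconfigure(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* quiesce IRQs before a full stop */
	/* ... safely reprogram the hardware here ... */
	tg3_full_unlock(tp);
}
#endif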
6526
6527 /* One-shot MSI handler - Chip automatically disables interrupt
6528 * after sending MSI so driver doesn't have to do it.
6529 */
6530 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6531 {
6532 struct tg3_napi *tnapi = dev_id;
6533 struct tg3 *tp = tnapi->tp;
6534
6535 prefetch(tnapi->hw_status);
6536 if (tnapi->rx_rcb)
6537 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6538
6539 if (likely(!tg3_irq_sync(tp)))
6540 napi_schedule(&tnapi->napi);
6541
6542 return IRQ_HANDLED;
6543 }
6544
6545 /* MSI ISR - No need to check for interrupt sharing and no need to
6546 * flush status block and interrupt mailbox. PCI ordering rules
6547 * guarantee that MSI will arrive after the status block.
6548 */
6549 static irqreturn_t tg3_msi(int irq, void *dev_id)
6550 {
6551 struct tg3_napi *tnapi = dev_id;
6552 struct tg3 *tp = tnapi->tp;
6553
6554 prefetch(tnapi->hw_status);
6555 if (tnapi->rx_rcb)
6556 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6557 /*
6558 * Writing any value to intr-mbox-0 clears PCI INTA# and
6559 * chip-internal interrupt pending events.
6560 * Writing non-zero to intr-mbox-0 additionally tells the
6561 * NIC to stop sending us irqs, engaging "in-intr-handler"
6562 * event coalescing.
6563 */
6564 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6565 if (likely(!tg3_irq_sync(tp)))
6566 napi_schedule(&tnapi->napi);
6567
6568 return IRQ_RETVAL(1);
6569 }
6570
6571 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6572 {
6573 struct tg3_napi *tnapi = dev_id;
6574 struct tg3 *tp = tnapi->tp;
6575 struct tg3_hw_status *sblk = tnapi->hw_status;
6576 unsigned int handled = 1;
6577
6578 /* In INTx mode, it is possible for the interrupt to arrive at
6579 * the CPU before the status block that was posted prior to it.
6580 * Reading the PCI State register will confirm whether the
6581 * interrupt is ours and will flush the status block.
6582 */
6583 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6584 if (tg3_flag(tp, CHIP_RESETTING) ||
6585 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6586 handled = 0;
6587 goto out;
6588 }
6589 }
6590
6591 /*
6592 * Writing any value to intr-mbox-0 clears PCI INTA# and
6593 * chip-internal interrupt pending events.
6594 * Writing non-zero to intr-mbox-0 additionally tells the
6595 * NIC to stop sending us irqs, engaging "in-intr-handler"
6596 * event coalescing.
6597 *
6598 * Flush the mailbox to de-assert the IRQ immediately to prevent
6599 * spurious interrupts. The flush impacts performance but
6600 * excessive spurious interrupts can be worse in some cases.
6601 */
6602 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6603 if (tg3_irq_sync(tp))
6604 goto out;
6605 sblk->status &= ~SD_STATUS_UPDATED;
6606 if (likely(tg3_has_work(tnapi))) {
6607 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6608 napi_schedule(&tnapi->napi);
6609 } else {
6610 /* No work, shared interrupt perhaps? re-enable
6611 * interrupts, and flush that PCI write
6612 */
6613 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6614 0x00000000);
6615 }
6616 out:
6617 return IRQ_RETVAL(handled);
6618 }
6619
6620 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6621 {
6622 struct tg3_napi *tnapi = dev_id;
6623 struct tg3 *tp = tnapi->tp;
6624 struct tg3_hw_status *sblk = tnapi->hw_status;
6625 unsigned int handled = 1;
6626
6627 /* In INTx mode, it is possible for the interrupt to arrive at
6628 * the CPU before the status block that was posted prior to it.
6629 * Reading the PCI State register will confirm whether the
6630 * interrupt is ours and will flush the status block.
6631 */
6632 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6633 if (tg3_flag(tp, CHIP_RESETTING) ||
6634 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6635 handled = 0;
6636 goto out;
6637 }
6638 }
6639
6640 /*
6641 * Writing any value to intr-mbox-0 clears PCI INTA# and
6642 * chip-internal interrupt pending events.
6643 * Writing non-zero to intr-mbox-0 additionally tells the
6644 * NIC to stop sending us irqs, engaging "in-intr-handler"
6645 * event coalescing.
6646 *
6647 * Flush the mailbox to de-assert the IRQ immediately to prevent
6648 * spurious interrupts. The flush impacts performance but
6649 * excessive spurious interrupts can be worse in some cases.
6650 */
6651 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6652
6653 /*
6654 * In a shared interrupt configuration, sometimes other devices'
6655 * interrupts will scream. We record the current status tag here
6656 * so that the above check can report that the screaming interrupts
6657 * are unhandled. Eventually they will be silenced.
6658 */
6659 tnapi->last_irq_tag = sblk->status_tag;
6660
6661 if (tg3_irq_sync(tp))
6662 goto out;
6663
6664 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6665
6666 napi_schedule(&tnapi->napi);
6667
6668 out:
6669 return IRQ_RETVAL(handled);
6670 }
6671
6672 /* ISR for interrupt test */
6673 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6674 {
6675 struct tg3_napi *tnapi = dev_id;
6676 struct tg3 *tp = tnapi->tp;
6677 struct tg3_hw_status *sblk = tnapi->hw_status;
6678
6679 if ((sblk->status & SD_STATUS_UPDATED) ||
6680 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6681 tg3_disable_ints(tp);
6682 return IRQ_RETVAL(1);
6683 }
6684 return IRQ_RETVAL(0);
6685 }
6686
6687 #ifdef CONFIG_NET_POLL_CONTROLLER
6688 static void tg3_poll_controller(struct net_device *dev)
6689 {
6690 int i;
6691 struct tg3 *tp = netdev_priv(dev);
6692
6693 for (i = 0; i < tp->irq_cnt; i++)
6694 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6695 }
6696 #endif
6697
6698 static void tg3_tx_timeout(struct net_device *dev)
6699 {
6700 struct tg3 *tp = netdev_priv(dev);
6701
6702 if (netif_msg_tx_err(tp)) {
6703 netdev_err(dev, "transmit timed out, resetting\n");
6704 tg3_dump_state(tp);
6705 }
6706
6707 tg3_reset_task_schedule(tp);
6708 }
6709
6710 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6711 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6712 {
6713 u32 base = (u32) mapping & 0xffffffff;
6714
6715 return (base > 0xffffdcc0) && (base + len + 8 < base);
6716 }
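/* Illustrative sketch, not part of the driver: a worked case for the
 * test above.  With mapping 0xfffff000 and an 8KB length, the low
 * 32 bits of base + len + 8 wrap to 0x1008, which is less than base,
 * so the buffer is flagged as straddling a 4GB boundary.
 */
#if 0
static int example_crosses_4g(void)
{
	dma_addr_t mapping = 0xfffff000ULL;	/* just below a 4GB line */
	int len = 0x2000;			/* 8KB buffer */

	return tg3_4g_overflow_test(mapping, len);	/* returns 1 */
}
#endif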
6717
6718 /* Test for DMA addresses > 40-bit */
6719 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6720 int len)
6721 {
6722 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6723 if (tg3_flag(tp, 40BIT_DMA_BUG))
6724 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6725 return 0;
6726 #else
6727 return 0;
6728 #endif
6729 }
6730
6731 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6732 dma_addr_t mapping, u32 len, u32 flags,
6733 u32 mss, u32 vlan)
6734 {
6735 txbd->addr_hi = ((u64) mapping >> 32);
6736 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6737 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6738 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6739 }
6740
6741 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6742 dma_addr_t map, u32 len, u32 flags,
6743 u32 mss, u32 vlan)
6744 {
6745 struct tg3 *tp = tnapi->tp;
6746 bool hwbug = false;
6747
6748 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6749 hwbug = true;
6750
6751 if (tg3_4g_overflow_test(map, len))
6752 hwbug = true;
6753
6754 if (tg3_40bit_overflow_test(tp, map, len))
6755 hwbug = true;
6756
6757 if (tp->dma_limit) {
6758 u32 prvidx = *entry;
6759 u32 tmp_flag = flags & ~TXD_FLAG_END;
6760 while (len > tp->dma_limit && *budget) {
6761 u32 frag_len = tp->dma_limit;
6762 len -= tp->dma_limit;
6763
6764 /* Avoid the 8-byte DMA problem */
6765 if (len <= 8) {
6766 len += tp->dma_limit / 2;
6767 frag_len = tp->dma_limit / 2;
6768 }
6769
6770 tnapi->tx_buffers[*entry].fragmented = true;
6771
6772 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6773 frag_len, tmp_flag, mss, vlan);
6774 *budget -= 1;
6775 prvidx = *entry;
6776 *entry = NEXT_TX(*entry);
6777
6778 map += frag_len;
6779 }
6780
6781 if (len) {
6782 if (*budget) {
6783 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6784 len, flags, mss, vlan);
6785 *budget -= 1;
6786 *entry = NEXT_TX(*entry);
6787 } else {
6788 hwbug = true;
6789 tnapi->tx_buffers[prvidx].fragmented = false;
6790 }
6791 }
6792 } else {
6793 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6794 len, flags, mss, vlan);
6795 *entry = NEXT_TX(*entry);
6796 }
6797
6798 return hwbug;
6799 }
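/* Illustrative sketch, not part of the driver: the per-chunk split
 * arithmetic tg3_tx_frag_set() applies above when tp->dma_limit is
 * set.  A 10KB mapping with a 4KB limit becomes 4KB + 4KB + 2KB
 * descriptors; when a full chunk would leave a tail of 8 bytes or
 * less, the chunk is halved instead so no descriptor trips the
 * short-DMA erratum.  The caller must ensure *len > limit on entry.
 */
#if 0
static u32 example_next_chunk(u32 *len, u32 limit)
{
	u32 chunk = limit;

	*len -= limit;
	if (*len <= 8) {	/* would leave an 8-byte-or-less tail */
		*len += limit / 2;
		chunk = limit / 2;
	}
	return chunk;
}
#endif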
6800
6801 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6802 {
6803 int i;
6804 struct sk_buff *skb;
6805 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6806
6807 skb = txb->skb;
6808 txb->skb = NULL;
6809
6810 pci_unmap_single(tnapi->tp->pdev,
6811 dma_unmap_addr(txb, mapping),
6812 skb_headlen(skb),
6813 PCI_DMA_TODEVICE);
6814
6815 while (txb->fragmented) {
6816 txb->fragmented = false;
6817 entry = NEXT_TX(entry);
6818 txb = &tnapi->tx_buffers[entry];
6819 }
6820
6821 for (i = 0; i <= last; i++) {
6822 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6823
6824 entry = NEXT_TX(entry);
6825 txb = &tnapi->tx_buffers[entry];
6826
6827 pci_unmap_page(tnapi->tp->pdev,
6828 dma_unmap_addr(txb, mapping),
6829 skb_frag_size(frag), PCI_DMA_TODEVICE);
6830
6831 while (txb->fragmented) {
6832 txb->fragmented = false;
6833 entry = NEXT_TX(entry);
6834 txb = &tnapi->tx_buffers[entry];
6835 }
6836 }
6837 }
6838
6839 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6840 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6841 struct sk_buff **pskb,
6842 u32 *entry, u32 *budget,
6843 u32 base_flags, u32 mss, u32 vlan)
6844 {
6845 struct tg3 *tp = tnapi->tp;
6846 struct sk_buff *new_skb, *skb = *pskb;
6847 dma_addr_t new_addr = 0;
6848 int ret = 0;
6849
6850 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6851 new_skb = skb_copy(skb, GFP_ATOMIC);
6852 else {
6853 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6854
6855 new_skb = skb_copy_expand(skb,
6856 skb_headroom(skb) + more_headroom,
6857 skb_tailroom(skb), GFP_ATOMIC);
6858 }
6859
6860 if (!new_skb) {
6861 ret = -1;
6862 } else {
6863 /* New SKB is guaranteed to be linear. */
6864 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6865 PCI_DMA_TODEVICE);
6866 /* Make sure the mapping succeeded */
6867 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6868 dev_kfree_skb(new_skb);
6869 ret = -1;
6870 } else {
6871 u32 save_entry = *entry;
6872
6873 base_flags |= TXD_FLAG_END;
6874
6875 tnapi->tx_buffers[*entry].skb = new_skb;
6876 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6877 mapping, new_addr);
6878
6879 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6880 new_skb->len, base_flags,
6881 mss, vlan)) {
6882 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6883 dev_kfree_skb(new_skb);
6884 ret = -1;
6885 }
6886 }
6887 }
6888
6889 dev_kfree_skb(skb);
6890 *pskb = new_skb;
6891 return ret;
6892 }
6893
6894 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6895
6896 /* Use GSO to work around a rare TSO bug that may be triggered when the
6897 * TSO header is greater than 80 bytes.
6898 */
6899 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6900 {
6901 struct sk_buff *segs, *nskb;
6902 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6903
6904 /* Estimate the number of fragments in the worst case */
6905 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6906 netif_stop_queue(tp->dev);
6907
6908 /* netif_tx_stop_queue() must be done before checking
6909 * the tx index in tg3_tx_avail() below, because in
6910 * tg3_tx(), we update tx index before checking for
6911 * netif_tx_queue_stopped().
6912 */
6913 smp_mb();
6914 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6915 return NETDEV_TX_BUSY;
6916
6917 netif_wake_queue(tp->dev);
6918 }
6919
6920 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6921 if (IS_ERR(segs))
6922 goto tg3_tso_bug_end;
6923
6924 do {
6925 nskb = segs;
6926 segs = segs->next;
6927 nskb->next = NULL;
6928 tg3_start_xmit(nskb, tp->dev);
6929 } while (segs);
6930
6931 tg3_tso_bug_end:
6932 dev_kfree_skb(skb);
6933
6934 return NETDEV_TX_OK;
6935 }
6936
6937 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6938 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6939 */
6940 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6941 {
6942 struct tg3 *tp = netdev_priv(dev);
6943 u32 len, entry, base_flags, mss, vlan = 0;
6944 u32 budget;
6945 int i = -1, would_hit_hwbug;
6946 dma_addr_t mapping;
6947 struct tg3_napi *tnapi;
6948 struct netdev_queue *txq;
6949 unsigned int last;
6950
6951 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6952 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6953 if (tg3_flag(tp, ENABLE_TSS))
6954 tnapi++;
6955
6956 budget = tg3_tx_avail(tnapi);
6957
6958 /* We are running in BH disabled context with netif_tx_lock
6959 * and TX reclaim runs via tp->napi.poll inside of a software
6960 * interrupt. Furthermore, IRQ processing runs lockless so we have
6961 * no IRQ context deadlocks to worry about either. Rejoice!
6962 */
6963 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6964 if (!netif_tx_queue_stopped(txq)) {
6965 netif_tx_stop_queue(txq);
6966
6967 /* This is a hard error, log it. */
6968 netdev_err(dev,
6969 "BUG! Tx Ring full when queue awake!\n");
6970 }
6971 return NETDEV_TX_BUSY;
6972 }
6973
6974 entry = tnapi->tx_prod;
6975 base_flags = 0;
6976 if (skb->ip_summed == CHECKSUM_PARTIAL)
6977 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6978
6979 mss = skb_shinfo(skb)->gso_size;
6980 if (mss) {
6981 struct iphdr *iph;
6982 u32 tcp_opt_len, hdr_len;
6983
6984 if (skb_header_cloned(skb) &&
6985 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6986 goto drop;
6987
6988 iph = ip_hdr(skb);
6989 tcp_opt_len = tcp_optlen(skb);
6990
6991 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6992
6993 if (!skb_is_gso_v6(skb)) {
6994 iph->check = 0;
6995 iph->tot_len = htons(mss + hdr_len);
6996 }
6997
6998 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6999 tg3_flag(tp, TSO_BUG))
7000 return tg3_tso_bug(tp, skb);
7001
7002 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7003 TXD_FLAG_CPU_POST_DMA);
7004
7005 if (tg3_flag(tp, HW_TSO_1) ||
7006 tg3_flag(tp, HW_TSO_2) ||
7007 tg3_flag(tp, HW_TSO_3)) {
7008 tcp_hdr(skb)->check = 0;
7009 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7010 } else
7011 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7012 iph->daddr, 0,
7013 IPPROTO_TCP,
7014 0);
7015
7016 if (tg3_flag(tp, HW_TSO_3)) {
7017 mss |= (hdr_len & 0xc) << 12;
7018 if (hdr_len & 0x10)
7019 base_flags |= 0x00000010;
7020 base_flags |= (hdr_len & 0x3e0) << 5;
7021 } else if (tg3_flag(tp, HW_TSO_2))
7022 mss |= hdr_len << 9;
7023 else if (tg3_flag(tp, HW_TSO_1) ||
7024 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7025 if (tcp_opt_len || iph->ihl > 5) {
7026 int tsflags;
7027
7028 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7029 mss |= (tsflags << 11);
7030 }
7031 } else {
7032 if (tcp_opt_len || iph->ihl > 5) {
7033 int tsflags;
7034
7035 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7036 base_flags |= tsflags << 12;
7037 }
7038 }
7039 }
7040
7041 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7042 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7043 base_flags |= TXD_FLAG_JMB_PKT;
7044
7045 if (vlan_tx_tag_present(skb)) {
7046 base_flags |= TXD_FLAG_VLAN;
7047 vlan = vlan_tx_tag_get(skb);
7048 }
7049
7050 len = skb_headlen(skb);
7051
7052 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7053 if (pci_dma_mapping_error(tp->pdev, mapping))
7054 goto drop;
7055
7056
7057 tnapi->tx_buffers[entry].skb = skb;
7058 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7059
7060 would_hit_hwbug = 0;
7061
7062 if (tg3_flag(tp, 5701_DMA_BUG))
7063 would_hit_hwbug = 1;
7064
7065 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7066 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7067 mss, vlan)) {
7068 would_hit_hwbug = 1;
7069 } else if (skb_shinfo(skb)->nr_frags > 0) {
7070 u32 tmp_mss = mss;
7071
7072 if (!tg3_flag(tp, HW_TSO_1) &&
7073 !tg3_flag(tp, HW_TSO_2) &&
7074 !tg3_flag(tp, HW_TSO_3))
7075 tmp_mss = 0;
7076
7077 /* Now loop through additional data
7078 * fragments, and queue them.
7079 */
7080 last = skb_shinfo(skb)->nr_frags - 1;
7081 for (i = 0; i <= last; i++) {
7082 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7083
7084 len = skb_frag_size(frag);
7085 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7086 len, DMA_TO_DEVICE);
7087
7088 tnapi->tx_buffers[entry].skb = NULL;
7089 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7090 mapping);
7091 if (dma_mapping_error(&tp->pdev->dev, mapping))
7092 goto dma_error;
7093
7094 if (!budget ||
7095 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7096 len, base_flags |
7097 ((i == last) ? TXD_FLAG_END : 0),
7098 tmp_mss, vlan)) {
7099 would_hit_hwbug = 1;
7100 break;
7101 }
7102 }
7103 }
7104
7105 if (would_hit_hwbug) {
7106 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7107
7108 /* If the workaround fails due to memory/mapping
7109 * failure, silently drop this packet.
7110 */
7111 entry = tnapi->tx_prod;
7112 budget = tg3_tx_avail(tnapi);
7113 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7114 base_flags, mss, vlan))
7115 goto drop_nofree;
7116 }
7117
7118 skb_tx_timestamp(skb);
7119 netdev_tx_sent_queue(txq, skb->len);
7120
7121 /* Sync BD data before updating mailbox */
7122 wmb();
7123
7124 /* Packets are ready, update Tx producer idx local and on card. */
7125 tw32_tx_mbox(tnapi->prodmbox, entry);
7126
7127 tnapi->tx_prod = entry;
7128 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7129 netif_tx_stop_queue(txq);
7130
7131 /* netif_tx_stop_queue() must be done before checking
7132 * the tx index in tg3_tx_avail() below, because in
7133 * tg3_tx(), we update tx index before checking for
7134 * netif_tx_queue_stopped().
7135 */
7136 smp_mb();
7137 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7138 netif_tx_wake_queue(txq);
7139 }
7140
7141 mmiowb();
7142 return NETDEV_TX_OK;
7143
7144 dma_error:
7145 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7146 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7147 drop:
7148 dev_kfree_skb(skb);
7149 drop_nofree:
7150 tp->tx_dropped++;
7151 return NETDEV_TX_OK;
7152 }
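/* Illustrative sketch, not part of the driver: the stop-then-recheck
 * idiom tg3_start_xmit() uses above.  The queue must be stopped before
 * re-reading the free count because tg3_tx() updates the count before
 * testing the stopped bit; the smp_mb() keeps the two sides from
 * missing each other.
 */
#if 0
static void example_stop_then_recheck(struct tg3_napi *tnapi,
				      struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);
	smp_mb();	/* pairs with the barrier in tg3_tx() */
	if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
		netif_tx_wake_queue(txq);
}
#endif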
7153
7154 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7155 {
7156 if (enable) {
7157 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7158 MAC_MODE_PORT_MODE_MASK);
7159
7160 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7161
7162 if (!tg3_flag(tp, 5705_PLUS))
7163 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7164
7165 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7166 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7167 else
7168 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7169 } else {
7170 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7171
7172 if (tg3_flag(tp, 5705_PLUS) ||
7173 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7175 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7176 }
7177
7178 tw32(MAC_MODE, tp->mac_mode);
7179 udelay(40);
7180 }
7181
7182 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7183 {
7184 u32 val, bmcr, mac_mode, ptest = 0;
7185
7186 tg3_phy_toggle_apd(tp, false);
7187 tg3_phy_toggle_automdix(tp, 0);
7188
7189 if (extlpbk && tg3_phy_set_extloopbk(tp))
7190 return -EIO;
7191
7192 bmcr = BMCR_FULLDPLX;
7193 switch (speed) {
7194 case SPEED_10:
7195 break;
7196 case SPEED_100:
7197 bmcr |= BMCR_SPEED100;
7198 break;
7199 case SPEED_1000:
7200 default:
7201 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7202 speed = SPEED_100;
7203 bmcr |= BMCR_SPEED100;
7204 } else {
7205 speed = SPEED_1000;
7206 bmcr |= BMCR_SPEED1000;
7207 }
7208 }
7209
7210 if (extlpbk) {
7211 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7212 tg3_readphy(tp, MII_CTRL1000, &val);
7213 val |= CTL1000_AS_MASTER |
7214 CTL1000_ENABLE_MASTER;
7215 tg3_writephy(tp, MII_CTRL1000, val);
7216 } else {
7217 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7218 MII_TG3_FET_PTEST_TRIM_2;
7219 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7220 }
7221 } else
7222 bmcr |= BMCR_LOOPBACK;
7223
7224 tg3_writephy(tp, MII_BMCR, bmcr);
7225
7226 /* The write needs to be flushed for the FETs */
7227 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7228 tg3_readphy(tp, MII_BMCR, &bmcr);
7229
7230 udelay(40);
7231
7232 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7233 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7234 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7235 MII_TG3_FET_PTEST_FRC_TX_LINK |
7236 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7237
7238 /* The write needs to be flushed for the AC131 */
7239 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7240 }
7241
7242 /* Reset to prevent losing the first rx packet intermittently */
7243 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7244 tg3_flag(tp, 5780_CLASS)) {
7245 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7246 udelay(10);
7247 tw32_f(MAC_RX_MODE, tp->rx_mode);
7248 }
7249
7250 mac_mode = tp->mac_mode &
7251 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7252 if (speed == SPEED_1000)
7253 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7254 else
7255 mac_mode |= MAC_MODE_PORT_MODE_MII;
7256
7257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7258 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7259
7260 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7261 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7262 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7263 mac_mode |= MAC_MODE_LINK_POLARITY;
7264
7265 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7266 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7267 }
7268
7269 tw32(MAC_MODE, mac_mode);
7270 udelay(40);
7271
7272 return 0;
7273 }
7274
7275 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7276 {
7277 struct tg3 *tp = netdev_priv(dev);
7278
7279 if (features & NETIF_F_LOOPBACK) {
7280 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7281 return;
7282
7283 spin_lock_bh(&tp->lock);
7284 tg3_mac_loopback(tp, true);
7285 netif_carrier_on(tp->dev);
7286 spin_unlock_bh(&tp->lock);
7287 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7288 } else {
7289 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7290 return;
7291
7292 spin_lock_bh(&tp->lock);
7293 tg3_mac_loopback(tp, false);
7294 /* Force link status check */
7295 tg3_setup_phy(tp, 1);
7296 spin_unlock_bh(&tp->lock);
7297 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7298 }
7299 }
7300
7301 static netdev_features_t tg3_fix_features(struct net_device *dev,
7302 netdev_features_t features)
7303 {
7304 struct tg3 *tp = netdev_priv(dev);
7305
7306 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7307 features &= ~NETIF_F_ALL_TSO;
7308
7309 return features;
7310 }
7311
7312 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7313 {
7314 netdev_features_t changed = dev->features ^ features;
7315
7316 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7317 tg3_set_loopback(dev, features);
7318
7319 return 0;
7320 }
7321
7322 static void tg3_rx_prodring_free(struct tg3 *tp,
7323 struct tg3_rx_prodring_set *tpr)
7324 {
7325 int i;
7326
7327 if (tpr != &tp->napi[0].prodring) {
7328 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7329 i = (i + 1) & tp->rx_std_ring_mask)
7330 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7331 tp->rx_pkt_map_sz);
7332
7333 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7334 for (i = tpr->rx_jmb_cons_idx;
7335 i != tpr->rx_jmb_prod_idx;
7336 i = (i + 1) & tp->rx_jmb_ring_mask) {
7337 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7338 TG3_RX_JMB_MAP_SZ);
7339 }
7340 }
7341
7342 return;
7343 }
7344
7345 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7346 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7347 tp->rx_pkt_map_sz);
7348
7349 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7350 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7351 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7352 TG3_RX_JMB_MAP_SZ);
7353 }
7354 }
7355
7356 /* Initialize rx rings for packet processing.
7357 *
7358 * The chip has been shut down and the driver detached from
7359 * the networking stack, so no interrupts or new tx packets will
7360 * end up in the driver. tp->{tx,}lock are held and thus
7361 * we may not sleep.
7362 */
7363 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7364 struct tg3_rx_prodring_set *tpr)
7365 {
7366 u32 i, rx_pkt_dma_sz;
7367
7368 tpr->rx_std_cons_idx = 0;
7369 tpr->rx_std_prod_idx = 0;
7370 tpr->rx_jmb_cons_idx = 0;
7371 tpr->rx_jmb_prod_idx = 0;
7372
7373 if (tpr != &tp->napi[0].prodring) {
7374 memset(&tpr->rx_std_buffers[0], 0,
7375 TG3_RX_STD_BUFF_RING_SIZE(tp));
7376 if (tpr->rx_jmb_buffers)
7377 memset(&tpr->rx_jmb_buffers[0], 0,
7378 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7379 goto done;
7380 }
7381
7382 /* Zero out all descriptors. */
7383 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7384
7385 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7386 if (tg3_flag(tp, 5780_CLASS) &&
7387 tp->dev->mtu > ETH_DATA_LEN)
7388 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7389 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7390
7391 /* Initialize invariants of the rings; we only set this
7392 * stuff once. This works because the card does not
7393 * write into the rx buffer posting rings.
7394 */
7395 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7396 struct tg3_rx_buffer_desc *rxd;
7397
7398 rxd = &tpr->rx_std[i];
7399 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7400 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7401 rxd->opaque = (RXD_OPAQUE_RING_STD |
7402 (i << RXD_OPAQUE_INDEX_SHIFT));
7403 }
7404
7405 /* Now allocate fresh SKBs for each rx ring. */
7406 for (i = 0; i < tp->rx_pending; i++) {
7407 unsigned int frag_size;
7408
7409 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7410 &frag_size) < 0) {
7411 netdev_warn(tp->dev,
7412 "Using a smaller RX standard ring. Only "
7413 "%d out of %d buffers were allocated "
7414 "successfully\n", i, tp->rx_pending);
7415 if (i == 0)
7416 goto initfail;
7417 tp->rx_pending = i;
7418 break;
7419 }
7420 }
7421
7422 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7423 goto done;
7424
7425 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7426
7427 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7428 goto done;
7429
7430 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7431 struct tg3_rx_buffer_desc *rxd;
7432
7433 rxd = &tpr->rx_jmb[i].std;
7434 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7435 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7436 RXD_FLAG_JUMBO;
7437 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7438 (i << RXD_OPAQUE_INDEX_SHIFT));
7439 }
7440
7441 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7442 unsigned int frag_size;
7443
7444 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7445 &frag_size) < 0) {
7446 netdev_warn(tp->dev,
7447 "Using a smaller RX jumbo ring. Only %d "
7448 "out of %d buffers were allocated "
7449 "successfully\n", i, tp->rx_jumbo_pending);
7450 if (i == 0)
7451 goto initfail;
7452 tp->rx_jumbo_pending = i;
7453 break;
7454 }
7455 }
7456
7457 done:
7458 return 0;
7459
7460 initfail:
7461 tg3_rx_prodring_free(tp, tpr);
7462 return -ENOMEM;
7463 }
7464
7465 static void tg3_rx_prodring_fini(struct tg3 *tp,
7466 struct tg3_rx_prodring_set *tpr)
7467 {
7468 kfree(tpr->rx_std_buffers);
7469 tpr->rx_std_buffers = NULL;
7470 kfree(tpr->rx_jmb_buffers);
7471 tpr->rx_jmb_buffers = NULL;
7472 if (tpr->rx_std) {
7473 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7474 tpr->rx_std, tpr->rx_std_mapping);
7475 tpr->rx_std = NULL;
7476 }
7477 if (tpr->rx_jmb) {
7478 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7479 tpr->rx_jmb, tpr->rx_jmb_mapping);
7480 tpr->rx_jmb = NULL;
7481 }
7482 }
7483
7484 static int tg3_rx_prodring_init(struct tg3 *tp,
7485 struct tg3_rx_prodring_set *tpr)
7486 {
7487 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7488 GFP_KERNEL);
7489 if (!tpr->rx_std_buffers)
7490 return -ENOMEM;
7491
7492 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7493 TG3_RX_STD_RING_BYTES(tp),
7494 &tpr->rx_std_mapping,
7495 GFP_KERNEL);
7496 if (!tpr->rx_std)
7497 goto err_out;
7498
7499 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7500 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7501 GFP_KERNEL);
7502 if (!tpr->rx_jmb_buffers)
7503 goto err_out;
7504
7505 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7506 TG3_RX_JMB_RING_BYTES(tp),
7507 &tpr->rx_jmb_mapping,
7508 GFP_KERNEL);
7509 if (!tpr->rx_jmb)
7510 goto err_out;
7511 }
7512
7513 return 0;
7514
7515 err_out:
7516 tg3_rx_prodring_fini(tp, tpr);
7517 return -ENOMEM;
7518 }
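/* Illustrative sketch, not part of the driver: the unwind idiom
 * tg3_rx_prodring_init() follows above.  Every allocation failure
 * jumps to a single error label that frees whatever was obtained so
 * far; tg3_rx_prodring_fini() tolerates NULL members, so no partial
 * state tracking is needed.  The example_alloc_*() helpers are
 * hypothetical.
 */
#if 0
static int example_init(struct tg3 *tp, struct tg3_rx_prodring_set *tpr)
{
	if (example_alloc_buffers(tpr))
		goto err_out;
	if (example_alloc_rings(tp, tpr))
		goto err_out;
	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);	/* frees only what exists */
	return -ENOMEM;
}
#endif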
7519
7520 /* Free up pending packets in all rx/tx rings.
7521 *
7522 * The chip has been shut down and the driver detached from
7523 * the networking stack, so no interrupts or new tx packets will
7524 * end up in the driver. tp->{tx,}lock is not held and we are not
7525 * in an interrupt context and thus may sleep.
7526 */
7527 static void tg3_free_rings(struct tg3 *tp)
7528 {
7529 int i, j;
7530
7531 for (j = 0; j < tp->irq_cnt; j++) {
7532 struct tg3_napi *tnapi = &tp->napi[j];
7533
7534 tg3_rx_prodring_free(tp, &tnapi->prodring);
7535
7536 if (!tnapi->tx_buffers)
7537 continue;
7538
7539 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7540 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7541
7542 if (!skb)
7543 continue;
7544
7545 tg3_tx_skb_unmap(tnapi, i,
7546 skb_shinfo(skb)->nr_frags - 1);
7547
7548 dev_kfree_skb_any(skb);
7549 }
7550 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7551 }
7552 }
7553
7554 /* Initialize tx/rx rings for packet processing.
7555 *
7556 * The chip has been shut down and the driver detached from
7557 * the networking stack, so no interrupts or new tx packets will
7558 * end up in the driver. tp->{tx,}lock are held and thus
7559 * we may not sleep.
7560 */
7561 static int tg3_init_rings(struct tg3 *tp)
7562 {
7563 int i;
7564
7565 /* Free up all the SKBs. */
7566 tg3_free_rings(tp);
7567
7568 for (i = 0; i < tp->irq_cnt; i++) {
7569 struct tg3_napi *tnapi = &tp->napi[i];
7570
7571 tnapi->last_tag = 0;
7572 tnapi->last_irq_tag = 0;
7573 tnapi->hw_status->status = 0;
7574 tnapi->hw_status->status_tag = 0;
7575 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7576
7577 tnapi->tx_prod = 0;
7578 tnapi->tx_cons = 0;
7579 if (tnapi->tx_ring)
7580 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7581
7582 tnapi->rx_rcb_ptr = 0;
7583 if (tnapi->rx_rcb)
7584 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7585
7586 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7587 tg3_free_rings(tp);
7588 return -ENOMEM;
7589 }
7590 }
7591
7592 return 0;
7593 }
7594
7595 /*
7596 * Must not be invoked with interrupt sources disabled and
7597 * the hardware shut down.
7598 */
7599 static void tg3_free_consistent(struct tg3 *tp)
7600 {
7601 int i;
7602
7603 for (i = 0; i < tp->irq_cnt; i++) {
7604 struct tg3_napi *tnapi = &tp->napi[i];
7605
7606 if (tnapi->tx_ring) {
7607 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7608 tnapi->tx_ring, tnapi->tx_desc_mapping);
7609 tnapi->tx_ring = NULL;
7610 }
7611
7612 kfree(tnapi->tx_buffers);
7613 tnapi->tx_buffers = NULL;
7614
7615 if (tnapi->rx_rcb) {
7616 dma_free_coherent(&tp->pdev->dev,
7617 TG3_RX_RCB_RING_BYTES(tp),
7618 tnapi->rx_rcb,
7619 tnapi->rx_rcb_mapping);
7620 tnapi->rx_rcb = NULL;
7621 }
7622
7623 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7624
7625 if (tnapi->hw_status) {
7626 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7627 tnapi->hw_status,
7628 tnapi->status_mapping);
7629 tnapi->hw_status = NULL;
7630 }
7631 }
7632
7633 if (tp->hw_stats) {
7634 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7635 tp->hw_stats, tp->stats_mapping);
7636 tp->hw_stats = NULL;
7637 }
7638 }
7639
7640 /*
7641 * Must not be invoked with interrupt sources disabled and
7642 * the hardware shut down. Can sleep.
7643 */
7644 static int tg3_alloc_consistent(struct tg3 *tp)
7645 {
7646 int i;
7647
7648 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7649 sizeof(struct tg3_hw_stats),
7650 &tp->stats_mapping,
7651 GFP_KERNEL);
7652 if (!tp->hw_stats)
7653 goto err_out;
7654
7655 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7656
7657 for (i = 0; i < tp->irq_cnt; i++) {
7658 struct tg3_napi *tnapi = &tp->napi[i];
7659 struct tg3_hw_status *sblk;
7660
7661 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7662 TG3_HW_STATUS_SIZE,
7663 &tnapi->status_mapping,
7664 GFP_KERNEL);
7665 if (!tnapi->hw_status)
7666 goto err_out;
7667
7668 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7669 sblk = tnapi->hw_status;
7670
7671 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7672 goto err_out;
7673
7674 /* If multivector TSS is enabled, vector 0 does not handle
7675 * tx interrupts. Don't allocate any resources for it.
7676 */
7677 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7678 (i && tg3_flag(tp, ENABLE_TSS))) {
7679 tnapi->tx_buffers = kzalloc(
7680 sizeof(struct tg3_tx_ring_info) *
7681 TG3_TX_RING_SIZE, GFP_KERNEL);
7682 if (!tnapi->tx_buffers)
7683 goto err_out;
7684
7685 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7686 TG3_TX_RING_BYTES,
7687 &tnapi->tx_desc_mapping,
7688 GFP_KERNEL);
7689 if (!tnapi->tx_ring)
7690 goto err_out;
7691 }
7692
7693 /*
7694 * When RSS is enabled, the status block format changes
7695 * slightly. The "rx_jumbo_consumer", "reserved",
7696 * and "rx_mini_consumer" members get mapped to the
7697 * other three rx return ring producer indexes.
7698 */
7699 switch (i) {
7700 default:
7701 if (tg3_flag(tp, ENABLE_RSS)) {
7702 tnapi->rx_rcb_prod_idx = NULL;
7703 break;
7704 }
7705 /* Fall through */
7706 case 1:
7707 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7708 break;
7709 case 2:
7710 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7711 break;
7712 case 3:
7713 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7714 break;
7715 case 4:
7716 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7717 break;
7718 }
7719
7720 /*
7721 * If multivector RSS is enabled, vector 0 does not handle
7722 * rx or tx interrupts. Don't allocate any resources for it.
7723 */
7724 if (!i && tg3_flag(tp, ENABLE_RSS))
7725 continue;
7726
7727 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7728 TG3_RX_RCB_RING_BYTES(tp),
7729 &tnapi->rx_rcb_mapping,
7730 GFP_KERNEL);
7731 if (!tnapi->rx_rcb)
7732 goto err_out;
7733
7734 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7735 }
7736
7737 return 0;
7738
7739 err_out:
7740 tg3_free_consistent(tp);
7741 return -ENOMEM;
7742 }
7743
7744 #define MAX_WAIT_CNT 1000
7745
7746 /* To stop a block, clear the enable bit and poll till it
7747 * clears. tp->lock is held.
7748 */
7749 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7750 {
7751 unsigned int i;
7752 u32 val;
7753
7754 if (tg3_flag(tp, 5705_PLUS)) {
7755 switch (ofs) {
7756 case RCVLSC_MODE:
7757 case DMAC_MODE:
7758 case MBFREE_MODE:
7759 case BUFMGR_MODE:
7760 case MEMARB_MODE:
7761 /* We can't enable/disable these bits of the
7762 * 5705/5750, just say success.
7763 */
7764 return 0;
7765
7766 default:
7767 break;
7768 }
7769 }
7770
7771 val = tr32(ofs);
7772 val &= ~enable_bit;
7773 tw32_f(ofs, val);
7774
7775 for (i = 0; i < MAX_WAIT_CNT; i++) {
7776 udelay(100);
7777 val = tr32(ofs);
7778 if ((val & enable_bit) == 0)
7779 break;
7780 }
7781
7782 if (i == MAX_WAIT_CNT && !silent) {
7783 dev_err(&tp->pdev->dev,
7784 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7785 ofs, enable_bit);
7786 return -ENODEV;
7787 }
7788
7789 return 0;
7790 }
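/* Illustrative sketch, not part of the driver: the disable-and-poll
 * idiom tg3_stop_block() implements above.  Clear the enable bit with
 * a flushed write, then poll with bounded delays until the hardware
 * acknowledges by clearing the bit.
 */
#if 0
static int example_disable_block(struct tg3 *tp, unsigned long ofs,
				 u32 enable_bit)
{
	int i;

	tw32_f(ofs, tr32(ofs) & ~enable_bit);	/* clear and flush */

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(ofs) & enable_bit))
			return 0;	/* block stopped */
	}
	return -ENODEV;			/* never acknowledged */
}
#endif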
7791
7792 /* tp->lock is held. */
7793 static int tg3_abort_hw(struct tg3 *tp, int silent)
7794 {
7795 int i, err;
7796
7797 tg3_disable_ints(tp);
7798
7799 tp->rx_mode &= ~RX_MODE_ENABLE;
7800 tw32_f(MAC_RX_MODE, tp->rx_mode);
7801 udelay(10);
7802
7803 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7804 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7805 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7806 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7807 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7808 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7809
7810 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7811 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7812 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7813 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7814 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7815 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7816 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7817
7818 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7819 tw32_f(MAC_MODE, tp->mac_mode);
7820 udelay(40);
7821
7822 tp->tx_mode &= ~TX_MODE_ENABLE;
7823 tw32_f(MAC_TX_MODE, tp->tx_mode);
7824
7825 for (i = 0; i < MAX_WAIT_CNT; i++) {
7826 udelay(100);
7827 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7828 break;
7829 }
7830 if (i >= MAX_WAIT_CNT) {
7831 dev_err(&tp->pdev->dev,
7832 "%s timed out, TX_MODE_ENABLE will not clear "
7833 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7834 err |= -ENODEV;
7835 }
7836
7837 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7838 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7839 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7840
7841 tw32(FTQ_RESET, 0xffffffff);
7842 tw32(FTQ_RESET, 0x00000000);
7843
7844 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7845 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7846
7847 for (i = 0; i < tp->irq_cnt; i++) {
7848 struct tg3_napi *tnapi = &tp->napi[i];
7849 if (tnapi->hw_status)
7850 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7851 }
7852
7853 return err;
7854 }
7855
7856 /* Save PCI command register before chip reset */
7857 static void tg3_save_pci_state(struct tg3 *tp)
7858 {
7859 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7860 }
7861
7862 /* Restore PCI state after chip reset */
7863 static void tg3_restore_pci_state(struct tg3 *tp)
7864 {
7865 u32 val;
7866
7867 /* Re-enable indirect register accesses. */
7868 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7869 tp->misc_host_ctrl);
7870
7871 /* Set MAX PCI retry to zero. */
7872 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7873 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7874 tg3_flag(tp, PCIX_MODE))
7875 val |= PCISTATE_RETRY_SAME_DMA;
7876 /* Allow reads and writes to the APE register and memory space. */
7877 if (tg3_flag(tp, ENABLE_APE))
7878 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7879 PCISTATE_ALLOW_APE_SHMEM_WR |
7880 PCISTATE_ALLOW_APE_PSPACE_WR;
7881 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7882
7883 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7884
7885 if (!tg3_flag(tp, PCI_EXPRESS)) {
7886 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7887 tp->pci_cacheline_sz);
7888 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7889 tp->pci_lat_timer);
7890 }
7891
7892 /* Make sure PCI-X relaxed ordering bit is clear. */
7893 if (tg3_flag(tp, PCIX_MODE)) {
7894 u16 pcix_cmd;
7895
7896 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7897 &pcix_cmd);
7898 pcix_cmd &= ~PCI_X_CMD_ERO;
7899 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7900 pcix_cmd);
7901 }
7902
7903 if (tg3_flag(tp, 5780_CLASS)) {
7904
7905 /* Chip reset on 5780 will reset MSI enable bit,
7906 * so need to restore it.
7907 */
7908 if (tg3_flag(tp, USING_MSI)) {
7909 u16 ctrl;
7910
7911 pci_read_config_word(tp->pdev,
7912 tp->msi_cap + PCI_MSI_FLAGS,
7913 &ctrl);
7914 pci_write_config_word(tp->pdev,
7915 tp->msi_cap + PCI_MSI_FLAGS,
7916 ctrl | PCI_MSI_FLAGS_ENABLE);
7917 val = tr32(MSGINT_MODE);
7918 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7919 }
7920 }
7921 }
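
/* Illustrative sketch only -- not part of the driver. tg3 deliberately
 * saves just the registers that the GRC core-clock reset is known to
 * clobber. A driver without such constraints could instead checkpoint the
 * whole configuration space with the generic PCI core helpers; the
 * function name below is hypothetical.
 */
static void __maybe_unused example_full_pci_checkpoint(struct pci_dev *pdev)
{
	pci_save_state(pdev);		/* snapshot config space + capabilities */
	/* ... issue the device-specific reset here ... */
	pci_restore_state(pdev);	/* write the snapshot back */
}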
7922
7923 /* tp->lock is held. */
7924 static int tg3_chip_reset(struct tg3 *tp)
7925 {
7926 u32 val;
7927 void (*write_op)(struct tg3 *, u32, u32);
7928 int i, err;
7929
7930 tg3_nvram_lock(tp);
7931
7932 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7933
7934 /* No matching tg3_nvram_unlock() after this because
7935 * chip reset below will undo the nvram lock.
7936 */
7937 tp->nvram_lock_cnt = 0;
7938
7939 /* GRC_MISC_CFG core clock reset will clear the memory
7940 * enable bit in PCI register 4 and the MSI enable bit
7941 * on some chips, so we save relevant registers here.
7942 */
7943 tg3_save_pci_state(tp);
7944
7945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7946 tg3_flag(tp, 5755_PLUS))
7947 tw32(GRC_FASTBOOT_PC, 0);
7948
7949 /*
7950 * We must avoid the readl() that normally takes place.
7951 * It locks machines, causes machine checks, and other
7952 * fun things. So, temporarily disable the 5701
7953 * hardware workaround, while we do the reset.
7954 */
7955 write_op = tp->write32;
7956 if (write_op == tg3_write_flush_reg32)
7957 tp->write32 = tg3_write32;
7958
7959 /* Prevent the irq handler from reading or writing PCI registers
7960 * during chip reset when the memory enable bit in the PCI command
7961 * register may be cleared. The chip does not generate interrupts
7962 * at this time, but the irq handler may still be called due to irq
7963 * sharing or irqpoll.
7964 */
7965 tg3_flag_set(tp, CHIP_RESETTING);
7966 for (i = 0; i < tp->irq_cnt; i++) {
7967 struct tg3_napi *tnapi = &tp->napi[i];
7968 if (tnapi->hw_status) {
7969 tnapi->hw_status->status = 0;
7970 tnapi->hw_status->status_tag = 0;
7971 }
7972 tnapi->last_tag = 0;
7973 tnapi->last_irq_tag = 0;
7974 }
7975 smp_mb();
7976
7977 for (i = 0; i < tp->irq_cnt; i++)
7978 synchronize_irq(tp->napi[i].irq_vec);
7979
7980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7981 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7982 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7983 }
7984
7985 /* do the reset */
7986 val = GRC_MISC_CFG_CORECLK_RESET;
7987
7988 if (tg3_flag(tp, PCI_EXPRESS)) {
7989 /* Force PCIe 1.0a mode */
7990 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7991 !tg3_flag(tp, 57765_PLUS) &&
7992 tr32(TG3_PCIE_PHY_TSTCTL) ==
7993 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7994 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7995
7996 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7997 tw32(GRC_MISC_CFG, (1 << 29));
7998 val |= (1 << 29);
7999 }
8000 }
8001
8002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8003 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8004 tw32(GRC_VCPU_EXT_CTRL,
8005 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8006 }
8007
8008 /* Manage gphy power for all CPMU absent PCIe devices. */
8009 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8010 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8011
8012 tw32(GRC_MISC_CFG, val);
8013
8014 /* restore 5701 hardware bug workaround write method */
8015 tp->write32 = write_op;
8016
8017 /* Unfortunately, we have to delay before the PCI read back.
8018 * Some 575X chips will not even respond to a PCI cfg access
8019 * when the reset command is given to the chip.
8020 *
8021 * How do these hardware designers expect things to work
8022 * properly if the PCI write is posted for a long period
8023 * of time? Some method of reading a register back is
8024 * always needed to push out the posted write that
8025 * performs the reset.
8026 *
8027 * For most tg3 variants the trick below has worked.
8028 * Ho hum...
8029 */
8030 udelay(120);
8031
8032 /* Flush PCI posted writes. The normal MMIO registers
8033 * are inaccessible at this time so this is the only
8034 * way to do this reliably (actually, this is no longer
8035 * the case, see above). I tried to use indirect
8036 * register read/write but this upset some 5701 variants.
8037 */
8038 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8039
8040 udelay(120);
8041
8042 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8043 u16 val16;
8044
8045 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8046 int i;
8047 u32 cfg_val;
8048
8049 /* Wait for link training to complete. */
8050 for (i = 0; i < 5000; i++)
8051 udelay(100);
8052
8053 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8054 pci_write_config_dword(tp->pdev, 0xc4,
8055 cfg_val | (1 << 15));
8056 }
8057
8058 /* Clear the "no snoop" and "relaxed ordering" bits. */
8059 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8060 /*
8061 * Older PCIe devices only support the 128 byte
8062 * MPS setting. Enforce the restriction.
8063 */
8064 if (!tg3_flag(tp, CPMU_PRESENT))
8065 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8066 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8067
8068 /* Clear error status */
8069 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8070 PCI_EXP_DEVSTA_CED |
8071 PCI_EXP_DEVSTA_NFED |
8072 PCI_EXP_DEVSTA_FED |
8073 PCI_EXP_DEVSTA_URD);
8074 }
8075
8076 tg3_restore_pci_state(tp);
8077
8078 tg3_flag_clear(tp, CHIP_RESETTING);
8079 tg3_flag_clear(tp, ERROR_PROCESSED);
8080
8081 val = 0;
8082 if (tg3_flag(tp, 5780_CLASS))
8083 val = tr32(MEMARB_MODE);
8084 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8085
8086 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8087 tg3_stop_fw(tp);
8088 tw32(0x5000, 0x400);
8089 }
8090
8091 tw32(GRC_MODE, tp->grc_mode);
8092
8093 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8094 val = tr32(0xc4);
8095
8096 tw32(0xc4, val | (1 << 15));
8097 }
8098
8099 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8101 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8102 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8103 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8104 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8105 }
8106
8107 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8108 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8109 val = tp->mac_mode;
8110 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8111 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8112 val = tp->mac_mode;
8113 } else
8114 val = 0;
8115
8116 tw32_f(MAC_MODE, val);
8117 udelay(40);
8118
8119 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8120
8121 err = tg3_poll_fw(tp);
8122 if (err)
8123 return err;
8124
8125 tg3_mdio_start(tp);
8126
8127 if (tg3_flag(tp, PCI_EXPRESS) &&
8128 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8129 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8130 !tg3_flag(tp, 57765_PLUS)) {
8131 val = tr32(0x7c00);
8132
8133 tw32(0x7c00, val | (1 << 25));
8134 }
8135
8136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8137 val = tr32(TG3_CPMU_CLCK_ORIDE);
8138 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8139 }
8140
8141 /* Reprobe ASF enable state. */
8142 tg3_flag_clear(tp, ENABLE_ASF);
8143 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8144 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8145 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8146 u32 nic_cfg;
8147
8148 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8149 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8150 tg3_flag_set(tp, ENABLE_ASF);
8151 tp->last_event_jiffies = jiffies;
8152 if (tg3_flag(tp, 5750_PLUS))
8153 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8154 }
8155 }
8156
8157 return 0;
8158 }
8159
8160 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8161 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8162
8163 /* tp->lock is held. */
8164 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8165 {
8166 int err;
8167
8168 tg3_stop_fw(tp);
8169
8170 tg3_write_sig_pre_reset(tp, kind);
8171
8172 tg3_abort_hw(tp, silent);
8173 err = tg3_chip_reset(tp);
8174
8175 __tg3_set_mac_addr(tp, 0);
8176
8177 tg3_write_sig_legacy(tp, kind);
8178 tg3_write_sig_post_reset(tp, kind);
8179
8180 if (tp->hw_stats) {
8181 /* Save the stats across chip resets... */
8182 tg3_get_nstats(tp, &tp->net_stats_prev);
8183 tg3_get_estats(tp, &tp->estats_prev);
8184
8185 /* And make sure the next sample is new data */
8186 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8187 }
8188
8189 if (err)
8190 return err;
8191
8192 return 0;
8193 }
8194
8195 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8196 {
8197 struct tg3 *tp = netdev_priv(dev);
8198 struct sockaddr *addr = p;
8199 int err = 0, skip_mac_1 = 0;
8200
8201 if (!is_valid_ether_addr(addr->sa_data))
8202 return -EADDRNOTAVAIL;
8203
8204 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8205
8206 if (!netif_running(dev))
8207 return 0;
8208
8209 if (tg3_flag(tp, ENABLE_ASF)) {
8210 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8211
8212 addr0_high = tr32(MAC_ADDR_0_HIGH);
8213 addr0_low = tr32(MAC_ADDR_0_LOW);
8214 addr1_high = tr32(MAC_ADDR_1_HIGH);
8215 addr1_low = tr32(MAC_ADDR_1_LOW);
8216
8217 /* Skip MAC addr 1 if ASF is using it. */
8218 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8219 !(addr1_high == 0 && addr1_low == 0))
8220 skip_mac_1 = 1;
8221 }
8222 spin_lock_bh(&tp->lock);
8223 __tg3_set_mac_addr(tp, skip_mac_1);
8224 spin_unlock_bh(&tp->lock);
8225
8226 return err;
8227 }
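
/* Usage sketch (illustrative, not driver code): the networking core
 * reaches this handler through the ndo_set_mac_address hook, e.g. via
 * dev_set_mac_address() when userspace runs "ip link set dev ethX
 * address ...". The caller passes a struct sockaddr whose sa_data holds
 * the new MAC address.
 */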
8228
8229 /* tp->lock is held. */
8230 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8231 dma_addr_t mapping, u32 maxlen_flags,
8232 u32 nic_addr)
8233 {
8234 tg3_write_mem(tp,
8235 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8236 ((u64) mapping >> 32));
8237 tg3_write_mem(tp,
8238 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8239 ((u64) mapping & 0xffffffff));
8240 tg3_write_mem(tp,
8241 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8242 maxlen_flags);
8243
8244 if (!tg3_flag(tp, 5705_PLUS))
8245 tg3_write_mem(tp,
8246 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8247 nic_addr);
8248 }
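
/* For reference (hypothetical struct, offsets from the TG3_BDINFO_*
 * constants): the ring control block that tg3_set_bdinfo() programs in
 * NIC SRAM can be pictured as the following layout.
 */
struct example_tg3_bdinfo {
	u32 host_addr_hi;	/* TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH */
	u32 host_addr_lo;	/* TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW */
	u32 maxlen_flags;	/* (ring size << BDINFO_FLAGS_MAXLEN_SHIFT) | flags */
	u32 nic_addr;		/* descriptor location in NIC SRAM (pre-5705 only) */
};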
8249
8250 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8251 {
8252 int i;
8253
8254 if (!tg3_flag(tp, ENABLE_TSS)) {
8255 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8256 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8257 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8258 } else {
8259 tw32(HOSTCC_TXCOL_TICKS, 0);
8260 tw32(HOSTCC_TXMAX_FRAMES, 0);
8261 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8262 }
8263
8264 if (!tg3_flag(tp, ENABLE_RSS)) {
8265 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8266 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8267 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8268 } else {
8269 tw32(HOSTCC_RXCOL_TICKS, 0);
8270 tw32(HOSTCC_RXMAX_FRAMES, 0);
8271 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8272 }
8273
8274 if (!tg3_flag(tp, 5705_PLUS)) {
8275 u32 val = ec->stats_block_coalesce_usecs;
8276
8277 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8278 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8279
8280 if (!netif_carrier_ok(tp->dev))
8281 val = 0;
8282
8283 tw32(HOSTCC_STAT_COAL_TICKS, val);
8284 }
8285
8286 for (i = 0; i < tp->irq_cnt - 1; i++) {
8287 u32 reg;
8288
8289 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8290 tw32(reg, ec->rx_coalesce_usecs);
8291 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8292 tw32(reg, ec->rx_max_coalesced_frames);
8293 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8294 tw32(reg, ec->rx_max_coalesced_frames_irq);
8295
8296 if (tg3_flag(tp, ENABLE_TSS)) {
8297 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8298 tw32(reg, ec->tx_coalesce_usecs);
8299 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8300 tw32(reg, ec->tx_max_coalesced_frames);
8301 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8302 tw32(reg, ec->tx_max_coalesced_frames_irq);
8303 }
8304 }
8305
8306 for (; i < tp->irq_max - 1; i++) {
8307 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8308 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8309 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8310
8311 if (tg3_flag(tp, ENABLE_TSS)) {
8312 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8313 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8314 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8315 }
8316 }
8317 }
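
/* Example (hypothetical values, not driver defaults): how a caller might
 * fill the ethtool_coalesce parameters consumed by __tg3_set_coalesce().
 */
static void __maybe_unused example_fill_coalesce(struct ethtool_coalesce *ec)
{
	memset(ec, 0, sizeof(*ec));
	ec->rx_coalesce_usecs = 20;		/* programmed into HOSTCC_RXCOL_TICKS */
	ec->rx_max_coalesced_frames = 15;	/* HOSTCC_RXMAX_FRAMES */
	ec->tx_coalesce_usecs = 72;		/* HOSTCC_TXCOL_TICKS */
	ec->tx_max_coalesced_frames = 53;	/* HOSTCC_TXMAX_FRAMES */
	ec->stats_block_coalesce_usecs = 1000;	/* HOSTCC_STAT_COAL_TICKS */
}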
8318
8319 /* tp->lock is held. */
8320 static void tg3_rings_reset(struct tg3 *tp)
8321 {
8322 int i;
8323 u32 stblk, txrcb, rxrcb, limit;
8324 struct tg3_napi *tnapi = &tp->napi[0];
8325
8326 /* Disable all transmit rings but the first. */
8327 if (!tg3_flag(tp, 5705_PLUS))
8328 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8329 else if (tg3_flag(tp, 5717_PLUS))
8330 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8331 else if (tg3_flag(tp, 57765_CLASS))
8332 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8333 else
8334 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8335
8336 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8337 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8338 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8339 BDINFO_FLAGS_DISABLED);
8340
8341
8342 /* Disable all receive return rings but the first. */
8343 if (tg3_flag(tp, 5717_PLUS))
8344 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8345 else if (!tg3_flag(tp, 5705_PLUS))
8346 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8347 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8348 tg3_flag(tp, 57765_CLASS))
8349 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8350 else
8351 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8352
8353 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8354 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8355 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8356 BDINFO_FLAGS_DISABLED);
8357
8358 /* Disable interrupts */
8359 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8360 tp->napi[0].chk_msi_cnt = 0;
8361 tp->napi[0].last_rx_cons = 0;
8362 tp->napi[0].last_tx_cons = 0;
8363
8364 /* Zero mailbox registers. */
8365 if (tg3_flag(tp, SUPPORT_MSIX)) {
8366 for (i = 1; i < tp->irq_max; i++) {
8367 tp->napi[i].tx_prod = 0;
8368 tp->napi[i].tx_cons = 0;
8369 if (tg3_flag(tp, ENABLE_TSS))
8370 tw32_mailbox(tp->napi[i].prodmbox, 0);
8371 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8372 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8373 tp->napi[i].chk_msi_cnt = 0;
8374 tp->napi[i].last_rx_cons = 0;
8375 tp->napi[i].last_tx_cons = 0;
8376 }
8377 if (!tg3_flag(tp, ENABLE_TSS))
8378 tw32_mailbox(tp->napi[0].prodmbox, 0);
8379 } else {
8380 tp->napi[0].tx_prod = 0;
8381 tp->napi[0].tx_cons = 0;
8382 tw32_mailbox(tp->napi[0].prodmbox, 0);
8383 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8384 }
8385
8386 /* Make sure the NIC-based send BD rings are disabled. */
8387 if (!tg3_flag(tp, 5705_PLUS)) {
8388 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8389 for (i = 0; i < 16; i++)
8390 tw32_tx_mbox(mbox + i * 8, 0);
8391 }
8392
8393 txrcb = NIC_SRAM_SEND_RCB;
8394 rxrcb = NIC_SRAM_RCV_RET_RCB;
8395
8396 /* Clear status block in ram. */
8397 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8398
8399 /* Set status block DMA address */
8400 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8401 ((u64) tnapi->status_mapping >> 32));
8402 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8403 ((u64) tnapi->status_mapping & 0xffffffff));
8404
8405 if (tnapi->tx_ring) {
8406 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8407 (TG3_TX_RING_SIZE <<
8408 BDINFO_FLAGS_MAXLEN_SHIFT),
8409 NIC_SRAM_TX_BUFFER_DESC);
8410 txrcb += TG3_BDINFO_SIZE;
8411 }
8412
8413 if (tnapi->rx_rcb) {
8414 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8415 (tp->rx_ret_ring_mask + 1) <<
8416 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8417 rxrcb += TG3_BDINFO_SIZE;
8418 }
8419
8420 stblk = HOSTCC_STATBLCK_RING1;
8421
8422 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8423 u64 mapping = (u64)tnapi->status_mapping;
8424 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8425 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8426
8427 /* Clear status block in ram. */
8428 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8429
8430 if (tnapi->tx_ring) {
8431 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8432 (TG3_TX_RING_SIZE <<
8433 BDINFO_FLAGS_MAXLEN_SHIFT),
8434 NIC_SRAM_TX_BUFFER_DESC);
8435 txrcb += TG3_BDINFO_SIZE;
8436 }
8437
8438 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8439 ((tp->rx_ret_ring_mask + 1) <<
8440 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8441
8442 stblk += 8;
8443 rxrcb += TG3_BDINFO_SIZE;
8444 }
8445 }
8446
8447 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8448 {
8449 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8450
8451 if (!tg3_flag(tp, 5750_PLUS) ||
8452 tg3_flag(tp, 5780_CLASS) ||
8453 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8455 tg3_flag(tp, 57765_PLUS))
8456 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8457 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8458 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8459 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8460 else
8461 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8462
8463 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8464 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8465
8466 val = min(nic_rep_thresh, host_rep_thresh);
8467 tw32(RCVBDI_STD_THRESH, val);
8468
8469 if (tg3_flag(tp, 57765_PLUS))
8470 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8471
8472 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8473 return;
8474
8475 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8476
8477 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8478
8479 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8480 tw32(RCVBDI_JUMBO_THRESH, val);
8481
8482 if (tg3_flag(tp, 57765_PLUS))
8483 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8484 }
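
/* Worked example (hypothetical numbers): if bdcache_maxcnt were 32 and
 * tp->rx_pending were 200, then nic_rep_thresh = min(16, rx_std_max_post),
 * host_rep_thresh = max(200 / 8, 1) = 25, and RCVBDI_STD_THRESH gets the
 * smaller of the two, so replenishment triggers before either the NIC BD
 * cache or the host ring can run dry.
 */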
8485
8486 static inline u32 calc_crc(unsigned char *buf, int len)
8487 {
8488 u32 reg;
8489 u32 tmp;
8490 int j, k;
8491
8492 reg = 0xffffffff;
8493
8494 for (j = 0; j < len; j++) {
8495 reg ^= buf[j];
8496
8497 for (k = 0; k < 8; k++) {
8498 tmp = reg & 0x01;
8499
8500 reg >>= 1;
8501
8502 if (tmp)
8503 reg ^= 0xedb88320;
8504 }
8505 }
8506
8507 return ~reg;
8508 }
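
/* For reference: calc_crc() is bit-reflected CRC-32 with polynomial
 * 0xedb88320, processed LSB first. Assuming <linux/crc32.h>, the multicast
 * hash computation in __tg3_set_rx_mode() below could be sketched as:
 *
 *	u32 bit = ether_crc_le(ETH_ALEN, ha->addr) & 0x7f;
 *	u32 regidx = (bit & 0x60) >> 5;		// selects MAC_HASH_REG_0..3
 *	mc_filter[regidx] |= 1 << (bit & 0x1f);
 *
 * This is a sketch only; the driver keeps its open-coded version.
 */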
8509
8510 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8511 {
8512 /* accept or reject all multicast frames */
8513 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8514 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8515 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8516 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8517 }
8518
8519 static void __tg3_set_rx_mode(struct net_device *dev)
8520 {
8521 struct tg3 *tp = netdev_priv(dev);
8522 u32 rx_mode;
8523
8524 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8525 RX_MODE_KEEP_VLAN_TAG);
8526
8527 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8528 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8529 * flag clear.
8530 */
8531 if (!tg3_flag(tp, ENABLE_ASF))
8532 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8533 #endif
8534
8535 if (dev->flags & IFF_PROMISC) {
8536 /* Promiscuous mode. */
8537 rx_mode |= RX_MODE_PROMISC;
8538 } else if (dev->flags & IFF_ALLMULTI) {
8539 /* Accept all multicast. */
8540 tg3_set_multi(tp, 1);
8541 } else if (netdev_mc_empty(dev)) {
8542 /* Reject all multicast. */
8543 tg3_set_multi(tp, 0);
8544 } else {
8545 /* Accept one or more multicast(s). */
8546 struct netdev_hw_addr *ha;
8547 u32 mc_filter[4] = { 0, };
8548 u32 regidx;
8549 u32 bit;
8550 u32 crc;
8551
8552 netdev_for_each_mc_addr(ha, dev) {
8553 crc = calc_crc(ha->addr, ETH_ALEN);
8554 bit = ~crc & 0x7f;
8555 regidx = (bit & 0x60) >> 5;
8556 bit &= 0x1f;
8557 mc_filter[regidx] |= (1 << bit);
8558 }
8559
8560 tw32(MAC_HASH_REG_0, mc_filter[0]);
8561 tw32(MAC_HASH_REG_1, mc_filter[1]);
8562 tw32(MAC_HASH_REG_2, mc_filter[2]);
8563 tw32(MAC_HASH_REG_3, mc_filter[3]);
8564 }
8565
8566 if (rx_mode != tp->rx_mode) {
8567 tp->rx_mode = rx_mode;
8568 tw32_f(MAC_RX_MODE, rx_mode);
8569 udelay(10);
8570 }
8571 }
8572
8573 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8574 {
8575 int i;
8576
8577 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8578 tp->rss_ind_tbl[i] =
8579 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8580 }
8581
8582 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8583 {
8584 int i;
8585
8586 if (!tg3_flag(tp, SUPPORT_MSIX))
8587 return;
8588
8589 if (tp->irq_cnt <= 2) {
8590 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8591 return;
8592 }
8593
8594 /* Validate table against current IRQ count */
8595 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8596 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8597 break;
8598 }
8599
8600 if (i != TG3_RSS_INDIR_TBL_SIZE)
8601 tg3_rss_init_dflt_indir_tbl(tp);
8602 }
8603
8604 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8605 {
8606 int i = 0;
8607 u32 reg = MAC_RSS_INDIR_TBL_0;
8608
8609 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8610 u32 val = tp->rss_ind_tbl[i];
8611 i++;
8612 for (; i % 8; i++) {
8613 val <<= 4;
8614 val |= tp->rss_ind_tbl[i];
8615 }
8616 tw32(reg, val);
8617 reg += 4;
8618 }
8619 }
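
/* Illustration (hypothetical table contents): each 32-bit
 * MAC_RSS_INDIR_TBL_x register packs eight 4-bit indirection entries,
 * first entry in the most significant nibble. With
 * rss_ind_tbl[0..7] = {0, 1, 2, 3, 0, 1, 2, 3}, the loop above writes
 * 0x01230123 to MAC_RSS_INDIR_TBL_0.
 */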
8620
8621 /* tp->lock is held. */
8622 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8623 {
8624 u32 val, rdmac_mode;
8625 int i, err, limit;
8626 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8627
8628 tg3_disable_ints(tp);
8629
8630 tg3_stop_fw(tp);
8631
8632 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8633
8634 if (tg3_flag(tp, INIT_COMPLETE))
8635 tg3_abort_hw(tp, 1);
8636
8637 /* Enable MAC control of LPI */
8638 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8639 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8640 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8641 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8642
8643 tw32_f(TG3_CPMU_EEE_CTRL,
8644 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8645
8646 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8647 TG3_CPMU_EEEMD_LPI_IN_TX |
8648 TG3_CPMU_EEEMD_LPI_IN_RX |
8649 TG3_CPMU_EEEMD_EEE_ENABLE;
8650
8651 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8652 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8653
8654 if (tg3_flag(tp, ENABLE_APE))
8655 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8656
8657 tw32_f(TG3_CPMU_EEE_MODE, val);
8658
8659 tw32_f(TG3_CPMU_EEE_DBTMR1,
8660 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8661 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8662
8663 tw32_f(TG3_CPMU_EEE_DBTMR2,
8664 TG3_CPMU_DBTMR2_APE_TX_2047US |
8665 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8666 }
8667
8668 if (reset_phy)
8669 tg3_phy_reset(tp);
8670
8671 err = tg3_chip_reset(tp);
8672 if (err)
8673 return err;
8674
8675 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8676
8677 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8678 val = tr32(TG3_CPMU_CTRL);
8679 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8680 tw32(TG3_CPMU_CTRL, val);
8681
8682 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8683 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8684 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8685 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8686
8687 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8688 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8689 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8690 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8691
8692 val = tr32(TG3_CPMU_HST_ACC);
8693 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8694 val |= CPMU_HST_ACC_MACCLK_6_25;
8695 tw32(TG3_CPMU_HST_ACC, val);
8696 }
8697
8698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8699 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8700 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8701 PCIE_PWR_MGMT_L1_THRESH_4MS;
8702 tw32(PCIE_PWR_MGMT_THRESH, val);
8703
8704 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8705 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8706
8707 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8708
8709 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8710 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8711 }
8712
8713 if (tg3_flag(tp, L1PLLPD_EN)) {
8714 u32 grc_mode = tr32(GRC_MODE);
8715
8716 /* Access the lower 1K of PL PCIE block registers. */
8717 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8718 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8719
8720 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8721 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8722 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8723
8724 tw32(GRC_MODE, grc_mode);
8725 }
8726
8727 if (tg3_flag(tp, 57765_CLASS)) {
8728 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8729 u32 grc_mode = tr32(GRC_MODE);
8730
8731 /* Access the lower 1K of PL PCIE block registers. */
8732 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8733 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8734
8735 val = tr32(TG3_PCIE_TLDLPL_PORT +
8736 TG3_PCIE_PL_LO_PHYCTL5);
8737 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8738 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8739
8740 tw32(GRC_MODE, grc_mode);
8741 }
8742
8743 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8744 u32 grc_mode = tr32(GRC_MODE);
8745
8746 /* Access the lower 1K of DL PCIE block registers. */
8747 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8748 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8749
8750 val = tr32(TG3_PCIE_TLDLPL_PORT +
8751 TG3_PCIE_DL_LO_FTSMAX);
8752 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8753 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8754 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8755
8756 tw32(GRC_MODE, grc_mode);
8757 }
8758
8759 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8760 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8761 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8762 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8763 }
8764
8765 /* This works around an issue with Athlon chipsets on
8766 * B3 tigon3 silicon. This bit has no effect on any
8767 * other revision. But do not set this on PCI Express
8768 * chips and don't even touch the clocks if the CPMU is present.
8769 */
8770 if (!tg3_flag(tp, CPMU_PRESENT)) {
8771 if (!tg3_flag(tp, PCI_EXPRESS))
8772 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8773 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8774 }
8775
8776 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8777 tg3_flag(tp, PCIX_MODE)) {
8778 val = tr32(TG3PCI_PCISTATE);
8779 val |= PCISTATE_RETRY_SAME_DMA;
8780 tw32(TG3PCI_PCISTATE, val);
8781 }
8782
8783 if (tg3_flag(tp, ENABLE_APE)) {
8784 /* Allow reads and writes to the
8785 * APE register and memory space.
8786 */
8787 val = tr32(TG3PCI_PCISTATE);
8788 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8789 PCISTATE_ALLOW_APE_SHMEM_WR |
8790 PCISTATE_ALLOW_APE_PSPACE_WR;
8791 tw32(TG3PCI_PCISTATE, val);
8792 }
8793
8794 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8795 /* Enable some hw fixes. */
8796 val = tr32(TG3PCI_MSI_DATA);
8797 val |= (1 << 26) | (1 << 28) | (1 << 29);
8798 tw32(TG3PCI_MSI_DATA, val);
8799 }
8800
8801 /* Descriptor ring init may make accesses to the
8802 * NIC SRAM area to set up the TX descriptors, so we
8803 * can only do this after the hardware has been
8804 * successfully reset.
8805 */
8806 err = tg3_init_rings(tp);
8807 if (err)
8808 return err;
8809
8810 if (tg3_flag(tp, 57765_PLUS)) {
8811 val = tr32(TG3PCI_DMA_RW_CTRL) &
8812 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8813 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8814 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8815 if (!tg3_flag(tp, 57765_CLASS) &&
8816 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8817 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8818 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8819 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8820 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8821 /* This value is determined during the probe time DMA
8822 * engine test, tg3_test_dma.
8823 */
8824 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8825 }
8826
8827 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8828 GRC_MODE_4X_NIC_SEND_RINGS |
8829 GRC_MODE_NO_TX_PHDR_CSUM |
8830 GRC_MODE_NO_RX_PHDR_CSUM);
8831 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8832
8833 /* Pseudo-header checksum is done by hardware logic and not
8834 * the offload processors, so make the chip do the pseudo-
8835 * header checksums on receive. For transmit it is more
8836 * convenient to do the pseudo-header checksum in software
8837 * as Linux does that on transmit for us in all cases.
8838 */
8839 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8840
8841 tw32(GRC_MODE,
8842 tp->grc_mode |
8843 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8844
8845 /* Set up the timer prescaler register. The clock is always 66MHz. */
8846 val = tr32(GRC_MISC_CFG);
8847 val &= ~0xff;
8848 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8849 tw32(GRC_MISC_CFG, val);
8850
8851 /* Initialize MBUF/DESC pool. */
8852 if (tg3_flag(tp, 5750_PLUS)) {
8853 /* Do nothing. */
8854 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8855 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8857 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8858 else
8859 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8860 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8861 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8862 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8863 int fw_len;
8864
8865 fw_len = tp->fw_len;
8866 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8867 tw32(BUFMGR_MB_POOL_ADDR,
8868 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8869 tw32(BUFMGR_MB_POOL_SIZE,
8870 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8871 }
8872
8873 if (tp->dev->mtu <= ETH_DATA_LEN) {
8874 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8875 tp->bufmgr_config.mbuf_read_dma_low_water);
8876 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8877 tp->bufmgr_config.mbuf_mac_rx_low_water);
8878 tw32(BUFMGR_MB_HIGH_WATER,
8879 tp->bufmgr_config.mbuf_high_water);
8880 } else {
8881 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8882 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8883 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8884 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8885 tw32(BUFMGR_MB_HIGH_WATER,
8886 tp->bufmgr_config.mbuf_high_water_jumbo);
8887 }
8888 tw32(BUFMGR_DMA_LOW_WATER,
8889 tp->bufmgr_config.dma_low_water);
8890 tw32(BUFMGR_DMA_HIGH_WATER,
8891 tp->bufmgr_config.dma_high_water);
8892
8893 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8895 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8897 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8898 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8899 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8900 tw32(BUFMGR_MODE, val);
8901 for (i = 0; i < 2000; i++) {
8902 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8903 break;
8904 udelay(10);
8905 }
8906 if (i >= 2000) {
8907 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8908 return -ENODEV;
8909 }
8910
8911 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8912 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8913
8914 tg3_setup_rxbd_thresholds(tp);
8915
8916 /* Initialize TG3_BDINFO's at:
8917 * RCVDBDI_STD_BD: standard eth size rx ring
8918 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8919 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8920 *
8921 * like so:
8922 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8923 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8924 * ring attribute flags
8925 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8926 *
8927 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8928 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8929 *
8930 * The size of each ring is fixed in the firmware, but the location is
8931 * configurable.
8932 */
8933 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8934 ((u64) tpr->rx_std_mapping >> 32));
8935 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8936 ((u64) tpr->rx_std_mapping & 0xffffffff));
8937 if (!tg3_flag(tp, 5717_PLUS))
8938 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8939 NIC_SRAM_RX_BUFFER_DESC);
8940
8941 /* Disable the mini ring */
8942 if (!tg3_flag(tp, 5705_PLUS))
8943 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8944 BDINFO_FLAGS_DISABLED);
8945
8946 /* Program the jumbo buffer descriptor ring control
8947 * blocks on those devices that have them.
8948 */
8949 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8950 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8951
8952 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8953 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8954 ((u64) tpr->rx_jmb_mapping >> 32));
8955 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8956 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8957 val = TG3_RX_JMB_RING_SIZE(tp) <<
8958 BDINFO_FLAGS_MAXLEN_SHIFT;
8959 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8960 val | BDINFO_FLAGS_USE_EXT_RECV);
8961 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8962 tg3_flag(tp, 57765_CLASS))
8963 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8964 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8965 } else {
8966 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8967 BDINFO_FLAGS_DISABLED);
8968 }
8969
8970 if (tg3_flag(tp, 57765_PLUS)) {
8971 val = TG3_RX_STD_RING_SIZE(tp);
8972 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8973 val |= (TG3_RX_STD_DMA_SZ << 2);
8974 } else
8975 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8976 } else
8977 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8978
8979 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8980
8981 tpr->rx_std_prod_idx = tp->rx_pending;
8982 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8983
8984 tpr->rx_jmb_prod_idx =
8985 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8986 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8987
8988 tg3_rings_reset(tp);
8989
8990 /* Initialize MAC address and backoff seed. */
8991 __tg3_set_mac_addr(tp, 0);
8992
8993 /* MTU + ethernet header + FCS + optional VLAN tag */
8994 tw32(MAC_RX_MTU_SIZE,
8995 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
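	/* e.g. with the default 1500-byte MTU: 1500 + 14 + 4 + 4 = 1522 bytes */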
8996
8997 /* The slot time is changed by tg3_setup_phy if we
8998 * run at gigabit with half duplex.
8999 */
9000 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9001 (6 << TX_LENGTHS_IPG_SHIFT) |
9002 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9003
9004 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9005 val |= tr32(MAC_TX_LENGTHS) &
9006 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9007 TX_LENGTHS_CNT_DWN_VAL_MSK);
9008
9009 tw32(MAC_TX_LENGTHS, val);
9010
9011 /* Receive rules. */
9012 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9013 tw32(RCVLPC_CONFIG, 0x0181);
9014
9015 /* Calculate RDMAC_MODE setting early; we need it to determine
9016 * the RCVLPC_STATE_ENABLE mask.
9017 */
9018 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9019 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9020 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9021 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9022 RDMAC_MODE_LNGREAD_ENAB);
9023
9024 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9025 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9026
9027 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9028 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9030 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9031 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9032 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9033
9034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9035 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9036 if (tg3_flag(tp, TSO_CAPABLE) &&
9037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9038 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9039 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9040 !tg3_flag(tp, IS_5788)) {
9041 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9042 }
9043 }
9044
9045 if (tg3_flag(tp, PCI_EXPRESS))
9046 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9047
9048 if (tg3_flag(tp, HW_TSO_1) ||
9049 tg3_flag(tp, HW_TSO_2) ||
9050 tg3_flag(tp, HW_TSO_3))
9051 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9052
9053 if (tg3_flag(tp, 57765_PLUS) ||
9054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9056 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9057
9058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9059 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9060
9061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9065 tg3_flag(tp, 57765_PLUS)) {
9066 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9067 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9068 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9069 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9070 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9071 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9072 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9073 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9074 }
9075 tw32(TG3_RDMA_RSRVCTRL_REG,
9076 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9077 }
9078
9079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9081 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9082 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9083 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9084 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9085 }
9086
9087 /* Receive/send statistics. */
9088 if (tg3_flag(tp, 5750_PLUS)) {
9089 val = tr32(RCVLPC_STATS_ENABLE);
9090 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9091 tw32(RCVLPC_STATS_ENABLE, val);
9092 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9093 tg3_flag(tp, TSO_CAPABLE)) {
9094 val = tr32(RCVLPC_STATS_ENABLE);
9095 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9096 tw32(RCVLPC_STATS_ENABLE, val);
9097 } else {
9098 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9099 }
9100 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9101 tw32(SNDDATAI_STATSENAB, 0xffffff);
9102 tw32(SNDDATAI_STATSCTRL,
9103 (SNDDATAI_SCTRL_ENABLE |
9104 SNDDATAI_SCTRL_FASTUPD));
9105
9106 /* Setup host coalescing engine. */
9107 tw32(HOSTCC_MODE, 0);
9108 for (i = 0; i < 2000; i++) {
9109 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9110 break;
9111 udelay(10);
9112 }
9113
9114 __tg3_set_coalesce(tp, &tp->coal);
9115
9116 if (!tg3_flag(tp, 5705_PLUS)) {
9117 /* Status/statistics block address. See tg3_timer,
9118 * the tg3_periodic_fetch_stats call there, and
9119 * tg3_get_stats to see how this works for 5705/5750 chips.
9120 */
9121 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9122 ((u64) tp->stats_mapping >> 32));
9123 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9124 ((u64) tp->stats_mapping & 0xffffffff));
9125 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9126
9127 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9128
9129 /* Clear statistics and status block memory areas */
9130 for (i = NIC_SRAM_STATS_BLK;
9131 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9132 i += sizeof(u32)) {
9133 tg3_write_mem(tp, i, 0);
9134 udelay(40);
9135 }
9136 }
9137
9138 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9139
9140 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9141 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9142 if (!tg3_flag(tp, 5705_PLUS))
9143 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9144
9145 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9146 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9147 /* Reset to prevent intermittently losing the first rx packet */
9148 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9149 udelay(10);
9150 }
9151
9152 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9153 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9154 MAC_MODE_FHDE_ENABLE;
9155 if (tg3_flag(tp, ENABLE_APE))
9156 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9157 if (!tg3_flag(tp, 5705_PLUS) &&
9158 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9159 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9160 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9161 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9162 udelay(40);
9163
9164 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9165 * If TG3_FLAG_IS_NIC is zero, we should read the
9166 * register to preserve the GPIO settings for LOMs. The GPIOs,
9167 * whether used as inputs or outputs, are set by boot code after
9168 * reset.
9169 */
9170 if (!tg3_flag(tp, IS_NIC)) {
9171 u32 gpio_mask;
9172
9173 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9174 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9175 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9176
9177 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9178 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9179 GRC_LCLCTRL_GPIO_OUTPUT3;
9180
9181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9182 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9183
9184 tp->grc_local_ctrl &= ~gpio_mask;
9185 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9186
9187 /* GPIO1 must be driven high for eeprom write protect */
9188 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9189 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9190 GRC_LCLCTRL_GPIO_OUTPUT1);
9191 }
9192 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9193 udelay(100);
9194
9195 if (tg3_flag(tp, USING_MSIX)) {
9196 val = tr32(MSGINT_MODE);
9197 val |= MSGINT_MODE_ENABLE;
9198 if (tp->irq_cnt > 1)
9199 val |= MSGINT_MODE_MULTIVEC_EN;
9200 if (!tg3_flag(tp, 1SHOT_MSI))
9201 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9202 tw32(MSGINT_MODE, val);
9203 }
9204
9205 if (!tg3_flag(tp, 5705_PLUS)) {
9206 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9207 udelay(40);
9208 }
9209
9210 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9211 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9212 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9213 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9214 WDMAC_MODE_LNGREAD_ENAB);
9215
9216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9217 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9218 if (tg3_flag(tp, TSO_CAPABLE) &&
9219 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9220 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9221 /* nothing */
9222 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9223 !tg3_flag(tp, IS_5788)) {
9224 val |= WDMAC_MODE_RX_ACCEL;
9225 }
9226 }
9227
9228 /* Enable host coalescing bug fix */
9229 if (tg3_flag(tp, 5755_PLUS))
9230 val |= WDMAC_MODE_STATUS_TAG_FIX;
9231
9232 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9233 val |= WDMAC_MODE_BURST_ALL_DATA;
9234
9235 tw32_f(WDMAC_MODE, val);
9236 udelay(40);
9237
9238 if (tg3_flag(tp, PCIX_MODE)) {
9239 u16 pcix_cmd;
9240
9241 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9242 &pcix_cmd);
9243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9244 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9245 pcix_cmd |= PCI_X_CMD_READ_2K;
9246 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9247 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9248 pcix_cmd |= PCI_X_CMD_READ_2K;
9249 }
9250 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9251 pcix_cmd);
9252 }
9253
9254 tw32_f(RDMAC_MODE, rdmac_mode);
9255 udelay(40);
9256
9257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9258 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9259 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9260 break;
9261 }
9262 if (i < TG3_NUM_RDMA_CHANNELS) {
9263 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9264 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9265 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9266 tg3_flag_set(tp, 5719_RDMA_BUG);
9267 }
9268 }
9269
9270 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9271 if (!tg3_flag(tp, 5705_PLUS))
9272 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9273
9274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9275 tw32(SNDDATAC_MODE,
9276 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9277 else
9278 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9279
9280 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9281 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9282 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9283 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9284 val |= RCVDBDI_MODE_LRG_RING_SZ;
9285 tw32(RCVDBDI_MODE, val);
9286 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9287 if (tg3_flag(tp, HW_TSO_1) ||
9288 tg3_flag(tp, HW_TSO_2) ||
9289 tg3_flag(tp, HW_TSO_3))
9290 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9291 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9292 if (tg3_flag(tp, ENABLE_TSS))
9293 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9294 tw32(SNDBDI_MODE, val);
9295 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9296
9297 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9298 err = tg3_load_5701_a0_firmware_fix(tp);
9299 if (err)
9300 return err;
9301 }
9302
9303 if (tg3_flag(tp, TSO_CAPABLE)) {
9304 err = tg3_load_tso_firmware(tp);
9305 if (err)
9306 return err;
9307 }
9308
9309 tp->tx_mode = TX_MODE_ENABLE;
9310
9311 if (tg3_flag(tp, 5755_PLUS) ||
9312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9313 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9314
9315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9316 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9317 tp->tx_mode &= ~val;
9318 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9319 }
9320
9321 tw32_f(MAC_TX_MODE, tp->tx_mode);
9322 udelay(100);
9323
9324 if (tg3_flag(tp, ENABLE_RSS)) {
9325 tg3_rss_write_indir_tbl(tp);
9326
9327 /* Set up the "secret" hash key. */
9328 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9329 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9330 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9331 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9332 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9333 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9334 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9335 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9336 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9337 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9338 }
9339
9340 tp->rx_mode = RX_MODE_ENABLE;
9341 if (tg3_flag(tp, 5755_PLUS))
9342 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9343
9344 if (tg3_flag(tp, ENABLE_RSS))
9345 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9346 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9347 RX_MODE_RSS_IPV6_HASH_EN |
9348 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9349 RX_MODE_RSS_IPV4_HASH_EN |
9350 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9351
9352 tw32_f(MAC_RX_MODE, tp->rx_mode);
9353 udelay(10);
9354
9355 tw32(MAC_LED_CTRL, tp->led_ctrl);
9356
9357 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9358 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9359 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9360 udelay(10);
9361 }
9362 tw32_f(MAC_RX_MODE, tp->rx_mode);
9363 udelay(10);
9364
9365 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9366 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9367 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9368 /* Set drive transmission level to 1.2V */
9369 /* only if the signal pre-emphasis bit is not set */
9370 val = tr32(MAC_SERDES_CFG);
9371 val &= 0xfffff000;
9372 val |= 0x880;
9373 tw32(MAC_SERDES_CFG, val);
9374 }
9375 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9376 tw32(MAC_SERDES_CFG, 0x616000);
9377 }
9378
9379 /* Prevent chip from dropping frames when flow control
9380 * is enabled.
9381 */
9382 if (tg3_flag(tp, 57765_CLASS))
9383 val = 1;
9384 else
9385 val = 2;
9386 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9387
9388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9389 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9390 /* Use hardware link auto-negotiation */
9391 tg3_flag_set(tp, HW_AUTONEG);
9392 }
9393
9394 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9396 u32 tmp;
9397
9398 tmp = tr32(SERDES_RX_CTRL);
9399 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9400 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9401 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9402 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9403 }
9404
9405 if (!tg3_flag(tp, USE_PHYLIB)) {
9406 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9407 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9408
9409 err = tg3_setup_phy(tp, 0);
9410 if (err)
9411 return err;
9412
9413 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9414 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9415 u32 tmp;
9416
9417 /* Clear CRC stats. */
9418 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9419 tg3_writephy(tp, MII_TG3_TEST1,
9420 tmp | MII_TG3_TEST1_CRC_EN);
9421 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9422 }
9423 }
9424 }
9425
9426 __tg3_set_rx_mode(tp->dev);
9427
9428 /* Initialize receive rules. */
9429 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9430 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9431 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9432 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9433
9434 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9435 limit = 8;
9436 else
9437 limit = 16;
9438 if (tg3_flag(tp, ENABLE_ASF))
9439 limit -= 4;
9440 switch (limit) { /* each case intentionally falls through */
9441 case 16:
9442 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9443 case 15:
9444 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9445 case 14:
9446 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9447 case 13:
9448 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9449 case 12:
9450 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9451 case 11:
9452 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9453 case 10:
9454 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9455 case 9:
9456 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9457 case 8:
9458 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9459 case 7:
9460 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9461 case 6:
9462 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9463 case 5:
9464 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9465 case 4:
9466 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9467 case 3:
9468 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9469 case 2:
9470 case 1:
9471
9472 default:
9473 break;
9474 }
9475
9476 if (tg3_flag(tp, ENABLE_APE))
9477 /* Write our heartbeat update interval to APE. */
9478 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9479 APE_HOST_HEARTBEAT_INT_DISABLE);
9480
9481 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9482
9483 return 0;
9484 }
9485
9486 /* Called at device open time to get the chip ready for
9487 * packet processing. Invoked with tp->lock held.
9488 */
9489 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9490 {
9491 tg3_switch_clocks(tp);
9492
9493 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9494
9495 return tg3_reset_hw(tp, reset_phy);
9496 }
9497
9498 #if IS_ENABLED(CONFIG_HWMON)
9499 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9500 {
9501 int i;
9502
9503 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9504 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9505
9506 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9507 off += len;
9508
9509 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9510 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9511 memset(ocir, 0, TG3_OCIR_LEN);
9512 }
9513 }
9514
9515 /* sysfs attributes for hwmon */
9516 static ssize_t tg3_show_temp(struct device *dev,
9517 struct device_attribute *devattr, char *buf)
9518 {
9519 struct pci_dev *pdev = to_pci_dev(dev);
9520 struct net_device *netdev = pci_get_drvdata(pdev);
9521 struct tg3 *tp = netdev_priv(netdev);
9522 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9523 u32 temperature;
9524
9525 spin_lock_bh(&tp->lock);
9526 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9527 sizeof(temperature));
9528 spin_unlock_bh(&tp->lock);
9529 return sprintf(buf, "%u\n", temperature);
9530 }
9531
9532
9533 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9534 TG3_TEMP_SENSOR_OFFSET);
9535 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9536 TG3_TEMP_CAUTION_OFFSET);
9537 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9538 TG3_TEMP_MAX_OFFSET);
9539
9540 static struct attribute *tg3_attributes[] = {
9541 &sensor_dev_attr_temp1_input.dev_attr.attr,
9542 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9543 &sensor_dev_attr_temp1_max.dev_attr.attr,
9544 NULL
9545 };
9546
9547 static const struct attribute_group tg3_group = {
9548 .attrs = tg3_attributes,
9549 };
9550
9551 #endif
9552
9553 static void tg3_hwmon_close(struct tg3 *tp)
9554 {
9555 #if IS_ENABLED(CONFIG_HWMON)
9556 if (tp->hwmon_dev) {
9557 hwmon_device_unregister(tp->hwmon_dev);
9558 tp->hwmon_dev = NULL;
9559 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9560 }
9561 #endif
9562 }
9563
9564 static void tg3_hwmon_open(struct tg3 *tp)
9565 {
9566 #if IS_ENABLED(CONFIG_HWMON)
9567 int i, err;
9568 u32 size = 0;
9569 struct pci_dev *pdev = tp->pdev;
9570 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9571
9572 tg3_sd_scan_scratchpad(tp, ocirs);
9573
9574 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9575 if (!ocirs[i].src_data_length)
9576 continue;
9577
9578 size += ocirs[i].src_hdr_length;
9579 size += ocirs[i].src_data_length;
9580 }
9581
9582 if (!size)
9583 return;
9584
9585 /* Register hwmon sysfs hooks */
9586 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9587 if (err) {
9588 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9589 return;
9590 }
9591
9592 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9593 if (IS_ERR(tp->hwmon_dev)) {
9594 tp->hwmon_dev = NULL;
9595 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9596 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9597 }
9598 #endif
9599 }
9600
9601
9602 #define TG3_STAT_ADD32(PSTAT, REG) \
9603 do { u32 __val = tr32(REG); \
9604 (PSTAT)->low += __val; \
9605 if ((PSTAT)->low < __val) \
9606 (PSTAT)->high += 1; \
9607 } while (0)
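
/* Example of the carry handling above: if (PSTAT)->low is 0xffffff00 and
 * the hardware register reads back 0x200, low wraps to 0x100, the
 * (PSTAT)->low < __val test fires, and high is incremented -- extending
 * the 32-bit hardware counter into a 64-bit software one.
 */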
9608
9609 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9610 {
9611 struct tg3_hw_stats *sp = tp->hw_stats;
9612
9613 if (!netif_carrier_ok(tp->dev))
9614 return;
9615
9616 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9617 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9618 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9619 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9620 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9621 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9622 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9623 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9624 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9625 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9626 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9627 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9628 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9629 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9630 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9631 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9632 u32 val;
9633
9634 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9635 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9636 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9637 tg3_flag_clear(tp, 5719_RDMA_BUG);
9638 }
9639
9640 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9641 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9642 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9643 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9644 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9645 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9646 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9647 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9648 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9649 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9650 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9651 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9652 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9653 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9654
9655 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9656 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9657 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9658 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9659 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9660 } else {
9661 u32 val = tr32(HOSTCC_FLOW_ATTN);
9662 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9663 if (val) {
9664 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9665 sp->rx_discards.low += val;
9666 if (sp->rx_discards.low < val)
9667 sp->rx_discards.high += 1;
9668 }
9669 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9670 }
9671 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9672 }
9673
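/* Heuristic recovery for lost MSIs: if a vector has work pending but
 * its rx/tx consumer indices have not moved for two consecutive timer
 * ticks, assume the interrupt was missed and invoke the handler by
 * hand.
 */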
9674 static void tg3_chk_missed_msi(struct tg3 *tp)
9675 {
9676 u32 i;
9677
9678 for (i = 0; i < tp->irq_cnt; i++) {
9679 struct tg3_napi *tnapi = &tp->napi[i];
9680
9681 if (tg3_has_work(tnapi)) {
9682 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9683 tnapi->last_tx_cons == tnapi->tx_cons) {
9684 if (tnapi->chk_msi_cnt < 1) {
9685 tnapi->chk_msi_cnt++;
9686 return;
9687 }
9688 tg3_msi(0, tnapi);
9689 }
9690 }
9691 tnapi->chk_msi_cnt = 0;
9692 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9693 tnapi->last_tx_cons = tnapi->tx_cons;
9694 }
9695 }
9696
9697 static void tg3_timer(unsigned long __opaque)
9698 {
9699 struct tg3 *tp = (struct tg3 *) __opaque;
9700
9701 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9702 goto restart_timer;
9703
9704 spin_lock(&tp->lock);
9705
9706 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9707 tg3_flag(tp, 57765_CLASS))
9708 tg3_chk_missed_msi(tp);
9709
9710 if (!tg3_flag(tp, TAGGED_STATUS)) {
9711 /* All of this garbage is because, when using non-tagged
9712 * IRQ status, the mailbox/status_block protocol the chip
9713 * uses with the CPU is race-prone.
9714 */
9715 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9716 tw32(GRC_LOCAL_CTRL,
9717 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9718 } else {
9719 tw32(HOSTCC_MODE, tp->coalesce_mode |
9720 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9721 }
9722
9723 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9724 spin_unlock(&tp->lock);
9725 tg3_reset_task_schedule(tp);
9726 goto restart_timer;
9727 }
9728 }
9729
9730 /* This part only runs once per second. */
9731 if (!--tp->timer_counter) {
9732 if (tg3_flag(tp, 5705_PLUS))
9733 tg3_periodic_fetch_stats(tp);
9734
9735 if (tp->setlpicnt && !--tp->setlpicnt)
9736 tg3_phy_eee_enable(tp);
9737
9738 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9739 u32 mac_stat;
9740 int phy_event;
9741
9742 mac_stat = tr32(MAC_STATUS);
9743
9744 phy_event = 0;
9745 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9746 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9747 phy_event = 1;
9748 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9749 phy_event = 1;
9750
9751 if (phy_event)
9752 tg3_setup_phy(tp, 0);
9753 } else if (tg3_flag(tp, POLL_SERDES)) {
9754 u32 mac_stat = tr32(MAC_STATUS);
9755 int need_setup = 0;
9756
9757 if (netif_carrier_ok(tp->dev) &&
9758 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9759 need_setup = 1;
9760 }
9761 if (!netif_carrier_ok(tp->dev) &&
9762 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9763 MAC_STATUS_SIGNAL_DET))) {
9764 need_setup = 1;
9765 }
9766 if (need_setup) {
9767 if (!tp->serdes_counter) {
9768 tw32_f(MAC_MODE,
9769 (tp->mac_mode &
9770 ~MAC_MODE_PORT_MODE_MASK));
9771 udelay(40);
9772 tw32_f(MAC_MODE, tp->mac_mode);
9773 udelay(40);
9774 }
9775 tg3_setup_phy(tp, 0);
9776 }
9777 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9778 tg3_flag(tp, 5780_CLASS)) {
9779 tg3_serdes_parallel_detect(tp);
9780 }
9781
9782 tp->timer_counter = tp->timer_multiplier;
9783 }
9784
9785 /* Heartbeat is only sent once every 2 seconds.
9786 *
9787 * The heartbeat is to tell the ASF firmware that the host
9788 * driver is still alive. In the event that the OS crashes,
9789 * ASF needs to reset the hardware to free up the FIFO space
9790 * that may be filled with rx packets destined for the host.
9791 * If the FIFO is full, ASF will no longer function properly.
9792 *
9793 * Unintended resets have been reported on real-time kernels
9794 * where the timer doesn't run on time. Netpoll will also have
9795 * the same problem.
9796 *
9797 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9798 * to check the ring condition when the heartbeat is expiring
9799 * before doing the reset. This will prevent most unintended
9800 * resets.
9801 */
9802 if (!--tp->asf_counter) {
9803 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9804 tg3_wait_for_event_ack(tp);
9805
9806 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9807 FWCMD_NICDRV_ALIVE3);
9808 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9809 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9810 TG3_FW_UPDATE_TIMEOUT_SEC);
9811
9812 tg3_generate_fw_event(tp);
9813 }
9814 tp->asf_counter = tp->asf_multiplier;
9815 }
9816
9817 spin_unlock(&tp->lock);
9818
9819 restart_timer:
9820 tp->timer.expires = jiffies + tp->timer_offset;
9821 add_timer(&tp->timer);
9822 }
9823
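/* Chips with tagged status (other than the 5717 and 57765 classes)
 * can get by with a 1 Hz service timer; everything else is polled at
 * 10 Hz so the missed-MSI and status checks in tg3_timer() run often
 * enough.  timer_multiplier converts ticks back into the
 * once-per-second work, and asf_multiplier into the heartbeat period.
 */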
9824 static void __devinit tg3_timer_init(struct tg3 *tp)
9825 {
9826 if (tg3_flag(tp, TAGGED_STATUS) &&
9827 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9828 !tg3_flag(tp, 57765_CLASS))
9829 tp->timer_offset = HZ;
9830 else
9831 tp->timer_offset = HZ / 10;
9832
9833 BUG_ON(tp->timer_offset > HZ);
9834
9835 tp->timer_multiplier = (HZ / tp->timer_offset);
9836 tp->asf_multiplier = (HZ / tp->timer_offset) *
9837 TG3_FW_UPDATE_FREQ_SEC;
9838
9839 init_timer(&tp->timer);
9840 tp->timer.data = (unsigned long) tp;
9841 tp->timer.function = tg3_timer;
9842 }
9843
9844 static void tg3_timer_start(struct tg3 *tp)
9845 {
9846 tp->asf_counter = tp->asf_multiplier;
9847 tp->timer_counter = tp->timer_multiplier;
9848
9849 tp->timer.expires = jiffies + tp->timer_offset;
9850 add_timer(&tp->timer);
9851 }
9852
9853 static void tg3_timer_stop(struct tg3 *tp)
9854 {
9855 del_timer_sync(&tp->timer);
9856 }
9857
9858 /* Restart hardware after configuration changes, self-test, etc.
9859 * Invoked with tp->lock held.
9860 */
9861 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9862 __releases(tp->lock)
9863 __acquires(tp->lock)
9864 {
9865 int err;
9866
9867 err = tg3_init_hw(tp, reset_phy);
9868 if (err) {
9869 netdev_err(tp->dev,
9870 "Failed to re-initialize device, aborting\n");
9871 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9872 tg3_full_unlock(tp);
9873 tg3_timer_stop(tp);
9874 tp->irq_sync = 0;
9875 tg3_napi_enable(tp);
9876 dev_close(tp->dev);
9877 tg3_full_lock(tp, 0);
9878 }
9879 return err;
9880 }
9881
9882 static void tg3_reset_task(struct work_struct *work)
9883 {
9884 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9885 int err;
9886
9887 tg3_full_lock(tp, 0);
9888
9889 if (!netif_running(tp->dev)) {
9890 tg3_flag_clear(tp, RESET_TASK_PENDING);
9891 tg3_full_unlock(tp);
9892 return;
9893 }
9894
9895 tg3_full_unlock(tp);
9896
9897 tg3_phy_stop(tp);
9898
9899 tg3_netif_stop(tp);
9900
9901 tg3_full_lock(tp, 1);
9902
9903 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9904 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9905 tp->write32_rx_mbox = tg3_write_flush_reg32;
9906 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9907 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9908 }
9909
9910 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9911 err = tg3_init_hw(tp, 1);
9912 if (err)
9913 goto out;
9914
9915 tg3_netif_start(tp);
9916
9917 out:
9918 tg3_full_unlock(tp);
9919
9920 if (!err)
9921 tg3_phy_start(tp);
9922
9923 tg3_flag_clear(tp, RESET_TASK_PENDING);
9924 }
9925
9926 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9927 {
9928 irq_handler_t fn;
9929 unsigned long flags;
9930 char *name;
9931 struct tg3_napi *tnapi = &tp->napi[irq_num];
9932
9933 if (tp->irq_cnt == 1)
9934 name = tp->dev->name;
9935 else {
9936 name = &tnapi->irq_lbl[0];
9937 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9938 name[IFNAMSIZ-1] = 0;
9939 }
9940
9941 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9942 fn = tg3_msi;
9943 if (tg3_flag(tp, 1SHOT_MSI))
9944 fn = tg3_msi_1shot;
9945 flags = 0;
9946 } else {
9947 fn = tg3_interrupt;
9948 if (tg3_flag(tp, TAGGED_STATUS))
9949 fn = tg3_interrupt_tagged;
9950 flags = IRQF_SHARED;
9951 }
9952
9953 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9954 }
9955
9956 static int tg3_test_interrupt(struct tg3 *tp)
9957 {
9958 struct tg3_napi *tnapi = &tp->napi[0];
9959 struct net_device *dev = tp->dev;
9960 int err, i, intr_ok = 0;
9961 u32 val;
9962
9963 if (!netif_running(dev))
9964 return -ENODEV;
9965
9966 tg3_disable_ints(tp);
9967
9968 free_irq(tnapi->irq_vec, tnapi);
9969
9970 /*
9971 * Turn off MSI one shot mode. Otherwise this test has no
9972 * observable way to know whether the interrupt was delivered.
9973 */
9974 if (tg3_flag(tp, 57765_PLUS)) {
9975 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9976 tw32(MSGINT_MODE, val);
9977 }
9978
9979 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9980 IRQF_SHARED, dev->name, tnapi);
9981 if (err)
9982 return err;
9983
9984 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9985 tg3_enable_ints(tp);
9986
9987 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9988 tnapi->coal_now);
9989
9990 for (i = 0; i < 5; i++) {
9991 u32 int_mbox, misc_host_ctrl;
9992
9993 int_mbox = tr32_mailbox(tnapi->int_mbox);
9994 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9995
9996 if ((int_mbox != 0) ||
9997 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9998 intr_ok = 1;
9999 break;
10000 }
10001
10002 if (tg3_flag(tp, 57765_PLUS) &&
10003 tnapi->hw_status->status_tag != tnapi->last_tag)
10004 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10005
10006 msleep(10);
10007 }
10008
10009 tg3_disable_ints(tp);
10010
10011 free_irq(tnapi->irq_vec, tnapi);
10012
10013 err = tg3_request_irq(tp, 0);
10014
10015 if (err)
10016 return err;
10017
10018 if (intr_ok) {
10019 /* Reenable MSI one shot mode. */
10020 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10021 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10022 tw32(MSGINT_MODE, val);
10023 }
10024 return 0;
10025 }
10026
10027 return -EIO;
10028 }
10029
10030 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
10031 * successfully restored.
10032 */
10033 static int tg3_test_msi(struct tg3 *tp)
10034 {
10035 int err;
10036 u16 pci_cmd;
10037
10038 if (!tg3_flag(tp, USING_MSI))
10039 return 0;
10040
10041 /* Turn off SERR reporting in case MSI terminates with Master
10042 * Abort.
10043 */
10044 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10045 pci_write_config_word(tp->pdev, PCI_COMMAND,
10046 pci_cmd & ~PCI_COMMAND_SERR);
10047
10048 err = tg3_test_interrupt(tp);
10049
10050 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10051
10052 if (!err)
10053 return 0;
10054
10055 /* other failures */
10056 if (err != -EIO)
10057 return err;
10058
10059 /* MSI test failed, go back to INTx mode */
10060 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10061 "to INTx mode. Please report this failure to the PCI "
10062 "maintainer and include system chipset information\n");
10063
10064 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10065
10066 pci_disable_msi(tp->pdev);
10067
10068 tg3_flag_clear(tp, USING_MSI);
10069 tp->napi[0].irq_vec = tp->pdev->irq;
10070
10071 err = tg3_request_irq(tp, 0);
10072 if (err)
10073 return err;
10074
10075 /* Need to reset the chip because the MSI cycle may have terminated
10076 * with Master Abort.
10077 */
10078 tg3_full_lock(tp, 1);
10079
10080 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10081 err = tg3_init_hw(tp, 1);
10082
10083 tg3_full_unlock(tp);
10084
10085 if (err)
10086 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10087
10088 return err;
10089 }
10090
10091 static int tg3_request_firmware(struct tg3 *tp)
10092 {
10093 const __be32 *fw_data;
10094
10095 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10096 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10097 tp->fw_needed);
10098 return -ENOENT;
10099 }
10100
10101 fw_data = (void *)tp->fw->data;
10102
10103 /* Firmware blob starts with version numbers, followed by
10104 * start address and _full_ length including BSS sections
10105 * (which must be longer than the actual data, of course).
10106 */
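/* Implied on-disk layout (12-byte header, hence the "size - 12" check
 * below; the field names are illustrative, not taken from a spec):
 *
 *	fw_data[0]:    firmware version
 *	fw_data[1]:    load (start) address
 *	fw_data[2]:    full image length, including BSS
 *	fw_data[3]...: code/data
 */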
10107
10108 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10109 if (tp->fw_len < (tp->fw->size - 12)) {
10110 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10111 tp->fw_len, tp->fw_needed);
10112 release_firmware(tp->fw);
10113 tp->fw = NULL;
10114 return -EINVAL;
10115 }
10116
10117 /* We no longer need firmware; we have it. */
10118 tp->fw_needed = NULL;
10119 return 0;
10120 }
10121
10122 static bool tg3_enable_msix(struct tg3 *tp)
10123 {
10124 int i, rc;
10125 struct msix_entry msix_ent[tp->irq_max];
10126
10127 tp->irq_cnt = netif_get_num_default_rss_queues();
10128 if (tp->irq_cnt > 1) {
10129 /* We want as many rx rings enabled as there are cpus.
10130 * In multiqueue MSI-X mode, the first MSI-X vector
10131 * only deals with link interrupts, etc, so we add
10132 * one to the number of vectors we are requesting.
10133 */
10134 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
10135 }
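/* e.g. on a 4-CPU system with irq_max >= 5 this requests 5 vectors:
 * vector 0 for link interrupts plus one per rx ring.
 */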
10136
10137 for (i = 0; i < tp->irq_max; i++) {
10138 msix_ent[i].entry = i;
10139 msix_ent[i].vector = 0;
10140 }
10141
10142 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10143 if (rc < 0) {
10144 return false;
10145 } else if (rc != 0) {
10146 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10147 return false;
10148 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10149 tp->irq_cnt, rc);
10150 tp->irq_cnt = rc;
10151 }
10152
10153 for (i = 0; i < tp->irq_max; i++)
10154 tp->napi[i].irq_vec = msix_ent[i].vector;
10155
10156 netif_set_real_num_tx_queues(tp->dev, 1);
10157 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
10158 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
10159 pci_disable_msix(tp->pdev);
10160 return false;
10161 }
10162
10163 if (tp->irq_cnt > 1) {
10164 tg3_flag_set(tp, ENABLE_RSS);
10165
10166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
10167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
10168 tg3_flag_set(tp, ENABLE_TSS);
10169 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
10170 }
10171 }
10172
10173 return true;
10174 }
10175
10176 static void tg3_ints_init(struct tg3 *tp)
10177 {
10178 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10179 !tg3_flag(tp, TAGGED_STATUS)) {
10180 /* All MSI supporting chips should support tagged
10181 * status. Assert that this is the case.
10182 */
10183 netdev_warn(tp->dev,
10184 "MSI without TAGGED_STATUS? Not using MSI\n");
10185 goto defcfg;
10186 }
10187
10188 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10189 tg3_flag_set(tp, USING_MSIX);
10190 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10191 tg3_flag_set(tp, USING_MSI);
10192
10193 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10194 u32 msi_mode = tr32(MSGINT_MODE);
10195 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10196 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10197 if (!tg3_flag(tp, 1SHOT_MSI))
10198 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10199 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10200 }
10201 defcfg:
10202 if (!tg3_flag(tp, USING_MSIX)) {
10203 tp->irq_cnt = 1;
10204 tp->napi[0].irq_vec = tp->pdev->irq;
10205 netif_set_real_num_tx_queues(tp->dev, 1);
10206 netif_set_real_num_rx_queues(tp->dev, 1);
10207 }
10208 }
10209
10210 static void tg3_ints_fini(struct tg3 *tp)
10211 {
10212 if (tg3_flag(tp, USING_MSIX))
10213 pci_disable_msix(tp->pdev);
10214 else if (tg3_flag(tp, USING_MSI))
10215 pci_disable_msi(tp->pdev);
10216 tg3_flag_clear(tp, USING_MSI);
10217 tg3_flag_clear(tp, USING_MSIX);
10218 tg3_flag_clear(tp, ENABLE_RSS);
10219 tg3_flag_clear(tp, ENABLE_TSS);
10220 }
10221
10222 static int tg3_open(struct net_device *dev)
10223 {
10224 struct tg3 *tp = netdev_priv(dev);
10225 int i, err;
10226
10227 if (tp->fw_needed) {
10228 err = tg3_request_firmware(tp);
10229 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10230 if (err)
10231 return err;
10232 } else if (err) {
10233 netdev_warn(tp->dev, "TSO capability disabled\n");
10234 tg3_flag_clear(tp, TSO_CAPABLE);
10235 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10236 netdev_notice(tp->dev, "TSO capability restored\n");
10237 tg3_flag_set(tp, TSO_CAPABLE);
10238 }
10239 }
10240
10241 netif_carrier_off(tp->dev);
10242
10243 err = tg3_power_up(tp);
10244 if (err)
10245 return err;
10246
10247 tg3_full_lock(tp, 0);
10248
10249 tg3_disable_ints(tp);
10250 tg3_flag_clear(tp, INIT_COMPLETE);
10251
10252 tg3_full_unlock(tp);
10253
10254 /*
10255 * Setup interrupts first so we know how
10256 * many NAPI resources to allocate
10257 */
10258 tg3_ints_init(tp);
10259
10260 tg3_rss_check_indir_tbl(tp);
10261
10262 /* The placement of this call is tied
10263 * to the setup and use of Host TX descriptors.
10264 */
10265 err = tg3_alloc_consistent(tp);
10266 if (err)
10267 goto err_out1;
10268
10269 tg3_napi_init(tp);
10270
10271 tg3_napi_enable(tp);
10272
10273 for (i = 0; i < tp->irq_cnt; i++) {
10274 struct tg3_napi *tnapi = &tp->napi[i];
10275 err = tg3_request_irq(tp, i);
10276 if (err) {
10277 for (i--; i >= 0; i--) {
10278 tnapi = &tp->napi[i];
10279 free_irq(tnapi->irq_vec, tnapi);
10280 }
10281 goto err_out2;
10282 }
10283 }
10284
10285 tg3_full_lock(tp, 0);
10286
10287 err = tg3_init_hw(tp, 1);
10288 if (err) {
10289 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10290 tg3_free_rings(tp);
10291 }
10292
10293 tg3_full_unlock(tp);
10294
10295 if (err)
10296 goto err_out3;
10297
10298 if (tg3_flag(tp, USING_MSI)) {
10299 err = tg3_test_msi(tp);
10300
10301 if (err) {
10302 tg3_full_lock(tp, 0);
10303 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10304 tg3_free_rings(tp);
10305 tg3_full_unlock(tp);
10306
10307 goto err_out2;
10308 }
10309
10310 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10311 u32 val = tr32(PCIE_TRANSACTION_CFG);
10312
10313 tw32(PCIE_TRANSACTION_CFG,
10314 val | PCIE_TRANS_CFG_1SHOT_MSI);
10315 }
10316 }
10317
10318 tg3_phy_start(tp);
10319
10320 tg3_hwmon_open(tp);
10321
10322 tg3_full_lock(tp, 0);
10323
10324 tg3_timer_start(tp);
10325 tg3_flag_set(tp, INIT_COMPLETE);
10326 tg3_enable_ints(tp);
10327
10328 tg3_full_unlock(tp);
10329
10330 netif_tx_start_all_queues(dev);
10331
10332 /*
10333 * Reset the loopback feature if it was turned on while the device was
10334 * down; make sure that it is installed properly now.
10335 */
10336 if (dev->features & NETIF_F_LOOPBACK)
10337 tg3_set_loopback(dev, dev->features);
10338
10339 return 0;
10340
10341 err_out3:
10342 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10343 struct tg3_napi *tnapi = &tp->napi[i];
10344 free_irq(tnapi->irq_vec, tnapi);
10345 }
10346
10347 err_out2:
10348 tg3_napi_disable(tp);
10349 tg3_napi_fini(tp);
10350 tg3_free_consistent(tp);
10351
10352 err_out1:
10353 tg3_ints_fini(tp);
10354 tg3_frob_aux_power(tp, false);
10355 pci_set_power_state(tp->pdev, PCI_D3hot);
10356 return err;
10357 }
10358
10359 static int tg3_close(struct net_device *dev)
10360 {
10361 int i;
10362 struct tg3 *tp = netdev_priv(dev);
10363
10364 tg3_napi_disable(tp);
10365 tg3_reset_task_cancel(tp);
10366
10367 netif_tx_stop_all_queues(dev);
10368
10369 tg3_timer_stop(tp);
10370
10371 tg3_hwmon_close(tp);
10372
10373 tg3_phy_stop(tp);
10374
10375 tg3_full_lock(tp, 1);
10376
10377 tg3_disable_ints(tp);
10378
10379 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10380 tg3_free_rings(tp);
10381 tg3_flag_clear(tp, INIT_COMPLETE);
10382
10383 tg3_full_unlock(tp);
10384
10385 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10386 struct tg3_napi *tnapi = &tp->napi[i];
10387 free_irq(tnapi->irq_vec, tnapi);
10388 }
10389
10390 tg3_ints_fini(tp);
10391
10392 /* Clear stats across close / open calls */
10393 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10394 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10395
10396 tg3_napi_fini(tp);
10397
10398 tg3_free_consistent(tp);
10399
10400 tg3_power_down(tp);
10401
10402 netif_carrier_off(tp->dev);
10403
10404 return 0;
10405 }
10406
10407 static inline u64 get_stat64(tg3_stat64_t *val)
10408 {
10409 return ((u64)val->high << 32) | ((u64)val->low);
10410 }
10411
10412 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10413 {
10414 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10415
10416 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10417 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10418 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10419 u32 val;
10420
10421 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10422 tg3_writephy(tp, MII_TG3_TEST1,
10423 val | MII_TG3_TEST1_CRC_EN);
10424 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10425 } else
10426 val = 0;
10427
10428 tp->phy_crc_errors += val;
10429
10430 return tp->phy_crc_errors;
10431 }
10432
10433 return get_stat64(&hw_stats->rx_fcs_errors);
10434 }
10435
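/* Each ethtool stat below is the running total carried in estats_prev
 * (presumably snapshotted there when the chip is halted, and zeroed by
 * tg3_close() so the totals restart across close/open) plus the
 * current 64-bit hardware counter.
 */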
10436 #define ESTAT_ADD(member) \
10437 estats->member = old_estats->member + \
10438 get_stat64(&hw_stats->member)
10439
10440 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10441 {
10442 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10443 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10444
10445 ESTAT_ADD(rx_octets);
10446 ESTAT_ADD(rx_fragments);
10447 ESTAT_ADD(rx_ucast_packets);
10448 ESTAT_ADD(rx_mcast_packets);
10449 ESTAT_ADD(rx_bcast_packets);
10450 ESTAT_ADD(rx_fcs_errors);
10451 ESTAT_ADD(rx_align_errors);
10452 ESTAT_ADD(rx_xon_pause_rcvd);
10453 ESTAT_ADD(rx_xoff_pause_rcvd);
10454 ESTAT_ADD(rx_mac_ctrl_rcvd);
10455 ESTAT_ADD(rx_xoff_entered);
10456 ESTAT_ADD(rx_frame_too_long_errors);
10457 ESTAT_ADD(rx_jabbers);
10458 ESTAT_ADD(rx_undersize_packets);
10459 ESTAT_ADD(rx_in_length_errors);
10460 ESTAT_ADD(rx_out_length_errors);
10461 ESTAT_ADD(rx_64_or_less_octet_packets);
10462 ESTAT_ADD(rx_65_to_127_octet_packets);
10463 ESTAT_ADD(rx_128_to_255_octet_packets);
10464 ESTAT_ADD(rx_256_to_511_octet_packets);
10465 ESTAT_ADD(rx_512_to_1023_octet_packets);
10466 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10467 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10468 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10469 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10470 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10471
10472 ESTAT_ADD(tx_octets);
10473 ESTAT_ADD(tx_collisions);
10474 ESTAT_ADD(tx_xon_sent);
10475 ESTAT_ADD(tx_xoff_sent);
10476 ESTAT_ADD(tx_flow_control);
10477 ESTAT_ADD(tx_mac_errors);
10478 ESTAT_ADD(tx_single_collisions);
10479 ESTAT_ADD(tx_mult_collisions);
10480 ESTAT_ADD(tx_deferred);
10481 ESTAT_ADD(tx_excessive_collisions);
10482 ESTAT_ADD(tx_late_collisions);
10483 ESTAT_ADD(tx_collide_2times);
10484 ESTAT_ADD(tx_collide_3times);
10485 ESTAT_ADD(tx_collide_4times);
10486 ESTAT_ADD(tx_collide_5times);
10487 ESTAT_ADD(tx_collide_6times);
10488 ESTAT_ADD(tx_collide_7times);
10489 ESTAT_ADD(tx_collide_8times);
10490 ESTAT_ADD(tx_collide_9times);
10491 ESTAT_ADD(tx_collide_10times);
10492 ESTAT_ADD(tx_collide_11times);
10493 ESTAT_ADD(tx_collide_12times);
10494 ESTAT_ADD(tx_collide_13times);
10495 ESTAT_ADD(tx_collide_14times);
10496 ESTAT_ADD(tx_collide_15times);
10497 ESTAT_ADD(tx_ucast_packets);
10498 ESTAT_ADD(tx_mcast_packets);
10499 ESTAT_ADD(tx_bcast_packets);
10500 ESTAT_ADD(tx_carrier_sense_errors);
10501 ESTAT_ADD(tx_discards);
10502 ESTAT_ADD(tx_errors);
10503
10504 ESTAT_ADD(dma_writeq_full);
10505 ESTAT_ADD(dma_write_prioq_full);
10506 ESTAT_ADD(rxbds_empty);
10507 ESTAT_ADD(rx_discards);
10508 ESTAT_ADD(rx_errors);
10509 ESTAT_ADD(rx_threshold_hit);
10510
10511 ESTAT_ADD(dma_readq_full);
10512 ESTAT_ADD(dma_read_prioq_full);
10513 ESTAT_ADD(tx_comp_queue_full);
10514
10515 ESTAT_ADD(ring_set_send_prod_index);
10516 ESTAT_ADD(ring_status_update);
10517 ESTAT_ADD(nic_irqs);
10518 ESTAT_ADD(nic_avoided_irqs);
10519 ESTAT_ADD(nic_tx_threshold_hit);
10520
10521 ESTAT_ADD(mbuf_lwm_thresh_hit);
10522 }
10523
10524 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10525 {
10526 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10527 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10528
10529 stats->rx_packets = old_stats->rx_packets +
10530 get_stat64(&hw_stats->rx_ucast_packets) +
10531 get_stat64(&hw_stats->rx_mcast_packets) +
10532 get_stat64(&hw_stats->rx_bcast_packets);
10533
10534 stats->tx_packets = old_stats->tx_packets +
10535 get_stat64(&hw_stats->tx_ucast_packets) +
10536 get_stat64(&hw_stats->tx_mcast_packets) +
10537 get_stat64(&hw_stats->tx_bcast_packets);
10538
10539 stats->rx_bytes = old_stats->rx_bytes +
10540 get_stat64(&hw_stats->rx_octets);
10541 stats->tx_bytes = old_stats->tx_bytes +
10542 get_stat64(&hw_stats->tx_octets);
10543
10544 stats->rx_errors = old_stats->rx_errors +
10545 get_stat64(&hw_stats->rx_errors);
10546 stats->tx_errors = old_stats->tx_errors +
10547 get_stat64(&hw_stats->tx_errors) +
10548 get_stat64(&hw_stats->tx_mac_errors) +
10549 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10550 get_stat64(&hw_stats->tx_discards);
10551
10552 stats->multicast = old_stats->multicast +
10553 get_stat64(&hw_stats->rx_mcast_packets);
10554 stats->collisions = old_stats->collisions +
10555 get_stat64(&hw_stats->tx_collisions);
10556
10557 stats->rx_length_errors = old_stats->rx_length_errors +
10558 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10559 get_stat64(&hw_stats->rx_undersize_packets);
10560
10561 stats->rx_over_errors = old_stats->rx_over_errors +
10562 get_stat64(&hw_stats->rxbds_empty);
10563 stats->rx_frame_errors = old_stats->rx_frame_errors +
10564 get_stat64(&hw_stats->rx_align_errors);
10565 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10566 get_stat64(&hw_stats->tx_discards);
10567 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10568 get_stat64(&hw_stats->tx_carrier_sense_errors);
10569
10570 stats->rx_crc_errors = old_stats->rx_crc_errors +
10571 tg3_calc_crc_errors(tp);
10572
10573 stats->rx_missed_errors = old_stats->rx_missed_errors +
10574 get_stat64(&hw_stats->rx_discards);
10575
10576 stats->rx_dropped = tp->rx_dropped;
10577 stats->tx_dropped = tp->tx_dropped;
10578 }
10579
10580 static int tg3_get_regs_len(struct net_device *dev)
10581 {
10582 return TG3_REG_BLK_SIZE;
10583 }
10584
10585 static void tg3_get_regs(struct net_device *dev,
10586 struct ethtool_regs *regs, void *_p)
10587 {
10588 struct tg3 *tp = netdev_priv(dev);
10589
10590 regs->version = 0;
10591
10592 memset(_p, 0, TG3_REG_BLK_SIZE);
10593
10594 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10595 return;
10596
10597 tg3_full_lock(tp, 0);
10598
10599 tg3_dump_legacy_regs(tp, (u32 *)_p);
10600
10601 tg3_full_unlock(tp);
10602 }
10603
10604 static int tg3_get_eeprom_len(struct net_device *dev)
10605 {
10606 struct tg3 *tp = netdev_priv(dev);
10607
10608 return tp->nvram_size;
10609 }
10610
10611 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10612 {
10613 struct tg3 *tp = netdev_priv(dev);
10614 int ret;
10615 u8 *pd;
10616 u32 i, offset, len, b_offset, b_count;
10617 __be32 val;
10618
10619 if (tg3_flag(tp, NO_NVRAM))
10620 return -EINVAL;
10621
10622 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10623 return -EAGAIN;
10624
10625 offset = eeprom->offset;
10626 len = eeprom->len;
10627 eeprom->len = 0;
10628
10629 eeprom->magic = TG3_EEPROM_MAGIC;
10630
10631 if (offset & 3) {
10632 /* adjustments to start on required 4 byte boundary */
10633 b_offset = offset & 3;
10634 b_count = 4 - b_offset;
10635 if (b_count > len) {
10636 /* i.e. offset=1 len=2 */
10637 b_count = len;
10638 }
10639 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10640 if (ret)
10641 return ret;
10642 memcpy(data, ((char *)&val) + b_offset, b_count);
10643 len -= b_count;
10644 offset += b_count;
10645 eeprom->len += b_count;
10646 }
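/* Worked example: offset = 5, len = 10 -> the word at 4 is read and
 * bytes 5-7 copied (b_count = 3); the loop below then reads the
 * aligned word at 8, and the tail code picks up bytes 12-14 from the
 * word at 12, for 10 bytes total.
 */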
10647
10648 /* read bytes up to the last 4 byte boundary */
10649 pd = &data[eeprom->len];
10650 for (i = 0; i < (len - (len & 3)); i += 4) {
10651 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10652 if (ret) {
10653 eeprom->len += i;
10654 return ret;
10655 }
10656 memcpy(pd + i, &val, 4);
10657 }
10658 eeprom->len += i;
10659
10660 if (len & 3) {
10661 /* read last bytes not ending on 4 byte boundary */
10662 pd = &data[eeprom->len];
10663 b_count = len & 3;
10664 b_offset = offset + len - b_count;
10665 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10666 if (ret)
10667 return ret;
10668 memcpy(pd, &val, b_count);
10669 eeprom->len += b_count;
10670 }
10671 return 0;
10672 }
10673
10674 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10675 {
10676 struct tg3 *tp = netdev_priv(dev);
10677 int ret;
10678 u32 offset, len, b_offset, odd_len;
10679 u8 *buf;
10680 __be32 start, end;
10681
10682 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10683 return -EAGAIN;
10684
10685 if (tg3_flag(tp, NO_NVRAM) ||
10686 eeprom->magic != TG3_EEPROM_MAGIC)
10687 return -EINVAL;
10688
10689 offset = eeprom->offset;
10690 len = eeprom->len;
10691
10692 if ((b_offset = (offset & 3))) {
10693 /* adjustments to start on required 4 byte boundary */
10694 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10695 if (ret)
10696 return ret;
10697 len += b_offset;
10698 offset &= ~3;
10699 if (len < 4)
10700 len = 4;
10701 }
10702
10703 odd_len = 0;
10704 if (len & 3) {
10705 /* adjustments to end on required 4 byte boundary */
10706 odd_len = 1;
10707 len = (len + 3) & ~3;
10708 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10709 if (ret)
10710 return ret;
10711 }
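/* Worked example: offset = 6, len = 5 -> the head word at 4 and the
 * tail word at 8 are read, the write is widened to offset 4, len 8,
 * and the bounce buffer below splices the 5 user bytes (offsets 6-10)
 * between the preserved head bytes (4-5) and tail byte (11).
 */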
10712
10713 buf = data;
10714 if (b_offset || odd_len) {
10715 buf = kmalloc(len, GFP_KERNEL);
10716 if (!buf)
10717 return -ENOMEM;
10718 if (b_offset)
10719 memcpy(buf, &start, 4);
10720 if (odd_len)
10721 memcpy(buf+len-4, &end, 4);
10722 memcpy(buf + b_offset, data, eeprom->len);
10723 }
10724
10725 ret = tg3_nvram_write_block(tp, offset, len, buf);
10726
10727 if (buf != data)
10728 kfree(buf);
10729
10730 return ret;
10731 }
10732
10733 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10734 {
10735 struct tg3 *tp = netdev_priv(dev);
10736
10737 if (tg3_flag(tp, USE_PHYLIB)) {
10738 struct phy_device *phydev;
10739 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10740 return -EAGAIN;
10741 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10742 return phy_ethtool_gset(phydev, cmd);
10743 }
10744
10745 cmd->supported = (SUPPORTED_Autoneg);
10746
10747 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10748 cmd->supported |= (SUPPORTED_1000baseT_Half |
10749 SUPPORTED_1000baseT_Full);
10750
10751 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10752 cmd->supported |= (SUPPORTED_100baseT_Half |
10753 SUPPORTED_100baseT_Full |
10754 SUPPORTED_10baseT_Half |
10755 SUPPORTED_10baseT_Full |
10756 SUPPORTED_TP);
10757 cmd->port = PORT_TP;
10758 } else {
10759 cmd->supported |= SUPPORTED_FIBRE;
10760 cmd->port = PORT_FIBRE;
10761 }
10762
10763 cmd->advertising = tp->link_config.advertising;
10764 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10765 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10766 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10767 cmd->advertising |= ADVERTISED_Pause;
10768 } else {
10769 cmd->advertising |= ADVERTISED_Pause |
10770 ADVERTISED_Asym_Pause;
10771 }
10772 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10773 cmd->advertising |= ADVERTISED_Asym_Pause;
10774 }
10775 }
10776 if (netif_running(dev) && netif_carrier_ok(dev)) {
10777 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10778 cmd->duplex = tp->link_config.active_duplex;
10779 cmd->lp_advertising = tp->link_config.rmt_adv;
10780 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10781 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10782 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10783 else
10784 cmd->eth_tp_mdix = ETH_TP_MDI;
10785 }
10786 } else {
10787 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10788 cmd->duplex = DUPLEX_UNKNOWN;
10789 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10790 }
10791 cmd->phy_address = tp->phy_addr;
10792 cmd->transceiver = XCVR_INTERNAL;
10793 cmd->autoneg = tp->link_config.autoneg;
10794 cmd->maxtxpkt = 0;
10795 cmd->maxrxpkt = 0;
10796 return 0;
10797 }
10798
10799 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10800 {
10801 struct tg3 *tp = netdev_priv(dev);
10802 u32 speed = ethtool_cmd_speed(cmd);
10803
10804 if (tg3_flag(tp, USE_PHYLIB)) {
10805 struct phy_device *phydev;
10806 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10807 return -EAGAIN;
10808 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10809 return phy_ethtool_sset(phydev, cmd);
10810 }
10811
10812 if (cmd->autoneg != AUTONEG_ENABLE &&
10813 cmd->autoneg != AUTONEG_DISABLE)
10814 return -EINVAL;
10815
10816 if (cmd->autoneg == AUTONEG_DISABLE &&
10817 cmd->duplex != DUPLEX_FULL &&
10818 cmd->duplex != DUPLEX_HALF)
10819 return -EINVAL;
10820
10821 if (cmd->autoneg == AUTONEG_ENABLE) {
10822 u32 mask = ADVERTISED_Autoneg |
10823 ADVERTISED_Pause |
10824 ADVERTISED_Asym_Pause;
10825
10826 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10827 mask |= ADVERTISED_1000baseT_Half |
10828 ADVERTISED_1000baseT_Full;
10829
10830 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10831 mask |= ADVERTISED_100baseT_Half |
10832 ADVERTISED_100baseT_Full |
10833 ADVERTISED_10baseT_Half |
10834 ADVERTISED_10baseT_Full |
10835 ADVERTISED_TP;
10836 else
10837 mask |= ADVERTISED_FIBRE;
10838
10839 if (cmd->advertising & ~mask)
10840 return -EINVAL;
10841
10842 mask &= (ADVERTISED_1000baseT_Half |
10843 ADVERTISED_1000baseT_Full |
10844 ADVERTISED_100baseT_Half |
10845 ADVERTISED_100baseT_Full |
10846 ADVERTISED_10baseT_Half |
10847 ADVERTISED_10baseT_Full);
10848
10849 cmd->advertising &= mask;
10850 } else {
10851 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10852 if (speed != SPEED_1000)
10853 return -EINVAL;
10854
10855 if (cmd->duplex != DUPLEX_FULL)
10856 return -EINVAL;
10857 } else {
10858 if (speed != SPEED_100 &&
10859 speed != SPEED_10)
10860 return -EINVAL;
10861 }
10862 }
10863
10864 tg3_full_lock(tp, 0);
10865
10866 tp->link_config.autoneg = cmd->autoneg;
10867 if (cmd->autoneg == AUTONEG_ENABLE) {
10868 tp->link_config.advertising = (cmd->advertising |
10869 ADVERTISED_Autoneg);
10870 tp->link_config.speed = SPEED_UNKNOWN;
10871 tp->link_config.duplex = DUPLEX_UNKNOWN;
10872 } else {
10873 tp->link_config.advertising = 0;
10874 tp->link_config.speed = speed;
10875 tp->link_config.duplex = cmd->duplex;
10876 }
10877
10878 if (netif_running(dev))
10879 tg3_setup_phy(tp, 1);
10880
10881 tg3_full_unlock(tp);
10882
10883 return 0;
10884 }
10885
10886 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10887 {
10888 struct tg3 *tp = netdev_priv(dev);
10889
10890 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10891 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10892 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10893 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10894 }
10895
10896 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10897 {
10898 struct tg3 *tp = netdev_priv(dev);
10899
10900 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10901 wol->supported = WAKE_MAGIC;
10902 else
10903 wol->supported = 0;
10904 wol->wolopts = 0;
10905 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10906 wol->wolopts = WAKE_MAGIC;
10907 memset(&wol->sopass, 0, sizeof(wol->sopass));
10908 }
10909
10910 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10911 {
10912 struct tg3 *tp = netdev_priv(dev);
10913 struct device *dp = &tp->pdev->dev;
10914
10915 if (wol->wolopts & ~WAKE_MAGIC)
10916 return -EINVAL;
10917 if ((wol->wolopts & WAKE_MAGIC) &&
10918 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10919 return -EINVAL;
10920
10921 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10922
10923 spin_lock_bh(&tp->lock);
10924 if (device_may_wakeup(dp))
10925 tg3_flag_set(tp, WOL_ENABLE);
10926 else
10927 tg3_flag_clear(tp, WOL_ENABLE);
10928 spin_unlock_bh(&tp->lock);
10929
10930 return 0;
10931 }
10932
10933 static u32 tg3_get_msglevel(struct net_device *dev)
10934 {
10935 struct tg3 *tp = netdev_priv(dev);
10936 return tp->msg_enable;
10937 }
10938
10939 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10940 {
10941 struct tg3 *tp = netdev_priv(dev);
10942 tp->msg_enable = value;
10943 }
10944
10945 static int tg3_nway_reset(struct net_device *dev)
10946 {
10947 struct tg3 *tp = netdev_priv(dev);
10948 int r;
10949
10950 if (!netif_running(dev))
10951 return -EAGAIN;
10952
10953 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10954 return -EINVAL;
10955
10956 if (tg3_flag(tp, USE_PHYLIB)) {
10957 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10958 return -EAGAIN;
10959 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10960 } else {
10961 u32 bmcr;
10962
10963 spin_lock_bh(&tp->lock);
10964 r = -EINVAL;
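/* The back-to-back BMCR reads look odd: the first is presumably a
 * dummy read to flush a latched value; only the second, checked read
 * is trusted.
 */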
10965 tg3_readphy(tp, MII_BMCR, &bmcr);
10966 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10967 ((bmcr & BMCR_ANENABLE) ||
10968 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10969 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10970 BMCR_ANENABLE);
10971 r = 0;
10972 }
10973 spin_unlock_bh(&tp->lock);
10974 }
10975
10976 return r;
10977 }
10978
10979 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10980 {
10981 struct tg3 *tp = netdev_priv(dev);
10982
10983 ering->rx_max_pending = tp->rx_std_ring_mask;
10984 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10985 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10986 else
10987 ering->rx_jumbo_max_pending = 0;
10988
10989 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10990
10991 ering->rx_pending = tp->rx_pending;
10992 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10993 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10994 else
10995 ering->rx_jumbo_pending = 0;
10996
10997 ering->tx_pending = tp->napi[0].tx_pending;
10998 }
10999
11000 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11001 {
11002 struct tg3 *tp = netdev_priv(dev);
11003 int i, irq_sync = 0, err = 0;
11004
11005 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11006 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11007 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11008 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11009 (tg3_flag(tp, TSO_BUG) &&
11010 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11011 return -EINVAL;
11012
11013 if (netif_running(dev)) {
11014 tg3_phy_stop(tp);
11015 tg3_netif_stop(tp);
11016 irq_sync = 1;
11017 }
11018
11019 tg3_full_lock(tp, irq_sync);
11020
11021 tp->rx_pending = ering->rx_pending;
11022
11023 if (tg3_flag(tp, MAX_RXPEND_64) &&
11024 tp->rx_pending > 63)
11025 tp->rx_pending = 63;
11026 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11027
11028 for (i = 0; i < tp->irq_max; i++)
11029 tp->napi[i].tx_pending = ering->tx_pending;
11030
11031 if (netif_running(dev)) {
11032 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11033 err = tg3_restart_hw(tp, 1);
11034 if (!err)
11035 tg3_netif_start(tp);
11036 }
11037
11038 tg3_full_unlock(tp);
11039
11040 if (irq_sync && !err)
11041 tg3_phy_start(tp);
11042
11043 return err;
11044 }
11045
11046 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11047 {
11048 struct tg3 *tp = netdev_priv(dev);
11049
11050 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11051
11052 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11053 epause->rx_pause = 1;
11054 else
11055 epause->rx_pause = 0;
11056
11057 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11058 epause->tx_pause = 1;
11059 else
11060 epause->tx_pause = 0;
11061 }
11062
11063 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11064 {
11065 struct tg3 *tp = netdev_priv(dev);
11066 int err = 0;
11067
11068 if (tg3_flag(tp, USE_PHYLIB)) {
11069 u32 newadv;
11070 struct phy_device *phydev;
11071
11072 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11073
11074 if (!(phydev->supported & SUPPORTED_Pause) ||
11075 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11076 (epause->rx_pause != epause->tx_pause)))
11077 return -EINVAL;
11078
11079 tp->link_config.flowctrl = 0;
11080 if (epause->rx_pause) {
11081 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11082
11083 if (epause->tx_pause) {
11084 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11085 newadv = ADVERTISED_Pause;
11086 } else
11087 newadv = ADVERTISED_Pause |
11088 ADVERTISED_Asym_Pause;
11089 } else if (epause->tx_pause) {
11090 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11091 newadv = ADVERTISED_Asym_Pause;
11092 } else
11093 newadv = 0;
11094
11095 if (epause->autoneg)
11096 tg3_flag_set(tp, PAUSE_AUTONEG);
11097 else
11098 tg3_flag_clear(tp, PAUSE_AUTONEG);
11099
11100 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11101 u32 oldadv = phydev->advertising &
11102 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11103 if (oldadv != newadv) {
11104 phydev->advertising &=
11105 ~(ADVERTISED_Pause |
11106 ADVERTISED_Asym_Pause);
11107 phydev->advertising |= newadv;
11108 if (phydev->autoneg) {
11109 /*
11110 * Always renegotiate the link to
11111 * inform our link partner of our
11112 * flow control settings, even if the
11113 * flow control is forced. Let
11114 * tg3_adjust_link() do the final
11115 * flow control setup.
11116 */
11117 return phy_start_aneg(phydev);
11118 }
11119 }
11120
11121 if (!epause->autoneg)
11122 tg3_setup_flow_control(tp, 0, 0);
11123 } else {
11124 tp->link_config.advertising &=
11125 ~(ADVERTISED_Pause |
11126 ADVERTISED_Asym_Pause);
11127 tp->link_config.advertising |= newadv;
11128 }
11129 } else {
11130 int irq_sync = 0;
11131
11132 if (netif_running(dev)) {
11133 tg3_netif_stop(tp);
11134 irq_sync = 1;
11135 }
11136
11137 tg3_full_lock(tp, irq_sync);
11138
11139 if (epause->autoneg)
11140 tg3_flag_set(tp, PAUSE_AUTONEG);
11141 else
11142 tg3_flag_clear(tp, PAUSE_AUTONEG);
11143 if (epause->rx_pause)
11144 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11145 else
11146 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11147 if (epause->tx_pause)
11148 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11149 else
11150 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11151
11152 if (netif_running(dev)) {
11153 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11154 err = tg3_restart_hw(tp, 1);
11155 if (!err)
11156 tg3_netif_start(tp);
11157 }
11158
11159 tg3_full_unlock(tp);
11160 }
11161
11162 return err;
11163 }
11164
11165 static int tg3_get_sset_count(struct net_device *dev, int sset)
11166 {
11167 switch (sset) {
11168 case ETH_SS_TEST:
11169 return TG3_NUM_TEST;
11170 case ETH_SS_STATS:
11171 return TG3_NUM_STATS;
11172 default:
11173 return -EOPNOTSUPP;
11174 }
11175 }
11176
11177 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11178 u32 *rules __always_unused)
11179 {
11180 struct tg3 *tp = netdev_priv(dev);
11181
11182 if (!tg3_flag(tp, SUPPORT_MSIX))
11183 return -EOPNOTSUPP;
11184
11185 switch (info->cmd) {
11186 case ETHTOOL_GRXRINGS:
11187 if (netif_running(tp->dev))
11188 info->data = tp->irq_cnt;
11189 else {
11190 info->data = num_online_cpus();
11191 if (info->data > TG3_IRQ_MAX_VECS_RSS)
11192 info->data = TG3_IRQ_MAX_VECS_RSS;
11193 }
11194
11195 /* The first interrupt vector only
11196 * handles link interrupts.
11197 */
11198 info->data -= 1;
11199 return 0;
11200
11201 default:
11202 return -EOPNOTSUPP;
11203 }
11204 }
11205
11206 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11207 {
11208 u32 size = 0;
11209 struct tg3 *tp = netdev_priv(dev);
11210
11211 if (tg3_flag(tp, SUPPORT_MSIX))
11212 size = TG3_RSS_INDIR_TBL_SIZE;
11213
11214 return size;
11215 }
11216
11217 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11218 {
11219 struct tg3 *tp = netdev_priv(dev);
11220 int i;
11221
11222 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11223 indir[i] = tp->rss_ind_tbl[i];
11224
11225 return 0;
11226 }
11227
11228 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11229 {
11230 struct tg3 *tp = netdev_priv(dev);
11231 size_t i;
11232
11233 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11234 tp->rss_ind_tbl[i] = indir[i];
11235
11236 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11237 return 0;
11238
11239 /* It is legal to write the indirection
11240 * table while the device is running.
11241 */
11242 tg3_full_lock(tp, 0);
11243 tg3_rss_write_indir_tbl(tp);
11244 tg3_full_unlock(tp);
11245
11246 return 0;
11247 }
11248
11249 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11250 {
11251 switch (stringset) {
11252 case ETH_SS_STATS:
11253 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11254 break;
11255 case ETH_SS_TEST:
11256 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11257 break;
11258 default:
11259 WARN_ON(1); /* we need a WARN() */
11260 break;
11261 }
11262 }
11263
11264 static int tg3_set_phys_id(struct net_device *dev,
11265 enum ethtool_phys_id_state state)
11266 {
11267 struct tg3 *tp = netdev_priv(dev);
11268
11269 if (!netif_running(tp->dev))
11270 return -EAGAIN;
11271
11272 switch (state) {
11273 case ETHTOOL_ID_ACTIVE:
11274 return 1; /* cycle on/off once per second */
11275
11276 case ETHTOOL_ID_ON:
11277 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11278 LED_CTRL_1000MBPS_ON |
11279 LED_CTRL_100MBPS_ON |
11280 LED_CTRL_10MBPS_ON |
11281 LED_CTRL_TRAFFIC_OVERRIDE |
11282 LED_CTRL_TRAFFIC_BLINK |
11283 LED_CTRL_TRAFFIC_LED);
11284 break;
11285
11286 case ETHTOOL_ID_OFF:
11287 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11288 LED_CTRL_TRAFFIC_OVERRIDE);
11289 break;
11290
11291 case ETHTOOL_ID_INACTIVE:
11292 tw32(MAC_LED_CTRL, tp->led_ctrl);
11293 break;
11294 }
11295
11296 return 0;
11297 }
11298
11299 static void tg3_get_ethtool_stats(struct net_device *dev,
11300 struct ethtool_stats *estats, u64 *tmp_stats)
11301 {
11302 struct tg3 *tp = netdev_priv(dev);
11303
11304 if (tp->hw_stats)
11305 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11306 else
11307 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11308 }
11309
11310 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11311 {
11312 int i;
11313 __be32 *buf;
11314 u32 offset = 0, len = 0;
11315 u32 magic, val;
11316
11317 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11318 return NULL;
11319
11320 if (magic == TG3_EEPROM_MAGIC) {
11321 for (offset = TG3_NVM_DIR_START;
11322 offset < TG3_NVM_DIR_END;
11323 offset += TG3_NVM_DIRENT_SIZE) {
11324 if (tg3_nvram_read(tp, offset, &val))
11325 return NULL;
11326
11327 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11328 TG3_NVM_DIRTYPE_EXTVPD)
11329 break;
11330 }
11331
11332 if (offset != TG3_NVM_DIR_END) {
11333 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11334 if (tg3_nvram_read(tp, offset + 4, &offset))
11335 return NULL;
11336
11337 offset = tg3_nvram_logical_addr(tp, offset);
11338 }
11339 }
11340
11341 if (!offset || !len) {
11342 offset = TG3_NVM_VPD_OFF;
11343 len = TG3_NVM_VPD_LEN;
11344 }
11345
11346 buf = kmalloc(len, GFP_KERNEL);
11347 if (buf == NULL)
11348 return NULL;
11349
11350 if (magic == TG3_EEPROM_MAGIC) {
11351 for (i = 0; i < len; i += 4) {
11352 /* The data is in little-endian format in NVRAM.
11353 * Use the big-endian read routines to preserve
11354 * the byte order as it exists in NVRAM.
11355 */
11356 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11357 goto error;
11358 }
11359 } else {
11360 u8 *ptr;
11361 ssize_t cnt;
11362 unsigned int pos = 0;
11363
11364 ptr = (u8 *)&buf[0];
11365 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11366 cnt = pci_read_vpd(tp->pdev, pos,
11367 len - pos, ptr);
11368 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11369 cnt = 0;
11370 else if (cnt < 0)
11371 goto error;
11372 }
11373 if (pos != len)
11374 goto error;
11375 }
11376
11377 *vpdlen = len;
11378
11379 return buf;
11380
11381 error:
11382 kfree(buf);
11383 return NULL;
11384 }
11385
11386 #define NVRAM_TEST_SIZE 0x100
11387 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11388 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11389 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11390 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11391 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11392 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11393 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11394 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11395
11396 static int tg3_test_nvram(struct tg3 *tp)
11397 {
11398 u32 csum, magic, len;
11399 __be32 *buf;
11400 int i, j, k, err = 0, size;
11401
11402 if (tg3_flag(tp, NO_NVRAM))
11403 return 0;
11404
11405 if (tg3_nvram_read(tp, 0, &magic) != 0)
11406 return -EIO;
11407
11408 if (magic == TG3_EEPROM_MAGIC)
11409 size = NVRAM_TEST_SIZE;
11410 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11411 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11412 TG3_EEPROM_SB_FORMAT_1) {
11413 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11414 case TG3_EEPROM_SB_REVISION_0:
11415 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11416 break;
11417 case TG3_EEPROM_SB_REVISION_2:
11418 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11419 break;
11420 case TG3_EEPROM_SB_REVISION_3:
11421 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11422 break;
11423 case TG3_EEPROM_SB_REVISION_4:
11424 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11425 break;
11426 case TG3_EEPROM_SB_REVISION_5:
11427 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11428 break;
11429 case TG3_EEPROM_SB_REVISION_6:
11430 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11431 break;
11432 default:
11433 return -EIO;
11434 }
11435 } else
11436 return 0;
11437 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11438 size = NVRAM_SELFBOOT_HW_SIZE;
11439 else
11440 return -EIO;
11441
11442 buf = kmalloc(size, GFP_KERNEL);
11443 if (buf == NULL)
11444 return -ENOMEM;
11445
11446 err = -EIO;
11447 for (i = 0, j = 0; i < size; i += 4, j++) {
11448 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11449 if (err)
11450 break;
11451 }
11452 if (i < size)
11453 goto out;
11454
11455 /* Selfboot format */
11456 magic = be32_to_cpu(buf[0]);
11457 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11458 TG3_EEPROM_MAGIC_FW) {
11459 u8 *buf8 = (u8 *) buf, csum8 = 0;
11460
11461 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11462 TG3_EEPROM_SB_REVISION_2) {
11463 /* For rev 2, the csum doesn't include the MBA. */
11464 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11465 csum8 += buf8[i];
11466 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11467 csum8 += buf8[i];
11468 } else {
11469 for (i = 0; i < size; i++)
11470 csum8 += buf8[i];
11471 }
11472
11473 if (csum8 == 0) {
11474 err = 0;
11475 goto out;
11476 }
11477
11478 err = -EIO;
11479 goto out;
11480 }
11481
11482 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11483 TG3_EEPROM_MAGIC_HW) {
11484 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11485 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11486 u8 *buf8 = (u8 *) buf;
11487
11488 /* Separate the parity bits and the data bytes. */
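/* The 0x20-byte selfboot block interleaves 28 data bytes with 28
 * parity bits: bytes 0 and 8 carry 7 parity bits each, byte 16
 * carries 6 and byte 17 carries 8.  The check further below enforces
 * odd parity over each data byte plus its parity bit.
 */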
11489 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11490 if ((i == 0) || (i == 8)) {
11491 int l;
11492 u8 msk;
11493
11494 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11495 parity[k++] = buf8[i] & msk;
11496 i++;
11497 } else if (i == 16) {
11498 int l;
11499 u8 msk;
11500
11501 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11502 parity[k++] = buf8[i] & msk;
11503 i++;
11504
11505 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11506 parity[k++] = buf8[i] & msk;
11507 i++;
11508 }
11509 data[j++] = buf8[i];
11510 }
11511
11512 err = -EIO;
11513 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11514 u8 hw8 = hweight8(data[i]);
11515
11516 if ((hw8 & 0x1) && parity[i])
11517 goto out;
11518 else if (!(hw8 & 0x1) && !parity[i])
11519 goto out;
11520 }
11521 err = 0;
11522 goto out;
11523 }
11524
11525 err = -EIO;
11526
11527 /* Bootstrap checksum at offset 0x10 */
11528 csum = calc_crc((unsigned char *) buf, 0x10);
11529 if (csum != le32_to_cpu(buf[0x10/4]))
11530 goto out;
11531
11532 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11533 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11534 if (csum != le32_to_cpu(buf[0xfc/4]))
11535 goto out;
11536
11537 kfree(buf);
11538
11539 buf = tg3_vpd_readblock(tp, &len);
11540 if (!buf)
11541 return -ENOMEM;
11542
11543 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11544 if (i > 0) {
11545 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11546 if (j < 0)
11547 goto out;
11548
11549 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11550 goto out;
11551
11552 i += PCI_VPD_LRDT_TAG_SIZE;
11553 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11554 PCI_VPD_RO_KEYWORD_CHKSUM);
11555 if (j > 0) {
11556 u8 csum8 = 0;
11557
11558 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11559
11560 for (i = 0; i <= j; i++)
11561 csum8 += ((u8 *)buf)[i];
11562
11563 if (csum8)
11564 goto out;
11565 }
11566 }
11567
11568 err = 0;
11569
11570 out:
11571 kfree(buf);
11572 return err;
11573 }
11574
11575 #define TG3_SERDES_TIMEOUT_SEC 2
11576 #define TG3_COPPER_TIMEOUT_SEC 6
11577
11578 static int tg3_test_link(struct tg3 *tp)
11579 {
11580 int i, max;
11581
11582 if (!netif_running(tp->dev))
11583 return -ENODEV;
11584
11585 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11586 max = TG3_SERDES_TIMEOUT_SEC;
11587 else
11588 max = TG3_COPPER_TIMEOUT_SEC;
11589
11590 for (i = 0; i < max; i++) {
11591 if (netif_carrier_ok(tp->dev))
11592 return 0;
11593
11594 if (msleep_interruptible(1000))
11595 break;
11596 }
11597
11598 return -EIO;
11599 }
11600
11601 /* Only test the commonly used registers */
11602 static int tg3_test_registers(struct tg3 *tp)
11603 {
11604 int i, is_5705, is_5750;
11605 u32 offset, read_mask, write_mask, val, save_val, read_val;
11606 static struct {
11607 u16 offset;
11608 u16 flags;
11609 #define TG3_FL_5705 0x1
11610 #define TG3_FL_NOT_5705 0x2
11611 #define TG3_FL_NOT_5788 0x4
11612 #define TG3_FL_NOT_5750 0x8
11613 u32 read_mask;
11614 u32 write_mask;
11615 } reg_tbl[] = {
11616 /* MAC Control Registers */
11617 { MAC_MODE, TG3_FL_NOT_5705,
11618 0x00000000, 0x00ef6f8c },
11619 { MAC_MODE, TG3_FL_5705,
11620 0x00000000, 0x01ef6b8c },
11621 { MAC_STATUS, TG3_FL_NOT_5705,
11622 0x03800107, 0x00000000 },
11623 { MAC_STATUS, TG3_FL_5705,
11624 0x03800100, 0x00000000 },
11625 { MAC_ADDR_0_HIGH, 0x0000,
11626 0x00000000, 0x0000ffff },
11627 { MAC_ADDR_0_LOW, 0x0000,
11628 0x00000000, 0xffffffff },
11629 { MAC_RX_MTU_SIZE, 0x0000,
11630 0x00000000, 0x0000ffff },
11631 { MAC_TX_MODE, 0x0000,
11632 0x00000000, 0x00000070 },
11633 { MAC_TX_LENGTHS, 0x0000,
11634 0x00000000, 0x00003fff },
11635 { MAC_RX_MODE, TG3_FL_NOT_5705,
11636 0x00000000, 0x000007fc },
11637 { MAC_RX_MODE, TG3_FL_5705,
11638 0x00000000, 0x000007dc },
11639 { MAC_HASH_REG_0, 0x0000,
11640 0x00000000, 0xffffffff },
11641 { MAC_HASH_REG_1, 0x0000,
11642 0x00000000, 0xffffffff },
11643 { MAC_HASH_REG_2, 0x0000,
11644 0x00000000, 0xffffffff },
11645 { MAC_HASH_REG_3, 0x0000,
11646 0x00000000, 0xffffffff },
11647
11648 /* Receive Data and Receive BD Initiator Control Registers. */
11649 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11650 0x00000000, 0xffffffff },
11651 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11652 0x00000000, 0xffffffff },
11653 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11654 0x00000000, 0x00000003 },
11655 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11656 0x00000000, 0xffffffff },
11657 { RCVDBDI_STD_BD+0, 0x0000,
11658 0x00000000, 0xffffffff },
11659 { RCVDBDI_STD_BD+4, 0x0000,
11660 0x00000000, 0xffffffff },
11661 { RCVDBDI_STD_BD+8, 0x0000,
11662 0x00000000, 0xffff0002 },
11663 { RCVDBDI_STD_BD+0xc, 0x0000,
11664 0x00000000, 0xffffffff },
11665
11666 /* Receive BD Initiator Control Registers. */
11667 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11668 0x00000000, 0xffffffff },
11669 { RCVBDI_STD_THRESH, TG3_FL_5705,
11670 0x00000000, 0x000003ff },
11671 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11672 0x00000000, 0xffffffff },
11673
11674 /* Host Coalescing Control Registers. */
11675 { HOSTCC_MODE, TG3_FL_NOT_5705,
11676 0x00000000, 0x00000004 },
11677 { HOSTCC_MODE, TG3_FL_5705,
11678 0x00000000, 0x000000f6 },
11679 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11680 0x00000000, 0xffffffff },
11681 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11682 0x00000000, 0x000003ff },
11683 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11684 0x00000000, 0xffffffff },
11685 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11686 0x00000000, 0x000003ff },
11687 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11688 0x00000000, 0xffffffff },
11689 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11690 0x00000000, 0x000000ff },
11691 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11692 0x00000000, 0xffffffff },
11693 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11694 0x00000000, 0x000000ff },
11695 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11696 0x00000000, 0xffffffff },
11697 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11698 0x00000000, 0xffffffff },
11699 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11700 0x00000000, 0xffffffff },
11701 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11702 0x00000000, 0x000000ff },
11703 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11704 0x00000000, 0xffffffff },
11705 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11706 0x00000000, 0x000000ff },
11707 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11708 0x00000000, 0xffffffff },
11709 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11710 0x00000000, 0xffffffff },
11711 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11712 0x00000000, 0xffffffff },
11713 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11714 0x00000000, 0xffffffff },
11715 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11716 0x00000000, 0xffffffff },
11717 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11718 0xffffffff, 0x00000000 },
11719 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11720 0xffffffff, 0x00000000 },
11721
11722 /* Buffer Manager Control Registers. */
11723 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11724 0x00000000, 0x007fff80 },
11725 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11726 0x00000000, 0x007fffff },
11727 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11728 0x00000000, 0x0000003f },
11729 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11730 0x00000000, 0x000001ff },
11731 { BUFMGR_MB_HIGH_WATER, 0x0000,
11732 0x00000000, 0x000001ff },
11733 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11734 0xffffffff, 0x00000000 },
11735 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11736 0xffffffff, 0x00000000 },
11737
11738 /* Mailbox Registers */
11739 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11740 0x00000000, 0x000001ff },
11741 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11742 0x00000000, 0x000001ff },
11743 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11744 0x00000000, 0x000007ff },
11745 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11746 0x00000000, 0x000001ff },
11747
11748 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11749 };
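/* Each reg_tbl entry pairs a read_mask (the read-only bits that must
 * keep their contents across writes) with a write_mask (the read/write
 * bits that must accept both all-zeros and all-ones).  The 0xffff
 * offset entry terminates the table.
 */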
11750
11751 is_5705 = is_5750 = 0;
11752 if (tg3_flag(tp, 5705_PLUS)) {
11753 is_5705 = 1;
11754 if (tg3_flag(tp, 5750_PLUS))
11755 is_5750 = 1;
11756 }
11757
11758 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11759 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11760 continue;
11761
11762 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11763 continue;
11764
11765 if (tg3_flag(tp, IS_5788) &&
11766 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11767 continue;
11768
11769 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11770 continue;
11771
11772 offset = (u32) reg_tbl[i].offset;
11773 read_mask = reg_tbl[i].read_mask;
11774 write_mask = reg_tbl[i].write_mask;
11775
11776 /* Save the original register content */
11777 save_val = tr32(offset);
11778
11779 /* Determine the read-only value. */
11780 read_val = save_val & read_mask;
11781
11782 /* Write zero to the register, then make sure the read-only bits
11783 * are not changed and the read/write bits are all zeros.
11784 */
11785 tw32(offset, 0);
11786
11787 val = tr32(offset);
11788
11789 /* Test the read-only and read/write bits. */
11790 if (((val & read_mask) != read_val) || (val & write_mask))
11791 goto out;
11792
11793 /* Write ones to all the bits defined by RdMask and WrMask, then
11794 * make sure the read-only bits are not changed and the
11795 * read/write bits are all ones.
11796 */
11797 tw32(offset, read_mask | write_mask);
11798
11799 val = tr32(offset);
11800
11801 /* Test the read-only bits. */
11802 if ((val & read_mask) != read_val)
11803 goto out;
11804
11805 /* Test the read/write bits. */
11806 if ((val & write_mask) != write_mask)
11807 goto out;
11808
11809 tw32(offset, save_val);
11810 }
11811
11812 return 0;
11813
11814 out:
11815 if (netif_msg_hw(tp))
11816 netdev_err(tp->dev,
11817 "Register test failed at offset %x\n", offset);
11818 tw32(offset, save_val);
11819 return -EIO;
11820 }
11821
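/* Pattern-test a window of internal SRAM: write all-zeros, all-ones,
 * and the alternating 0xaa55a55a pattern to each word in
 * [offset, offset + len) and read each word back, failing on the
 * first mismatch.
 */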
11822 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11823 {
11824 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11825 int i;
11826 u32 j;
11827
11828 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11829 for (j = 0; j < len; j += 4) {
11830 u32 val;
11831
11832 tg3_write_mem(tp, offset + j, test_pattern[i]);
11833 tg3_read_mem(tp, offset + j, &val);
11834 if (val != test_pattern[i])
11835 return -EIO;
11836 }
11837 }
11838 return 0;
11839 }
11840
11841 static int tg3_test_memory(struct tg3 *tp)
11842 {
11843 static struct mem_entry {
11844 u32 offset;
11845 u32 len;
11846 } mem_tbl_570x[] = {
11847 { 0x00000000, 0x00b50},
11848 { 0x00002000, 0x1c000},
11849 { 0xffffffff, 0x00000}
11850 }, mem_tbl_5705[] = {
11851 { 0x00000100, 0x0000c},
11852 { 0x00000200, 0x00008},
11853 { 0x00004000, 0x00800},
11854 { 0x00006000, 0x01000},
11855 { 0x00008000, 0x02000},
11856 { 0x00010000, 0x0e000},
11857 { 0xffffffff, 0x00000}
11858 }, mem_tbl_5755[] = {
11859 { 0x00000200, 0x00008},
11860 { 0x00004000, 0x00800},
11861 { 0x00006000, 0x00800},
11862 { 0x00008000, 0x02000},
11863 { 0x00010000, 0x0c000},
11864 { 0xffffffff, 0x00000}
11865 }, mem_tbl_5906[] = {
11866 { 0x00000200, 0x00008},
11867 { 0x00004000, 0x00400},
11868 { 0x00006000, 0x00400},
11869 { 0x00008000, 0x01000},
11870 { 0x00010000, 0x01000},
11871 { 0xffffffff, 0x00000}
11872 }, mem_tbl_5717[] = {
11873 { 0x00000200, 0x00008},
11874 { 0x00010000, 0x0a000},
11875 { 0x00020000, 0x13c00},
11876 { 0xffffffff, 0x00000}
11877 }, mem_tbl_57765[] = {
11878 { 0x00000200, 0x00008},
11879 { 0x00004000, 0x00800},
11880 { 0x00006000, 0x09800},
11881 { 0x00010000, 0x0a000},
11882 { 0xffffffff, 0x00000}
11883 };
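	/* The tables above list the (offset, len) SRAM regions that are
	 * safe to pattern-test on each chip generation; a 0xffffffff
	 * offset terminates each list.
	 */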
11884 struct mem_entry *mem_tbl;
11885 int err = 0;
11886 int i;
11887
11888 if (tg3_flag(tp, 5717_PLUS))
11889 mem_tbl = mem_tbl_5717;
11890 else if (tg3_flag(tp, 57765_CLASS))
11891 mem_tbl = mem_tbl_57765;
11892 else if (tg3_flag(tp, 5755_PLUS))
11893 mem_tbl = mem_tbl_5755;
11894 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11895 mem_tbl = mem_tbl_5906;
11896 else if (tg3_flag(tp, 5705_PLUS))
11897 mem_tbl = mem_tbl_5705;
11898 else
11899 mem_tbl = mem_tbl_570x;
11900
11901 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11902 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11903 if (err)
11904 break;
11905 }
11906
11907 return err;
11908 }
11909
11910 #define TG3_TSO_MSS 500
11911
11912 #define TG3_TSO_IP_HDR_LEN 20
11913 #define TG3_TSO_TCP_HDR_LEN 20
11914 #define TG3_TSO_TCP_OPT_LEN 12
11915
11916 static const u8 tg3_tso_header[] = {
11917 0x08, 0x00,
11918 0x45, 0x00, 0x00, 0x00,
11919 0x00, 0x00, 0x40, 0x00,
11920 0x40, 0x06, 0x00, 0x00,
11921 0x0a, 0x00, 0x00, 0x01,
11922 0x0a, 0x00, 0x00, 0x02,
11923 0x0d, 0x00, 0xe0, 0x00,
11924 0x00, 0x00, 0x01, 0x00,
11925 0x00, 0x00, 0x02, 0x00,
11926 0x80, 0x10, 0x10, 0x00,
11927 0x14, 0x09, 0x00, 0x00,
11928 0x01, 0x01, 0x08, 0x0a,
11929 0x11, 0x11, 0x11, 0x11,
11930 0x11, 0x11, 0x11, 0x11,
11931 };
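/* The canned header above is an Ethernet type field (0x0800) followed
 * by a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2) and a 32-byte TCP
 * header carrying 12 bytes of timestamp options, matching the
 * TG3_TSO_*_LEN constants.
 */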
11932
11933 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11934 {
11935 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11936 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11937 u32 budget;
11938 struct sk_buff *skb;
11939 u8 *tx_data, *rx_data;
11940 dma_addr_t map;
11941 int num_pkts, tx_len, rx_len, i, err;
11942 struct tg3_rx_buffer_desc *desc;
11943 struct tg3_napi *tnapi, *rnapi;
11944 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11945
11946 tnapi = &tp->napi[0];
11947 rnapi = &tp->napi[0];
11948 if (tp->irq_cnt > 1) {
11949 if (tg3_flag(tp, ENABLE_RSS))
11950 rnapi = &tp->napi[1];
11951 if (tg3_flag(tp, ENABLE_TSS))
11952 tnapi = &tp->napi[1];
11953 }
11954 coal_now = tnapi->coal_now | rnapi->coal_now;
11955
11956 err = -EIO;
11957
11958 tx_len = pktsz;
11959 skb = netdev_alloc_skb(tp->dev, tx_len);
11960 if (!skb)
11961 return -ENOMEM;
11962
11963 tx_data = skb_put(skb, tx_len);
11964 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11965 	memset(tx_data + ETH_ALEN, 0x0, 8);
11966
11967 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11968
11969 if (tso_loopback) {
11970 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11971
11972 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11973 TG3_TSO_TCP_OPT_LEN;
11974
11975 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11976 sizeof(tg3_tso_header));
11977 mss = TG3_TSO_MSS;
11978
11979 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11980 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11981
11982 /* Set the total length field in the IP header */
11983 iph->tot_len = htons((u16)(mss + hdr_len));
11984
11985 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11986 TXD_FLAG_CPU_POST_DMA);
11987
11988 if (tg3_flag(tp, HW_TSO_1) ||
11989 tg3_flag(tp, HW_TSO_2) ||
11990 tg3_flag(tp, HW_TSO_3)) {
11991 struct tcphdr *th;
11992 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11993 th = (struct tcphdr *)&tx_data[val];
11994 th->check = 0;
11995 } else
11996 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11997
11998 if (tg3_flag(tp, HW_TSO_3)) {
11999 mss |= (hdr_len & 0xc) << 12;
12000 if (hdr_len & 0x10)
12001 base_flags |= 0x00000010;
12002 base_flags |= (hdr_len & 0x3e0) << 5;
12003 } else if (tg3_flag(tp, HW_TSO_2))
12004 mss |= hdr_len << 9;
12005 else if (tg3_flag(tp, HW_TSO_1) ||
12006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12007 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12008 } else {
12009 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12010 }
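		/* The if/else ladder above folds the IP+TCP header length
		 * into spare bits of mss and base_flags; each HW_TSO
		 * generation expects a different encoding, matching what
		 * the normal transmit path programs.
		 */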
12011
12012 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12013 } else {
12014 num_pkts = 1;
12015 data_off = ETH_HLEN;
12016
12017 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12018 tx_len > VLAN_ETH_FRAME_LEN)
12019 base_flags |= TXD_FLAG_JMB_PKT;
12020 }
12021
12022 for (i = data_off; i < tx_len; i++)
12023 tx_data[i] = (u8) (i & 0xff);
12024
12025 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12026 if (pci_dma_mapping_error(tp->pdev, map)) {
12027 dev_kfree_skb(skb);
12028 return -EIO;
12029 }
12030
12031 val = tnapi->tx_prod;
12032 tnapi->tx_buffers[val].skb = skb;
12033 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12034
12035 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12036 rnapi->coal_now);
12037
12038 udelay(10);
12039
12040 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12041
12042 budget = tg3_tx_avail(tnapi);
12043 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12044 base_flags | TXD_FLAG_END, mss, 0)) {
12045 tnapi->tx_buffers[val].skb = NULL;
12046 dev_kfree_skb(skb);
12047 return -EIO;
12048 }
12049
12050 tnapi->tx_prod++;
12051
12052 /* Sync BD data before updating mailbox */
12053 wmb();
12054
12055 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12056 tr32_mailbox(tnapi->prodmbox);
12057
12058 udelay(10);
12059
12060 	/* Poll for up to 350 usec to allow enough time on some 10/100 Mbps devices. */
12061 for (i = 0; i < 35; i++) {
12062 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12063 coal_now);
12064
12065 udelay(10);
12066
12067 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12068 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12069 if ((tx_idx == tnapi->tx_prod) &&
12070 (rx_idx == (rx_start_idx + num_pkts)))
12071 break;
12072 }
12073
12074 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12075 dev_kfree_skb(skb);
12076
12077 if (tx_idx != tnapi->tx_prod)
12078 goto out;
12079
12080 if (rx_idx != rx_start_idx + num_pkts)
12081 goto out;
12082
12083 val = data_off;
12084 while (rx_idx != rx_start_idx) {
12085 desc = &rnapi->rx_rcb[rx_start_idx++];
12086 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12087 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12088
12089 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12090 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12091 goto out;
12092
12093 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12094 - ETH_FCS_LEN;
12095
12096 if (!tso_loopback) {
12097 if (rx_len != tx_len)
12098 goto out;
12099
12100 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12101 if (opaque_key != RXD_OPAQUE_RING_STD)
12102 goto out;
12103 } else {
12104 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12105 goto out;
12106 }
12107 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12108 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12109 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12110 goto out;
12111 }
12112
12113 if (opaque_key == RXD_OPAQUE_RING_STD) {
12114 rx_data = tpr->rx_std_buffers[desc_idx].data;
12115 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12116 mapping);
12117 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12118 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12119 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12120 mapping);
12121 } else
12122 goto out;
12123
12124 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12125 PCI_DMA_FROMDEVICE);
12126
12127 rx_data += TG3_RX_OFFSET(tp);
12128 for (i = data_off; i < rx_len; i++, val++) {
12129 if (*(rx_data + i) != (u8) (val & 0xff))
12130 goto out;
12131 }
12132 }
12133
12134 err = 0;
12135
12136 /* tg3_free_rings will unmap and free the rx_data */
12137 out:
12138 return err;
12139 }
12140
12141 #define TG3_STD_LOOPBACK_FAILED 1
12142 #define TG3_JMB_LOOPBACK_FAILED 2
12143 #define TG3_TSO_LOOPBACK_FAILED 4
12144 #define TG3_LOOPBACK_FAILED \
12145 (TG3_STD_LOOPBACK_FAILED | \
12146 TG3_JMB_LOOPBACK_FAILED | \
12147 TG3_TSO_LOOPBACK_FAILED)
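/* tg3_test_loopback() fills one failure bitmask per mode: data[0] for
 * MAC loopback, data[1] for internal PHY loopback, and data[2] for
 * external PHY loopback when requested.
 */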
12148
12149 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12150 {
12151 int err = -EIO;
12152 u32 eee_cap;
12153 u32 jmb_pkt_sz = 9000;
12154
12155 if (tp->dma_limit)
12156 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12157
12158 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12159 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12160
12161 if (!netif_running(tp->dev)) {
12162 data[0] = TG3_LOOPBACK_FAILED;
12163 data[1] = TG3_LOOPBACK_FAILED;
12164 if (do_extlpbk)
12165 data[2] = TG3_LOOPBACK_FAILED;
12166 goto done;
12167 }
12168
12169 err = tg3_reset_hw(tp, 1);
12170 if (err) {
12171 data[0] = TG3_LOOPBACK_FAILED;
12172 data[1] = TG3_LOOPBACK_FAILED;
12173 if (do_extlpbk)
12174 data[2] = TG3_LOOPBACK_FAILED;
12175 goto done;
12176 }
12177
12178 if (tg3_flag(tp, ENABLE_RSS)) {
12179 int i;
12180
12181 /* Reroute all rx packets to the 1st queue */
12182 for (i = MAC_RSS_INDIR_TBL_0;
12183 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12184 tw32(i, 0x0);
12185 }
12186
12187 	/* HW erratum: MAC loopback fails in some cases on 5780.
12188 	 * Normal traffic and PHY loopback are not affected by this
12189 	 * erratum.  Also, the MAC loopback test is deprecated for
12190 	 * all newer ASIC revisions.
12191 	 */
12192 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12193 !tg3_flag(tp, CPMU_PRESENT)) {
12194 tg3_mac_loopback(tp, true);
12195
12196 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12197 data[0] |= TG3_STD_LOOPBACK_FAILED;
12198
12199 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12200 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12201 data[0] |= TG3_JMB_LOOPBACK_FAILED;
12202
12203 tg3_mac_loopback(tp, false);
12204 }
12205
12206 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12207 !tg3_flag(tp, USE_PHYLIB)) {
12208 int i;
12209
12210 tg3_phy_lpbk_set(tp, 0, false);
12211
12212 /* Wait for link */
12213 for (i = 0; i < 100; i++) {
12214 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12215 break;
12216 mdelay(1);
12217 }
12218
12219 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12220 data[1] |= TG3_STD_LOOPBACK_FAILED;
12221 if (tg3_flag(tp, TSO_CAPABLE) &&
12222 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12223 data[1] |= TG3_TSO_LOOPBACK_FAILED;
12224 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12225 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12226 data[1] |= TG3_JMB_LOOPBACK_FAILED;
12227
12228 if (do_extlpbk) {
12229 tg3_phy_lpbk_set(tp, 0, true);
12230
12231 /* All link indications report up, but the hardware
12232 * isn't really ready for about 20 msec. Double it
12233 * to be sure.
12234 */
12235 mdelay(40);
12236
12237 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12238 data[2] |= TG3_STD_LOOPBACK_FAILED;
12239 if (tg3_flag(tp, TSO_CAPABLE) &&
12240 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12241 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12242 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12243 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12244 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12245 }
12246
12247 /* Re-enable gphy autopowerdown. */
12248 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12249 tg3_phy_toggle_apd(tp, true);
12250 }
12251
12252 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12253
12254 done:
12255 tp->phy_flags |= eee_cap;
12256
12257 return err;
12258 }
12259
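/* ethtool self-test entry point.  Result slots: data[0] NVRAM,
 * data[1] link, data[2] registers, data[3] memory, data[4..6] the
 * three loopback bitmasks, data[7] the interrupt test.
 */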
12260 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12261 u64 *data)
12262 {
12263 struct tg3 *tp = netdev_priv(dev);
12264 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12265
12266 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12267 tg3_power_up(tp)) {
12268 etest->flags |= ETH_TEST_FL_FAILED;
12269 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12270 return;
12271 }
12272
12273 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12274
12275 if (tg3_test_nvram(tp) != 0) {
12276 etest->flags |= ETH_TEST_FL_FAILED;
12277 data[0] = 1;
12278 }
12279 if (!doextlpbk && tg3_test_link(tp)) {
12280 etest->flags |= ETH_TEST_FL_FAILED;
12281 data[1] = 1;
12282 }
12283 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12284 int err, err2 = 0, irq_sync = 0;
12285
12286 if (netif_running(dev)) {
12287 tg3_phy_stop(tp);
12288 tg3_netif_stop(tp);
12289 irq_sync = 1;
12290 }
12291
12292 tg3_full_lock(tp, irq_sync);
12293
12294 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12295 err = tg3_nvram_lock(tp);
12296 tg3_halt_cpu(tp, RX_CPU_BASE);
12297 if (!tg3_flag(tp, 5705_PLUS))
12298 tg3_halt_cpu(tp, TX_CPU_BASE);
12299 if (!err)
12300 tg3_nvram_unlock(tp);
12301
12302 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12303 tg3_phy_reset(tp);
12304
12305 if (tg3_test_registers(tp) != 0) {
12306 etest->flags |= ETH_TEST_FL_FAILED;
12307 data[2] = 1;
12308 }
12309
12310 if (tg3_test_memory(tp) != 0) {
12311 etest->flags |= ETH_TEST_FL_FAILED;
12312 data[3] = 1;
12313 }
12314
12315 if (doextlpbk)
12316 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12317
12318 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12319 etest->flags |= ETH_TEST_FL_FAILED;
12320
12321 tg3_full_unlock(tp);
12322
12323 if (tg3_test_interrupt(tp) != 0) {
12324 etest->flags |= ETH_TEST_FL_FAILED;
12325 data[7] = 1;
12326 }
12327
12328 tg3_full_lock(tp, 0);
12329
12330 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12331 if (netif_running(dev)) {
12332 tg3_flag_set(tp, INIT_COMPLETE);
12333 err2 = tg3_restart_hw(tp, 1);
12334 if (!err2)
12335 tg3_netif_start(tp);
12336 }
12337
12338 tg3_full_unlock(tp);
12339
12340 if (irq_sync && !err2)
12341 tg3_phy_start(tp);
12342 }
12343 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12344 tg3_power_down(tp);
12345
12346 }
12347
12348 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12349 {
12350 struct mii_ioctl_data *data = if_mii(ifr);
12351 struct tg3 *tp = netdev_priv(dev);
12352 int err;
12353
12354 if (tg3_flag(tp, USE_PHYLIB)) {
12355 struct phy_device *phydev;
12356 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12357 return -EAGAIN;
12358 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12359 return phy_mii_ioctl(phydev, ifr, cmd);
12360 }
12361
12362 switch (cmd) {
12363 case SIOCGMIIPHY:
12364 data->phy_id = tp->phy_addr;
12365
12366 /* fallthru */
12367 case SIOCGMIIREG: {
12368 u32 mii_regval;
12369
12370 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12371 break; /* We have no PHY */
12372
12373 if (!netif_running(dev))
12374 return -EAGAIN;
12375
12376 spin_lock_bh(&tp->lock);
12377 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12378 spin_unlock_bh(&tp->lock);
12379
12380 data->val_out = mii_regval;
12381
12382 return err;
12383 }
12384
12385 case SIOCSMIIREG:
12386 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12387 break; /* We have no PHY */
12388
12389 if (!netif_running(dev))
12390 return -EAGAIN;
12391
12392 spin_lock_bh(&tp->lock);
12393 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12394 spin_unlock_bh(&tp->lock);
12395
12396 return err;
12397
12398 default:
12399 /* do nothing */
12400 break;
12401 }
12402 return -EOPNOTSUPP;
12403 }
12404
12405 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12406 {
12407 struct tg3 *tp = netdev_priv(dev);
12408
12409 memcpy(ec, &tp->coal, sizeof(*ec));
12410 return 0;
12411 }
12412
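/* 5705 and newer chips lack the irq-event coalescing tick counters
 * and the adjustable statistics tick, so the corresponding limits
 * below stay zero and any nonzero request fails with -EINVAL.
 */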
12413 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12414 {
12415 struct tg3 *tp = netdev_priv(dev);
12416 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12417 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12418
12419 if (!tg3_flag(tp, 5705_PLUS)) {
12420 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12421 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12422 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12423 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12424 }
12425
12426 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12427 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12428 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12429 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12430 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12431 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12432 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12433 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12434 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12435 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12436 return -EINVAL;
12437
12438 /* No rx interrupts will be generated if both are zero */
12439 if ((ec->rx_coalesce_usecs == 0) &&
12440 (ec->rx_max_coalesced_frames == 0))
12441 return -EINVAL;
12442
12443 /* No tx interrupts will be generated if both are zero */
12444 if ((ec->tx_coalesce_usecs == 0) &&
12445 (ec->tx_max_coalesced_frames == 0))
12446 return -EINVAL;
12447
12448 /* Only copy relevant parameters, ignore all others. */
12449 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12450 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12451 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12452 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12453 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12454 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12455 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12456 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12457 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12458
12459 if (netif_running(dev)) {
12460 tg3_full_lock(tp, 0);
12461 __tg3_set_coalesce(tp, &tp->coal);
12462 tg3_full_unlock(tp);
12463 }
12464 return 0;
12465 }
12466
12467 static const struct ethtool_ops tg3_ethtool_ops = {
12468 .get_settings = tg3_get_settings,
12469 .set_settings = tg3_set_settings,
12470 .get_drvinfo = tg3_get_drvinfo,
12471 .get_regs_len = tg3_get_regs_len,
12472 .get_regs = tg3_get_regs,
12473 .get_wol = tg3_get_wol,
12474 .set_wol = tg3_set_wol,
12475 .get_msglevel = tg3_get_msglevel,
12476 .set_msglevel = tg3_set_msglevel,
12477 .nway_reset = tg3_nway_reset,
12478 .get_link = ethtool_op_get_link,
12479 .get_eeprom_len = tg3_get_eeprom_len,
12480 .get_eeprom = tg3_get_eeprom,
12481 .set_eeprom = tg3_set_eeprom,
12482 .get_ringparam = tg3_get_ringparam,
12483 .set_ringparam = tg3_set_ringparam,
12484 .get_pauseparam = tg3_get_pauseparam,
12485 .set_pauseparam = tg3_set_pauseparam,
12486 .self_test = tg3_self_test,
12487 .get_strings = tg3_get_strings,
12488 .set_phys_id = tg3_set_phys_id,
12489 .get_ethtool_stats = tg3_get_ethtool_stats,
12490 .get_coalesce = tg3_get_coalesce,
12491 .set_coalesce = tg3_set_coalesce,
12492 .get_sset_count = tg3_get_sset_count,
12493 .get_rxnfc = tg3_get_rxnfc,
12494 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12495 .get_rxfh_indir = tg3_get_rxfh_indir,
12496 .set_rxfh_indir = tg3_set_rxfh_indir,
12497 .get_ts_info = ethtool_op_get_ts_info,
12498 };
12499
12500 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12501 struct rtnl_link_stats64 *stats)
12502 {
12503 struct tg3 *tp = netdev_priv(dev);
12504
12505 spin_lock_bh(&tp->lock);
12506 if (!tp->hw_stats) {
12507 spin_unlock_bh(&tp->lock);
12508 return &tp->net_stats_prev;
12509 }
12510
12511 tg3_get_nstats(tp, stats);
12512 spin_unlock_bh(&tp->lock);
12513
12514 return stats;
12515 }
12516
12517 static void tg3_set_rx_mode(struct net_device *dev)
12518 {
12519 struct tg3 *tp = netdev_priv(dev);
12520
12521 if (!netif_running(dev))
12522 return;
12523
12524 tg3_full_lock(tp, 0);
12525 __tg3_set_rx_mode(dev);
12526 tg3_full_unlock(tp);
12527 }
12528
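/* On 5780-class chips TSO and jumbo frames are mutually exclusive, so
 * an MTU change toggles TSO capability instead; other chips switch
 * the dedicated jumbo producer ring on and off.
 */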
12529 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12530 int new_mtu)
12531 {
12532 dev->mtu = new_mtu;
12533
12534 if (new_mtu > ETH_DATA_LEN) {
12535 if (tg3_flag(tp, 5780_CLASS)) {
12536 netdev_update_features(dev);
12537 tg3_flag_clear(tp, TSO_CAPABLE);
12538 } else {
12539 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12540 }
12541 } else {
12542 if (tg3_flag(tp, 5780_CLASS)) {
12543 tg3_flag_set(tp, TSO_CAPABLE);
12544 netdev_update_features(dev);
12545 }
12546 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12547 }
12548 }
12549
12550 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12551 {
12552 struct tg3 *tp = netdev_priv(dev);
12553 int err, reset_phy = 0;
12554
12555 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12556 return -EINVAL;
12557
12558 if (!netif_running(dev)) {
12559 /* We'll just catch it later when the
12560 		 * device is brought up.
12561 */
12562 tg3_set_mtu(dev, tp, new_mtu);
12563 return 0;
12564 }
12565
12566 tg3_phy_stop(tp);
12567
12568 tg3_netif_stop(tp);
12569
12570 tg3_full_lock(tp, 1);
12571
12572 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12573
12574 tg3_set_mtu(dev, tp, new_mtu);
12575
12576 	/* Reset the PHY, otherwise the read DMA engine will be left in a
12577 	 * mode that breaks all DMA requests up into 256-byte chunks.
12578 	 */
12579 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12580 reset_phy = 1;
12581
12582 err = tg3_restart_hw(tp, reset_phy);
12583
12584 if (!err)
12585 tg3_netif_start(tp);
12586
12587 tg3_full_unlock(tp);
12588
12589 if (!err)
12590 tg3_phy_start(tp);
12591
12592 return err;
12593 }
12594
12595 static const struct net_device_ops tg3_netdev_ops = {
12596 .ndo_open = tg3_open,
12597 .ndo_stop = tg3_close,
12598 .ndo_start_xmit = tg3_start_xmit,
12599 .ndo_get_stats64 = tg3_get_stats64,
12600 .ndo_validate_addr = eth_validate_addr,
12601 .ndo_set_rx_mode = tg3_set_rx_mode,
12602 .ndo_set_mac_address = tg3_set_mac_addr,
12603 .ndo_do_ioctl = tg3_ioctl,
12604 .ndo_tx_timeout = tg3_tx_timeout,
12605 .ndo_change_mtu = tg3_change_mtu,
12606 .ndo_fix_features = tg3_fix_features,
12607 .ndo_set_features = tg3_set_features,
12608 #ifdef CONFIG_NET_POLL_CONTROLLER
12609 .ndo_poll_controller = tg3_poll_controller,
12610 #endif
12611 };
12612
12613 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12614 {
12615 u32 cursize, val, magic;
12616
12617 tp->nvram_size = EEPROM_CHIP_SIZE;
12618
12619 if (tg3_nvram_read(tp, 0, &magic) != 0)
12620 return;
12621
12622 if ((magic != TG3_EEPROM_MAGIC) &&
12623 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12624 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12625 return;
12626
12627 /*
12628 * Size the chip by reading offsets at increasing powers of two.
12629 * When we encounter our validation signature, we know the addressing
12630 * has wrapped around, and thus have our chip size.
12631 */
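	/* For example, on a 128 KB part the read at cursize == 0x20000
	 * wraps to offset 0 and returns the magic value, ending the probe.
	 */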
12632 cursize = 0x10;
12633
12634 while (cursize < tp->nvram_size) {
12635 if (tg3_nvram_read(tp, cursize, &val) != 0)
12636 return;
12637
12638 if (val == magic)
12639 break;
12640
12641 cursize <<= 1;
12642 }
12643
12644 tp->nvram_size = cursize;
12645 }
12646
12647 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12648 {
12649 u32 val;
12650
12651 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12652 return;
12653
12654 /* Selfboot format */
12655 if (val != TG3_EEPROM_MAGIC) {
12656 tg3_get_eeprom_size(tp);
12657 return;
12658 }
12659
12660 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12661 if (val != 0) {
12662 /* This is confusing. We want to operate on the
12663 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12664 * call will read from NVRAM and byteswap the data
12665 * according to the byteswapping settings for all
12666 * other register accesses. This ensures the data we
12667 * want will always reside in the lower 16-bits.
12668 * However, the data in NVRAM is in LE format, which
12669 * means the data from the NVRAM read will always be
12670 * opposite the endianness of the CPU. The 16-bit
12671 * byteswap then brings the data to CPU endianness.
12672 */
12673 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12674 return;
12675 }
12676 }
12677 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12678 }
12679
12680 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12681 {
12682 u32 nvcfg1;
12683
12684 nvcfg1 = tr32(NVRAM_CFG1);
12685 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12686 tg3_flag_set(tp, FLASH);
12687 } else {
12688 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12689 tw32(NVRAM_CFG1, nvcfg1);
12690 }
12691
12692 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12693 tg3_flag(tp, 5780_CLASS)) {
12694 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12695 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12696 tp->nvram_jedecnum = JEDEC_ATMEL;
12697 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12698 tg3_flag_set(tp, NVRAM_BUFFERED);
12699 break;
12700 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12701 tp->nvram_jedecnum = JEDEC_ATMEL;
12702 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12703 break;
12704 case FLASH_VENDOR_ATMEL_EEPROM:
12705 tp->nvram_jedecnum = JEDEC_ATMEL;
12706 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12707 tg3_flag_set(tp, NVRAM_BUFFERED);
12708 break;
12709 case FLASH_VENDOR_ST:
12710 tp->nvram_jedecnum = JEDEC_ST;
12711 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12712 tg3_flag_set(tp, NVRAM_BUFFERED);
12713 break;
12714 case FLASH_VENDOR_SAIFUN:
12715 tp->nvram_jedecnum = JEDEC_SAIFUN;
12716 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12717 break;
12718 case FLASH_VENDOR_SST_SMALL:
12719 case FLASH_VENDOR_SST_LARGE:
12720 tp->nvram_jedecnum = JEDEC_SST;
12721 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12722 break;
12723 }
12724 } else {
12725 tp->nvram_jedecnum = JEDEC_ATMEL;
12726 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12727 tg3_flag_set(tp, NVRAM_BUFFERED);
12728 }
12729 }
12730
12731 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12732 {
12733 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12734 case FLASH_5752PAGE_SIZE_256:
12735 tp->nvram_pagesize = 256;
12736 break;
12737 case FLASH_5752PAGE_SIZE_512:
12738 tp->nvram_pagesize = 512;
12739 break;
12740 case FLASH_5752PAGE_SIZE_1K:
12741 tp->nvram_pagesize = 1024;
12742 break;
12743 case FLASH_5752PAGE_SIZE_2K:
12744 tp->nvram_pagesize = 2048;
12745 break;
12746 case FLASH_5752PAGE_SIZE_4K:
12747 tp->nvram_pagesize = 4096;
12748 break;
12749 case FLASH_5752PAGE_SIZE_264:
12750 tp->nvram_pagesize = 264;
12751 break;
12752 case FLASH_5752PAGE_SIZE_528:
12753 tp->nvram_pagesize = 528;
12754 break;
12755 }
12756 }
12757
12758 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12759 {
12760 u32 nvcfg1;
12761
12762 nvcfg1 = tr32(NVRAM_CFG1);
12763
12764 /* NVRAM protection for TPM */
12765 if (nvcfg1 & (1 << 27))
12766 tg3_flag_set(tp, PROTECTED_NVRAM);
12767
12768 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12769 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12770 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12771 tp->nvram_jedecnum = JEDEC_ATMEL;
12772 tg3_flag_set(tp, NVRAM_BUFFERED);
12773 break;
12774 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12775 tp->nvram_jedecnum = JEDEC_ATMEL;
12776 tg3_flag_set(tp, NVRAM_BUFFERED);
12777 tg3_flag_set(tp, FLASH);
12778 break;
12779 case FLASH_5752VENDOR_ST_M45PE10:
12780 case FLASH_5752VENDOR_ST_M45PE20:
12781 case FLASH_5752VENDOR_ST_M45PE40:
12782 tp->nvram_jedecnum = JEDEC_ST;
12783 tg3_flag_set(tp, NVRAM_BUFFERED);
12784 tg3_flag_set(tp, FLASH);
12785 break;
12786 }
12787
12788 if (tg3_flag(tp, FLASH)) {
12789 tg3_nvram_get_pagesize(tp, nvcfg1);
12790 } else {
12791 /* For eeprom, set pagesize to maximum eeprom size */
12792 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12793
12794 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12795 tw32(NVRAM_CFG1, nvcfg1);
12796 }
12797 }
12798
12799 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12800 {
12801 u32 nvcfg1, protect = 0;
12802
12803 nvcfg1 = tr32(NVRAM_CFG1);
12804
12805 /* NVRAM protection for TPM */
12806 if (nvcfg1 & (1 << 27)) {
12807 tg3_flag_set(tp, PROTECTED_NVRAM);
12808 protect = 1;
12809 }
12810
12811 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12812 switch (nvcfg1) {
12813 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12814 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12815 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12816 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12817 tp->nvram_jedecnum = JEDEC_ATMEL;
12818 tg3_flag_set(tp, NVRAM_BUFFERED);
12819 tg3_flag_set(tp, FLASH);
12820 tp->nvram_pagesize = 264;
12821 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12822 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12823 tp->nvram_size = (protect ? 0x3e200 :
12824 TG3_NVRAM_SIZE_512KB);
12825 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12826 tp->nvram_size = (protect ? 0x1f200 :
12827 TG3_NVRAM_SIZE_256KB);
12828 else
12829 tp->nvram_size = (protect ? 0x1f200 :
12830 TG3_NVRAM_SIZE_128KB);
12831 break;
12832 case FLASH_5752VENDOR_ST_M45PE10:
12833 case FLASH_5752VENDOR_ST_M45PE20:
12834 case FLASH_5752VENDOR_ST_M45PE40:
12835 tp->nvram_jedecnum = JEDEC_ST;
12836 tg3_flag_set(tp, NVRAM_BUFFERED);
12837 tg3_flag_set(tp, FLASH);
12838 tp->nvram_pagesize = 256;
12839 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12840 tp->nvram_size = (protect ?
12841 TG3_NVRAM_SIZE_64KB :
12842 TG3_NVRAM_SIZE_128KB);
12843 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12844 tp->nvram_size = (protect ?
12845 TG3_NVRAM_SIZE_64KB :
12846 TG3_NVRAM_SIZE_256KB);
12847 else
12848 tp->nvram_size = (protect ?
12849 TG3_NVRAM_SIZE_128KB :
12850 TG3_NVRAM_SIZE_512KB);
12851 break;
12852 }
12853 }
12854
12855 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12856 {
12857 u32 nvcfg1;
12858
12859 nvcfg1 = tr32(NVRAM_CFG1);
12860
12861 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12862 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12863 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12864 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12865 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12866 tp->nvram_jedecnum = JEDEC_ATMEL;
12867 tg3_flag_set(tp, NVRAM_BUFFERED);
12868 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12869
12870 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12871 tw32(NVRAM_CFG1, nvcfg1);
12872 break;
12873 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12874 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12875 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12876 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12877 tp->nvram_jedecnum = JEDEC_ATMEL;
12878 tg3_flag_set(tp, NVRAM_BUFFERED);
12879 tg3_flag_set(tp, FLASH);
12880 tp->nvram_pagesize = 264;
12881 break;
12882 case FLASH_5752VENDOR_ST_M45PE10:
12883 case FLASH_5752VENDOR_ST_M45PE20:
12884 case FLASH_5752VENDOR_ST_M45PE40:
12885 tp->nvram_jedecnum = JEDEC_ST;
12886 tg3_flag_set(tp, NVRAM_BUFFERED);
12887 tg3_flag_set(tp, FLASH);
12888 tp->nvram_pagesize = 256;
12889 break;
12890 }
12891 }
12892
12893 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12894 {
12895 u32 nvcfg1, protect = 0;
12896
12897 nvcfg1 = tr32(NVRAM_CFG1);
12898
12899 /* NVRAM protection for TPM */
12900 if (nvcfg1 & (1 << 27)) {
12901 tg3_flag_set(tp, PROTECTED_NVRAM);
12902 protect = 1;
12903 }
12904
12905 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12906 switch (nvcfg1) {
12907 case FLASH_5761VENDOR_ATMEL_ADB021D:
12908 case FLASH_5761VENDOR_ATMEL_ADB041D:
12909 case FLASH_5761VENDOR_ATMEL_ADB081D:
12910 case FLASH_5761VENDOR_ATMEL_ADB161D:
12911 case FLASH_5761VENDOR_ATMEL_MDB021D:
12912 case FLASH_5761VENDOR_ATMEL_MDB041D:
12913 case FLASH_5761VENDOR_ATMEL_MDB081D:
12914 case FLASH_5761VENDOR_ATMEL_MDB161D:
12915 tp->nvram_jedecnum = JEDEC_ATMEL;
12916 tg3_flag_set(tp, NVRAM_BUFFERED);
12917 tg3_flag_set(tp, FLASH);
12918 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12919 tp->nvram_pagesize = 256;
12920 break;
12921 case FLASH_5761VENDOR_ST_A_M45PE20:
12922 case FLASH_5761VENDOR_ST_A_M45PE40:
12923 case FLASH_5761VENDOR_ST_A_M45PE80:
12924 case FLASH_5761VENDOR_ST_A_M45PE16:
12925 case FLASH_5761VENDOR_ST_M_M45PE20:
12926 case FLASH_5761VENDOR_ST_M_M45PE40:
12927 case FLASH_5761VENDOR_ST_M_M45PE80:
12928 case FLASH_5761VENDOR_ST_M_M45PE16:
12929 tp->nvram_jedecnum = JEDEC_ST;
12930 tg3_flag_set(tp, NVRAM_BUFFERED);
12931 tg3_flag_set(tp, FLASH);
12932 tp->nvram_pagesize = 256;
12933 break;
12934 }
12935
12936 if (protect) {
12937 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12938 } else {
12939 switch (nvcfg1) {
12940 case FLASH_5761VENDOR_ATMEL_ADB161D:
12941 case FLASH_5761VENDOR_ATMEL_MDB161D:
12942 case FLASH_5761VENDOR_ST_A_M45PE16:
12943 case FLASH_5761VENDOR_ST_M_M45PE16:
12944 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12945 break;
12946 case FLASH_5761VENDOR_ATMEL_ADB081D:
12947 case FLASH_5761VENDOR_ATMEL_MDB081D:
12948 case FLASH_5761VENDOR_ST_A_M45PE80:
12949 case FLASH_5761VENDOR_ST_M_M45PE80:
12950 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12951 break;
12952 case FLASH_5761VENDOR_ATMEL_ADB041D:
12953 case FLASH_5761VENDOR_ATMEL_MDB041D:
12954 case FLASH_5761VENDOR_ST_A_M45PE40:
12955 case FLASH_5761VENDOR_ST_M_M45PE40:
12956 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12957 break;
12958 case FLASH_5761VENDOR_ATMEL_ADB021D:
12959 case FLASH_5761VENDOR_ATMEL_MDB021D:
12960 case FLASH_5761VENDOR_ST_A_M45PE20:
12961 case FLASH_5761VENDOR_ST_M_M45PE20:
12962 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12963 break;
12964 }
12965 }
12966 }
12967
12968 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12969 {
12970 tp->nvram_jedecnum = JEDEC_ATMEL;
12971 tg3_flag_set(tp, NVRAM_BUFFERED);
12972 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12973 }
12974
12975 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12976 {
12977 u32 nvcfg1;
12978
12979 nvcfg1 = tr32(NVRAM_CFG1);
12980
12981 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12982 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12983 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12984 tp->nvram_jedecnum = JEDEC_ATMEL;
12985 tg3_flag_set(tp, NVRAM_BUFFERED);
12986 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12987
12988 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12989 tw32(NVRAM_CFG1, nvcfg1);
12990 return;
12991 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12992 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12993 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12994 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12995 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12996 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12997 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12998 tp->nvram_jedecnum = JEDEC_ATMEL;
12999 tg3_flag_set(tp, NVRAM_BUFFERED);
13000 tg3_flag_set(tp, FLASH);
13001
13002 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13003 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13004 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13005 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13006 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13007 break;
13008 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13009 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13010 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13011 break;
13012 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13013 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13014 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13015 break;
13016 }
13017 break;
13018 case FLASH_5752VENDOR_ST_M45PE10:
13019 case FLASH_5752VENDOR_ST_M45PE20:
13020 case FLASH_5752VENDOR_ST_M45PE40:
13021 tp->nvram_jedecnum = JEDEC_ST;
13022 tg3_flag_set(tp, NVRAM_BUFFERED);
13023 tg3_flag_set(tp, FLASH);
13024
13025 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13026 case FLASH_5752VENDOR_ST_M45PE10:
13027 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13028 break;
13029 case FLASH_5752VENDOR_ST_M45PE20:
13030 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13031 break;
13032 case FLASH_5752VENDOR_ST_M45PE40:
13033 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13034 break;
13035 }
13036 break;
13037 default:
13038 tg3_flag_set(tp, NO_NVRAM);
13039 return;
13040 }
13041
13042 tg3_nvram_get_pagesize(tp, nvcfg1);
13043 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13044 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13045 }
13046
13047
13048 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13049 {
13050 u32 nvcfg1;
13051
13052 nvcfg1 = tr32(NVRAM_CFG1);
13053
13054 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13055 case FLASH_5717VENDOR_ATMEL_EEPROM:
13056 case FLASH_5717VENDOR_MICRO_EEPROM:
13057 tp->nvram_jedecnum = JEDEC_ATMEL;
13058 tg3_flag_set(tp, NVRAM_BUFFERED);
13059 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13060
13061 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13062 tw32(NVRAM_CFG1, nvcfg1);
13063 return;
13064 case FLASH_5717VENDOR_ATMEL_MDB011D:
13065 case FLASH_5717VENDOR_ATMEL_ADB011B:
13066 case FLASH_5717VENDOR_ATMEL_ADB011D:
13067 case FLASH_5717VENDOR_ATMEL_MDB021D:
13068 case FLASH_5717VENDOR_ATMEL_ADB021B:
13069 case FLASH_5717VENDOR_ATMEL_ADB021D:
13070 case FLASH_5717VENDOR_ATMEL_45USPT:
13071 tp->nvram_jedecnum = JEDEC_ATMEL;
13072 tg3_flag_set(tp, NVRAM_BUFFERED);
13073 tg3_flag_set(tp, FLASH);
13074
13075 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13076 case FLASH_5717VENDOR_ATMEL_MDB021D:
13077 			/* Detect size with tg3_get_nvram_size() */
13078 break;
13079 case FLASH_5717VENDOR_ATMEL_ADB021B:
13080 case FLASH_5717VENDOR_ATMEL_ADB021D:
13081 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13082 break;
13083 default:
13084 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13085 break;
13086 }
13087 break;
13088 case FLASH_5717VENDOR_ST_M_M25PE10:
13089 case FLASH_5717VENDOR_ST_A_M25PE10:
13090 case FLASH_5717VENDOR_ST_M_M45PE10:
13091 case FLASH_5717VENDOR_ST_A_M45PE10:
13092 case FLASH_5717VENDOR_ST_M_M25PE20:
13093 case FLASH_5717VENDOR_ST_A_M25PE20:
13094 case FLASH_5717VENDOR_ST_M_M45PE20:
13095 case FLASH_5717VENDOR_ST_A_M45PE20:
13096 case FLASH_5717VENDOR_ST_25USPT:
13097 case FLASH_5717VENDOR_ST_45USPT:
13098 tp->nvram_jedecnum = JEDEC_ST;
13099 tg3_flag_set(tp, NVRAM_BUFFERED);
13100 tg3_flag_set(tp, FLASH);
13101
13102 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13103 case FLASH_5717VENDOR_ST_M_M25PE20:
13104 case FLASH_5717VENDOR_ST_M_M45PE20:
13105 			/* Detect size with tg3_get_nvram_size() */
13106 break;
13107 case FLASH_5717VENDOR_ST_A_M25PE20:
13108 case FLASH_5717VENDOR_ST_A_M45PE20:
13109 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13110 break;
13111 default:
13112 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13113 break;
13114 }
13115 break;
13116 default:
13117 tg3_flag_set(tp, NO_NVRAM);
13118 return;
13119 }
13120
13121 tg3_nvram_get_pagesize(tp, nvcfg1);
13122 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13123 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13124 }
13125
13126 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13127 {
13128 u32 nvcfg1, nvmpinstrp;
13129
13130 nvcfg1 = tr32(NVRAM_CFG1);
13131 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13132
13133 switch (nvmpinstrp) {
13134 case FLASH_5720_EEPROM_HD:
13135 case FLASH_5720_EEPROM_LD:
13136 tp->nvram_jedecnum = JEDEC_ATMEL;
13137 tg3_flag_set(tp, NVRAM_BUFFERED);
13138
13139 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13140 tw32(NVRAM_CFG1, nvcfg1);
13141 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13142 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13143 else
13144 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13145 return;
13146 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13147 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13148 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13149 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13150 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13151 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13152 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13153 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13154 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13155 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13156 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13157 case FLASH_5720VENDOR_ATMEL_45USPT:
13158 tp->nvram_jedecnum = JEDEC_ATMEL;
13159 tg3_flag_set(tp, NVRAM_BUFFERED);
13160 tg3_flag_set(tp, FLASH);
13161
13162 switch (nvmpinstrp) {
13163 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13164 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13165 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13166 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13167 break;
13168 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13169 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13170 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13171 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13172 break;
13173 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13174 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13175 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13176 break;
13177 default:
13178 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13179 break;
13180 }
13181 break;
13182 case FLASH_5720VENDOR_M_ST_M25PE10:
13183 case FLASH_5720VENDOR_M_ST_M45PE10:
13184 case FLASH_5720VENDOR_A_ST_M25PE10:
13185 case FLASH_5720VENDOR_A_ST_M45PE10:
13186 case FLASH_5720VENDOR_M_ST_M25PE20:
13187 case FLASH_5720VENDOR_M_ST_M45PE20:
13188 case FLASH_5720VENDOR_A_ST_M25PE20:
13189 case FLASH_5720VENDOR_A_ST_M45PE20:
13190 case FLASH_5720VENDOR_M_ST_M25PE40:
13191 case FLASH_5720VENDOR_M_ST_M45PE40:
13192 case FLASH_5720VENDOR_A_ST_M25PE40:
13193 case FLASH_5720VENDOR_A_ST_M45PE40:
13194 case FLASH_5720VENDOR_M_ST_M25PE80:
13195 case FLASH_5720VENDOR_M_ST_M45PE80:
13196 case FLASH_5720VENDOR_A_ST_M25PE80:
13197 case FLASH_5720VENDOR_A_ST_M45PE80:
13198 case FLASH_5720VENDOR_ST_25USPT:
13199 case FLASH_5720VENDOR_ST_45USPT:
13200 tp->nvram_jedecnum = JEDEC_ST;
13201 tg3_flag_set(tp, NVRAM_BUFFERED);
13202 tg3_flag_set(tp, FLASH);
13203
13204 switch (nvmpinstrp) {
13205 case FLASH_5720VENDOR_M_ST_M25PE20:
13206 case FLASH_5720VENDOR_M_ST_M45PE20:
13207 case FLASH_5720VENDOR_A_ST_M25PE20:
13208 case FLASH_5720VENDOR_A_ST_M45PE20:
13209 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13210 break;
13211 case FLASH_5720VENDOR_M_ST_M25PE40:
13212 case FLASH_5720VENDOR_M_ST_M45PE40:
13213 case FLASH_5720VENDOR_A_ST_M25PE40:
13214 case FLASH_5720VENDOR_A_ST_M45PE40:
13215 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13216 break;
13217 case FLASH_5720VENDOR_M_ST_M25PE80:
13218 case FLASH_5720VENDOR_M_ST_M45PE80:
13219 case FLASH_5720VENDOR_A_ST_M25PE80:
13220 case FLASH_5720VENDOR_A_ST_M45PE80:
13221 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13222 break;
13223 default:
13224 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13225 break;
13226 }
13227 break;
13228 default:
13229 tg3_flag_set(tp, NO_NVRAM);
13230 return;
13231 }
13232
13233 tg3_nvram_get_pagesize(tp, nvcfg1);
13234 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13235 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13236 }
13237
13238 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13239 static void __devinit tg3_nvram_init(struct tg3 *tp)
13240 {
13241 tw32_f(GRC_EEPROM_ADDR,
13242 (EEPROM_ADDR_FSM_RESET |
13243 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13244 EEPROM_ADDR_CLKPERD_SHIFT)));
13245
13246 msleep(1);
13247
13248 	/* Enable serial-EEPROM accesses. */
13249 tw32_f(GRC_LOCAL_CTRL,
13250 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13251 udelay(100);
13252
13253 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13254 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13255 tg3_flag_set(tp, NVRAM);
13256
13257 if (tg3_nvram_lock(tp)) {
13258 netdev_warn(tp->dev,
13259 "Cannot get nvram lock, %s failed\n",
13260 __func__);
13261 return;
13262 }
13263 tg3_enable_nvram_access(tp);
13264
13265 tp->nvram_size = 0;
13266
13267 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13268 tg3_get_5752_nvram_info(tp);
13269 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13270 tg3_get_5755_nvram_info(tp);
13271 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13274 tg3_get_5787_nvram_info(tp);
13275 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13276 tg3_get_5761_nvram_info(tp);
13277 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13278 tg3_get_5906_nvram_info(tp);
13279 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13280 tg3_flag(tp, 57765_CLASS))
13281 tg3_get_57780_nvram_info(tp);
13282 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13283 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13284 tg3_get_5717_nvram_info(tp);
13285 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13286 tg3_get_5720_nvram_info(tp);
13287 else
13288 tg3_get_nvram_info(tp);
13289
13290 if (tp->nvram_size == 0)
13291 tg3_get_nvram_size(tp);
13292
13293 tg3_disable_nvram_access(tp);
13294 tg3_nvram_unlock(tp);
13295
13296 } else {
13297 tg3_flag_clear(tp, NVRAM);
13298 tg3_flag_clear(tp, NVRAM_BUFFERED);
13299
13300 tg3_get_eeprom_size(tp);
13301 }
13302 }
13303
13304 struct subsys_tbl_ent {
13305 u16 subsys_vendor, subsys_devid;
13306 u32 phy_id;
13307 };
13308
13309 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13310 /* Broadcom boards. */
13311 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13312 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13313 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13314 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13315 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13316 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13317 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13318 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13319 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13320 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13321 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13322 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13323 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13324 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13325 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13326 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13327 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13328 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13329 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13330 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13331 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13332 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13333
13334 /* 3com boards. */
13335 { TG3PCI_SUBVENDOR_ID_3COM,
13336 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13337 { TG3PCI_SUBVENDOR_ID_3COM,
13338 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13339 { TG3PCI_SUBVENDOR_ID_3COM,
13340 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13341 { TG3PCI_SUBVENDOR_ID_3COM,
13342 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13343 { TG3PCI_SUBVENDOR_ID_3COM,
13344 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13345
13346 /* DELL boards. */
13347 { TG3PCI_SUBVENDOR_ID_DELL,
13348 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13349 { TG3PCI_SUBVENDOR_ID_DELL,
13350 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13351 { TG3PCI_SUBVENDOR_ID_DELL,
13352 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13353 { TG3PCI_SUBVENDOR_ID_DELL,
13354 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13355
13356 /* Compaq boards. */
13357 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13358 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13359 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13360 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13361 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13362 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13363 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13364 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13365 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13366 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13367
13368 /* IBM boards. */
13369 { TG3PCI_SUBVENDOR_ID_IBM,
13370 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13371 };
13372
13373 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13374 {
13375 int i;
13376
13377 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13378 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13379 tp->pdev->subsystem_vendor) &&
13380 (subsys_id_to_phy_id[i].subsys_devid ==
13381 tp->pdev->subsystem_device))
13382 return &subsys_id_to_phy_id[i];
13383 }
13384 return NULL;
13385 }
13386
13387 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13388 {
13389 u32 val;
13390
13391 tp->phy_id = TG3_PHY_ID_INVALID;
13392 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13393
13394 	/* Assume an onboard, WOL-capable device by default. */
13395 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13396 tg3_flag_set(tp, WOL_CAP);
13397
13398 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13399 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13400 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13401 tg3_flag_set(tp, IS_NIC);
13402 }
13403 val = tr32(VCPU_CFGSHDW);
13404 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13405 tg3_flag_set(tp, ASPM_WORKAROUND);
13406 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13407 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13408 tg3_flag_set(tp, WOL_ENABLE);
13409 device_set_wakeup_enable(&tp->pdev->dev, true);
13410 }
13411 goto done;
13412 }
13413
13414 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13415 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13416 u32 nic_cfg, led_cfg;
13417 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13418 int eeprom_phy_serdes = 0;
13419
13420 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13421 tp->nic_sram_data_cfg = nic_cfg;
13422
13423 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13424 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13425 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13426 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13427 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13428 (ver > 0) && (ver < 0x100))
13429 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13430
13431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13432 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13433
13434 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13435 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13436 eeprom_phy_serdes = 1;
13437
13438 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13439 if (nic_phy_id != 0) {
13440 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13441 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13442
13443 eeprom_phy_id = (id1 >> 16) << 10;
13444 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13445 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13446 } else
13447 eeprom_phy_id = 0;
13448
13449 tp->phy_id = eeprom_phy_id;
13450 if (eeprom_phy_serdes) {
13451 if (!tg3_flag(tp, 5705_PLUS))
13452 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13453 else
13454 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13455 }
13456
13457 if (tg3_flag(tp, 5750_PLUS))
13458 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13459 SHASTA_EXT_LED_MODE_MASK);
13460 else
13461 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13462
13463 switch (led_cfg) {
13464 default:
13465 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13466 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13467 break;
13468
13469 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13470 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13471 break;
13472
13473 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13474 tp->led_ctrl = LED_CTRL_MODE_MAC;
13475
13476 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13477 			 * as happens with some older 5700/5701 bootcode.
13478 			 */
13479 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13480 ASIC_REV_5700 ||
13481 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13482 ASIC_REV_5701)
13483 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13484
13485 break;
13486
13487 case SHASTA_EXT_LED_SHARED:
13488 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13489 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13490 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13491 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13492 LED_CTRL_MODE_PHY_2);
13493 break;
13494
13495 case SHASTA_EXT_LED_MAC:
13496 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13497 break;
13498
13499 case SHASTA_EXT_LED_COMBO:
13500 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13501 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13502 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13503 LED_CTRL_MODE_PHY_2);
13504 break;
13505
13506 }
13507
13508 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13509 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13510 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13511 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13512
13513 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13514 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13515
13516 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13517 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13518 if ((tp->pdev->subsystem_vendor ==
13519 PCI_VENDOR_ID_ARIMA) &&
13520 (tp->pdev->subsystem_device == 0x205a ||
13521 tp->pdev->subsystem_device == 0x2063))
13522 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13523 } else {
13524 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13525 tg3_flag_set(tp, IS_NIC);
13526 }
13527
13528 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13529 tg3_flag_set(tp, ENABLE_ASF);
13530 if (tg3_flag(tp, 5750_PLUS))
13531 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13532 }
13533
13534 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13535 tg3_flag(tp, 5750_PLUS))
13536 tg3_flag_set(tp, ENABLE_APE);
13537
13538 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13539 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13540 tg3_flag_clear(tp, WOL_CAP);
13541
13542 if (tg3_flag(tp, WOL_CAP) &&
13543 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13544 tg3_flag_set(tp, WOL_ENABLE);
13545 device_set_wakeup_enable(&tp->pdev->dev, true);
13546 }
13547
13548 if (cfg2 & (1 << 17))
13549 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13550
13551 /* serdes signal pre-emphasis in register 0x590 is set by
13552 * the bootcode if bit 18 is set */
13553 if (cfg2 & (1 << 18))
13554 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13555
13556 if ((tg3_flag(tp, 57765_PLUS) ||
13557 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13558 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13559 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13560 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13561
13562 if (tg3_flag(tp, PCI_EXPRESS) &&
13563 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13564 !tg3_flag(tp, 57765_PLUS)) {
13565 u32 cfg3;
13566
13567 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13568 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13569 tg3_flag_set(tp, ASPM_WORKAROUND);
13570 }
13571
13572 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13573 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13574 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13575 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13576 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13577 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13578 }
13579 done:
13580 if (tg3_flag(tp, WOL_CAP))
13581 device_set_wakeup_enable(&tp->pdev->dev,
13582 tg3_flag(tp, WOL_ENABLE));
13583 else
13584 device_set_wakeup_capable(&tp->pdev->dev, false);
13585 }
13586
13587 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13588 {
13589 int i;
13590 u32 val;
13591
13592 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13593 tw32(OTP_CTRL, cmd);
13594
13595 /* Wait for up to 1 ms for command to execute. */
13596 for (i = 0; i < 100; i++) {
13597 val = tr32(OTP_STATUS);
13598 if (val & OTP_STATUS_CMD_DONE)
13599 break;
13600 udelay(10);
13601 }
13602
13603 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13604 }
13605
13606 /* Read the gphy configuration from the OTP region of the chip. The gphy
13607 * configuration is a 32-bit value that straddles the alignment boundary.
13608 * We do two 32-bit reads and then shift and merge the results.
13609 */
13610 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13611 {
13612 u32 bhalf_otp, thalf_otp;
13613
13614 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13615
13616 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13617 return 0;
13618
13619 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13620
13621 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13622 return 0;
13623
13624 thalf_otp = tr32(OTP_READ_DATA);
13625
13626 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13627
13628 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13629 return 0;
13630
13631 bhalf_otp = tr32(OTP_READ_DATA);
13632
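/* The low half of the MAGIC1 word holds the top half of the gphy
 * config and the high half of the MAGIC2 word holds the bottom
 * half.  For example, thalf_otp = 0xaaaabbbb and
 * bhalf_otp = 0xccccdddd merge to 0xbbbbcccc.
 */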
13633 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13634 }
13635
13636 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13637 {
13638 u32 adv = ADVERTISED_Autoneg;
13639
13640 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13641 adv |= ADVERTISED_1000baseT_Half |
13642 ADVERTISED_1000baseT_Full;
13643
13644 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13645 adv |= ADVERTISED_100baseT_Half |
13646 ADVERTISED_100baseT_Full |
13647 ADVERTISED_10baseT_Half |
13648 ADVERTISED_10baseT_Full |
13649 ADVERTISED_TP;
13650 else
13651 adv |= ADVERTISED_FIBRE;
13652
13653 tp->link_config.advertising = adv;
13654 tp->link_config.speed = SPEED_UNKNOWN;
13655 tp->link_config.duplex = DUPLEX_UNKNOWN;
13656 tp->link_config.autoneg = AUTONEG_ENABLE;
13657 tp->link_config.active_speed = SPEED_UNKNOWN;
13658 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13659
13660 tp->old_link = -1;
13661 }
13662
13663 static int __devinit tg3_phy_probe(struct tg3 *tp)
13664 {
13665 u32 hw_phy_id_1, hw_phy_id_2;
13666 u32 hw_phy_id, hw_phy_id_masked;
13667 int err;
13668
13669 /* flow control autonegotiation is default behavior */
13670 tg3_flag_set(tp, PAUSE_AUTONEG);
13671 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13672
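/* When APE firmware is present, PHY accesses are arbitrated
 * through per-function APE locks; pick the lock matching this
 * device's PCI function number.
 */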
13673 if (tg3_flag(tp, ENABLE_APE)) {
13674 switch (tp->pci_fn) {
13675 case 0:
13676 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13677 break;
13678 case 1:
13679 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13680 break;
13681 case 2:
13682 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13683 break;
13684 case 3:
13685 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13686 break;
13687 }
13688 }
13689
13690 if (tg3_flag(tp, USE_PHYLIB))
13691 return tg3_phy_init(tp);
13692
13693 /* Reading the PHY ID register can conflict with ASF
13694 * firmware access to the PHY hardware.
13695 */
13696 err = 0;
13697 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13698 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13699 } else {
13700 /* Now read the physical PHY_ID from the chip and verify
13701 * that it is sane. If it doesn't look good, we fall back
13702 * to the PHY_ID found in the eeprom area and, failing
13703 * that, to the hard-coded subsystem ID table.
13704 */
13705 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13706 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13707
13708 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13709 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13710 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13711
13712 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13713 }
13714
13715 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13716 tp->phy_id = hw_phy_id;
13717 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13718 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13719 else
13720 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13721 } else {
13722 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13723 /* Do nothing, phy ID already set up in
13724 * tg3_get_eeprom_hw_cfg().
13725 */
13726 } else {
13727 struct subsys_tbl_ent *p;
13728
13729 /* No eeprom signature? Try the hardcoded
13730 * subsys device table.
13731 */
13732 p = tg3_lookup_by_subsys(tp);
13733 if (!p)
13734 return -ENODEV;
13735
13736 tp->phy_id = p->phy_id;
13737 if (!tp->phy_id ||
13738 tp->phy_id == TG3_PHY_ID_BCM8002)
13739 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13740 }
13741 }
13742
13743 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13744 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13746 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13747 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13748 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13749 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13750 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13751
13752 tg3_phy_init_link_config(tp);
13753
13754 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13755 !tg3_flag(tp, ENABLE_APE) &&
13756 !tg3_flag(tp, ENABLE_ASF)) {
13757 u32 bmsr, dummy;
13758
13759 tg3_readphy(tp, MII_BMSR, &bmsr);
13760 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13761 (bmsr & BMSR_LSTATUS))
13762 goto skip_phy_reset;
13763
13764 err = tg3_phy_reset(tp);
13765 if (err)
13766 return err;
13767
13768 tg3_phy_set_wirespeed(tp);
13769
13770 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13771 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13772 tp->link_config.flowctrl);
13773
13774 tg3_writephy(tp, MII_BMCR,
13775 BMCR_ANENABLE | BMCR_ANRESTART);
13776 }
13777 }
13778
13779 skip_phy_reset:
13780 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13781 err = tg3_init_5401phy_dsp(tp);
13782 if (err)
13783 return err;
13784
13785 err = tg3_init_5401phy_dsp(tp);
13786 }
13787
13788 return err;
13789 }
13790
13791 static void __devinit tg3_read_vpd(struct tg3 *tp)
13792 {
13793 u8 *vpd_data;
13794 unsigned int block_end, rosize, len;
13795 u32 vpdlen;
13796 int j, i = 0;
13797
13798 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13799 if (!vpd_data)
13800 goto out_no_vpd;
13801
13802 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13803 if (i < 0)
13804 goto out_not_found;
13805
13806 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13807 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13808 i += PCI_VPD_LRDT_TAG_SIZE;
13809
13810 if (block_end > vpdlen)
13811 goto out_not_found;
13812
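/* Check for a Dell-style bootcode version first: if the MFR_ID
 * keyword reads "1028" (Dell's PCI vendor ID rendered as ASCII),
 * the VENDOR0 keyword carries a firmware version string that is
 * copied into tp->fw_ver with " bc " appended.
 */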
13813 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13814 PCI_VPD_RO_KEYWORD_MFR_ID);
13815 if (j > 0) {
13816 len = pci_vpd_info_field_size(&vpd_data[j]);
13817
13818 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13819 if (j + len > block_end || len != 4 ||
13820 memcmp(&vpd_data[j], "1028", 4))
13821 goto partno;
13822
13823 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13824 PCI_VPD_RO_KEYWORD_VENDOR0);
13825 if (j < 0)
13826 goto partno;
13827
13828 len = pci_vpd_info_field_size(&vpd_data[j]);
13829
13830 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13831 if (j + len > block_end)
13832 goto partno;
13833
13834 memcpy(tp->fw_ver, &vpd_data[j], len);
13835 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13836 }
13837
13838 partno:
13839 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13840 PCI_VPD_RO_KEYWORD_PARTNO);
13841 if (i < 0)
13842 goto out_not_found;
13843
13844 len = pci_vpd_info_field_size(&vpd_data[i]);
13845
13846 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13847 if (len > TG3_BPN_SIZE ||
13848 (len + i) > vpdlen)
13849 goto out_not_found;
13850
13851 memcpy(tp->board_part_number, &vpd_data[i], len);
13852
13853 out_not_found:
13854 kfree(vpd_data);
13855 if (tp->board_part_number[0])
13856 return;
13857
13858 out_no_vpd:
13859 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13860 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13861 strcpy(tp->board_part_number, "BCM5717");
13862 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13863 strcpy(tp->board_part_number, "BCM5718");
13864 else
13865 goto nomatch;
13866 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13867 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13868 strcpy(tp->board_part_number, "BCM57780");
13869 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13870 strcpy(tp->board_part_number, "BCM57760");
13871 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13872 strcpy(tp->board_part_number, "BCM57790");
13873 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13874 strcpy(tp->board_part_number, "BCM57788");
13875 else
13876 goto nomatch;
13877 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13878 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13879 strcpy(tp->board_part_number, "BCM57761");
13880 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13881 strcpy(tp->board_part_number, "BCM57765");
13882 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13883 strcpy(tp->board_part_number, "BCM57781");
13884 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13885 strcpy(tp->board_part_number, "BCM57785");
13886 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13887 strcpy(tp->board_part_number, "BCM57791");
13888 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13889 strcpy(tp->board_part_number, "BCM57795");
13890 else
13891 goto nomatch;
13892 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13893 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13894 strcpy(tp->board_part_number, "BCM57762");
13895 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13896 strcpy(tp->board_part_number, "BCM57766");
13897 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13898 strcpy(tp->board_part_number, "BCM57782");
13899 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13900 strcpy(tp->board_part_number, "BCM57786");
13901 else
13902 goto nomatch;
13903 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13904 strcpy(tp->board_part_number, "BCM95906");
13905 } else {
13906 nomatch:
13907 strcpy(tp->board_part_number, "none");
13908 }
13909 }
13910
13911 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13912 {
13913 u32 val;
13914
13915 if (tg3_nvram_read(tp, offset, &val) ||
13916 (val & 0xfc000000) != 0x0c000000 ||
13917 tg3_nvram_read(tp, offset + 4, &val) ||
13918 val != 0)
13919 return 0;
13920
13921 return 1;
13922 }
13923
13924 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13925 {
13926 u32 val, offset, start, ver_offset;
13927 int i, dst_off;
13928 bool newver = false;
13929
13930 if (tg3_nvram_read(tp, 0xc, &offset) ||
13931 tg3_nvram_read(tp, 0x4, &start))
13932 return;
13933
13934 offset = tg3_nvram_logical_addr(tp, offset);
13935
13936 if (tg3_nvram_read(tp, offset, &val))
13937 return;
13938
13939 if ((val & 0xfc000000) == 0x0c000000) {
13940 if (tg3_nvram_read(tp, offset + 4, &val))
13941 return;
13942
13943 if (val == 0)
13944 newver = true;
13945 }
13946
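/* Newer bootcode images store a pointer to a 16-byte ASCII
 * version string at image offset 8; older images only expose
 * packed major/minor fields at TG3_NVM_PTREV_BCVER, formatted
 * as "vM.mm" below.
 */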
13947 dst_off = strlen(tp->fw_ver);
13948
13949 if (newver) {
13950 if (TG3_VER_SIZE - dst_off < 16 ||
13951 tg3_nvram_read(tp, offset + 8, &ver_offset))
13952 return;
13953
13954 offset = offset + ver_offset - start;
13955 for (i = 0; i < 16; i += 4) {
13956 __be32 v;
13957 if (tg3_nvram_read_be32(tp, offset + i, &v))
13958 return;
13959
13960 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13961 }
13962 } else {
13963 u32 major, minor;
13964
13965 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13966 return;
13967
13968 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13969 TG3_NVM_BCVER_MAJSFT;
13970 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13971 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13972 "v%d.%02d", major, minor);
13973 }
13974 }
13975
13976 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13977 {
13978 u32 val, major, minor;
13979
13980 /* Use native endian representation */
13981 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13982 return;
13983
13984 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13985 TG3_NVM_HWSB_CFG1_MAJSFT;
13986 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13987 TG3_NVM_HWSB_CFG1_MINSFT;
13988
13989 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13990 }
13991
13992 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13993 {
13994 u32 offset, major, minor, build;
13995
13996 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13997
13998 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13999 return;
14000
14001 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14002 case TG3_EEPROM_SB_REVISION_0:
14003 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14004 break;
14005 case TG3_EEPROM_SB_REVISION_2:
14006 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14007 break;
14008 case TG3_EEPROM_SB_REVISION_3:
14009 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14010 break;
14011 case TG3_EEPROM_SB_REVISION_4:
14012 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14013 break;
14014 case TG3_EEPROM_SB_REVISION_5:
14015 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14016 break;
14017 case TG3_EEPROM_SB_REVISION_6:
14018 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14019 break;
14020 default:
14021 return;
14022 }
14023
14024 if (tg3_nvram_read(tp, offset, &val))
14025 return;
14026
14027 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14028 TG3_EEPROM_SB_EDH_BLD_SHFT;
14029 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14030 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14031 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14032
14033 if (minor > 99 || build > 26)
14034 return;
14035
14036 offset = strlen(tp->fw_ver);
14037 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14038 " v%d.%02d", major, minor);
14039
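/* A non-zero build number becomes a trailing letter, e.g. build 1
 * renders as 'a' and build 26 as 'z', hence the cap of 26 in the
 * sanity check above.
 */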
14040 if (build > 0) {
14041 offset = strlen(tp->fw_ver);
14042 if (offset < TG3_VER_SIZE - 1)
14043 tp->fw_ver[offset] = 'a' + build - 1;
14044 }
14045 }
14046
14047 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14048 {
14049 u32 val, offset, start;
14050 int i, vlen;
14051
14052 for (offset = TG3_NVM_DIR_START;
14053 offset < TG3_NVM_DIR_END;
14054 offset += TG3_NVM_DIRENT_SIZE) {
14055 if (tg3_nvram_read(tp, offset, &val))
14056 return;
14057
14058 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14059 break;
14060 }
14061
14062 if (offset == TG3_NVM_DIR_END)
14063 return;
14064
14065 if (!tg3_flag(tp, 5705_PLUS))
14066 start = 0x08000000;
14067 else if (tg3_nvram_read(tp, offset - 4, &start))
14068 return;
14069
14070 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14071 !tg3_fw_img_is_valid(tp, offset) ||
14072 tg3_nvram_read(tp, offset + 8, &val))
14073 return;
14074
14075 offset += val - start;
14076
14077 vlen = strlen(tp->fw_ver);
14078
14079 tp->fw_ver[vlen++] = ',';
14080 tp->fw_ver[vlen++] = ' ';
14081
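/* The management firmware version is a 16-byte ASCII blob; copy
 * it into tp->fw_ver one 32-bit word at a time, truncating the
 * final word if it would overflow the buffer.
 */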
14082 for (i = 0; i < 4; i++) {
14083 __be32 v;
14084 if (tg3_nvram_read_be32(tp, offset, &v))
14085 return;
14086
14087 offset += sizeof(v);
14088
14089 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14090 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14091 break;
14092 }
14093
14094 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14095 vlen += sizeof(v);
14096 }
14097 }
14098
14099 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14100 {
14101 u32 apedata;
14102
14103 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14104 if (apedata != APE_SEG_SIG_MAGIC)
14105 return;
14106
14107 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14108 if (!(apedata & APE_FW_STATUS_READY))
14109 return;
14110
14111 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14112 tg3_flag_set(tp, APE_HAS_NCSI);
14113 }
14114
14115 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14116 {
14117 int vlen;
14118 u32 apedata;
14119 char *fwtype;
14120
14121 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14122
14123 if (tg3_flag(tp, APE_HAS_NCSI))
14124 fwtype = "NCSI";
14125 else
14126 fwtype = "DASH";
14127
14128 vlen = strlen(tp->fw_ver);
14129
14130 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14131 fwtype,
14132 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14133 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14134 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14135 (apedata & APE_FW_VERSION_BLDMSK));
14136 }
14137
14138 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14139 {
14140 u32 val;
14141 bool vpd_vers = false;
14142
14143 if (tp->fw_ver[0] != 0)
14144 vpd_vers = true;
14145
14146 if (tg3_flag(tp, NO_NVRAM)) {
14147 strcat(tp->fw_ver, "sb");
14148 return;
14149 }
14150
14151 if (tg3_nvram_read(tp, 0, &val))
14152 return;
14153
14154 if (val == TG3_EEPROM_MAGIC)
14155 tg3_read_bc_ver(tp);
14156 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14157 tg3_read_sb_ver(tp, val);
14158 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14159 tg3_read_hwsb_ver(tp);
14160
14161 if (tg3_flag(tp, ENABLE_ASF)) {
14162 if (tg3_flag(tp, ENABLE_APE)) {
14163 tg3_probe_ncsi(tp);
14164 if (!vpd_vers)
14165 tg3_read_dash_ver(tp);
14166 } else if (!vpd_vers) {
14167 tg3_read_mgmtfw_ver(tp);
14168 }
14169 }
14170
14171 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14172 }
14173
14174 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14175 {
14176 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14177 return TG3_RX_RET_MAX_SIZE_5717;
14178 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14179 return TG3_RX_RET_MAX_SIZE_5700;
14180 else
14181 return TG3_RX_RET_MAX_SIZE_5705;
14182 }
14183
14184 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14185 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14186 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14187 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14188 { },
14189 };
14190
14191 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14192 {
14193 struct pci_dev *peer;
14194 unsigned int func, devnr = tp->pdev->devfn & ~7;
14195
14196 for (func = 0; func < 8; func++) {
14197 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14198 if (peer && peer != tp->pdev)
14199 break;
14200 pci_dev_put(peer);
14201 }
14202 /* 5704 can be configured in single-port mode; set peer to
14203 * tp->pdev in that case.
14204 */
14205 if (!peer) {
14206 peer = tp->pdev;
14207 return peer;
14208 }
14209
14210 /*
14211 * We don't need to keep the refcount elevated; there's no way
14212 * to remove one half of this device without removing the other.
14213 */
14214 pci_dev_put(peer);
14215
14216 return peer;
14217 }
14218
14219 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14220 {
14221 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
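/* The chip revision ID is a packed value: GET_ASIC_REV() extracts
 * the ASIC family, GET_CHIP_REV() the family plus major stepping,
 * and the full value names the exact revision, e.g.
 * CHIPREV_ID_5752_A0.  Newer devices report a placeholder here
 * and publish the real revision in a product ID config register,
 * handled below.
 */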
14222 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14223 u32 reg;
14224
14225 /* All devices that use the alternate
14226 * ASIC REV location have a CPMU.
14227 */
14228 tg3_flag_set(tp, CPMU_PRESENT);
14229
14230 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14231 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14232 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14233 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14234 reg = TG3PCI_GEN2_PRODID_ASICREV;
14235 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14236 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14237 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14238 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14239 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14240 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14241 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14242 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14243 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14244 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14245 reg = TG3PCI_GEN15_PRODID_ASICREV;
14246 else
14247 reg = TG3PCI_PRODID_ASICREV;
14248
14249 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14250 }
14251
14252 /* Wrong chip ID in 5752 A0. This code can be removed later
14253 * as A0 is not in production.
14254 */
14255 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14256 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14257
14258 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14259 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14261 tg3_flag_set(tp, 5717_PLUS);
14262
14263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14265 tg3_flag_set(tp, 57765_CLASS);
14266
14267 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14268 tg3_flag_set(tp, 57765_PLUS);
14269
14270 /* Intentionally exclude ASIC_REV_5906 */
14271 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14274 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14275 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14276 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14277 tg3_flag(tp, 57765_PLUS))
14278 tg3_flag_set(tp, 5755_PLUS);
14279
14280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14281 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14282 tg3_flag_set(tp, 5780_CLASS);
14283
14284 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14287 tg3_flag(tp, 5755_PLUS) ||
14288 tg3_flag(tp, 5780_CLASS))
14289 tg3_flag_set(tp, 5750_PLUS);
14290
14291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14292 tg3_flag(tp, 5750_PLUS))
14293 tg3_flag_set(tp, 5705_PLUS);
14294 }
14295
14296 static int __devinit tg3_get_invariants(struct tg3 *tp)
14297 {
14298 u32 misc_ctrl_reg;
14299 u32 pci_state_reg, grc_misc_cfg;
14300 u32 val;
14301 u16 pci_cmd;
14302 int err;
14303
14304 /* Force memory write invalidate off. If we leave it on,
14305 * then on 5700_BX chips we have to enable a workaround.
14306 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14307 * to match the cacheline size. The Broadcom driver has this
14308 * workaround but turns MWI off all the time and so never uses
14309 * it. This seems to suggest that the workaround is insufficient.
14310 */
14311 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14312 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14313 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14314
14315 /* Important! -- Make sure register accesses are byteswapped
14316 * correctly. Also, for those chips that require it, make
14317 * sure that indirect register accesses are enabled before
14318 * the first operation.
14319 */
14320 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14321 &misc_ctrl_reg);
14322 tp->misc_host_ctrl |= (misc_ctrl_reg &
14323 MISC_HOST_CTRL_CHIPREV);
14324 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14325 tp->misc_host_ctrl);
14326
14327 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14328
14329 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14330 * we need to disable memory and use config. cycles
14331 * only to access all registers. The 5702/03 chips
14332 * can mistakenly decode the special cycles from the
14333 * ICH chipsets as memory write cycles, causing corruption
14334 * of register and memory space. Only certain ICH bridges
14335 * will drive special cycles with non-zero data during the
14336 * address phase which can fall within the 5703's address
14337 * range. This is not an ICH bug as the PCI spec allows
14338 * non-zero address during special cycles. However, only
14339 * these ICH bridges are known to drive non-zero addresses
14340 * during special cycles.
14341 *
14342 * Since special cycles do not cross PCI bridges, we only
14343 * enable this workaround if the 5703 is on the secondary
14344 * bus of these ICH bridges.
14345 */
14346 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14347 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14348 static struct tg3_dev_id {
14349 u32 vendor;
14350 u32 device;
14351 u32 rev;
14352 } ich_chipsets[] = {
14353 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14354 PCI_ANY_ID },
14355 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14356 PCI_ANY_ID },
14357 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14358 0xa },
14359 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14360 PCI_ANY_ID },
14361 { },
14362 };
14363 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14364 struct pci_dev *bridge = NULL;
14365
14366 while (pci_id->vendor != 0) {
14367 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14368 bridge);
14369 if (!bridge) {
14370 pci_id++;
14371 continue;
14372 }
14373 if (pci_id->rev != PCI_ANY_ID) {
14374 if (bridge->revision > pci_id->rev)
14375 continue;
14376 }
14377 if (bridge->subordinate &&
14378 (bridge->subordinate->number ==
14379 tp->pdev->bus->number)) {
14380 tg3_flag_set(tp, ICH_WORKAROUND);
14381 pci_dev_put(bridge);
14382 break;
14383 }
14384 }
14385 }
14386
14387 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14388 static struct tg3_dev_id {
14389 u32 vendor;
14390 u32 device;
14391 } bridge_chipsets[] = {
14392 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14393 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14394 { },
14395 };
14396 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14397 struct pci_dev *bridge = NULL;
14398
14399 while (pci_id->vendor != 0) {
14400 bridge = pci_get_device(pci_id->vendor,
14401 pci_id->device,
14402 bridge);
14403 if (!bridge) {
14404 pci_id++;
14405 continue;
14406 }
14407 if (bridge->subordinate &&
14408 (bridge->subordinate->number <=
14409 tp->pdev->bus->number) &&
14410 (bridge->subordinate->busn_res.end >=
14411 tp->pdev->bus->number)) {
14412 tg3_flag_set(tp, 5701_DMA_BUG);
14413 pci_dev_put(bridge);
14414 break;
14415 }
14416 }
14417 }
14418
14419 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14420 * DMA addresses > 40-bit. This bridge may have additional
14421 * 57xx devices behind it, in some 4-port NIC designs for example.
14422 * Any tg3 device found behind the bridge will also need the 40-bit
14423 * DMA workaround.
14424 */
14425 if (tg3_flag(tp, 5780_CLASS)) {
14426 tg3_flag_set(tp, 40BIT_DMA_BUG);
14427 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14428 } else {
14429 struct pci_dev *bridge = NULL;
14430
14431 do {
14432 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14433 PCI_DEVICE_ID_SERVERWORKS_EPB,
14434 bridge);
14435 if (bridge && bridge->subordinate &&
14436 (bridge->subordinate->number <=
14437 tp->pdev->bus->number) &&
14438 (bridge->subordinate->busn_res.end >=
14439 tp->pdev->bus->number)) {
14440 tg3_flag_set(tp, 40BIT_DMA_BUG);
14441 pci_dev_put(bridge);
14442 break;
14443 }
14444 } while (bridge);
14445 }
14446
14447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14448 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14449 tp->pdev_peer = tg3_find_peer(tp);
14450
14451 /* Determine TSO capabilities */
14452 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14453 ; /* Do nothing. HW bug. */
14454 else if (tg3_flag(tp, 57765_PLUS))
14455 tg3_flag_set(tp, HW_TSO_3);
14456 else if (tg3_flag(tp, 5755_PLUS) ||
14457 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14458 tg3_flag_set(tp, HW_TSO_2);
14459 else if (tg3_flag(tp, 5750_PLUS)) {
14460 tg3_flag_set(tp, HW_TSO_1);
14461 tg3_flag_set(tp, TSO_BUG);
14462 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14463 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14464 tg3_flag_clear(tp, TSO_BUG);
14465 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14466 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14467 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14468 tg3_flag_set(tp, TSO_BUG);
14469 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14470 tp->fw_needed = FIRMWARE_TG3TSO5;
14471 else
14472 tp->fw_needed = FIRMWARE_TG3TSO;
14473 }
14474
14475 /* Selectively allow TSO based on operating conditions */
14476 if (tg3_flag(tp, HW_TSO_1) ||
14477 tg3_flag(tp, HW_TSO_2) ||
14478 tg3_flag(tp, HW_TSO_3) ||
14479 tp->fw_needed) {
14480 /* For firmware TSO, assume ASF is disabled.
14481 * We'll disable TSO later if we discover ASF
14482 * is enabled in tg3_get_eeprom_hw_cfg().
14483 */
14484 tg3_flag_set(tp, TSO_CAPABLE);
14485 } else {
14486 tg3_flag_clear(tp, TSO_CAPABLE);
14487 tg3_flag_clear(tp, TSO_BUG);
14488 tp->fw_needed = NULL;
14489 }
14490
14491 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14492 tp->fw_needed = FIRMWARE_TG3;
14493
14494 tp->irq_max = 1;
14495
14496 if (tg3_flag(tp, 5750_PLUS)) {
14497 tg3_flag_set(tp, SUPPORT_MSI);
14498 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14499 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14500 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14501 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14502 tp->pdev_peer == tp->pdev))
14503 tg3_flag_clear(tp, SUPPORT_MSI);
14504
14505 if (tg3_flag(tp, 5755_PLUS) ||
14506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14507 tg3_flag_set(tp, 1SHOT_MSI);
14508 }
14509
14510 if (tg3_flag(tp, 57765_PLUS)) {
14511 tg3_flag_set(tp, SUPPORT_MSIX);
14512 tp->irq_max = TG3_IRQ_MAX_VECS;
14513 tg3_rss_init_dflt_indir_tbl(tp);
14514 }
14515 }
14516
14517 if (tg3_flag(tp, 5755_PLUS) ||
14518 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14519 tg3_flag_set(tp, SHORT_DMA_BUG);
14520
14521 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14522 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14523
14524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14527 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14528
14529 if (tg3_flag(tp, 57765_PLUS) &&
14530 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14531 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14532
14533 if (!tg3_flag(tp, 5705_PLUS) ||
14534 tg3_flag(tp, 5780_CLASS) ||
14535 tg3_flag(tp, USE_JUMBO_BDFLAG))
14536 tg3_flag_set(tp, JUMBO_CAPABLE);
14537
14538 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14539 &pci_state_reg);
14540
14541 if (pci_is_pcie(tp->pdev)) {
14542 u16 lnkctl;
14543
14544 tg3_flag_set(tp, PCI_EXPRESS);
14545
14546 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
14547 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14548 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14549 ASIC_REV_5906) {
14550 tg3_flag_clear(tp, HW_TSO_2);
14551 tg3_flag_clear(tp, TSO_CAPABLE);
14552 }
14553 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14555 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14556 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14557 tg3_flag_set(tp, CLKREQ_BUG);
14558 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14559 tg3_flag_set(tp, L1PLLPD_EN);
14560 }
14561 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14562 /* BCM5785 devices are effectively PCIe devices, and should
14563 * follow PCIe codepaths, but do not have a PCIe capabilities
14564 * section.
14565 */
14566 tg3_flag_set(tp, PCI_EXPRESS);
14567 } else if (!tg3_flag(tp, 5705_PLUS) ||
14568 tg3_flag(tp, 5780_CLASS)) {
14569 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14570 if (!tp->pcix_cap) {
14571 dev_err(&tp->pdev->dev,
14572 "Cannot find PCI-X capability, aborting\n");
14573 return -EIO;
14574 }
14575
14576 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14577 tg3_flag_set(tp, PCIX_MODE);
14578 }
14579
14580 /* If we have an AMD 762 or VIA K8T800 chipset, write
14581 * reordering to the mailbox registers done by the host
14582 * controller can cause major troubles. We read back from
14583 * every mailbox register write to force the writes to be
14584 * posted to the chip in order.
14585 */
14586 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14587 !tg3_flag(tp, PCI_EXPRESS))
14588 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14589
14590 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14591 &tp->pci_cacheline_sz);
14592 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14593 &tp->pci_lat_timer);
14594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14595 tp->pci_lat_timer < 64) {
14596 tp->pci_lat_timer = 64;
14597 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14598 tp->pci_lat_timer);
14599 }
14600
14601 /* Important! -- It is critical that the PCI-X hw workaround
14602 * situation is decided before the first MMIO register access.
14603 */
14604 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14605 /* 5700 BX chips need to have their TX producer index
14606 * mailboxes written twice to workaround a bug.
14607 */
14608 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14609
14610 /* If we are in PCI-X mode, enable register write workaround.
14611 *
14612 * The workaround is to use indirect register accesses
14613 * for all chip writes not to mailbox registers.
14614 */
14615 if (tg3_flag(tp, PCIX_MODE)) {
14616 u32 pm_reg;
14617
14618 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14619
14620 /* The chip can have its power management PCI config
14621 * space registers clobbered due to this bug.
14622 * So explicitly force the chip into D0 here.
14623 */
14624 pci_read_config_dword(tp->pdev,
14625 tp->pm_cap + PCI_PM_CTRL,
14626 &pm_reg);
14627 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14628 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14629 pci_write_config_dword(tp->pdev,
14630 tp->pm_cap + PCI_PM_CTRL,
14631 pm_reg);
14632
14633 /* Also, force SERR#/PERR# in PCI command. */
14634 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14635 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14636 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14637 }
14638 }
14639
14640 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14641 tg3_flag_set(tp, PCI_HIGH_SPEED);
14642 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14643 tg3_flag_set(tp, PCI_32BIT);
14644
14645 /* Chip-specific fixup from Broadcom driver */
14646 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14647 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14648 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14649 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14650 }
14651
14652 /* Default fast path register access methods */
14653 tp->read32 = tg3_read32;
14654 tp->write32 = tg3_write32;
14655 tp->read32_mbox = tg3_read32;
14656 tp->write32_mbox = tg3_write32;
14657 tp->write32_tx_mbox = tg3_write32;
14658 tp->write32_rx_mbox = tg3_write32;
14659
14660 /* Various workaround register access methods */
14661 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14662 tp->write32 = tg3_write_indirect_reg32;
14663 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14664 (tg3_flag(tp, PCI_EXPRESS) &&
14665 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14666 /*
14667 * Back-to-back register writes can cause problems on these
14668 * chips; the workaround is to read back all reg writes
14669 * except those to mailbox regs.
14670 *
14671 * See tg3_write_indirect_reg32().
14672 */
14673 tp->write32 = tg3_write_flush_reg32;
14674 }
14675
14676 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14677 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14678 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14679 tp->write32_rx_mbox = tg3_write_flush_reg32;
14680 }
14681
14682 if (tg3_flag(tp, ICH_WORKAROUND)) {
14683 tp->read32 = tg3_read_indirect_reg32;
14684 tp->write32 = tg3_write_indirect_reg32;
14685 tp->read32_mbox = tg3_read_indirect_mbox;
14686 tp->write32_mbox = tg3_write_indirect_mbox;
14687 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14688 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14689
14690 iounmap(tp->regs);
14691 tp->regs = NULL;
14692
14693 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14694 pci_cmd &= ~PCI_COMMAND_MEMORY;
14695 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14696 }
14697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14698 tp->read32_mbox = tg3_read32_mbox_5906;
14699 tp->write32_mbox = tg3_write32_mbox_5906;
14700 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14701 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14702 }
14703
14704 if (tp->write32 == tg3_write_indirect_reg32 ||
14705 (tg3_flag(tp, PCIX_MODE) &&
14706 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14707 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14708 tg3_flag_set(tp, SRAM_USE_CONFIG);
14709
14710 /* The memory arbiter has to be enabled in order for SRAM accesses
14711 * to succeed. Normally on powerup the tg3 chip firmware will make
14712 * sure it is enabled, but other entities such as system netboot
14713 * code might disable it.
14714 */
14715 val = tr32(MEMARB_MODE);
14716 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14717
14718 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14720 tg3_flag(tp, 5780_CLASS)) {
14721 if (tg3_flag(tp, PCIX_MODE)) {
14722 pci_read_config_dword(tp->pdev,
14723 tp->pcix_cap + PCI_X_STATUS,
14724 &val);
14725 tp->pci_fn = val & 0x7;
14726 }
14727 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14728 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14729 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14730 NIC_SRAM_CPMUSTAT_SIG) {
14731 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14732 tp->pci_fn = tp->pci_fn ? 1 : 0;
14733 }
14734 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14735 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14736 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14737 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14738 NIC_SRAM_CPMUSTAT_SIG) {
14739 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14740 TG3_CPMU_STATUS_FSHFT_5719;
14741 }
14742 }
14743
14744 /* Get eeprom hw config before calling tg3_set_power_state().
14745 * In particular, the TG3_FLAG_IS_NIC flag must be
14746 * determined before calling tg3_set_power_state() so that
14747 * we know whether or not to switch out of Vaux power.
14748 * When the flag is clear, it means that GPIO1 is used for eeprom
14749 * write protect and also implies that it is a LOM where GPIOs
14750 * are not used to switch power.
14751 */
14752 tg3_get_eeprom_hw_cfg(tp);
14753
14754 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14755 tg3_flag_clear(tp, TSO_CAPABLE);
14756 tg3_flag_clear(tp, TSO_BUG);
14757 tp->fw_needed = NULL;
14758 }
14759
14760 if (tg3_flag(tp, ENABLE_APE)) {
14761 /* Allow reads and writes to the
14762 * APE register and memory space.
14763 */
14764 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14765 PCISTATE_ALLOW_APE_SHMEM_WR |
14766 PCISTATE_ALLOW_APE_PSPACE_WR;
14767 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14768 pci_state_reg);
14769
14770 tg3_ape_lock_init(tp);
14771 }
14772
14773 /* Set up tp->grc_local_ctrl before calling
14774 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14775 * will bring 5700's external PHY out of reset.
14776 * It is also used as eeprom write protect on LOMs.
14777 */
14778 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14779 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14780 tg3_flag(tp, EEPROM_WRITE_PROT))
14781 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14782 GRC_LCLCTRL_GPIO_OUTPUT1);
14783 /* Unused GPIO3 must be driven as output on 5752 because there
14784 * are no pull-up resistors on unused GPIO pins.
14785 */
14786 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14787 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14788
14789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14790 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14791 tg3_flag(tp, 57765_CLASS))
14792 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14793
14794 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14795 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14796 /* Turn off the debug UART. */
14797 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14798 if (tg3_flag(tp, IS_NIC))
14799 /* Keep VMain power. */
14800 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14801 GRC_LCLCTRL_GPIO_OUTPUT0;
14802 }
14803
14804 /* Switch out of Vaux if it is a NIC */
14805 tg3_pwrsrc_switch_to_vmain(tp);
14806
14807 /* Derive initial jumbo mode from MTU assigned in
14808 * ether_setup() via the alloc_etherdev() call
14809 */
14810 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14811 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14812
14813 /* Determine WakeOnLan speed to use. */
14814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14815 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14816 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14817 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14818 tg3_flag_clear(tp, WOL_SPEED_100MB);
14819 } else {
14820 tg3_flag_set(tp, WOL_SPEED_100MB);
14821 }
14822
14823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14824 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14825
14826 /* A few boards don't want Ethernet@WireSpeed phy feature */
14827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14828 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14829 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14830 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14831 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14832 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14833 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14834
14835 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14836 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14837 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14838 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14839 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14840
14841 if (tg3_flag(tp, 5705_PLUS) &&
14842 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14843 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14844 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14845 !tg3_flag(tp, 57765_PLUS)) {
14846 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14847 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14850 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14851 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14852 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14853 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14854 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14855 } else
14856 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14857 }
14858
14859 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14860 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14861 tp->phy_otp = tg3_read_otp_phycfg(tp);
14862 if (tp->phy_otp == 0)
14863 tp->phy_otp = TG3_OTP_DEFAULT;
14864 }
14865
14866 if (tg3_flag(tp, CPMU_PRESENT))
14867 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14868 else
14869 tp->mi_mode = MAC_MI_MODE_BASE;
14870
14871 tp->coalesce_mode = 0;
14872 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14873 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14874 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14875
14876 /* Set these bits to enable statistics workaround. */
14877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14878 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14879 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14880 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14881 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14882 }
14883
14884 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14885 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14886 tg3_flag_set(tp, USE_PHYLIB);
14887
14888 err = tg3_mdio_init(tp);
14889 if (err)
14890 return err;
14891
14892 /* Initialize data/descriptor byte/word swapping. */
14893 val = tr32(GRC_MODE);
14894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14895 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14896 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14897 GRC_MODE_B2HRX_ENABLE |
14898 GRC_MODE_HTX2B_ENABLE |
14899 GRC_MODE_HOST_STACKUP);
14900 else
14901 val &= GRC_MODE_HOST_STACKUP;
14902
14903 tw32(GRC_MODE, val | tp->grc_mode);
14904
14905 tg3_switch_clocks(tp);
14906
14907 /* Clear this out for sanity. */
14908 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14909
14910 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14911 &pci_state_reg);
14912 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14913 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14914 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14915
14916 if (chiprevid == CHIPREV_ID_5701_A0 ||
14917 chiprevid == CHIPREV_ID_5701_B0 ||
14918 chiprevid == CHIPREV_ID_5701_B2 ||
14919 chiprevid == CHIPREV_ID_5701_B5) {
14920 void __iomem *sram_base;
14921
14922 /* Write some dummy words into the SRAM status block
14923 * area and see if it reads back correctly. If the return
14924 * value is bad, force-enable the PCIX workaround.
14925 */
14926 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14927
14928 writel(0x00000000, sram_base);
14929 writel(0x00000000, sram_base + 4);
14930 writel(0xffffffff, sram_base + 4);
14931 if (readl(sram_base) != 0x00000000)
14932 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14933 }
14934 }
14935
14936 udelay(50);
14937 tg3_nvram_init(tp);
14938
14939 grc_misc_cfg = tr32(GRC_MISC_CFG);
14940 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14941
14942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14943 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14944 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14945 tg3_flag_set(tp, IS_5788);
14946
14947 if (!tg3_flag(tp, IS_5788) &&
14948 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14949 tg3_flag_set(tp, TAGGED_STATUS);
14950 if (tg3_flag(tp, TAGGED_STATUS)) {
14951 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14952 HOSTCC_MODE_CLRTICK_TXBD);
14953
14954 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14955 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14956 tp->misc_host_ctrl);
14957 }
14958
14959 /* Preserve the APE MAC_MODE bits */
14960 if (tg3_flag(tp, ENABLE_APE))
14961 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14962 else
14963 tp->mac_mode = 0;
14964
14965 /* these are limited to 10/100 only */
14966 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14967 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14968 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14969 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14970 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14971 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14972 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14973 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14974 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14975 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14976 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14977 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14978 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14979 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14980 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14981 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14982
14983 err = tg3_phy_probe(tp);
14984 if (err) {
14985 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14986 /* ... but do not return immediately ... */
14987 tg3_mdio_fini(tp);
14988 }
14989
14990 tg3_read_vpd(tp);
14991 tg3_read_fw_ver(tp);
14992
14993 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14994 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14995 } else {
14996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14997 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14998 else
14999 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15000 }
15001
15002 /* 5700 {AX,BX} chips have a broken status block link
15003 * change bit implementation, so we must use the
15004 * status register in those cases.
15005 */
15006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15007 tg3_flag_set(tp, USE_LINKCHG_REG);
15008 else
15009 tg3_flag_clear(tp, USE_LINKCHG_REG);
15010
15011 /* The led_ctrl is set during tg3_phy_probe; here we might
15012 * have to force the link status polling mechanism based
15013 * upon subsystem IDs.
15014 */
15015 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15017 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15018 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15019 tg3_flag_set(tp, USE_LINKCHG_REG);
15020 }
15021
15022 /* For all SERDES we poll the MAC status register. */
15023 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15024 tg3_flag_set(tp, POLL_SERDES);
15025 else
15026 tg3_flag_clear(tp, POLL_SERDES);
15027
15028 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15029 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15030 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15031 tg3_flag(tp, PCIX_MODE)) {
15032 tp->rx_offset = NET_SKB_PAD;
15033 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15034 tp->rx_copy_thresh = ~(u16)0;
15035 #endif
15036 }
15037
15038 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15039 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15040 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15041
15042 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15043
15044 /* Increment the rx prod index on the rx std ring by at most
15045 * 8 for these chips to work around hw errata.
15046 */
15047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15050 tp->rx_std_max_post = 8;
15051
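/* Cache the preprogrammed L1 entry threshold so the ASPM
 * workaround paths can rewrite PCIE_PWR_MGMT_THRESH later
 * without losing it.
 */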
15052 if (tg3_flag(tp, ASPM_WORKAROUND))
15053 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15054 PCIE_PWR_MGMT_L1_THRESH_MSK;
15055
15056 return err;
15057 }
15058
15059 #ifdef CONFIG_SPARC
15060 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15061 {
15062 struct net_device *dev = tp->dev;
15063 struct pci_dev *pdev = tp->pdev;
15064 struct device_node *dp = pci_device_to_OF_node(pdev);
15065 const unsigned char *addr;
15066 int len;
15067
15068 addr = of_get_property(dp, "local-mac-address", &len);
15069 if (addr && len == 6) {
15070 memcpy(dev->dev_addr, addr, 6);
15071 memcpy(dev->perm_addr, dev->dev_addr, 6);
15072 return 0;
15073 }
15074 return -ENODEV;
15075 }
15076
15077 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15078 {
15079 struct net_device *dev = tp->dev;
15080
15081 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15082 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15083 return 0;
15084 }
15085 #endif
15086
15087 static int __devinit tg3_get_device_address(struct tg3 *tp)
15088 {
15089 struct net_device *dev = tp->dev;
15090 u32 hi, lo, mac_offset;
15091 int addr_ok = 0;
15092
15093 #ifdef CONFIG_SPARC
15094 if (!tg3_get_macaddr_sparc(tp))
15095 return 0;
15096 #endif
15097
15098 mac_offset = 0x7c;
15099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15100 tg3_flag(tp, 5780_CLASS)) {
15101 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15102 mac_offset = 0xcc;
15103 if (tg3_nvram_lock(tp))
15104 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15105 else
15106 tg3_nvram_unlock(tp);
15107 } else if (tg3_flag(tp, 5717_PLUS)) {
15108 if (tp->pci_fn & 1)
15109 mac_offset = 0xcc;
15110 if (tp->pci_fn > 1)
15111 mac_offset += 0x18c;
15112 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15113 mac_offset = 0x10;
15114
15115 /* First try to get it from MAC address mailbox. */
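/* A valid mailbox entry carries the signature 0x484b ("HK" in
 * ASCII) in the upper 16 bits of the high word, with the first
 * two bytes of the MAC address in the lower 16 bits.
 */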
15116 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15117 if ((hi >> 16) == 0x484b) {
15118 dev->dev_addr[0] = (hi >> 8) & 0xff;
15119 dev->dev_addr[1] = (hi >> 0) & 0xff;
15120
15121 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15122 dev->dev_addr[2] = (lo >> 24) & 0xff;
15123 dev->dev_addr[3] = (lo >> 16) & 0xff;
15124 dev->dev_addr[4] = (lo >> 8) & 0xff;
15125 dev->dev_addr[5] = (lo >> 0) & 0xff;
15126
15127 /* Some old bootcode may report a 0 MAC address in SRAM */
15128 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15129 }
15130 if (!addr_ok) {
15131 /* Next, try NVRAM. */
15132 if (!tg3_flag(tp, NO_NVRAM) &&
15133 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15134 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15135 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15136 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15137 }
15138 /* Finally just fetch it out of the MAC control regs. */
15139 else {
15140 hi = tr32(MAC_ADDR_0_HIGH);
15141 lo = tr32(MAC_ADDR_0_LOW);
15142
15143 dev->dev_addr[5] = lo & 0xff;
15144 dev->dev_addr[4] = (lo >> 8) & 0xff;
15145 dev->dev_addr[3] = (lo >> 16) & 0xff;
15146 dev->dev_addr[2] = (lo >> 24) & 0xff;
15147 dev->dev_addr[1] = hi & 0xff;
15148 dev->dev_addr[0] = (hi >> 8) & 0xff;
15149 }
15150 }
15151
15152 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15153 #ifdef CONFIG_SPARC
15154 if (!tg3_get_default_macaddr_sparc(tp))
15155 return 0;
15156 #endif
15157 return -EINVAL;
15158 }
15159 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15160 return 0;
15161 }
15162
15163 #define BOUNDARY_SINGLE_CACHELINE 1
15164 #define BOUNDARY_MULTI_CACHELINE 2
15165
15166 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15167 {
15168 int cacheline_size;
15169 u8 byte;
15170 int goal;
15171
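/* PCI_CACHE_LINE_SIZE is specified in units of 32-bit words,
 * hence the multiply by 4; an unprogrammed (zero) register is
 * treated as the 1024-byte worst case.
 */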
15172 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15173 if (byte == 0)
15174 cacheline_size = 1024;
15175 else
15176 cacheline_size = (int) byte * 4;
15177
15178 /* On 5703 and later chips, the boundary bits have no
15179 * effect.
15180 */
15181 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15182 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15183 !tg3_flag(tp, PCI_EXPRESS))
15184 goto out;
15185
15186 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15187 goal = BOUNDARY_MULTI_CACHELINE;
15188 #else
15189 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15190 goal = BOUNDARY_SINGLE_CACHELINE;
15191 #else
15192 goal = 0;
15193 #endif
15194 #endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}

static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

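	/* Copy the descriptor into NIC SRAM one 32-bit word at a time,
	 * going through the indirect memory window in PCI config space.
	 */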
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

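	/* Poll the completion FIFO for up to 4ms (40 * 100us), waiting for
	 * the descriptor we just enqueued to come back.
	 */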
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000

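/* Chipsets on this list are known to expose the 5700/5701 write DMA bug
 * even when the DMA test below passes; see the pci_dev_present() check
 * in tg3_test_dma().
 */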
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

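	/* Write a known pattern out to the chip and read it back.  On a
	 * mismatch, fall back once to a 16-byte write boundary and retry;
	 * a second corruption is fatal.
	 */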
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

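		/* The low five bits of CLOCK_CTRL encode the PCI-X bus
		 * frequency; decode them into a printable speed below.
		 */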
		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
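		/* The first interrupt mailboxes are spaced 8 bytes apart;
		 * the remaining vectors use a packed 4-byte stride.
		 */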
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

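		/* The extra send producer mailboxes are not contiguous in
		 * register space, hence the non-linear stepping below.
		 */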
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
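	/* If the chip cannot be prepared for power-down, undo the halt and
	 * bring the interface fully back up so the device stays usable.
	 */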
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
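	/* A permanent failure means the device is gone; tell the PCI core
	 * to disconnect it.  Otherwise disable it and await the slot reset.
	 */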
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

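	/* Restore the config space captured by pci_save_state() at probe
	 * time, then save it again so a future error can also be recovered.
	 */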
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);