tg3: Remove short DMA check for 1st fragment
1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
8 *
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
16 */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0 0
62 #define BAR_2 2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70 return test_bit(flag, bits);
71 }
72
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 set_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 clear_bit(flag, bits);
81 }
82
83 #define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
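/*
 * Illustration: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the flag bitmap, so feature checks need no extra locking.
 */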
89
90 #define DRV_MODULE_NAME "tg3"
91 #define TG3_MAJ_NUM 3
92 #define TG3_MIN_NUM 119
93 #define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE "May 18, 2011"
96
97 #define TG3_DEF_MAC_MODE 0
98 #define TG3_DEF_RX_MODE 0
99 #define TG3_DEF_TX_MODE 0
100 #define TG3_DEF_MSG_ENABLE \
101 (NETIF_MSG_DRV | \
102 NETIF_MSG_PROBE | \
103 NETIF_MSG_LINK | \
104 NETIF_MSG_TIMER | \
105 NETIF_MSG_IFDOWN | \
106 NETIF_MSG_IFUP | \
107 NETIF_MSG_RX_ERR | \
108 NETIF_MSG_TX_ERR)
109
110 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
111
112 /* length of time before we decide the hardware is borked,
113 * and dev->tx_timeout() should be called to fix the problem
114 */
115
116 #define TG3_TX_TIMEOUT (5 * HZ)
117
118 /* hardware minimum and maximum for a single frame's data payload */
119 #define TG3_MIN_MTU 60
120 #define TG3_MAX_MTU(tp) \
121 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
122
123 /* These numbers seem to be hard coded in the NIC firmware somehow.
124 * You can't change the ring sizes, but you can change where you place
125 * them in the NIC onboard memory.
126 */
127 #define TG3_RX_STD_RING_SIZE(tp) \
128 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
129 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
130 #define TG3_DEF_RX_RING_PENDING 200
131 #define TG3_RX_JMB_RING_SIZE(tp) \
132 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
133 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
134 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
135 #define TG3_RSS_INDIR_TBL_SIZE 128
136
137 /* Do not place this n-ring entries value into the tp struct itself;
138 * we really want to expose these constants to GCC so that modulo et
139 * al. operations are done with shifts and masks instead of with
140 * hw multiply/modulo instructions. Another solution would be to
141 * replace things like '% foo' with '& (foo - 1)'.
142 */
143
144 #define TG3_TX_RING_SIZE 512
145 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
146
147 #define TG3_RX_STD_RING_BYTES(tp) \
148 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
149 #define TG3_RX_JMB_RING_BYTES(tp) \
150 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
151 #define TG3_RX_RCB_RING_BYTES(tp) \
152 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
153 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
154 TG3_TX_RING_SIZE)
155 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
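/*
 * Illustration: because TG3_TX_RING_SIZE is a power of two, the AND mask
 * above wraps the producer index without a hardware modulo, e.g.
 *	NEXT_TX(511) == (511 + 1) & 511 == 0
 */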
156
157 #define TG3_DMA_BYTE_ENAB 64
158
159 #define TG3_RX_STD_DMA_SZ 1536
160 #define TG3_RX_JMB_DMA_SZ 9046
161
162 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
163
164 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
165 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
166
167 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
168 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
169
170 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
171 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
172
173 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
174 * that are at least dword aligned when used in PCIX mode. The driver
175 * works around this bug by double copying the packet. This workaround
176 * is built into the normal double copy length check for efficiency.
177 *
178 * However, the double copy is only necessary on those architectures
179 * where unaligned memory accesses are inefficient. For those architectures
180 * where unaligned memory accesses incur little penalty, we can reintegrate
181 * the 5701 in the normal rx path. Doing so saves a device structure
182 * dereference by hardcoding the double copy threshold in place.
183 */
184 #define TG3_RX_COPY_THRESHOLD 256
185 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
186 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
187 #else
188 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
189 #endif
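/*
 * Illustration: on configurations where unaligned access is cheap (or
 * NET_IP_ALIGN is 0), TG3_RX_COPY_THRESH(tp) folds to the constant 256,
 * so the tp->rx_copy_thresh dereference disappears from the hot rx path.
 */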
190
191 /* minimum number of free TX descriptors required to wake up TX process */
192 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
193
194 #define TG3_RAW_IP_ALIGN 2
195
196 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
197
198 #define FIRMWARE_TG3 "tigon/tg3.bin"
199 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
200 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
201
202 static char version[] __devinitdata =
203 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
204
205 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
206 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
207 MODULE_LICENSE("GPL");
208 MODULE_VERSION(DRV_MODULE_VERSION);
209 MODULE_FIRMWARE(FIRMWARE_TG3);
210 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
211 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
212
213 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
214 module_param(tg3_debug, int, 0);
215 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
216
217 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
291 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
292 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
293 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
294 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
295 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
296 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
297 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
298 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
299 {}
300 };
301
302 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
303
304 static const struct {
305 const char string[ETH_GSTRING_LEN];
306 } ethtool_stats_keys[] = {
307 { "rx_octets" },
308 { "rx_fragments" },
309 { "rx_ucast_packets" },
310 { "rx_mcast_packets" },
311 { "rx_bcast_packets" },
312 { "rx_fcs_errors" },
313 { "rx_align_errors" },
314 { "rx_xon_pause_rcvd" },
315 { "rx_xoff_pause_rcvd" },
316 { "rx_mac_ctrl_rcvd" },
317 { "rx_xoff_entered" },
318 { "rx_frame_too_long_errors" },
319 { "rx_jabbers" },
320 { "rx_undersize_packets" },
321 { "rx_in_length_errors" },
322 { "rx_out_length_errors" },
323 { "rx_64_or_less_octet_packets" },
324 { "rx_65_to_127_octet_packets" },
325 { "rx_128_to_255_octet_packets" },
326 { "rx_256_to_511_octet_packets" },
327 { "rx_512_to_1023_octet_packets" },
328 { "rx_1024_to_1522_octet_packets" },
329 { "rx_1523_to_2047_octet_packets" },
330 { "rx_2048_to_4095_octet_packets" },
331 { "rx_4096_to_8191_octet_packets" },
332 { "rx_8192_to_9022_octet_packets" },
333
334 { "tx_octets" },
335 { "tx_collisions" },
336
337 { "tx_xon_sent" },
338 { "tx_xoff_sent" },
339 { "tx_flow_control" },
340 { "tx_mac_errors" },
341 { "tx_single_collisions" },
342 { "tx_mult_collisions" },
343 { "tx_deferred" },
344 { "tx_excessive_collisions" },
345 { "tx_late_collisions" },
346 { "tx_collide_2times" },
347 { "tx_collide_3times" },
348 { "tx_collide_4times" },
349 { "tx_collide_5times" },
350 { "tx_collide_6times" },
351 { "tx_collide_7times" },
352 { "tx_collide_8times" },
353 { "tx_collide_9times" },
354 { "tx_collide_10times" },
355 { "tx_collide_11times" },
356 { "tx_collide_12times" },
357 { "tx_collide_13times" },
358 { "tx_collide_14times" },
359 { "tx_collide_15times" },
360 { "tx_ucast_packets" },
361 { "tx_mcast_packets" },
362 { "tx_bcast_packets" },
363 { "tx_carrier_sense_errors" },
364 { "tx_discards" },
365 { "tx_errors" },
366
367 { "dma_writeq_full" },
368 { "dma_write_prioq_full" },
369 { "rxbds_empty" },
370 { "rx_discards" },
371 { "rx_errors" },
372 { "rx_threshold_hit" },
373
374 { "dma_readq_full" },
375 { "dma_read_prioq_full" },
376 { "tx_comp_queue_full" },
377
378 { "ring_set_send_prod_index" },
379 { "ring_status_update" },
380 { "nic_irqs" },
381 { "nic_avoided_irqs" },
382 { "nic_tx_threshold_hit" },
383
384 { "mbuf_lwm_thresh_hit" },
385 };
386
387 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
388
389
390 static const struct {
391 const char string[ETH_GSTRING_LEN];
392 } ethtool_test_keys[] = {
393 { "nvram test (online) " },
394 { "link test (online) " },
395 { "register test (offline)" },
396 { "memory test (offline)" },
397 { "loopback test (offline)" },
398 { "interrupt test (offline)" },
399 };
400
401 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
402
403
404 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
405 {
406 writel(val, tp->regs + off);
407 }
408
409 static u32 tg3_read32(struct tg3 *tp, u32 off)
410 {
411 return readl(tp->regs + off);
412 }
413
414 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
415 {
416 writel(val, tp->aperegs + off);
417 }
418
419 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
420 {
421 return readl(tp->aperegs + off);
422 }
423
424 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
425 {
426 unsigned long flags;
427
428 spin_lock_irqsave(&tp->indirect_lock, flags);
429 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
430 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
431 spin_unlock_irqrestore(&tp->indirect_lock, flags);
432 }
433
434 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
435 {
436 writel(val, tp->regs + off);
437 readl(tp->regs + off);
438 }
439
440 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
441 {
442 unsigned long flags;
443 u32 val;
444
445 spin_lock_irqsave(&tp->indirect_lock, flags);
446 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
447 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
448 spin_unlock_irqrestore(&tp->indirect_lock, flags);
449 return val;
450 }
451
452 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
453 {
454 unsigned long flags;
455
456 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
457 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
458 TG3_64BIT_REG_LOW, val);
459 return;
460 }
461 if (off == TG3_RX_STD_PROD_IDX_REG) {
462 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
463 TG3_64BIT_REG_LOW, val);
464 return;
465 }
466
467 spin_lock_irqsave(&tp->indirect_lock, flags);
468 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
469 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
470 spin_unlock_irqrestore(&tp->indirect_lock, flags);
471
472 /* In indirect mode when disabling interrupts, we also need
473 * to clear the interrupt bit in the GRC local ctrl register.
474 */
475 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
476 (val == 0x1)) {
477 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
478 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
479 }
480 }
481
482 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
483 {
484 unsigned long flags;
485 u32 val;
486
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
489 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
491 return val;
492 }
493
494 /* usec_wait specifies the wait time in usec when writing to certain registers
495 * where it is unsafe to read back the register without some delay.
496 * GRC_LOCAL_CTRL is one example, where the GPIOs are toggled to switch power.
497 * TG3PCI_CLOCK_CTRL is another, where the clock frequencies are changed.
498 */
499 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
500 {
501 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
502 /* Non-posted methods */
503 tp->write32(tp, off, val);
504 else {
505 /* Posted method */
506 tg3_write32(tp, off, val);
507 if (usec_wait)
508 udelay(usec_wait);
509 tp->read32(tp, off);
510 }
511 /* Wait again after the read for the posted method to guarantee that
512 * the wait time is met.
513 */
514 if (usec_wait)
515 udelay(usec_wait);
516 }
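/*
 * Illustrative use of the flush helpers (macro wrappers follow below):
 *	tw32_f(MAC_MODE, tp->mac_mode);		   write, then read back
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);   write, then 40 usec wait
 * The read back (or delay) keeps a posted PCI write from being reordered
 * past the point where the hardware must have acted on it.
 */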
517
518 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
519 {
520 tp->write32_mbox(tp, off, val);
521 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
522 tp->read32_mbox(tp, off);
523 }
524
525 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
526 {
527 void __iomem *mbox = tp->regs + off;
528 writel(val, mbox);
529 if (tg3_flag(tp, TXD_MBOX_HWBUG))
530 writel(val, mbox);
531 if (tg3_flag(tp, MBOX_WRITE_REORDER))
532 readl(mbox);
533 }
534
535 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
536 {
537 return readl(tp->regs + off + GRCMBOX_BASE);
538 }
539
540 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
541 {
542 writel(val, tp->regs + off + GRCMBOX_BASE);
543 }
544
545 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
546 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
547 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
548 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
549 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
550
551 #define tw32(reg, val) tp->write32(tp, reg, val)
552 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
553 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
554 #define tr32(reg) tp->read32(tp, reg)
555
556 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
557 {
558 unsigned long flags;
559
560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
561 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
562 return;
563
564 spin_lock_irqsave(&tp->indirect_lock, flags);
565 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
566 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
567 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
568
569 /* Always leave this as zero. */
570 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
571 } else {
572 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
573 tw32_f(TG3PCI_MEM_WIN_DATA, val);
574
575 /* Always leave this as zero. */
576 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
577 }
578 spin_unlock_irqrestore(&tp->indirect_lock, flags);
579 }
580
581 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
582 {
583 unsigned long flags;
584
585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
586 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
587 *val = 0;
588 return;
589 }
590
591 spin_lock_irqsave(&tp->indirect_lock, flags);
592 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
593 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
594 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
595
596 /* Always leave this as zero. */
597 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
598 } else {
599 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
600 *val = tr32(TG3PCI_MEM_WIN_DATA);
601
602 /* Always leave this as zero. */
603 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
604 }
605 spin_unlock_irqrestore(&tp->indirect_lock, flags);
606 }
607
608 static void tg3_ape_lock_init(struct tg3 *tp)
609 {
610 int i;
611 u32 regbase, bit;
612
613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
614 regbase = TG3_APE_LOCK_GRANT;
615 else
616 regbase = TG3_APE_PER_LOCK_GRANT;
617
618 /* Make sure the driver isn't holding any stale locks. */
619 for (i = 0; i < 8; i++) {
620 if (i == TG3_APE_LOCK_GPIO)
621 continue;
622 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
623 }
624
625 /* Clear the correct bit of the GPIO lock too. */
626 if (!tp->pci_fn)
627 bit = APE_LOCK_GRANT_DRIVER;
628 else
629 bit = 1 << tp->pci_fn;
630
631 tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
632 }
633
634 static int tg3_ape_lock(struct tg3 *tp, int locknum)
635 {
636 int i, off;
637 int ret = 0;
638 u32 status, req, gnt, bit;
639
640 if (!tg3_flag(tp, ENABLE_APE))
641 return 0;
642
643 switch (locknum) {
644 case TG3_APE_LOCK_GPIO:
645 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
646 return 0;
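		/* fall through */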
647 case TG3_APE_LOCK_GRC:
648 case TG3_APE_LOCK_MEM:
649 break;
650 default:
651 return -EINVAL;
652 }
653
654 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
655 req = TG3_APE_LOCK_REQ;
656 gnt = TG3_APE_LOCK_GRANT;
657 } else {
658 req = TG3_APE_PER_LOCK_REQ;
659 gnt = TG3_APE_PER_LOCK_GRANT;
660 }
661
662 off = 4 * locknum;
663
664 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
665 bit = APE_LOCK_REQ_DRIVER;
666 else
667 bit = 1 << tp->pci_fn;
668
669 tg3_ape_write32(tp, req + off, bit);
670
671 /* Wait for up to 1 millisecond to acquire lock. */
672 for (i = 0; i < 100; i++) {
673 status = tg3_ape_read32(tp, gnt + off);
674 if (status == bit)
675 break;
676 udelay(10);
677 }
678
679 if (status != bit) {
680 /* Revoke the lock request. */
681 tg3_ape_write32(tp, gnt + off, bit);
682 ret = -EBUSY;
683 }
684
685 return ret;
686 }
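/*
 * Illustrative pairing for the APE lock helpers (callers must check the
 * return value before touching state shared with the APE firmware):
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... access the shared resource ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */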
687
688 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
689 {
690 u32 gnt, bit;
691
692 if (!tg3_flag(tp, ENABLE_APE))
693 return;
694
695 switch (locknum) {
696 case TG3_APE_LOCK_GPIO:
697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
698 return;
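		/* fall through */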
699 case TG3_APE_LOCK_GRC:
700 case TG3_APE_LOCK_MEM:
701 break;
702 default:
703 return;
704 }
705
706 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
707 gnt = TG3_APE_LOCK_GRANT;
708 else
709 gnt = TG3_APE_PER_LOCK_GRANT;
710
711 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
712 bit = APE_LOCK_GRANT_DRIVER;
713 else
714 bit = 1 << tp->pci_fn;
715
716 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
717 }
718
719 static void tg3_disable_ints(struct tg3 *tp)
720 {
721 int i;
722
723 tw32(TG3PCI_MISC_HOST_CTRL,
724 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
725 for (i = 0; i < tp->irq_max; i++)
726 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
727 }
728
729 static void tg3_enable_ints(struct tg3 *tp)
730 {
731 int i;
732
733 tp->irq_sync = 0;
734 wmb();
735
736 tw32(TG3PCI_MISC_HOST_CTRL,
737 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
738
739 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
740 for (i = 0; i < tp->irq_cnt; i++) {
741 struct tg3_napi *tnapi = &tp->napi[i];
742
743 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
744 if (tg3_flag(tp, 1SHOT_MSI))
745 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
746
747 tp->coal_now |= tnapi->coal_now;
748 }
749
750 /* Force an initial interrupt */
751 if (!tg3_flag(tp, TAGGED_STATUS) &&
752 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
753 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
754 else
755 tw32(HOSTCC_MODE, tp->coal_now);
756
757 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
758 }
759
760 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
761 {
762 struct tg3 *tp = tnapi->tp;
763 struct tg3_hw_status *sblk = tnapi->hw_status;
764 unsigned int work_exists = 0;
765
766 /* check for phy events */
767 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
768 if (sblk->status & SD_STATUS_LINK_CHG)
769 work_exists = 1;
770 }
771 /* check for RX/TX work to do */
772 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
773 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
774 work_exists = 1;
775
776 return work_exists;
777 }
778
779 /* tg3_int_reenable
780 * similar to tg3_enable_ints, but it accurately determines whether there
781 * is new work pending and can return without flushing the PIO write
782 * that reenables interrupts.
783 */
784 static void tg3_int_reenable(struct tg3_napi *tnapi)
785 {
786 struct tg3 *tp = tnapi->tp;
787
788 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
789 mmiowb();
790
791 /* When doing tagged status, this work check is unnecessary.
792 * The last_tag we write above tells the chip which piece of
793 * work we've completed.
794 */
795 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
796 tw32(HOSTCC_MODE, tp->coalesce_mode |
797 HOSTCC_MODE_ENABLE | tnapi->coal_now);
798 }
799
800 static void tg3_switch_clocks(struct tg3 *tp)
801 {
802 u32 clock_ctrl;
803 u32 orig_clock_ctrl;
804
805 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
806 return;
807
808 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
809
810 orig_clock_ctrl = clock_ctrl;
811 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
812 CLOCK_CTRL_CLKRUN_OENABLE |
813 0x1f);
814 tp->pci_clock_ctrl = clock_ctrl;
815
816 if (tg3_flag(tp, 5705_PLUS)) {
817 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
818 tw32_wait_f(TG3PCI_CLOCK_CTRL,
819 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
820 }
821 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
822 tw32_wait_f(TG3PCI_CLOCK_CTRL,
823 clock_ctrl |
824 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
825 40);
826 tw32_wait_f(TG3PCI_CLOCK_CTRL,
827 clock_ctrl | (CLOCK_CTRL_ALTCLK),
828 40);
829 }
830 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
831 }
832
833 #define PHY_BUSY_LOOPS 5000
834
835 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
836 {
837 u32 frame_val;
838 unsigned int loops;
839 int ret;
840
841 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
842 tw32_f(MAC_MI_MODE,
843 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
844 udelay(80);
845 }
846
847 *val = 0x0;
848
849 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
850 MI_COM_PHY_ADDR_MASK);
851 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
852 MI_COM_REG_ADDR_MASK);
853 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
854
855 tw32_f(MAC_MI_COM, frame_val);
856
857 loops = PHY_BUSY_LOOPS;
858 while (loops != 0) {
859 udelay(10);
860 frame_val = tr32(MAC_MI_COM);
861
862 if ((frame_val & MI_COM_BUSY) == 0) {
863 udelay(5);
864 frame_val = tr32(MAC_MI_COM);
865 break;
866 }
867 loops -= 1;
868 }
869
870 ret = -EBUSY;
871 if (loops != 0) {
872 *val = frame_val & MI_COM_DATA_MASK;
873 ret = 0;
874 }
875
876 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
877 tw32_f(MAC_MI_MODE, tp->mi_mode);
878 udelay(80);
879 }
880
881 return ret;
882 }
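/*
 * Illustration: reading BMSR (reg 0x01) from PHY address 0x01 composes
 *	frame_val = (0x01 << MI_COM_PHY_ADDR_SHIFT) |
 *		    (0x01 << MI_COM_REG_ADDR_SHIFT) |
 *		    MI_COM_CMD_READ | MI_COM_START;
 * and then polls until the MAC clears MI_COM_BUSY.
 */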
883
884 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
885 {
886 u32 frame_val;
887 unsigned int loops;
888 int ret;
889
890 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
891 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
892 return 0;
893
894 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
895 tw32_f(MAC_MI_MODE,
896 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
897 udelay(80);
898 }
899
900 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
901 MI_COM_PHY_ADDR_MASK);
902 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
903 MI_COM_REG_ADDR_MASK);
904 frame_val |= (val & MI_COM_DATA_MASK);
905 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
906
907 tw32_f(MAC_MI_COM, frame_val);
908
909 loops = PHY_BUSY_LOOPS;
910 while (loops != 0) {
911 udelay(10);
912 frame_val = tr32(MAC_MI_COM);
913 if ((frame_val & MI_COM_BUSY) == 0) {
914 udelay(5);
915 frame_val = tr32(MAC_MI_COM);
916 break;
917 }
918 loops -= 1;
919 }
920
921 ret = -EBUSY;
922 if (loops != 0)
923 ret = 0;
924
925 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
926 tw32_f(MAC_MI_MODE, tp->mi_mode);
927 udelay(80);
928 }
929
930 return ret;
931 }
932
933 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
934 {
935 int err;
936
937 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
938 if (err)
939 goto done;
940
941 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
942 if (err)
943 goto done;
944
945 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
946 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
947 if (err)
948 goto done;
949
950 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
951
952 done:
953 return err;
954 }
955
956 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
957 {
958 int err;
959
960 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
961 if (err)
962 goto done;
963
964 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
965 if (err)
966 goto done;
967
968 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
969 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
970 if (err)
971 goto done;
972
973 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
974
975 done:
976 return err;
977 }
978
979 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
980 {
981 int err;
982
983 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
984 if (!err)
985 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
986
987 return err;
988 }
989
990 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
991 {
992 int err;
993
994 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
995 if (!err)
996 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
997
998 return err;
999 }
1000
1001 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1002 {
1003 int err;
1004
1005 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1006 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1007 MII_TG3_AUXCTL_SHDWSEL_MISC);
1008 if (!err)
1009 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1010
1011 return err;
1012 }
1013
1014 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1015 {
1016 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1017 set |= MII_TG3_AUXCTL_MISC_WREN;
1018
1019 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1020 }
1021
1022 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1023 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1024 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1025 MII_TG3_AUXCTL_ACTL_TX_6DB)
1026
1027 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1028 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1029 			     MII_TG3_AUXCTL_ACTL_TX_6DB)
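/*
 * Illustration: these two macros bracket DSP accesses -- after a
 * successful TG3_PHY_AUXCTL_SMDSP_ENABLE(), program the DSP registers,
 * then call TG3_PHY_AUXCTL_SMDSP_DISABLE() (see tg3_phy_apply_otp()
 * below). Note the DISABLE macro deliberately has no trailing
 * semicolon; callers terminate the statement themselves.
 */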
1030
1031 static int tg3_bmcr_reset(struct tg3 *tp)
1032 {
1033 u32 phy_control;
1034 int limit, err;
1035
1036 /* OK, reset it, and poll the BMCR_RESET bit until it
1037 * clears or we time out.
1038 */
1039 phy_control = BMCR_RESET;
1040 err = tg3_writephy(tp, MII_BMCR, phy_control);
1041 if (err != 0)
1042 return -EBUSY;
1043
1044 limit = 5000;
1045 while (limit--) {
1046 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1047 if (err != 0)
1048 return -EBUSY;
1049
1050 if ((phy_control & BMCR_RESET) == 0) {
1051 udelay(40);
1052 break;
1053 }
1054 udelay(10);
1055 }
1056 if (limit < 0)
1057 return -EBUSY;
1058
1059 return 0;
1060 }
1061
1062 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1063 {
1064 struct tg3 *tp = bp->priv;
1065 u32 val;
1066
1067 spin_lock_bh(&tp->lock);
1068
1069 if (tg3_readphy(tp, reg, &val))
1070 val = -EIO;
1071
1072 spin_unlock_bh(&tp->lock);
1073
1074 return val;
1075 }
1076
1077 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1078 {
1079 struct tg3 *tp = bp->priv;
1080 u32 ret = 0;
1081
1082 spin_lock_bh(&tp->lock);
1083
1084 if (tg3_writephy(tp, reg, val))
1085 ret = -EIO;
1086
1087 spin_unlock_bh(&tp->lock);
1088
1089 return ret;
1090 }
1091
1092 static int tg3_mdio_reset(struct mii_bus *bp)
1093 {
1094 return 0;
1095 }
1096
1097 static void tg3_mdio_config_5785(struct tg3 *tp)
1098 {
1099 u32 val;
1100 struct phy_device *phydev;
1101
1102 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1103 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1104 case PHY_ID_BCM50610:
1105 case PHY_ID_BCM50610M:
1106 val = MAC_PHYCFG2_50610_LED_MODES;
1107 break;
1108 case PHY_ID_BCMAC131:
1109 val = MAC_PHYCFG2_AC131_LED_MODES;
1110 break;
1111 case PHY_ID_RTL8211C:
1112 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1113 break;
1114 case PHY_ID_RTL8201E:
1115 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1116 break;
1117 default:
1118 return;
1119 }
1120
1121 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1122 tw32(MAC_PHYCFG2, val);
1123
1124 val = tr32(MAC_PHYCFG1);
1125 val &= ~(MAC_PHYCFG1_RGMII_INT |
1126 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1127 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1128 tw32(MAC_PHYCFG1, val);
1129
1130 return;
1131 }
1132
1133 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1134 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1135 MAC_PHYCFG2_FMODE_MASK_MASK |
1136 MAC_PHYCFG2_GMODE_MASK_MASK |
1137 MAC_PHYCFG2_ACT_MASK_MASK |
1138 MAC_PHYCFG2_QUAL_MASK_MASK |
1139 MAC_PHYCFG2_INBAND_ENABLE;
1140
1141 tw32(MAC_PHYCFG2, val);
1142
1143 val = tr32(MAC_PHYCFG1);
1144 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1145 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1146 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1147 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1148 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1149 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1150 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1151 }
1152 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1153 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1154 tw32(MAC_PHYCFG1, val);
1155
1156 val = tr32(MAC_EXT_RGMII_MODE);
1157 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1158 MAC_RGMII_MODE_RX_QUALITY |
1159 MAC_RGMII_MODE_RX_ACTIVITY |
1160 MAC_RGMII_MODE_RX_ENG_DET |
1161 MAC_RGMII_MODE_TX_ENABLE |
1162 MAC_RGMII_MODE_TX_LOWPWR |
1163 MAC_RGMII_MODE_TX_RESET);
1164 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1165 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1166 val |= MAC_RGMII_MODE_RX_INT_B |
1167 MAC_RGMII_MODE_RX_QUALITY |
1168 MAC_RGMII_MODE_RX_ACTIVITY |
1169 MAC_RGMII_MODE_RX_ENG_DET;
1170 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1171 val |= MAC_RGMII_MODE_TX_ENABLE |
1172 MAC_RGMII_MODE_TX_LOWPWR |
1173 MAC_RGMII_MODE_TX_RESET;
1174 }
1175 tw32(MAC_EXT_RGMII_MODE, val);
1176 }
1177
1178 static void tg3_mdio_start(struct tg3 *tp)
1179 {
1180 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1181 tw32_f(MAC_MI_MODE, tp->mi_mode);
1182 udelay(80);
1183
1184 if (tg3_flag(tp, MDIOBUS_INITED) &&
1185 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1186 tg3_mdio_config_5785(tp);
1187 }
1188
1189 static int tg3_mdio_init(struct tg3 *tp)
1190 {
1191 int i;
1192 u32 reg;
1193 struct phy_device *phydev;
1194
1195 if (tg3_flag(tp, 5717_PLUS)) {
1196 u32 is_serdes;
1197
1198 tp->phy_addr = tp->pci_fn + 1;
1199
1200 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1201 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1202 else
1203 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1204 TG3_CPMU_PHY_STRAP_IS_SERDES;
1205 if (is_serdes)
1206 tp->phy_addr += 7;
1207 } else
1208 tp->phy_addr = TG3_PHY_MII_ADDR;
1209
1210 tg3_mdio_start(tp);
1211
1212 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1213 return 0;
1214
1215 tp->mdio_bus = mdiobus_alloc();
1216 if (tp->mdio_bus == NULL)
1217 return -ENOMEM;
1218
1219 tp->mdio_bus->name = "tg3 mdio bus";
1220 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1221 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1222 tp->mdio_bus->priv = tp;
1223 tp->mdio_bus->parent = &tp->pdev->dev;
1224 tp->mdio_bus->read = &tg3_mdio_read;
1225 tp->mdio_bus->write = &tg3_mdio_write;
1226 tp->mdio_bus->reset = &tg3_mdio_reset;
1227 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1228 tp->mdio_bus->irq = &tp->mdio_irq[0];
1229
1230 for (i = 0; i < PHY_MAX_ADDR; i++)
1231 tp->mdio_bus->irq[i] = PHY_POLL;
1232
1233 /* The bus registration will look for all the PHYs on the mdio bus.
1234 * Unfortunately, it does not ensure the PHY is powered up before
1235 * accessing the PHY ID registers. A chip reset is the
1236 * quickest way to bring the device back to an operational state..
1237 */
1238 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1239 tg3_bmcr_reset(tp);
1240
1241 i = mdiobus_register(tp->mdio_bus);
1242 if (i) {
1243 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1244 mdiobus_free(tp->mdio_bus);
1245 return i;
1246 }
1247
1248 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1249
1250 if (!phydev || !phydev->drv) {
1251 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1252 mdiobus_unregister(tp->mdio_bus);
1253 mdiobus_free(tp->mdio_bus);
1254 return -ENODEV;
1255 }
1256
1257 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1258 case PHY_ID_BCM57780:
1259 phydev->interface = PHY_INTERFACE_MODE_GMII;
1260 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1261 break;
1262 case PHY_ID_BCM50610:
1263 case PHY_ID_BCM50610M:
1264 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1265 PHY_BRCM_RX_REFCLK_UNUSED |
1266 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1267 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1268 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1269 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1270 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1271 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1272 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1273 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1274 /* fallthru */
1275 case PHY_ID_RTL8211C:
1276 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1277 break;
1278 case PHY_ID_RTL8201E:
1279 case PHY_ID_BCMAC131:
1280 phydev->interface = PHY_INTERFACE_MODE_MII;
1281 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1282 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1283 break;
1284 }
1285
1286 tg3_flag_set(tp, MDIOBUS_INITED);
1287
1288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1289 tg3_mdio_config_5785(tp);
1290
1291 return 0;
1292 }
1293
1294 static void tg3_mdio_fini(struct tg3 *tp)
1295 {
1296 if (tg3_flag(tp, MDIOBUS_INITED)) {
1297 tg3_flag_clear(tp, MDIOBUS_INITED);
1298 mdiobus_unregister(tp->mdio_bus);
1299 mdiobus_free(tp->mdio_bus);
1300 }
1301 }
1302
1303 /* tp->lock is held. */
1304 static inline void tg3_generate_fw_event(struct tg3 *tp)
1305 {
1306 u32 val;
1307
1308 val = tr32(GRC_RX_CPU_EVENT);
1309 val |= GRC_RX_CPU_DRIVER_EVENT;
1310 tw32_f(GRC_RX_CPU_EVENT, val);
1311
1312 tp->last_event_jiffies = jiffies;
1313 }
1314
1315 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1316
1317 /* tp->lock is held. */
1318 static void tg3_wait_for_event_ack(struct tg3 *tp)
1319 {
1320 int i;
1321 unsigned int delay_cnt;
1322 long time_remain;
1323
1324 /* If enough time has passed, no wait is necessary. */
1325 time_remain = (long)(tp->last_event_jiffies + 1 +
1326 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1327 (long)jiffies;
1328 if (time_remain < 0)
1329 return;
1330
1331 /* Check if we can shorten the wait time. */
1332 delay_cnt = jiffies_to_usecs(time_remain);
1333 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1334 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1335 delay_cnt = (delay_cnt >> 3) + 1;
1336
1337 for (i = 0; i < delay_cnt; i++) {
1338 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1339 break;
1340 udelay(8);
1341 }
1342 }
1343
1344 /* tp->lock is held. */
1345 static void tg3_ump_link_report(struct tg3 *tp)
1346 {
1347 u32 reg;
1348 u32 val;
1349
1350 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1351 return;
1352
1353 tg3_wait_for_event_ack(tp);
1354
1355 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1356
1357 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1358
1359 val = 0;
1360 if (!tg3_readphy(tp, MII_BMCR, &reg))
1361 val = reg << 16;
1362 if (!tg3_readphy(tp, MII_BMSR, &reg))
1363 val |= (reg & 0xffff);
1364 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1365
1366 val = 0;
1367 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1368 val = reg << 16;
1369 if (!tg3_readphy(tp, MII_LPA, &reg))
1370 val |= (reg & 0xffff);
1371 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1372
1373 val = 0;
1374 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1375 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1376 val = reg << 16;
1377 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1378 val |= (reg & 0xffff);
1379 }
1380 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1381
1382 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1383 val = reg << 16;
1384 else
1385 val = 0;
1386 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1387
1388 tg3_generate_fw_event(tp);
1389 }
1390
1391 static void tg3_link_report(struct tg3 *tp)
1392 {
1393 if (!netif_carrier_ok(tp->dev)) {
1394 netif_info(tp, link, tp->dev, "Link is down\n");
1395 tg3_ump_link_report(tp);
1396 } else if (netif_msg_link(tp)) {
1397 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1398 (tp->link_config.active_speed == SPEED_1000 ?
1399 1000 :
1400 (tp->link_config.active_speed == SPEED_100 ?
1401 100 : 10)),
1402 (tp->link_config.active_duplex == DUPLEX_FULL ?
1403 "full" : "half"));
1404
1405 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1406 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1407 "on" : "off",
1408 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1409 "on" : "off");
1410
1411 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1412 netdev_info(tp->dev, "EEE is %s\n",
1413 tp->setlpicnt ? "enabled" : "disabled");
1414
1415 tg3_ump_link_report(tp);
1416 }
1417 }
1418
1419 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1420 {
1421 u16 miireg;
1422
1423 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1424 miireg = ADVERTISE_PAUSE_CAP;
1425 else if (flow_ctrl & FLOW_CTRL_TX)
1426 miireg = ADVERTISE_PAUSE_ASYM;
1427 else if (flow_ctrl & FLOW_CTRL_RX)
1428 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1429 else
1430 miireg = 0;
1431
1432 return miireg;
1433 }
1434
1435 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1436 {
1437 u16 miireg;
1438
1439 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1440 miireg = ADVERTISE_1000XPAUSE;
1441 else if (flow_ctrl & FLOW_CTRL_TX)
1442 miireg = ADVERTISE_1000XPSE_ASYM;
1443 else if (flow_ctrl & FLOW_CTRL_RX)
1444 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1445 else
1446 miireg = 0;
1447
1448 return miireg;
1449 }
1450
1451 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1452 {
1453 u8 cap = 0;
1454
1455 if (lcladv & ADVERTISE_1000XPAUSE) {
1456 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1457 if (rmtadv & LPA_1000XPAUSE)
1458 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1459 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1460 cap = FLOW_CTRL_RX;
1461 } else {
1462 if (rmtadv & LPA_1000XPAUSE)
1463 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1464 }
1465 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1466 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1467 cap = FLOW_CTRL_TX;
1468 }
1469
1470 return cap;
1471 }
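/*
 * Illustration of the 802.3x resolution above: if both ends advertise
 * symmetric pause (ADVERTISE_1000XPAUSE / LPA_1000XPAUSE), the result is
 * FLOW_CTRL_TX | FLOW_CTRL_RX; a local ASYM-only advertisement yields
 * FLOW_CTRL_TX only when the partner advertises both PAUSE and ASYM.
 */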
1472
1473 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1474 {
1475 u8 autoneg;
1476 u8 flowctrl = 0;
1477 u32 old_rx_mode = tp->rx_mode;
1478 u32 old_tx_mode = tp->tx_mode;
1479
1480 if (tg3_flag(tp, USE_PHYLIB))
1481 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1482 else
1483 autoneg = tp->link_config.autoneg;
1484
1485 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1486 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1487 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1488 else
1489 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1490 } else
1491 flowctrl = tp->link_config.flowctrl;
1492
1493 tp->link_config.active_flowctrl = flowctrl;
1494
1495 if (flowctrl & FLOW_CTRL_RX)
1496 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1497 else
1498 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1499
1500 if (old_rx_mode != tp->rx_mode)
1501 tw32_f(MAC_RX_MODE, tp->rx_mode);
1502
1503 if (flowctrl & FLOW_CTRL_TX)
1504 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1505 else
1506 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1507
1508 if (old_tx_mode != tp->tx_mode)
1509 tw32_f(MAC_TX_MODE, tp->tx_mode);
1510 }
1511
1512 static void tg3_adjust_link(struct net_device *dev)
1513 {
1514 u8 oldflowctrl, linkmesg = 0;
1515 u32 mac_mode, lcl_adv, rmt_adv;
1516 struct tg3 *tp = netdev_priv(dev);
1517 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1518
1519 spin_lock_bh(&tp->lock);
1520
1521 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1522 MAC_MODE_HALF_DUPLEX);
1523
1524 oldflowctrl = tp->link_config.active_flowctrl;
1525
1526 if (phydev->link) {
1527 lcl_adv = 0;
1528 rmt_adv = 0;
1529
1530 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1531 mac_mode |= MAC_MODE_PORT_MODE_MII;
1532 else if (phydev->speed == SPEED_1000 ||
1533 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1534 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1535 else
1536 mac_mode |= MAC_MODE_PORT_MODE_MII;
1537
1538 if (phydev->duplex == DUPLEX_HALF)
1539 mac_mode |= MAC_MODE_HALF_DUPLEX;
1540 else {
1541 lcl_adv = tg3_advert_flowctrl_1000T(
1542 tp->link_config.flowctrl);
1543
1544 if (phydev->pause)
1545 rmt_adv = LPA_PAUSE_CAP;
1546 if (phydev->asym_pause)
1547 rmt_adv |= LPA_PAUSE_ASYM;
1548 }
1549
1550 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1551 } else
1552 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1553
1554 if (mac_mode != tp->mac_mode) {
1555 tp->mac_mode = mac_mode;
1556 tw32_f(MAC_MODE, tp->mac_mode);
1557 udelay(40);
1558 }
1559
1560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1561 if (phydev->speed == SPEED_10)
1562 tw32(MAC_MI_STAT,
1563 MAC_MI_STAT_10MBPS_MODE |
1564 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1565 else
1566 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1567 }
1568
1569 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1570 tw32(MAC_TX_LENGTHS,
1571 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1572 (6 << TX_LENGTHS_IPG_SHIFT) |
1573 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1574 else
1575 tw32(MAC_TX_LENGTHS,
1576 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1577 (6 << TX_LENGTHS_IPG_SHIFT) |
1578 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1579
1580 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1581 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1582 phydev->speed != tp->link_config.active_speed ||
1583 phydev->duplex != tp->link_config.active_duplex ||
1584 oldflowctrl != tp->link_config.active_flowctrl)
1585 linkmesg = 1;
1586
1587 tp->link_config.active_speed = phydev->speed;
1588 tp->link_config.active_duplex = phydev->duplex;
1589
1590 spin_unlock_bh(&tp->lock);
1591
1592 if (linkmesg)
1593 tg3_link_report(tp);
1594 }
1595
1596 static int tg3_phy_init(struct tg3 *tp)
1597 {
1598 struct phy_device *phydev;
1599
1600 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1601 return 0;
1602
1603 /* Bring the PHY back to a known state. */
1604 tg3_bmcr_reset(tp);
1605
1606 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1607
1608 /* Attach the MAC to the PHY. */
1609 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1610 phydev->dev_flags, phydev->interface);
1611 if (IS_ERR(phydev)) {
1612 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1613 return PTR_ERR(phydev);
1614 }
1615
1616 /* Mask with MAC supported features. */
1617 switch (phydev->interface) {
1618 case PHY_INTERFACE_MODE_GMII:
1619 case PHY_INTERFACE_MODE_RGMII:
1620 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1621 phydev->supported &= (PHY_GBIT_FEATURES |
1622 SUPPORTED_Pause |
1623 SUPPORTED_Asym_Pause);
1624 break;
1625 }
1626 /* fallthru */
1627 case PHY_INTERFACE_MODE_MII:
1628 phydev->supported &= (PHY_BASIC_FEATURES |
1629 SUPPORTED_Pause |
1630 SUPPORTED_Asym_Pause);
1631 break;
1632 default:
1633 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1634 return -EINVAL;
1635 }
1636
1637 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1638
1639 phydev->advertising = phydev->supported;
1640
1641 return 0;
1642 }
1643
1644 static void tg3_phy_start(struct tg3 *tp)
1645 {
1646 struct phy_device *phydev;
1647
1648 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1649 return;
1650
1651 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1652
1653 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1654 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1655 phydev->speed = tp->link_config.orig_speed;
1656 phydev->duplex = tp->link_config.orig_duplex;
1657 phydev->autoneg = tp->link_config.orig_autoneg;
1658 phydev->advertising = tp->link_config.orig_advertising;
1659 }
1660
1661 phy_start(phydev);
1662
1663 phy_start_aneg(phydev);
1664 }
1665
1666 static void tg3_phy_stop(struct tg3 *tp)
1667 {
1668 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1669 return;
1670
1671 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1672 }
1673
1674 static void tg3_phy_fini(struct tg3 *tp)
1675 {
1676 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1677 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1678 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1679 }
1680 }
1681
1682 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1683 {
1684 u32 phytest;
1685
1686 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1687 u32 phy;
1688
1689 tg3_writephy(tp, MII_TG3_FET_TEST,
1690 phytest | MII_TG3_FET_SHADOW_EN);
1691 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1692 if (enable)
1693 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1694 else
1695 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1696 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1697 }
1698 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1699 }
1700 }
1701
1702 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1703 {
1704 u32 reg;
1705
1706 if (!tg3_flag(tp, 5705_PLUS) ||
1707 (tg3_flag(tp, 5717_PLUS) &&
1708 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1709 return;
1710
1711 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1712 tg3_phy_fet_toggle_apd(tp, enable);
1713 return;
1714 }
1715
1716 reg = MII_TG3_MISC_SHDW_WREN |
1717 MII_TG3_MISC_SHDW_SCR5_SEL |
1718 MII_TG3_MISC_SHDW_SCR5_LPED |
1719 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1720 MII_TG3_MISC_SHDW_SCR5_SDTL |
1721 MII_TG3_MISC_SHDW_SCR5_C125OE;
1722 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1723 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1724
1725 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1726
1727
1728 reg = MII_TG3_MISC_SHDW_WREN |
1729 MII_TG3_MISC_SHDW_APD_SEL |
1730 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1731 if (enable)
1732 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1733
1734 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1735 }
1736
1737 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1738 {
1739 u32 phy;
1740
1741 if (!tg3_flag(tp, 5705_PLUS) ||
1742 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1743 return;
1744
1745 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1746 u32 ephy;
1747
1748 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1749 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1750
1751 tg3_writephy(tp, MII_TG3_FET_TEST,
1752 ephy | MII_TG3_FET_SHADOW_EN);
1753 if (!tg3_readphy(tp, reg, &phy)) {
1754 if (enable)
1755 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1756 else
1757 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1758 tg3_writephy(tp, reg, phy);
1759 }
1760 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1761 }
1762 } else {
1763 int ret;
1764
1765 ret = tg3_phy_auxctl_read(tp,
1766 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1767 if (!ret) {
1768 if (enable)
1769 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1770 else
1771 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1772 tg3_phy_auxctl_write(tp,
1773 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1774 }
1775 }
1776 }
1777
1778 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1779 {
1780 int ret;
1781 u32 val;
1782
1783 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1784 return;
1785
1786 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1787 if (!ret)
1788 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1789 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1790 }
1791
1792 static void tg3_phy_apply_otp(struct tg3 *tp)
1793 {
1794 u32 otp, phy;
1795
1796 if (!tp->phy_otp)
1797 return;
1798
1799 otp = tp->phy_otp;
1800
1801 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1802 return;
1803
1804 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1805 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1806 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1807
1808 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1809 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1810 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1811
1812 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1813 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1814 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1815
1816 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1817 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1818
1819 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1820 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1821
1822 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1823 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1824 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1825
1826 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1827 }
1828
1829 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1830 {
1831 u32 val;
1832
1833 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1834 return;
1835
1836 tp->setlpicnt = 0;
1837
1838 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1839 current_link_up == 1 &&
1840 tp->link_config.active_duplex == DUPLEX_FULL &&
1841 (tp->link_config.active_speed == SPEED_100 ||
1842 tp->link_config.active_speed == SPEED_1000)) {
1843 u32 eeectl;
1844
1845 if (tp->link_config.active_speed == SPEED_1000)
1846 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1847 else
1848 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1849
1850 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1851
1852 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1853 TG3_CL45_D7_EEERES_STAT, &val);
1854
1855 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1856 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1857 tp->setlpicnt = 2;
1858 }
1859
1860 if (!tp->setlpicnt) {
1861 if (current_link_up == 1 &&
1862 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1863 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1864 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1865 }
1866
1867 val = tr32(TG3_CPMU_EEE_MODE);
1868 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1869 }
1870 }
1871
1872 static void tg3_phy_eee_enable(struct tg3 *tp)
1873 {
1874 u32 val;
1875
1876 if (tp->link_config.active_speed == SPEED_1000 &&
1877 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1878 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1880 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1881 val = MII_TG3_DSP_TAP26_ALNOKO |
1882 MII_TG3_DSP_TAP26_RMRXSTO;
1883 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1884 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1885 }
1886
1887 val = tr32(TG3_CPMU_EEE_MODE);
1888 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1889 }
1890
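/* Poll MII_TG3_DSP_CONTROL until the macro-busy bit (0x1000) clears.
 * The loop is paced only by the MDIO transactions themselves; up to
 * 100 reads are attempted before giving up with -EBUSY.
 */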
1891 static int tg3_wait_macro_done(struct tg3 *tp)
1892 {
1893 int limit = 100;
1894
1895 while (limit--) {
1896 u32 tmp32;
1897
1898 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1899 if ((tmp32 & 0x1000) == 0)
1900 break;
1901 }
1902 }
1903 if (limit < 0)
1904 return -EBUSY;
1905
1906 return 0;
1907 }
1908
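/* Write a known test pattern into each of the four DSP channels
 * (register blocks spaced 0x2000 apart), then read it back and
 * compare. A macro timeout sets *resetp so the retry loop resets the
 * PHY first; a miscompare just fails the attempt.
 */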
1909 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1910 {
1911 static const u32 test_pat[4][6] = {
1912 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1913 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1914 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1915 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1916 };
1917 int chan;
1918
1919 for (chan = 0; chan < 4; chan++) {
1920 int i;
1921
1922 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1923 (chan * 0x2000) | 0x0200);
1924 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1925
1926 for (i = 0; i < 6; i++)
1927 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1928 test_pat[chan][i]);
1929
1930 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1931 if (tg3_wait_macro_done(tp)) {
1932 *resetp = 1;
1933 return -EBUSY;
1934 }
1935
1936 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1937 (chan * 0x2000) | 0x0200);
1938 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1939 if (tg3_wait_macro_done(tp)) {
1940 *resetp = 1;
1941 return -EBUSY;
1942 }
1943
1944 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1945 if (tg3_wait_macro_done(tp)) {
1946 *resetp = 1;
1947 return -EBUSY;
1948 }
1949
1950 for (i = 0; i < 6; i += 2) {
1951 u32 low, high;
1952
1953 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1954 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1955 tg3_wait_macro_done(tp)) {
1956 *resetp = 1;
1957 return -EBUSY;
1958 }
1959 low &= 0x7fff;
1960 high &= 0x000f;
1961 if (low != test_pat[chan][i] ||
1962 high != test_pat[chan][i+1]) {
1963 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1964 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1965 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1966
1967 return -EBUSY;
1968 }
1969 }
1970 }
1971
1972 return 0;
1973 }
1974
1975 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1976 {
1977 int chan;
1978
1979 for (chan = 0; chan < 4; chan++) {
1980 int i;
1981
1982 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1983 (chan * 0x2000) | 0x0200);
1984 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1985 for (i = 0; i < 6; i++)
1986 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1987 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1988 if (tg3_wait_macro_done(tp))
1989 return -EBUSY;
1990 }
1991
1992 return 0;
1993 }
1994
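/* PHY DSP sanity procedure for the 5703/5704/5705: force a 1000Mbps
 * full-duplex master link, verify the DSP channels with a test
 * pattern (up to 10 attempts), clear the pattern, then restore the
 * original MII_CTRL1000 and transmitter/interrupt state.
 */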
1995 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1996 {
1997 u32 reg32, phy9_orig;
1998 int retries, do_phy_reset, err;
1999
2000 retries = 10;
2001 do_phy_reset = 1;
2002 do {
2003 if (do_phy_reset) {
2004 err = tg3_bmcr_reset(tp);
2005 if (err)
2006 return err;
2007 do_phy_reset = 0;
2008 }
2009
2010 /* Disable transmitter and interrupt. */
2011 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2012 continue;
2013
2014 reg32 |= 0x3000;
2015 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2016
2017 /* Set full-duplex, 1000 Mbps. */
2018 tg3_writephy(tp, MII_BMCR,
2019 BMCR_FULLDPLX | BMCR_SPEED1000);
2020
2021 /* Set to master mode. */
2022 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2023 continue;
2024
2025 tg3_writephy(tp, MII_CTRL1000,
2026 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2027
2028 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2029 if (err)
2030 return err;
2031
2032 /* Block the PHY control access. */
2033 tg3_phydsp_write(tp, 0x8005, 0x0800);
2034
2035 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2036 if (!err)
2037 break;
2038 } while (--retries);
2039
2040 err = tg3_phy_reset_chanpat(tp);
2041 if (err)
2042 return err;
2043
2044 tg3_phydsp_write(tp, 0x8005, 0x0000);
2045
2046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2047 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2048
2049 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2050
2051 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2052
2053 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2054 reg32 &= ~0x3000;
2055 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2056 } else if (!err)
2057 err = -EBUSY;
2058
2059 return err;
2060 }
2061
2062 /* This will reset the tigon3 PHY. Callers are responsible for
2063 * deciding that a reset is actually required.
2064 */
2065 static int tg3_phy_reset(struct tg3 *tp)
2066 {
2067 u32 val, cpmuctrl;
2068 int err;
2069
2070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2071 val = tr32(GRC_MISC_CFG);
2072 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2073 udelay(40);
2074 }
2075 err = tg3_readphy(tp, MII_BMSR, &val);
2076 err |= tg3_readphy(tp, MII_BMSR, &val);
2077 if (err != 0)
2078 return -EBUSY;
2079
2080 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2081 netif_carrier_off(tp->dev);
2082 tg3_link_report(tp);
2083 }
2084
2085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2088 err = tg3_phy_reset_5703_4_5(tp);
2089 if (err)
2090 return err;
2091 goto out;
2092 }
2093
2094 cpmuctrl = 0;
2095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2096 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2097 cpmuctrl = tr32(TG3_CPMU_CTRL);
2098 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2099 tw32(TG3_CPMU_CTRL,
2100 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2101 }
2102
2103 err = tg3_bmcr_reset(tp);
2104 if (err)
2105 return err;
2106
2107 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2108 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2109 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2110
2111 tw32(TG3_CPMU_CTRL, cpmuctrl);
2112 }
2113
2114 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2115 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2116 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2117 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2118 CPMU_LSPD_1000MB_MACCLK_12_5) {
2119 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2120 udelay(40);
2121 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2122 }
2123 }
2124
2125 if (tg3_flag(tp, 5717_PLUS) &&
2126 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2127 return 0;
2128
2129 tg3_phy_apply_otp(tp);
2130
2131 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2132 tg3_phy_toggle_apd(tp, true);
2133 else
2134 tg3_phy_toggle_apd(tp, false);
2135
2136 out:
2137 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2138 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2139 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2140 tg3_phydsp_write(tp, 0x000a, 0x0323);
2141 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2142 }
2143
2144 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2145 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2146 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2147 }
2148
2149 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2150 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2151 tg3_phydsp_write(tp, 0x000a, 0x310b);
2152 tg3_phydsp_write(tp, 0x201f, 0x9506);
2153 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2154 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2155 }
2156 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2157 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2159 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2160 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2161 tg3_writephy(tp, MII_TG3_TEST1,
2162 MII_TG3_TEST1_TRIM_EN | 0x4);
2163 } else
2164 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2165
2166 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2167 }
2168 }
2169
2170 /* Set Extended packet length bit (bit 14) on all chips that
2171 * support jumbo frames. */
2172 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2173 /* Cannot do read-modify-write on 5401 */
2174 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2175 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2176 /* Set bit 14 with read-modify-write to preserve other bits */
2177 err = tg3_phy_auxctl_read(tp,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2179 if (!err)
2180 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2182 }
2183
2184 /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2185 * jumbo frame transmission.
2186 */
2187 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2188 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2189 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2190 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2191 }
2192
2193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2194 /* adjust output voltage */
2195 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2196 }
2197
2198 tg3_phy_toggle_automdix(tp, 1);
2199 tg3_phy_set_wirespeed(tp);
2200 return 0;
2201 }
2202
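/* Inter-function GPIO handshake: each of the (up to four) PCI
 * functions owns a 4-bit nibble, shifted by 4 * pci_fn, carrying its
 * DRVR_PRES and NEED_VAUX bits. The word lives in the APE GPIO_MSG
 * register on 5717/5719 and in TG3_CPMU_DRV_STATUS otherwise.
 */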
2203 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2204 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2205 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2206 TG3_GPIO_MSG_NEED_VAUX)
2207 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2208 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2209 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2210 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2211 (TG3_GPIO_MSG_DRVR_PRES << 12))
2212
2213 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2214 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2215 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2216 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2217 (TG3_GPIO_MSG_NEED_VAUX << 12))
2218
2219 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2220 {
2221 u32 status, shift;
2222
2223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2225 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2226 else
2227 status = tr32(TG3_CPMU_DRV_STATUS);
2228
2229 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2230 status &= ~(TG3_GPIO_MSG_MASK << shift);
2231 status |= (newstat << shift);
2232
2233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2235 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2236 else
2237 tw32(TG3_CPMU_DRV_STATUS, status);
2238
2239 return status >> TG3_APE_GPIO_MSG_SHIFT;
2240 }
2241
2242 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2243 {
2244 if (!tg3_flag(tp, IS_NIC))
2245 return 0;
2246
2247 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2248 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2249 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2250 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2251 return -EIO;
2252
2253 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2254
2255 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2256 TG3_GRC_LCLCTL_PWRSW_DELAY);
2257
2258 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2259 } else {
2260 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2261 TG3_GRC_LCLCTL_PWRSW_DELAY);
2262 }
2263
2264 return 0;
2265 }
2266
2267 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2268 {
2269 u32 grc_local_ctrl;
2270
2271 if (!tg3_flag(tp, IS_NIC) ||
2272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2274 return;
2275
2276 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2277
2278 tw32_wait_f(GRC_LOCAL_CTRL,
2279 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280 TG3_GRC_LCLCTL_PWRSW_DELAY);
2281
2282 tw32_wait_f(GRC_LOCAL_CTRL,
2283 grc_local_ctrl,
2284 TG3_GRC_LCLCTL_PWRSW_DELAY);
2285
2286 tw32_wait_f(GRC_LOCAL_CTRL,
2287 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2288 TG3_GRC_LCLCTL_PWRSW_DELAY);
2289 }
2290
2291 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2292 {
2293 if (!tg3_flag(tp, IS_NIC))
2294 return;
2295
2296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2298 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2299 (GRC_LCLCTRL_GPIO_OE0 |
2300 GRC_LCLCTRL_GPIO_OE1 |
2301 GRC_LCLCTRL_GPIO_OE2 |
2302 GRC_LCLCTRL_GPIO_OUTPUT0 |
2303 GRC_LCLCTRL_GPIO_OUTPUT1),
2304 TG3_GRC_LCLCTL_PWRSW_DELAY);
2305 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2306 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2307 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2308 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2309 GRC_LCLCTRL_GPIO_OE1 |
2310 GRC_LCLCTRL_GPIO_OE2 |
2311 GRC_LCLCTRL_GPIO_OUTPUT0 |
2312 GRC_LCLCTRL_GPIO_OUTPUT1 |
2313 tp->grc_local_ctrl;
2314 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315 TG3_GRC_LCLCTL_PWRSW_DELAY);
2316
2317 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2318 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2319 TG3_GRC_LCLCTL_PWRSW_DELAY);
2320
2321 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2322 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2323 TG3_GRC_LCLCTL_PWRSW_DELAY);
2324 } else {
2325 u32 no_gpio2;
2326 u32 grc_local_ctrl = 0;
2327
2328 /* Workaround to avoid drawing excessive current. */
2329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2330 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2331 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2332 grc_local_ctrl,
2333 TG3_GRC_LCLCTL_PWRSW_DELAY);
2334 }
2335
2336 /* On 5753 and variants, GPIO2 cannot be used. */
2337 no_gpio2 = tp->nic_sram_data_cfg &
2338 NIC_SRAM_DATA_CFG_NO_GPIO2;
2339
2340 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2341 GRC_LCLCTRL_GPIO_OE1 |
2342 GRC_LCLCTRL_GPIO_OE2 |
2343 GRC_LCLCTRL_GPIO_OUTPUT1 |
2344 GRC_LCLCTRL_GPIO_OUTPUT2;
2345 if (no_gpio2) {
2346 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2347 GRC_LCLCTRL_GPIO_OUTPUT2);
2348 }
2349 tw32_wait_f(GRC_LOCAL_CTRL,
2350 tp->grc_local_ctrl | grc_local_ctrl,
2351 TG3_GRC_LCLCTL_PWRSW_DELAY);
2352
2353 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2354
2355 tw32_wait_f(GRC_LOCAL_CTRL,
2356 tp->grc_local_ctrl | grc_local_ctrl,
2357 TG3_GRC_LCLCTL_PWRSW_DELAY);
2358
2359 if (!no_gpio2) {
2360 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2361 tw32_wait_f(GRC_LOCAL_CTRL,
2362 tp->grc_local_ctrl | grc_local_ctrl,
2363 TG3_GRC_LCLCTL_PWRSW_DELAY);
2364 }
2365 }
2366 }
2367
2368 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2369 {
2370 u32 msg = 0;
2371
2372 /* Serialize power state transitions */
2373 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2374 return;
2375
2376 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2377 msg = TG3_GPIO_MSG_NEED_VAUX;
2378
2379 msg = tg3_set_function_status(tp, msg);
2380
2381 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2382 goto done;
2383
2384 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2385 tg3_pwrsrc_switch_to_vaux(tp);
2386 else
2387 tg3_pwrsrc_die_with_vmain(tp);
2388
2389 done:
2390 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2391 }
2392
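/* Decide whether this device, or its peer function on a dual-port
 * board, still needs auxiliary power (management firmware or WOL),
 * and switch the power source accordingly.
 */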
2393 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2394 {
2395 bool need_vaux = false;
2396
2397 /* The GPIOs do something completely different on 57765. */
2398 if (!tg3_flag(tp, IS_NIC) ||
2399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2400 return;
2401
2402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2405 tg3_frob_aux_power_5717(tp, include_wol ?
2406 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2407 return;
2408 }
2409
2410 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2411 struct net_device *dev_peer;
2412
2413 dev_peer = pci_get_drvdata(tp->pdev_peer);
2414
2415 /* remove_one() may have been run on the peer. */
2416 if (dev_peer) {
2417 struct tg3 *tp_peer = netdev_priv(dev_peer);
2418
2419 if (tg3_flag(tp_peer, INIT_COMPLETE))
2420 return;
2421
2422 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2423 tg3_flag(tp_peer, ENABLE_ASF))
2424 need_vaux = true;
2425 }
2426 }
2427
2428 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2429 tg3_flag(tp, ENABLE_ASF))
2430 need_vaux = true;
2431
2432 if (need_vaux)
2433 tg3_pwrsrc_switch_to_vaux(tp);
2434 else
2435 tg3_pwrsrc_die_with_vmain(tp);
2436 }
2437
2438 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2439 {
2440 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2441 return 1;
2442 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2443 if (speed != SPEED_10)
2444 return 1;
2445 } else if (speed == SPEED_10)
2446 return 1;
2447
2448 return 0;
2449 }
2450
2451 static int tg3_setup_phy(struct tg3 *, int);
2452
2453 #define RESET_KIND_SHUTDOWN 0
2454 #define RESET_KIND_INIT 1
2455 #define RESET_KIND_SUSPEND 2
2456
2457 static void tg3_write_sig_post_reset(struct tg3 *, int);
2458 static int tg3_halt_cpu(struct tg3 *, u32);
2459
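/* Drop the PHY into its lowest usable power state. Serdes, 5906 and
 * FET-style PHYs each take their own path, and a few chips must never
 * see BMCR_PDOWN at all because of hardware bugs.
 */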
2460 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2461 {
2462 u32 val;
2463
2464 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2466 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2467 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2468
2469 sg_dig_ctrl |=
2470 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2471 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2472 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2473 }
2474 return;
2475 }
2476
2477 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478 tg3_bmcr_reset(tp);
2479 val = tr32(GRC_MISC_CFG);
2480 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2481 udelay(40);
2482 return;
2483 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2484 u32 phytest;
2485 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2486 u32 phy;
2487
2488 tg3_writephy(tp, MII_ADVERTISE, 0);
2489 tg3_writephy(tp, MII_BMCR,
2490 BMCR_ANENABLE | BMCR_ANRESTART);
2491
2492 tg3_writephy(tp, MII_TG3_FET_TEST,
2493 phytest | MII_TG3_FET_SHADOW_EN);
2494 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2495 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2496 tg3_writephy(tp,
2497 MII_TG3_FET_SHDW_AUXMODE4,
2498 phy);
2499 }
2500 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2501 }
2502 return;
2503 } else if (do_low_power) {
2504 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2505 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2506
2507 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2508 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2509 MII_TG3_AUXCTL_PCTL_VREG_11V;
2510 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2511 }
2512
2513 /* The PHY should not be powered down on some chips because
2514 * of bugs.
2515 */
2516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2518 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2519 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2520 return;
2521
2522 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2523 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2524 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2525 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2526 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2527 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2528 }
2529
2530 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2531 }
2532
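/* Acquire the NVRAM software arbitration semaphore (SWARB REQ1/GNT1),
 * polling for up to 8000 * 20us. The lock nests via nvram_lock_cnt.
 */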
2533 /* tp->lock is held. */
2534 static int tg3_nvram_lock(struct tg3 *tp)
2535 {
2536 if (tg3_flag(tp, NVRAM)) {
2537 int i;
2538
2539 if (tp->nvram_lock_cnt == 0) {
2540 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2541 for (i = 0; i < 8000; i++) {
2542 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2543 break;
2544 udelay(20);
2545 }
2546 if (i == 8000) {
2547 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2548 return -ENODEV;
2549 }
2550 }
2551 tp->nvram_lock_cnt++;
2552 }
2553 return 0;
2554 }
2555
2556 /* tp->lock is held. */
2557 static void tg3_nvram_unlock(struct tg3 *tp)
2558 {
2559 if (tg3_flag(tp, NVRAM)) {
2560 if (tp->nvram_lock_cnt > 0)
2561 tp->nvram_lock_cnt--;
2562 if (tp->nvram_lock_cnt == 0)
2563 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2564 }
2565 }
2566
2567 /* tp->lock is held. */
2568 static void tg3_enable_nvram_access(struct tg3 *tp)
2569 {
2570 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2571 u32 nvaccess = tr32(NVRAM_ACCESS);
2572
2573 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2574 }
2575 }
2576
2577 /* tp->lock is held. */
2578 static void tg3_disable_nvram_access(struct tg3 *tp)
2579 {
2580 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2581 u32 nvaccess = tr32(NVRAM_ACCESS);
2582
2583 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2584 }
2585 }
2586
2587 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2588 u32 offset, u32 *val)
2589 {
2590 u32 tmp;
2591 int i;
2592
2593 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2594 return -EINVAL;
2595
2596 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2597 EEPROM_ADDR_DEVID_MASK |
2598 EEPROM_ADDR_READ);
2599 tw32(GRC_EEPROM_ADDR,
2600 tmp |
2601 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2602 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2603 EEPROM_ADDR_ADDR_MASK) |
2604 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2605
2606 for (i = 0; i < 1000; i++) {
2607 tmp = tr32(GRC_EEPROM_ADDR);
2608
2609 if (tmp & EEPROM_ADDR_COMPLETE)
2610 break;
2611 msleep(1);
2612 }
2613 if (!(tmp & EEPROM_ADDR_COMPLETE))
2614 return -EBUSY;
2615
2616 tmp = tr32(GRC_EEPROM_DATA);
2617
2618 /*
2619 * The data will always be in the opposite of the native endian
2620 * format. Perform a blind byteswap to compensate.
2621 */
2622 *val = swab32(tmp);
2623
2624 return 0;
2625 }
2626
2627 #define NVRAM_CMD_TIMEOUT 10000
2628
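/* Issue an NVRAM command and poll for NVRAM_CMD_DONE, waiting up to
 * NVRAM_CMD_TIMEOUT * 10us before reporting -EBUSY.
 */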
2629 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2630 {
2631 int i;
2632
2633 tw32(NVRAM_CMD, nvram_cmd);
2634 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2635 udelay(10);
2636 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2637 udelay(10);
2638 break;
2639 }
2640 }
2641
2642 if (i == NVRAM_CMD_TIMEOUT)
2643 return -EBUSY;
2644
2645 return 0;
2646 }
2647
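/* Atmel AT45DB0x1B-style parts are page-addressed rather than
 * linearly addressed, with a page size that is not a power of two.
 * Convert a linear offset into (page << ATMEL_AT45DB0X1B_PAGE_POS) +
 * offset-within-page; tg3_nvram_logical_addr() below is the inverse.
 */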
2648 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2649 {
2650 if (tg3_flag(tp, NVRAM) &&
2651 tg3_flag(tp, NVRAM_BUFFERED) &&
2652 tg3_flag(tp, FLASH) &&
2653 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2654 (tp->nvram_jedecnum == JEDEC_ATMEL))
2655
2656 addr = ((addr / tp->nvram_pagesize) <<
2657 ATMEL_AT45DB0X1B_PAGE_POS) +
2658 (addr % tp->nvram_pagesize);
2659
2660 return addr;
2661 }
2662
2663 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2664 {
2665 if (tg3_flag(tp, NVRAM) &&
2666 tg3_flag(tp, NVRAM_BUFFERED) &&
2667 tg3_flag(tp, FLASH) &&
2668 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2669 (tp->nvram_jedecnum == JEDEC_ATMEL))
2670
2671 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2672 tp->nvram_pagesize) +
2673 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2674
2675 return addr;
2676 }
2677
2678 /* NOTE: Data read in from NVRAM is byteswapped according to
2679 * the byteswapping settings for all other register accesses.
2680 * tg3 devices are BE devices, so on a BE machine, the data
2681 * returned will be exactly as it is seen in NVRAM. On a LE
2682 * machine, the 32-bit value will be byteswapped.
2683 */
2684 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2685 {
2686 int ret;
2687
2688 if (!tg3_flag(tp, NVRAM))
2689 return tg3_nvram_read_using_eeprom(tp, offset, val);
2690
2691 offset = tg3_nvram_phys_addr(tp, offset);
2692
2693 if (offset > NVRAM_ADDR_MSK)
2694 return -EINVAL;
2695
2696 ret = tg3_nvram_lock(tp);
2697 if (ret)
2698 return ret;
2699
2700 tg3_enable_nvram_access(tp);
2701
2702 tw32(NVRAM_ADDR, offset);
2703 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2704 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2705
2706 if (ret == 0)
2707 *val = tr32(NVRAM_RDDATA);
2708
2709 tg3_disable_nvram_access(tp);
2710
2711 tg3_nvram_unlock(tp);
2712
2713 return ret;
2714 }
2715
2716 /* Ensures NVRAM data is in bytestream format. */
2717 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2718 {
2719 u32 v;
2720 int res = tg3_nvram_read(tp, offset, &v);
2721 if (!res)
2722 *val = cpu_to_be32(v);
2723 return res;
2724 }
2725
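/* Program the station address into all four MAC address slots (plus
 * the twelve extended slots on 5703/5704), and seed the transmit
 * backoff algorithm from the byte-sum of the address, presumably so
 * that stations sharing a segment choose different backoff timings.
 */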
2726 /* tp->lock is held. */
2727 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2728 {
2729 u32 addr_high, addr_low;
2730 int i;
2731
2732 addr_high = ((tp->dev->dev_addr[0] << 8) |
2733 tp->dev->dev_addr[1]);
2734 addr_low = ((tp->dev->dev_addr[2] << 24) |
2735 (tp->dev->dev_addr[3] << 16) |
2736 (tp->dev->dev_addr[4] << 8) |
2737 (tp->dev->dev_addr[5] << 0));
2738 for (i = 0; i < 4; i++) {
2739 if (i == 1 && skip_mac_1)
2740 continue;
2741 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2742 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2743 }
2744
2745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2747 for (i = 0; i < 12; i++) {
2748 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2749 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2750 }
2751 }
2752
2753 addr_high = (tp->dev->dev_addr[0] +
2754 tp->dev->dev_addr[1] +
2755 tp->dev->dev_addr[2] +
2756 tp->dev->dev_addr[3] +
2757 tp->dev->dev_addr[4] +
2758 tp->dev->dev_addr[5]) &
2759 TX_BACKOFF_SEED_MASK;
2760 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2761 }
2762
2763 static void tg3_enable_register_access(struct tg3 *tp)
2764 {
2765 /*
2766 * Make sure register accesses (indirect or otherwise) will function
2767 * correctly.
2768 */
2769 pci_write_config_dword(tp->pdev,
2770 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2771 }
2772
2773 static int tg3_power_up(struct tg3 *tp)
2774 {
2775 int err;
2776
2777 tg3_enable_register_access(tp);
2778
2779 err = pci_set_power_state(tp->pdev, PCI_D0);
2780 if (!err) {
2781 /* Switch out of Vaux if it is a NIC */
2782 tg3_pwrsrc_switch_to_vmain(tp);
2783 } else {
2784 netdev_err(tp->dev, "Transition to D0 failed\n");
2785 }
2786
2787 return err;
2788 }
2789
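/* Prepare the chip for entry into a low-power state: mask PCI
 * interrupts, renegotiate the PHY down to a WOL-capable speed where
 * possible, arm magic-packet detection in MAC_MODE if we may wake
 * the system, gate or slow the core clocks where the chip allows,
 * and write the shutdown signature for the firmware.
 */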
2790 static int tg3_power_down_prepare(struct tg3 *tp)
2791 {
2792 u32 misc_host_ctrl;
2793 bool device_should_wake, do_low_power;
2794
2795 tg3_enable_register_access(tp);
2796
2797 /* Restore the CLKREQ setting. */
2798 if (tg3_flag(tp, CLKREQ_BUG)) {
2799 u16 lnkctl;
2800
2801 pci_read_config_word(tp->pdev,
2802 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2803 &lnkctl);
2804 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2805 pci_write_config_word(tp->pdev,
2806 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2807 lnkctl);
2808 }
2809
2810 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2811 tw32(TG3PCI_MISC_HOST_CTRL,
2812 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2813
2814 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2815 tg3_flag(tp, WOL_ENABLE);
2816
2817 if (tg3_flag(tp, USE_PHYLIB)) {
2818 do_low_power = false;
2819 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2820 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2821 struct phy_device *phydev;
2822 u32 phyid, advertising;
2823
2824 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2825
2826 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2827
2828 tp->link_config.orig_speed = phydev->speed;
2829 tp->link_config.orig_duplex = phydev->duplex;
2830 tp->link_config.orig_autoneg = phydev->autoneg;
2831 tp->link_config.orig_advertising = phydev->advertising;
2832
2833 advertising = ADVERTISED_TP |
2834 ADVERTISED_Pause |
2835 ADVERTISED_Autoneg |
2836 ADVERTISED_10baseT_Half;
2837
2838 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2839 if (tg3_flag(tp, WOL_SPEED_100MB))
2840 advertising |=
2841 ADVERTISED_100baseT_Half |
2842 ADVERTISED_100baseT_Full |
2843 ADVERTISED_10baseT_Full;
2844 else
2845 advertising |= ADVERTISED_10baseT_Full;
2846 }
2847
2848 phydev->advertising = advertising;
2849
2850 phy_start_aneg(phydev);
2851
2852 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2853 if (phyid != PHY_ID_BCMAC131) {
2854 phyid &= PHY_BCM_OUI_MASK;
2855 if (phyid == PHY_BCM_OUI_1 ||
2856 phyid == PHY_BCM_OUI_2 ||
2857 phyid == PHY_BCM_OUI_3)
2858 do_low_power = true;
2859 }
2860 }
2861 } else {
2862 do_low_power = true;
2863
2864 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2865 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2866 tp->link_config.orig_speed = tp->link_config.speed;
2867 tp->link_config.orig_duplex = tp->link_config.duplex;
2868 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2869 }
2870
2871 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2872 tp->link_config.speed = SPEED_10;
2873 tp->link_config.duplex = DUPLEX_HALF;
2874 tp->link_config.autoneg = AUTONEG_ENABLE;
2875 tg3_setup_phy(tp, 0);
2876 }
2877 }
2878
2879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2880 u32 val;
2881
2882 val = tr32(GRC_VCPU_EXT_CTRL);
2883 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2884 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2885 int i;
2886 u32 val;
2887
2888 for (i = 0; i < 200; i++) {
2889 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2890 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2891 break;
2892 msleep(1);
2893 }
2894 }
2895 if (tg3_flag(tp, WOL_CAP))
2896 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2897 WOL_DRV_STATE_SHUTDOWN |
2898 WOL_DRV_WOL |
2899 WOL_SET_MAGIC_PKT);
2900
2901 if (device_should_wake) {
2902 u32 mac_mode;
2903
2904 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2905 if (do_low_power &&
2906 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2907 tg3_phy_auxctl_write(tp,
2908 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2909 MII_TG3_AUXCTL_PCTL_WOL_EN |
2910 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2911 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2912 udelay(40);
2913 }
2914
2915 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2916 mac_mode = MAC_MODE_PORT_MODE_GMII;
2917 else
2918 mac_mode = MAC_MODE_PORT_MODE_MII;
2919
2920 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2921 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2922 ASIC_REV_5700) {
2923 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2924 SPEED_100 : SPEED_10;
2925 if (tg3_5700_link_polarity(tp, speed))
2926 mac_mode |= MAC_MODE_LINK_POLARITY;
2927 else
2928 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2929 }
2930 } else {
2931 mac_mode = MAC_MODE_PORT_MODE_TBI;
2932 }
2933
2934 if (!tg3_flag(tp, 5750_PLUS))
2935 tw32(MAC_LED_CTRL, tp->led_ctrl);
2936
2937 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2938 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2939 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2940 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2941
2942 if (tg3_flag(tp, ENABLE_APE))
2943 mac_mode |= MAC_MODE_APE_TX_EN |
2944 MAC_MODE_APE_RX_EN |
2945 MAC_MODE_TDE_ENABLE;
2946
2947 tw32_f(MAC_MODE, mac_mode);
2948 udelay(100);
2949
2950 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2951 udelay(10);
2952 }
2953
2954 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2955 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2957 u32 base_val;
2958
2959 base_val = tp->pci_clock_ctrl;
2960 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2961 CLOCK_CTRL_TXCLK_DISABLE);
2962
2963 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2964 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2965 } else if (tg3_flag(tp, 5780_CLASS) ||
2966 tg3_flag(tp, CPMU_PRESENT) ||
2967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2968 /* do nothing */
2969 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2970 u32 newbits1, newbits2;
2971
2972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2974 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2975 CLOCK_CTRL_TXCLK_DISABLE |
2976 CLOCK_CTRL_ALTCLK);
2977 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2978 } else if (tg3_flag(tp, 5705_PLUS)) {
2979 newbits1 = CLOCK_CTRL_625_CORE;
2980 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2981 } else {
2982 newbits1 = CLOCK_CTRL_ALTCLK;
2983 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2984 }
2985
2986 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2987 40);
2988
2989 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2990 40);
2991
2992 if (!tg3_flag(tp, 5705_PLUS)) {
2993 u32 newbits3;
2994
2995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2996 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2997 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2998 CLOCK_CTRL_TXCLK_DISABLE |
2999 CLOCK_CTRL_44MHZ_CORE);
3000 } else {
3001 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3002 }
3003
3004 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3005 tp->pci_clock_ctrl | newbits3, 40);
3006 }
3007 }
3008
3009 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3010 tg3_power_down_phy(tp, do_low_power);
3011
3012 tg3_frob_aux_power(tp, true);
3013
3014 /* Workaround for unstable PLL clock */
3015 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3016 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3017 u32 val = tr32(0x7d00);
3018
3019 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3020 tw32(0x7d00, val);
3021 if (!tg3_flag(tp, ENABLE_ASF)) {
3022 int err;
3023
3024 err = tg3_nvram_lock(tp);
3025 tg3_halt_cpu(tp, RX_CPU_BASE);
3026 if (!err)
3027 tg3_nvram_unlock(tp);
3028 }
3029 }
3030
3031 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3032
3033 return 0;
3034 }
3035
3036 static void tg3_power_down(struct tg3 *tp)
3037 {
3038 tg3_power_down_prepare(tp);
3039
3040 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3041 pci_set_power_state(tp->pdev, PCI_D3hot);
3042 }
3043
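/* Decode the PHY aux status register into a (speed, duplex) pair.
 * FET-style PHYs use a different encoding, reporting via the
 * MII_TG3_AUX_STAT_100 and _FULL bits instead.
 */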
3044 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3045 {
3046 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3047 case MII_TG3_AUX_STAT_10HALF:
3048 *speed = SPEED_10;
3049 *duplex = DUPLEX_HALF;
3050 break;
3051
3052 case MII_TG3_AUX_STAT_10FULL:
3053 *speed = SPEED_10;
3054 *duplex = DUPLEX_FULL;
3055 break;
3056
3057 case MII_TG3_AUX_STAT_100HALF:
3058 *speed = SPEED_100;
3059 *duplex = DUPLEX_HALF;
3060 break;
3061
3062 case MII_TG3_AUX_STAT_100FULL:
3063 *speed = SPEED_100;
3064 *duplex = DUPLEX_FULL;
3065 break;
3066
3067 case MII_TG3_AUX_STAT_1000HALF:
3068 *speed = SPEED_1000;
3069 *duplex = DUPLEX_HALF;
3070 break;
3071
3072 case MII_TG3_AUX_STAT_1000FULL:
3073 *speed = SPEED_1000;
3074 *duplex = DUPLEX_FULL;
3075 break;
3076
3077 default:
3078 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3080 SPEED_10;
3081 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3082 DUPLEX_HALF;
3083 break;
3084 }
3085 *speed = SPEED_INVALID;
3086 *duplex = DUPLEX_INVALID;
3087 break;
3088 }
3089 }
3090
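/* Translate an ethtool-style advertisement mask into the PHY's
 * MII_ADVERTISE and MII_CTRL1000 registers, and mirror any 100/1000
 * full-duplex capability into the clause 45 EEE advertisement, with
 * the chip-specific DSP fixups that entails.
 */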
3091 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3092 {
3093 int err = 0;
3094 u32 val, new_adv;
3095
3096 new_adv = ADVERTISE_CSMA;
3097 if (advertise & ADVERTISED_10baseT_Half)
3098 new_adv |= ADVERTISE_10HALF;
3099 if (advertise & ADVERTISED_10baseT_Full)
3100 new_adv |= ADVERTISE_10FULL;
3101 if (advertise & ADVERTISED_100baseT_Half)
3102 new_adv |= ADVERTISE_100HALF;
3103 if (advertise & ADVERTISED_100baseT_Full)
3104 new_adv |= ADVERTISE_100FULL;
3105
3106 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3107
3108 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3109 if (err)
3110 goto done;
3111
3112 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3113 goto done;
3114
3115 new_adv = 0;
3116 if (advertise & ADVERTISED_1000baseT_Half)
3117 new_adv |= ADVERTISE_1000HALF;
3118 if (advertise & ADVERTISED_1000baseT_Full)
3119 new_adv |= ADVERTISE_1000FULL;
3120
3121 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3122 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3123 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3124
3125 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3126 if (err)
3127 goto done;
3128
3129 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3130 goto done;
3131
3132 tw32(TG3_CPMU_EEE_MODE,
3133 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3134
3135 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3136 if (!err) {
3137 u32 err2;
3138
3139 val = 0;
3140 /* Advertise 100BASE-TX EEE ability */
3141 if (advertise & ADVERTISED_100baseT_Full)
3142 val |= MDIO_AN_EEE_ADV_100TX;
3143 /* Advertise 1000BASE-T EEE ability */
3144 if (advertise & ADVERTISED_1000baseT_Full)
3145 val |= MDIO_AN_EEE_ADV_1000T;
3146 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3147 if (err)
3148 val = 0;
3149
3150 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3151 case ASIC_REV_5717:
3152 case ASIC_REV_57765:
3153 case ASIC_REV_5719:
3154 /* If we advertised any EEE abilities above... */
3155 if (val)
3156 val = MII_TG3_DSP_TAP26_ALNOKO |
3157 MII_TG3_DSP_TAP26_RMRXSTO |
3158 MII_TG3_DSP_TAP26_OPCSINPT;
3159 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3160 /* Fall through */
3161 case ASIC_REV_5720:
3162 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3163 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3164 MII_TG3_DSP_CH34TP2_HIBW01);
3165 }
3166
3167 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3168 if (!err)
3169 err = err2;
3170 }
3171
3172 done:
3173 return err;
3174 }
3175
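/* Kick off copper link negotiation according to link_config: a
 * restricted advertisement in low-power mode, the full advertised set
 * under autoneg, or a forced speed/duplex. Forcing drops the PHY into
 * loopback until the old link dies before writing the new BMCR.
 */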
3176 static void tg3_phy_copper_begin(struct tg3 *tp)
3177 {
3178 u32 new_adv;
3179 int i;
3180
3181 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3182 new_adv = ADVERTISED_10baseT_Half |
3183 ADVERTISED_10baseT_Full;
3184 if (tg3_flag(tp, WOL_SPEED_100MB))
3185 new_adv |= ADVERTISED_100baseT_Half |
3186 ADVERTISED_100baseT_Full;
3187
3188 tg3_phy_autoneg_cfg(tp, new_adv,
3189 FLOW_CTRL_TX | FLOW_CTRL_RX);
3190 } else if (tp->link_config.speed == SPEED_INVALID) {
3191 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3192 tp->link_config.advertising &=
3193 ~(ADVERTISED_1000baseT_Half |
3194 ADVERTISED_1000baseT_Full);
3195
3196 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3197 tp->link_config.flowctrl);
3198 } else {
3199 /* Asking for a specific link mode. */
3200 if (tp->link_config.speed == SPEED_1000) {
3201 if (tp->link_config.duplex == DUPLEX_FULL)
3202 new_adv = ADVERTISED_1000baseT_Full;
3203 else
3204 new_adv = ADVERTISED_1000baseT_Half;
3205 } else if (tp->link_config.speed == SPEED_100) {
3206 if (tp->link_config.duplex == DUPLEX_FULL)
3207 new_adv = ADVERTISED_100baseT_Full;
3208 else
3209 new_adv = ADVERTISED_100baseT_Half;
3210 } else {
3211 if (tp->link_config.duplex == DUPLEX_FULL)
3212 new_adv = ADVERTISED_10baseT_Full;
3213 else
3214 new_adv = ADVERTISED_10baseT_Half;
3215 }
3216
3217 tg3_phy_autoneg_cfg(tp, new_adv,
3218 tp->link_config.flowctrl);
3219 }
3220
3221 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3222 tp->link_config.speed != SPEED_INVALID) {
3223 u32 bmcr, orig_bmcr;
3224
3225 tp->link_config.active_speed = tp->link_config.speed;
3226 tp->link_config.active_duplex = tp->link_config.duplex;
3227
3228 bmcr = 0;
3229 switch (tp->link_config.speed) {
3230 default:
3231 case SPEED_10:
3232 break;
3233
3234 case SPEED_100:
3235 bmcr |= BMCR_SPEED100;
3236 break;
3237
3238 case SPEED_1000:
3239 bmcr |= BMCR_SPEED1000;
3240 break;
3241 }
3242
3243 if (tp->link_config.duplex == DUPLEX_FULL)
3244 bmcr |= BMCR_FULLDPLX;
3245
3246 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3247 (bmcr != orig_bmcr)) {
3248 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3249 for (i = 0; i < 1500; i++) {
3250 u32 tmp;
3251
3252 udelay(10);
3253 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3254 tg3_readphy(tp, MII_BMSR, &tmp))
3255 continue;
3256 if (!(tmp & BMSR_LSTATUS)) {
3257 udelay(40);
3258 break;
3259 }
3260 }
3261 tg3_writephy(tp, MII_BMCR, bmcr);
3262 udelay(40);
3263 }
3264 } else {
3265 tg3_writephy(tp, MII_BMCR,
3266 BMCR_ANENABLE | BMCR_ANRESTART);
3267 }
3268 }
3269
3270 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3271 {
3272 int err;
3273
3274 /* Turn off tap power management and set the Extended packet
3275 * length bit; a single auxctl write covers both. */
3276 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3277
3278 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3279 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3280 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3281 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3282 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3283
3284 udelay(40);
3285
3286 return err;
3287 }
3288
3289 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3290 {
3291 u32 adv_reg, all_mask = 0;
3292
3293 if (mask & ADVERTISED_10baseT_Half)
3294 all_mask |= ADVERTISE_10HALF;
3295 if (mask & ADVERTISED_10baseT_Full)
3296 all_mask |= ADVERTISE_10FULL;
3297 if (mask & ADVERTISED_100baseT_Half)
3298 all_mask |= ADVERTISE_100HALF;
3299 if (mask & ADVERTISED_100baseT_Full)
3300 all_mask |= ADVERTISE_100FULL;
3301
3302 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3303 return 0;
3304
3305 if ((adv_reg & all_mask) != all_mask)
3306 return 0;
3307 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3308 u32 tg3_ctrl;
3309
3310 all_mask = 0;
3311 if (mask & ADVERTISED_1000baseT_Half)
3312 all_mask |= ADVERTISE_1000HALF;
3313 if (mask & ADVERTISED_1000baseT_Full)
3314 all_mask |= ADVERTISE_1000FULL;
3315
3316 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3317 return 0;
3318
3319 if ((tg3_ctrl & all_mask) != all_mask)
3320 return 0;
3321 }
3322 return 1;
3323 }
3324
3325 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3326 {
3327 u32 curadv, reqadv;
3328
3329 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3330 return 1;
3331
3332 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3333 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3334
3335 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3336 if (curadv != reqadv)
3337 return 0;
3338
3339 if (tg3_flag(tp, PAUSE_AUTONEG))
3340 tg3_readphy(tp, MII_LPA, rmtadv);
3341 } else {
3342 /* Reprogram the advertisement register, even if it
3343 * does not affect the current link. If the link
3344 * gets renegotiated in the future, we can save an
3345 * additional renegotiation cycle by advertising
3346 * it correctly in the first place.
3347 */
3348 if (curadv != reqadv) {
3349 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3350 ADVERTISE_PAUSE_ASYM);
3351 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3352 }
3353 }
3354
3355 return 1;
3356 }
3357
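/* The main copper link bring-up path: clear stale MAC/PHY status,
 * reset the PHY if needed (including the 5401 DSP init dance), poll
 * BMSR for link, derive speed/duplex from the aux status register,
 * and reprogram MAC_MODE, flow control and carrier state to match.
 */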
3358 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3359 {
3360 int current_link_up;
3361 u32 bmsr, val;
3362 u32 lcl_adv, rmt_adv;
3363 u16 current_speed;
3364 u8 current_duplex;
3365 int i, err;
3366
3367 tw32(MAC_EVENT, 0);
3368
3369 tw32_f(MAC_STATUS,
3370 (MAC_STATUS_SYNC_CHANGED |
3371 MAC_STATUS_CFG_CHANGED |
3372 MAC_STATUS_MI_COMPLETION |
3373 MAC_STATUS_LNKSTATE_CHANGED));
3374 udelay(40);
3375
3376 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3377 tw32_f(MAC_MI_MODE,
3378 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3379 udelay(80);
3380 }
3381
3382 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3383
3384 /* Some third-party PHYs need to be reset on link going
3385 * down.
3386 */
3387 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3390 netif_carrier_ok(tp->dev)) {
3391 tg3_readphy(tp, MII_BMSR, &bmsr);
3392 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3393 !(bmsr & BMSR_LSTATUS))
3394 force_reset = 1;
3395 }
3396 if (force_reset)
3397 tg3_phy_reset(tp);
3398
3399 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3400 tg3_readphy(tp, MII_BMSR, &bmsr);
3401 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3402 !tg3_flag(tp, INIT_COMPLETE))
3403 bmsr = 0;
3404
3405 if (!(bmsr & BMSR_LSTATUS)) {
3406 err = tg3_init_5401phy_dsp(tp);
3407 if (err)
3408 return err;
3409
3410 tg3_readphy(tp, MII_BMSR, &bmsr);
3411 for (i = 0; i < 1000; i++) {
3412 udelay(10);
3413 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3414 (bmsr & BMSR_LSTATUS)) {
3415 udelay(40);
3416 break;
3417 }
3418 }
3419
3420 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3421 TG3_PHY_REV_BCM5401_B0 &&
3422 !(bmsr & BMSR_LSTATUS) &&
3423 tp->link_config.active_speed == SPEED_1000) {
3424 err = tg3_phy_reset(tp);
3425 if (!err)
3426 err = tg3_init_5401phy_dsp(tp);
3427 if (err)
3428 return err;
3429 }
3430 }
3431 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3432 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3433 /* 5701 {A0,B0} CRC bug workaround */
3434 tg3_writephy(tp, 0x15, 0x0a75);
3435 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3436 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3437 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3438 }
3439
3440 /* Clear pending interrupts... */
3441 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3442 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3443
3444 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3445 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3446 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3447 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3448
3449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3452 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3453 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3454 else
3455 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3456 }
3457
3458 current_link_up = 0;
3459 current_speed = SPEED_INVALID;
3460 current_duplex = DUPLEX_INVALID;
3461
3462 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3463 err = tg3_phy_auxctl_read(tp,
3464 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3465 &val);
3466 if (!err && !(val & (1 << 10))) {
3467 tg3_phy_auxctl_write(tp,
3468 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3469 val | (1 << 10));
3470 goto relink;
3471 }
3472 }
3473
3474 bmsr = 0;
3475 for (i = 0; i < 100; i++) {
3476 tg3_readphy(tp, MII_BMSR, &bmsr);
3477 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3478 (bmsr & BMSR_LSTATUS))
3479 break;
3480 udelay(40);
3481 }
3482
3483 if (bmsr & BMSR_LSTATUS) {
3484 u32 aux_stat, bmcr;
3485
3486 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3487 for (i = 0; i < 2000; i++) {
3488 udelay(10);
3489 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3490 aux_stat)
3491 break;
3492 }
3493
3494 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3495 &current_speed,
3496 &current_duplex);
3497
3498 bmcr = 0;
3499 for (i = 0; i < 200; i++) {
3500 tg3_readphy(tp, MII_BMCR, &bmcr);
3501 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3502 continue;
3503 if (bmcr && bmcr != 0x7fff)
3504 break;
3505 udelay(10);
3506 }
3507
3508 lcl_adv = 0;
3509 rmt_adv = 0;
3510
3511 tp->link_config.active_speed = current_speed;
3512 tp->link_config.active_duplex = current_duplex;
3513
3514 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3515 if ((bmcr & BMCR_ANENABLE) &&
3516 tg3_copper_is_advertising_all(tp,
3517 tp->link_config.advertising)) {
3518 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3519 &rmt_adv))
3520 current_link_up = 1;
3521 }
3522 } else {
3523 if (!(bmcr & BMCR_ANENABLE) &&
3524 tp->link_config.speed == current_speed &&
3525 tp->link_config.duplex == current_duplex &&
3526 tp->link_config.flowctrl ==
3527 tp->link_config.active_flowctrl) {
3528 current_link_up = 1;
3529 }
3530 }
3531
3532 if (current_link_up == 1 &&
3533 tp->link_config.active_duplex == DUPLEX_FULL)
3534 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3535 }
3536
3537 relink:
3538 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3539 tg3_phy_copper_begin(tp);
3540
3541 tg3_readphy(tp, MII_BMSR, &bmsr);
3542 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3543 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3544 current_link_up = 1;
3545 }
3546
3547 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3548 if (current_link_up == 1) {
3549 if (tp->link_config.active_speed == SPEED_100 ||
3550 tp->link_config.active_speed == SPEED_10)
3551 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3552 else
3553 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3554 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3555 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3556 else
3557 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3558
3559 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3560 if (tp->link_config.active_duplex == DUPLEX_HALF)
3561 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3562
3563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3564 if (current_link_up == 1 &&
3565 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3566 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3567 else
3568 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3569 }
3570
3571 /* ??? Without this setting Netgear GA302T PHY does not
3572 * ??? send/receive packets...
3573 */
3574 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3575 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3576 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3577 tw32_f(MAC_MI_MODE, tp->mi_mode);
3578 udelay(80);
3579 }
3580
3581 tw32_f(MAC_MODE, tp->mac_mode);
3582 udelay(40);
3583
3584 tg3_phy_eee_adjust(tp, current_link_up);
3585
3586 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3587 /* Polled via timer. */
3588 tw32_f(MAC_EVENT, 0);
3589 } else {
3590 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3591 }
3592 udelay(40);
3593
3594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3595 current_link_up == 1 &&
3596 tp->link_config.active_speed == SPEED_1000 &&
3597 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3598 udelay(120);
3599 tw32_f(MAC_STATUS,
3600 (MAC_STATUS_SYNC_CHANGED |
3601 MAC_STATUS_CFG_CHANGED));
3602 udelay(40);
3603 tg3_write_mem(tp,
3604 NIC_SRAM_FIRMWARE_MBOX,
3605 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3606 }
3607
3608 /* Prevent send BD corruption. */
3609 if (tg3_flag(tp, CLKREQ_BUG)) {
3610 u16 oldlnkctl, newlnkctl;
3611
3612 pci_read_config_word(tp->pdev,
3613 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3614 &oldlnkctl);
3615 if (tp->link_config.active_speed == SPEED_100 ||
3616 tp->link_config.active_speed == SPEED_10)
3617 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3618 else
3619 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3620 if (newlnkctl != oldlnkctl)
3621 pci_write_config_word(tp->pdev,
3622 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3623 newlnkctl);
3624 }
3625
3626 if (current_link_up != netif_carrier_ok(tp->dev)) {
3627 if (current_link_up)
3628 netif_carrier_on(tp->dev);
3629 else
3630 netif_carrier_off(tp->dev);
3631 tg3_link_report(tp);
3632 }
3633
3634 return 0;
3635 }
3636
3637 struct tg3_fiber_aneginfo {
3638 int state;
3639 #define ANEG_STATE_UNKNOWN 0
3640 #define ANEG_STATE_AN_ENABLE 1
3641 #define ANEG_STATE_RESTART_INIT 2
3642 #define ANEG_STATE_RESTART 3
3643 #define ANEG_STATE_DISABLE_LINK_OK 4
3644 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3645 #define ANEG_STATE_ABILITY_DETECT 6
3646 #define ANEG_STATE_ACK_DETECT_INIT 7
3647 #define ANEG_STATE_ACK_DETECT 8
3648 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3649 #define ANEG_STATE_COMPLETE_ACK 10
3650 #define ANEG_STATE_IDLE_DETECT_INIT 11
3651 #define ANEG_STATE_IDLE_DETECT 12
3652 #define ANEG_STATE_LINK_OK 13
3653 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3654 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3655
3656 u32 flags;
3657 #define MR_AN_ENABLE 0x00000001
3658 #define MR_RESTART_AN 0x00000002
3659 #define MR_AN_COMPLETE 0x00000004
3660 #define MR_PAGE_RX 0x00000008
3661 #define MR_NP_LOADED 0x00000010
3662 #define MR_TOGGLE_TX 0x00000020
3663 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3664 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3665 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3666 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3667 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3668 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3669 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3670 #define MR_TOGGLE_RX 0x00002000
3671 #define MR_NP_RX 0x00004000
3672
3673 #define MR_LINK_OK 0x80000000
3674
3675 unsigned long link_time, cur_time;
3676
3677 u32 ability_match_cfg;
3678 int ability_match_count;
3679
3680 char ability_match, idle_match, ack_match;
3681
3682 u32 txconfig, rxconfig;
3683 #define ANEG_CFG_NP 0x00000080
3684 #define ANEG_CFG_ACK 0x00000040
3685 #define ANEG_CFG_RF2 0x00000020
3686 #define ANEG_CFG_RF1 0x00000010
3687 #define ANEG_CFG_PS2 0x00000001
3688 #define ANEG_CFG_PS1 0x00008000
3689 #define ANEG_CFG_HD 0x00004000
3690 #define ANEG_CFG_FD 0x00002000
3691 #define ANEG_CFG_INVAL 0x00001f06
3692
3693 };
3694 #define ANEG_OK 0
3695 #define ANEG_DONE 1
3696 #define ANEG_TIMER_ENAB 2
3697 #define ANEG_FAILED -1
3698
3699 #define ANEG_STATE_SETTLE_TIME 10000
3700
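/* Software autonegotiation state machine for fiber links, modeled on
 * the IEEE 802.3z arbitration scheme: each call samples the received
 * config word, debounces it via ability_match_count, and walks the
 * ANEG_STATE_* graph, returning ANEG_OK/ANEG_TIMER_ENAB while in
 * progress and ANEG_DONE or ANEG_FAILED once settled.
 */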
3701 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3702 struct tg3_fiber_aneginfo *ap)
3703 {
3704 u16 flowctrl;
3705 unsigned long delta;
3706 u32 rx_cfg_reg;
3707 int ret;
3708
3709 if (ap->state == ANEG_STATE_UNKNOWN) {
3710 ap->rxconfig = 0;
3711 ap->link_time = 0;
3712 ap->cur_time = 0;
3713 ap->ability_match_cfg = 0;
3714 ap->ability_match_count = 0;
3715 ap->ability_match = 0;
3716 ap->idle_match = 0;
3717 ap->ack_match = 0;
3718 }
3719 ap->cur_time++;
3720
3721 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3722 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3723
3724 if (rx_cfg_reg != ap->ability_match_cfg) {
3725 ap->ability_match_cfg = rx_cfg_reg;
3726 ap->ability_match = 0;
3727 ap->ability_match_count = 0;
3728 } else {
3729 if (++ap->ability_match_count > 1) {
3730 ap->ability_match = 1;
3731 ap->ability_match_cfg = rx_cfg_reg;
3732 }
3733 }
3734 if (rx_cfg_reg & ANEG_CFG_ACK)
3735 ap->ack_match = 1;
3736 else
3737 ap->ack_match = 0;
3738
3739 ap->idle_match = 0;
3740 } else {
3741 ap->idle_match = 1;
3742 ap->ability_match_cfg = 0;
3743 ap->ability_match_count = 0;
3744 ap->ability_match = 0;
3745 ap->ack_match = 0;
3746
3747 rx_cfg_reg = 0;
3748 }
3749
3750 ap->rxconfig = rx_cfg_reg;
3751 ret = ANEG_OK;
3752
3753 switch (ap->state) {
3754 case ANEG_STATE_UNKNOWN:
3755 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3756 ap->state = ANEG_STATE_AN_ENABLE;
3757
3758 /* fallthru */
3759 case ANEG_STATE_AN_ENABLE:
3760 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3761 if (ap->flags & MR_AN_ENABLE) {
3762 ap->link_time = 0;
3763 ap->cur_time = 0;
3764 ap->ability_match_cfg = 0;
3765 ap->ability_match_count = 0;
3766 ap->ability_match = 0;
3767 ap->idle_match = 0;
3768 ap->ack_match = 0;
3769
3770 ap->state = ANEG_STATE_RESTART_INIT;
3771 } else {
3772 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3773 }
3774 break;
3775
3776 case ANEG_STATE_RESTART_INIT:
3777 ap->link_time = ap->cur_time;
3778 ap->flags &= ~(MR_NP_LOADED);
3779 ap->txconfig = 0;
3780 tw32(MAC_TX_AUTO_NEG, 0);
3781 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3782 tw32_f(MAC_MODE, tp->mac_mode);
3783 udelay(40);
3784
3785 ret = ANEG_TIMER_ENAB;
3786 ap->state = ANEG_STATE_RESTART;
3787
3788 /* fallthru */
3789 case ANEG_STATE_RESTART:
3790 delta = ap->cur_time - ap->link_time;
3791 if (delta > ANEG_STATE_SETTLE_TIME)
3792 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3793 else
3794 ret = ANEG_TIMER_ENAB;
3795 break;
3796
3797 case ANEG_STATE_DISABLE_LINK_OK:
3798 ret = ANEG_DONE;
3799 break;
3800
3801 case ANEG_STATE_ABILITY_DETECT_INIT:
3802 ap->flags &= ~(MR_TOGGLE_TX);
3803 ap->txconfig = ANEG_CFG_FD;
3804 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3805 if (flowctrl & ADVERTISE_1000XPAUSE)
3806 ap->txconfig |= ANEG_CFG_PS1;
3807 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3808 ap->txconfig |= ANEG_CFG_PS2;
3809 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3810 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3811 tw32_f(MAC_MODE, tp->mac_mode);
3812 udelay(40);
3813
3814 ap->state = ANEG_STATE_ABILITY_DETECT;
3815 break;
3816
3817 case ANEG_STATE_ABILITY_DETECT:
3818 if (ap->ability_match != 0 && ap->rxconfig != 0)
3819 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3820 break;
3821
3822 case ANEG_STATE_ACK_DETECT_INIT:
3823 ap->txconfig |= ANEG_CFG_ACK;
3824 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3825 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3826 tw32_f(MAC_MODE, tp->mac_mode);
3827 udelay(40);
3828
3829 ap->state = ANEG_STATE_ACK_DETECT;
3830
3831 /* fallthru */
3832 case ANEG_STATE_ACK_DETECT:
3833 if (ap->ack_match != 0) {
3834 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3835 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3836 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3837 } else {
3838 ap->state = ANEG_STATE_AN_ENABLE;
3839 }
3840 } else if (ap->ability_match != 0 &&
3841 ap->rxconfig == 0) {
3842 ap->state = ANEG_STATE_AN_ENABLE;
3843 }
3844 break;
3845
3846 case ANEG_STATE_COMPLETE_ACK_INIT:
3847 if (ap->rxconfig & ANEG_CFG_INVAL) {
3848 ret = ANEG_FAILED;
3849 break;
3850 }
3851 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3852 MR_LP_ADV_HALF_DUPLEX |
3853 MR_LP_ADV_SYM_PAUSE |
3854 MR_LP_ADV_ASYM_PAUSE |
3855 MR_LP_ADV_REMOTE_FAULT1 |
3856 MR_LP_ADV_REMOTE_FAULT2 |
3857 MR_LP_ADV_NEXT_PAGE |
3858 MR_TOGGLE_RX |
3859 MR_NP_RX);
3860 if (ap->rxconfig & ANEG_CFG_FD)
3861 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3862 if (ap->rxconfig & ANEG_CFG_HD)
3863 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3864 if (ap->rxconfig & ANEG_CFG_PS1)
3865 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3866 if (ap->rxconfig & ANEG_CFG_PS2)
3867 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3868 if (ap->rxconfig & ANEG_CFG_RF1)
3869 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3870 if (ap->rxconfig & ANEG_CFG_RF2)
3871 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3872 if (ap->rxconfig & ANEG_CFG_NP)
3873 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3874
3875 ap->link_time = ap->cur_time;
3876
3877 ap->flags ^= (MR_TOGGLE_TX);
3878 if (ap->rxconfig & 0x0008)
3879 ap->flags |= MR_TOGGLE_RX;
3880 if (ap->rxconfig & ANEG_CFG_NP)
3881 ap->flags |= MR_NP_RX;
3882 ap->flags |= MR_PAGE_RX;
3883
3884 ap->state = ANEG_STATE_COMPLETE_ACK;
3885 ret = ANEG_TIMER_ENAB;
3886 break;
3887
3888 case ANEG_STATE_COMPLETE_ACK:
3889 if (ap->ability_match != 0 &&
3890 ap->rxconfig == 0) {
3891 ap->state = ANEG_STATE_AN_ENABLE;
3892 break;
3893 }
3894 delta = ap->cur_time - ap->link_time;
3895 if (delta > ANEG_STATE_SETTLE_TIME) {
3896 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3897 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3898 } else {
3899 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3900 !(ap->flags & MR_NP_RX)) {
3901 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3902 } else {
3903 ret = ANEG_FAILED;
3904 }
3905 }
3906 }
3907 break;
3908
3909 case ANEG_STATE_IDLE_DETECT_INIT:
3910 ap->link_time = ap->cur_time;
3911 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3912 tw32_f(MAC_MODE, tp->mac_mode);
3913 udelay(40);
3914
3915 ap->state = ANEG_STATE_IDLE_DETECT;
3916 ret = ANEG_TIMER_ENAB;
3917 break;
3918
3919 case ANEG_STATE_IDLE_DETECT:
3920 if (ap->ability_match != 0 &&
3921 ap->rxconfig == 0) {
3922 ap->state = ANEG_STATE_AN_ENABLE;
3923 break;
3924 }
3925 delta = ap->cur_time - ap->link_time;
3926 if (delta > ANEG_STATE_SETTLE_TIME) {
3927 /* XXX another gem from the Broadcom driver :( */
3928 ap->state = ANEG_STATE_LINK_OK;
3929 }
3930 break;
3931
3932 case ANEG_STATE_LINK_OK:
3933 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3934 ret = ANEG_DONE;
3935 break;
3936
3937 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3938 /* ??? unimplemented */
3939 break;
3940
3941 case ANEG_STATE_NEXT_PAGE_WAIT:
3942 /* ??? unimplemented */
3943 break;
3944
3945 default:
3946 ret = ANEG_FAILED;
3947 break;
3948 }
3949
3950 return ret;
3951 }
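/* A rough sketch of the happy path through tg3_fiber_aneg_smachine()
 * above, derived from the transitions in the switch statement (not an
 * authoritative restatement of 802.3z clause 37):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART
 *     -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *     -> ACK_DETECT_INIT -> ACK_DETECT
 *     -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *     -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * A mismatch in the received config words drops the machine back to
 * AN_ENABLE, while ANEG_CFG_INVAL or a next-page disagreement returns
 * ANEG_FAILED.
 */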
3952
3953 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3954 {
3955 int res = 0;
3956 struct tg3_fiber_aneginfo aninfo;
3957 int status = ANEG_FAILED;
3958 unsigned int tick;
3959 u32 tmp;
3960
3961 tw32_f(MAC_TX_AUTO_NEG, 0);
3962
3963 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3964 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3965 udelay(40);
3966
3967 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3968 udelay(40);
3969
3970 memset(&aninfo, 0, sizeof(aninfo));
3971 aninfo.flags |= MR_AN_ENABLE;
3972 aninfo.state = ANEG_STATE_UNKNOWN;
3973 aninfo.cur_time = 0;
3974 tick = 0;
3975 while (++tick < 195000) {
3976 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3977 if (status == ANEG_DONE || status == ANEG_FAILED)
3978 break;
3979
3980 udelay(1);
3981 }
3982
3983 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3984 tw32_f(MAC_MODE, tp->mac_mode);
3985 udelay(40);
3986
3987 *txflags = aninfo.txconfig;
3988 *rxflags = aninfo.flags;
3989
3990 if (status == ANEG_DONE &&
3991 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3992 MR_LP_ADV_FULL_DUPLEX)))
3993 res = 1;
3994
3995 return res;
3996 }
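/* Note on the polling loop in fiber_autoneg() above: each iteration
 * delays 1 usec, so the 195000-tick bound gives the state machine
 * roughly 195 ms (plus per-call overhead) to reach ANEG_DONE or
 * ANEG_FAILED before we give up.
 */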
3997
3998 static void tg3_init_bcm8002(struct tg3 *tp)
3999 {
4000 u32 mac_status = tr32(MAC_STATUS);
4001 int i;
4002
4003 	/* Reset when initializing the first time or when we have a link. */

4004 if (tg3_flag(tp, INIT_COMPLETE) &&
4005 !(mac_status & MAC_STATUS_PCS_SYNCED))
4006 return;
4007
4008 /* Set PLL lock range. */
4009 tg3_writephy(tp, 0x16, 0x8007);
4010
4011 /* SW reset */
4012 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4013
4014 /* Wait for reset to complete. */
4015 /* XXX schedule_timeout() ... */
4016 for (i = 0; i < 500; i++)
4017 udelay(10);
4018
4019 /* Config mode; select PMA/Ch 1 regs. */
4020 tg3_writephy(tp, 0x10, 0x8411);
4021
4022 /* Enable auto-lock and comdet, select txclk for tx. */
4023 tg3_writephy(tp, 0x11, 0x0a10);
4024
4025 tg3_writephy(tp, 0x18, 0x00a0);
4026 tg3_writephy(tp, 0x16, 0x41ff);
4027
4028 /* Assert and deassert POR. */
4029 tg3_writephy(tp, 0x13, 0x0400);
4030 udelay(40);
4031 tg3_writephy(tp, 0x13, 0x0000);
4032
4033 tg3_writephy(tp, 0x11, 0x0a50);
4034 udelay(40);
4035 tg3_writephy(tp, 0x11, 0x0a10);
4036
4037 /* Wait for signal to stabilize */
4038 /* XXX schedule_timeout() ... */
4039 for (i = 0; i < 15000; i++)
4040 udelay(10);
4041
4042 /* Deselect the channel register so we can read the PHYID
4043 * later.
4044 */
4045 tg3_writephy(tp, 0x10, 0x8011);
4046 }
4047
4048 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4049 {
4050 u16 flowctrl;
4051 u32 sg_dig_ctrl, sg_dig_status;
4052 u32 serdes_cfg, expected_sg_dig_ctrl;
4053 int workaround, port_a;
4054 int current_link_up;
4055
4056 serdes_cfg = 0;
4057 expected_sg_dig_ctrl = 0;
4058 workaround = 0;
4059 port_a = 1;
4060 current_link_up = 0;
4061
4062 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4063 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4064 workaround = 1;
4065 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4066 port_a = 0;
4067
4068 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4069 /* preserve bits 20-23 for voltage regulator */
4070 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4071 }
4072
4073 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4074
4075 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4076 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4077 if (workaround) {
4078 u32 val = serdes_cfg;
4079
4080 if (port_a)
4081 val |= 0xc010000;
4082 else
4083 val |= 0x4010000;
4084 tw32_f(MAC_SERDES_CFG, val);
4085 }
4086
4087 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4088 }
4089 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4090 tg3_setup_flow_control(tp, 0, 0);
4091 current_link_up = 1;
4092 }
4093 goto out;
4094 }
4095
4096 /* Want auto-negotiation. */
4097 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4098
4099 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4100 if (flowctrl & ADVERTISE_1000XPAUSE)
4101 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4102 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4103 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4104
4105 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4106 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4107 tp->serdes_counter &&
4108 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4109 MAC_STATUS_RCVD_CFG)) ==
4110 MAC_STATUS_PCS_SYNCED)) {
4111 tp->serdes_counter--;
4112 current_link_up = 1;
4113 goto out;
4114 }
4115 restart_autoneg:
4116 if (workaround)
4117 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4118 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4119 udelay(5);
4120 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4121
4122 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4123 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4124 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4125 MAC_STATUS_SIGNAL_DET)) {
4126 sg_dig_status = tr32(SG_DIG_STATUS);
4127 mac_status = tr32(MAC_STATUS);
4128
4129 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4130 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4131 u32 local_adv = 0, remote_adv = 0;
4132
4133 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4134 local_adv |= ADVERTISE_1000XPAUSE;
4135 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4136 local_adv |= ADVERTISE_1000XPSE_ASYM;
4137
4138 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4139 remote_adv |= LPA_1000XPAUSE;
4140 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4141 remote_adv |= LPA_1000XPAUSE_ASYM;
4142
4143 tg3_setup_flow_control(tp, local_adv, remote_adv);
4144 current_link_up = 1;
4145 tp->serdes_counter = 0;
4146 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4147 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4148 if (tp->serdes_counter)
4149 tp->serdes_counter--;
4150 else {
4151 if (workaround) {
4152 u32 val = serdes_cfg;
4153
4154 if (port_a)
4155 val |= 0xc010000;
4156 else
4157 val |= 0x4010000;
4158
4159 tw32_f(MAC_SERDES_CFG, val);
4160 }
4161
4162 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4163 udelay(40);
4164
4165 				/* Link parallel detection: link is up */
4166 				/* only if we have PCS_SYNC and are not */
4167 				/* receiving config code words. */
4168 mac_status = tr32(MAC_STATUS);
4169 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4170 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4171 tg3_setup_flow_control(tp, 0, 0);
4172 current_link_up = 1;
4173 tp->phy_flags |=
4174 TG3_PHYFLG_PARALLEL_DETECT;
4175 tp->serdes_counter =
4176 SERDES_PARALLEL_DET_TIMEOUT;
4177 } else
4178 goto restart_autoneg;
4179 }
4180 }
4181 } else {
4182 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4183 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4184 }
4185
4186 out:
4187 return current_link_up;
4188 }
4189
4190 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4191 {
4192 int current_link_up = 0;
4193
4194 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4195 goto out;
4196
4197 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4198 u32 txflags, rxflags;
4199 int i;
4200
4201 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4202 u32 local_adv = 0, remote_adv = 0;
4203
4204 if (txflags & ANEG_CFG_PS1)
4205 local_adv |= ADVERTISE_1000XPAUSE;
4206 if (txflags & ANEG_CFG_PS2)
4207 local_adv |= ADVERTISE_1000XPSE_ASYM;
4208
4209 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4210 remote_adv |= LPA_1000XPAUSE;
4211 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4212 remote_adv |= LPA_1000XPAUSE_ASYM;
4213
4214 tg3_setup_flow_control(tp, local_adv, remote_adv);
4215
4216 current_link_up = 1;
4217 }
4218 for (i = 0; i < 30; i++) {
4219 udelay(20);
4220 tw32_f(MAC_STATUS,
4221 (MAC_STATUS_SYNC_CHANGED |
4222 MAC_STATUS_CFG_CHANGED));
4223 udelay(40);
4224 if ((tr32(MAC_STATUS) &
4225 (MAC_STATUS_SYNC_CHANGED |
4226 MAC_STATUS_CFG_CHANGED)) == 0)
4227 break;
4228 }
4229
4230 mac_status = tr32(MAC_STATUS);
4231 if (current_link_up == 0 &&
4232 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4233 !(mac_status & MAC_STATUS_RCVD_CFG))
4234 current_link_up = 1;
4235 } else {
4236 tg3_setup_flow_control(tp, 0, 0);
4237
4238 /* Forcing 1000FD link up. */
4239 current_link_up = 1;
4240
4241 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4242 udelay(40);
4243
4244 tw32_f(MAC_MODE, tp->mac_mode);
4245 udelay(40);
4246 }
4247
4248 out:
4249 return current_link_up;
4250 }
4251
4252 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4253 {
4254 u32 orig_pause_cfg;
4255 u16 orig_active_speed;
4256 u8 orig_active_duplex;
4257 u32 mac_status;
4258 int current_link_up;
4259 int i;
4260
4261 orig_pause_cfg = tp->link_config.active_flowctrl;
4262 orig_active_speed = tp->link_config.active_speed;
4263 orig_active_duplex = tp->link_config.active_duplex;
4264
4265 if (!tg3_flag(tp, HW_AUTONEG) &&
4266 netif_carrier_ok(tp->dev) &&
4267 tg3_flag(tp, INIT_COMPLETE)) {
4268 mac_status = tr32(MAC_STATUS);
4269 mac_status &= (MAC_STATUS_PCS_SYNCED |
4270 MAC_STATUS_SIGNAL_DET |
4271 MAC_STATUS_CFG_CHANGED |
4272 MAC_STATUS_RCVD_CFG);
4273 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4274 MAC_STATUS_SIGNAL_DET)) {
4275 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4276 MAC_STATUS_CFG_CHANGED));
4277 return 0;
4278 }
4279 }
4280
4281 tw32_f(MAC_TX_AUTO_NEG, 0);
4282
4283 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4284 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4285 tw32_f(MAC_MODE, tp->mac_mode);
4286 udelay(40);
4287
4288 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4289 tg3_init_bcm8002(tp);
4290
4291 /* Enable link change event even when serdes polling. */
4292 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4293 udelay(40);
4294
4295 current_link_up = 0;
4296 mac_status = tr32(MAC_STATUS);
4297
4298 if (tg3_flag(tp, HW_AUTONEG))
4299 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4300 else
4301 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4302
4303 tp->napi[0].hw_status->status =
4304 (SD_STATUS_UPDATED |
4305 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4306
4307 for (i = 0; i < 100; i++) {
4308 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4309 MAC_STATUS_CFG_CHANGED));
4310 udelay(5);
4311 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4312 MAC_STATUS_CFG_CHANGED |
4313 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4314 break;
4315 }
4316
4317 mac_status = tr32(MAC_STATUS);
4318 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4319 current_link_up = 0;
4320 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4321 tp->serdes_counter == 0) {
4322 tw32_f(MAC_MODE, (tp->mac_mode |
4323 MAC_MODE_SEND_CONFIGS));
4324 udelay(1);
4325 tw32_f(MAC_MODE, tp->mac_mode);
4326 }
4327 }
4328
4329 if (current_link_up == 1) {
4330 tp->link_config.active_speed = SPEED_1000;
4331 tp->link_config.active_duplex = DUPLEX_FULL;
4332 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4333 LED_CTRL_LNKLED_OVERRIDE |
4334 LED_CTRL_1000MBPS_ON));
4335 } else {
4336 tp->link_config.active_speed = SPEED_INVALID;
4337 tp->link_config.active_duplex = DUPLEX_INVALID;
4338 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4339 LED_CTRL_LNKLED_OVERRIDE |
4340 LED_CTRL_TRAFFIC_OVERRIDE));
4341 }
4342
4343 if (current_link_up != netif_carrier_ok(tp->dev)) {
4344 if (current_link_up)
4345 netif_carrier_on(tp->dev);
4346 else
4347 netif_carrier_off(tp->dev);
4348 tg3_link_report(tp);
4349 } else {
4350 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4351 if (orig_pause_cfg != now_pause_cfg ||
4352 orig_active_speed != tp->link_config.active_speed ||
4353 orig_active_duplex != tp->link_config.active_duplex)
4354 tg3_link_report(tp);
4355 }
4356
4357 return 0;
4358 }
4359
4360 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4361 {
4362 int current_link_up, err = 0;
4363 u32 bmsr, bmcr;
4364 u16 current_speed;
4365 u8 current_duplex;
4366 u32 local_adv, remote_adv;
4367
4368 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4369 tw32_f(MAC_MODE, tp->mac_mode);
4370 udelay(40);
4371
4372 tw32(MAC_EVENT, 0);
4373
4374 tw32_f(MAC_STATUS,
4375 (MAC_STATUS_SYNC_CHANGED |
4376 MAC_STATUS_CFG_CHANGED |
4377 MAC_STATUS_MI_COMPLETION |
4378 MAC_STATUS_LNKSTATE_CHANGED));
4379 udelay(40);
4380
4381 if (force_reset)
4382 tg3_phy_reset(tp);
4383
4384 current_link_up = 0;
4385 current_speed = SPEED_INVALID;
4386 current_duplex = DUPLEX_INVALID;
4387
4388 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4389 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4391 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4392 bmsr |= BMSR_LSTATUS;
4393 else
4394 bmsr &= ~BMSR_LSTATUS;
4395 }
4396
4397 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4398
4399 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4400 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4401 /* do nothing, just check for link up at the end */
4402 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4403 u32 adv, new_adv;
4404
4405 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4406 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4407 ADVERTISE_1000XPAUSE |
4408 ADVERTISE_1000XPSE_ASYM |
4409 ADVERTISE_SLCT);
4410
4411 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4412
4413 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4414 new_adv |= ADVERTISE_1000XHALF;
4415 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4416 new_adv |= ADVERTISE_1000XFULL;
4417
4418 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4419 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4420 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4421 tg3_writephy(tp, MII_BMCR, bmcr);
4422
4423 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4424 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4425 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4426
4427 return err;
4428 }
4429 } else {
4430 u32 new_bmcr;
4431
4432 bmcr &= ~BMCR_SPEED1000;
4433 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4434
4435 if (tp->link_config.duplex == DUPLEX_FULL)
4436 new_bmcr |= BMCR_FULLDPLX;
4437
4438 if (new_bmcr != bmcr) {
4439 /* BMCR_SPEED1000 is a reserved bit that needs
4440 * to be set on write.
4441 */
4442 new_bmcr |= BMCR_SPEED1000;
4443
4444 /* Force a linkdown */
4445 if (netif_carrier_ok(tp->dev)) {
4446 u32 adv;
4447
4448 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4449 adv &= ~(ADVERTISE_1000XFULL |
4450 ADVERTISE_1000XHALF |
4451 ADVERTISE_SLCT);
4452 tg3_writephy(tp, MII_ADVERTISE, adv);
4453 tg3_writephy(tp, MII_BMCR, bmcr |
4454 BMCR_ANRESTART |
4455 BMCR_ANENABLE);
4456 udelay(10);
4457 netif_carrier_off(tp->dev);
4458 }
4459 tg3_writephy(tp, MII_BMCR, new_bmcr);
4460 bmcr = new_bmcr;
4461 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4462 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4463 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4464 ASIC_REV_5714) {
4465 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4466 bmsr |= BMSR_LSTATUS;
4467 else
4468 bmsr &= ~BMSR_LSTATUS;
4469 }
4470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4471 }
4472 }
4473
4474 if (bmsr & BMSR_LSTATUS) {
4475 current_speed = SPEED_1000;
4476 current_link_up = 1;
4477 if (bmcr & BMCR_FULLDPLX)
4478 current_duplex = DUPLEX_FULL;
4479 else
4480 current_duplex = DUPLEX_HALF;
4481
4482 local_adv = 0;
4483 remote_adv = 0;
4484
4485 if (bmcr & BMCR_ANENABLE) {
4486 u32 common;
4487
4488 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4489 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4490 common = local_adv & remote_adv;
4491 if (common & (ADVERTISE_1000XHALF |
4492 ADVERTISE_1000XFULL)) {
4493 if (common & ADVERTISE_1000XFULL)
4494 current_duplex = DUPLEX_FULL;
4495 else
4496 current_duplex = DUPLEX_HALF;
4497 } else if (!tg3_flag(tp, 5780_CLASS)) {
4498 /* Link is up via parallel detect */
4499 } else {
4500 current_link_up = 0;
4501 }
4502 }
4503 }
4504
4505 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4506 tg3_setup_flow_control(tp, local_adv, remote_adv);
4507
4508 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4509 if (tp->link_config.active_duplex == DUPLEX_HALF)
4510 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4511
4512 tw32_f(MAC_MODE, tp->mac_mode);
4513 udelay(40);
4514
4515 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4516
4517 tp->link_config.active_speed = current_speed;
4518 tp->link_config.active_duplex = current_duplex;
4519
4520 if (current_link_up != netif_carrier_ok(tp->dev)) {
4521 if (current_link_up)
4522 netif_carrier_on(tp->dev);
4523 else {
4524 netif_carrier_off(tp->dev);
4525 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4526 }
4527 tg3_link_report(tp);
4528 }
4529 return err;
4530 }
4531
4532 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4533 {
4534 if (tp->serdes_counter) {
4535 /* Give autoneg time to complete. */
4536 tp->serdes_counter--;
4537 return;
4538 }
4539
4540 if (!netif_carrier_ok(tp->dev) &&
4541 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4542 u32 bmcr;
4543
4544 tg3_readphy(tp, MII_BMCR, &bmcr);
4545 if (bmcr & BMCR_ANENABLE) {
4546 u32 phy1, phy2;
4547
4548 /* Select shadow register 0x1f */
4549 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4550 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4551
4552 /* Select expansion interrupt status register */
4553 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4554 MII_TG3_DSP_EXP1_INT_STAT);
4555 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4556 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4557
4558 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4559 /* We have signal detect and not receiving
4560 * config code words, link is up by parallel
4561 * detection.
4562 */
4563
4564 bmcr &= ~BMCR_ANENABLE;
4565 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4566 tg3_writephy(tp, MII_BMCR, bmcr);
4567 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4568 }
4569 }
4570 } else if (netif_carrier_ok(tp->dev) &&
4571 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4572 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4573 u32 phy2;
4574
4575 /* Select expansion interrupt status register */
4576 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4577 MII_TG3_DSP_EXP1_INT_STAT);
4578 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4579 if (phy2 & 0x20) {
4580 u32 bmcr;
4581
4582 /* Config code words received, turn on autoneg. */
4583 tg3_readphy(tp, MII_BMCR, &bmcr);
4584 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4585
4586 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4587
4588 }
4589 }
4590 }
4591
4592 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4593 {
4594 u32 val;
4595 int err;
4596
4597 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4598 err = tg3_setup_fiber_phy(tp, force_reset);
4599 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4600 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4601 else
4602 err = tg3_setup_copper_phy(tp, force_reset);
4603
4604 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4605 u32 scale;
4606
4607 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4608 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4609 scale = 65;
4610 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4611 scale = 6;
4612 else
4613 scale = 12;
4614
4615 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4616 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4617 tw32(GRC_MISC_CFG, val);
4618 }
4619
4620 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4621 (6 << TX_LENGTHS_IPG_SHIFT);
4622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4623 val |= tr32(MAC_TX_LENGTHS) &
4624 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4625 TX_LENGTHS_CNT_DWN_VAL_MSK);
4626
4627 if (tp->link_config.active_speed == SPEED_1000 &&
4628 tp->link_config.active_duplex == DUPLEX_HALF)
4629 tw32(MAC_TX_LENGTHS, val |
4630 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4631 else
4632 tw32(MAC_TX_LENGTHS, val |
4633 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4634
4635 if (!tg3_flag(tp, 5705_PLUS)) {
4636 if (netif_carrier_ok(tp->dev)) {
4637 tw32(HOSTCC_STAT_COAL_TICKS,
4638 tp->coal.stats_block_coalesce_usecs);
4639 } else {
4640 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4641 }
4642 }
4643
4644 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4645 val = tr32(PCIE_PWR_MGMT_THRESH);
4646 if (!netif_carrier_ok(tp->dev))
4647 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4648 tp->pwrmgmt_thresh;
4649 else
4650 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4651 tw32(PCIE_PWR_MGMT_THRESH, val);
4652 }
4653
4654 return err;
4655 }
4656
4657 static inline int tg3_irq_sync(struct tg3 *tp)
4658 {
4659 return tp->irq_sync;
4660 }
4661
4662 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4663 {
4664 int i;
4665
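	/* Advance dst by the register offset so that each register's
	 * value lands in the caller's dump buffer at its own register
	 * offset, rather than being packed sequentially.
	 */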
4666 dst = (u32 *)((u8 *)dst + off);
4667 for (i = 0; i < len; i += sizeof(u32))
4668 *dst++ = tr32(off + i);
4669 }
4670
4671 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4672 {
4673 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4674 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4675 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4676 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4677 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4678 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4679 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4680 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4681 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4682 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4683 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4684 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4685 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4686 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4687 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4688 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4689 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4690 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4691 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4692
4693 if (tg3_flag(tp, SUPPORT_MSIX))
4694 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4695
4696 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4697 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4698 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4699 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4700 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4701 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4702 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4703 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4704
4705 if (!tg3_flag(tp, 5705_PLUS)) {
4706 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4707 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4708 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4709 }
4710
4711 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4712 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4713 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4714 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4715 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4716
4717 if (tg3_flag(tp, NVRAM))
4718 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4719 }
4720
4721 static void tg3_dump_state(struct tg3 *tp)
4722 {
4723 int i;
4724 u32 *regs;
4725
4726 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4727 if (!regs) {
4728 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4729 return;
4730 }
4731
4732 if (tg3_flag(tp, PCI_EXPRESS)) {
4733 /* Read up to but not including private PCI registers */
4734 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4735 regs[i / sizeof(u32)] = tr32(i);
4736 } else
4737 tg3_dump_legacy_regs(tp, regs);
4738
4739 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4740 if (!regs[i + 0] && !regs[i + 1] &&
4741 !regs[i + 2] && !regs[i + 3])
4742 continue;
4743
4744 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4745 i * 4,
4746 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4747 }
4748
4749 kfree(regs);
4750
4751 for (i = 0; i < tp->irq_cnt; i++) {
4752 struct tg3_napi *tnapi = &tp->napi[i];
4753
4754 /* SW status block */
4755 netdev_err(tp->dev,
4756 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4757 i,
4758 tnapi->hw_status->status,
4759 tnapi->hw_status->status_tag,
4760 tnapi->hw_status->rx_jumbo_consumer,
4761 tnapi->hw_status->rx_consumer,
4762 tnapi->hw_status->rx_mini_consumer,
4763 tnapi->hw_status->idx[0].rx_producer,
4764 tnapi->hw_status->idx[0].tx_consumer);
4765
4766 netdev_err(tp->dev,
4767 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4768 i,
4769 tnapi->last_tag, tnapi->last_irq_tag,
4770 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4771 tnapi->rx_rcb_ptr,
4772 tnapi->prodring.rx_std_prod_idx,
4773 tnapi->prodring.rx_std_cons_idx,
4774 tnapi->prodring.rx_jmb_prod_idx,
4775 tnapi->prodring.rx_jmb_cons_idx);
4776 }
4777 }
4778
4779 /* This is called whenever we suspect that the system chipset is re-
4780 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4781 * is bogus tx completions. We try to recover by setting the
4782 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4783 * in the workqueue.
4784 */
4785 static void tg3_tx_recover(struct tg3 *tp)
4786 {
4787 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4788 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4789
4790 netdev_warn(tp->dev,
4791 "The system may be re-ordering memory-mapped I/O "
4792 "cycles to the network device, attempting to recover. "
4793 "Please report the problem to the driver maintainer "
4794 "and include system chipset information.\n");
4795
4796 spin_lock(&tp->lock);
4797 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4798 spin_unlock(&tp->lock);
4799 }
4800
4801 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4802 {
4803 /* Tell compiler to fetch tx indices from memory. */
4804 barrier();
4805 return tnapi->tx_pending -
4806 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4807 }
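/* Worked example of the arithmetic above (values are illustrative,
 * assuming a 512-entry ring and tx_pending of 511): with tx_prod = 5
 * and tx_cons = 510, (5 - 510) & 511 = 7 descriptors are in flight,
 * so 511 - 7 = 504 slots remain available.  The power-of-two mask
 * handles producer wrap-around without a branch.
 */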
4808
4809 /* Tigon3 never reports partial packet sends. So we do not
4810 * need special logic to handle SKBs that have not had all
4811 * of their frags sent yet, like SunGEM does.
4812 */
4813 static void tg3_tx(struct tg3_napi *tnapi)
4814 {
4815 struct tg3 *tp = tnapi->tp;
4816 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4817 u32 sw_idx = tnapi->tx_cons;
4818 struct netdev_queue *txq;
4819 int index = tnapi - tp->napi;
4820
4821 if (tg3_flag(tp, ENABLE_TSS))
4822 index--;
4823
4824 txq = netdev_get_tx_queue(tp->dev, index);
4825
4826 while (sw_idx != hw_idx) {
4827 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4828 struct sk_buff *skb = ri->skb;
4829 int i, tx_bug = 0;
4830
4831 if (unlikely(skb == NULL)) {
4832 tg3_tx_recover(tp);
4833 return;
4834 }
4835
4836 pci_unmap_single(tp->pdev,
4837 dma_unmap_addr(ri, mapping),
4838 skb_headlen(skb),
4839 PCI_DMA_TODEVICE);
4840
4841 ri->skb = NULL;
4842
4843 sw_idx = NEXT_TX(sw_idx);
4844
4845 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4846 ri = &tnapi->tx_buffers[sw_idx];
4847 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4848 tx_bug = 1;
4849
4850 pci_unmap_page(tp->pdev,
4851 dma_unmap_addr(ri, mapping),
4852 skb_shinfo(skb)->frags[i].size,
4853 PCI_DMA_TODEVICE);
4854 sw_idx = NEXT_TX(sw_idx);
4855 }
4856
4857 dev_kfree_skb(skb);
4858
4859 if (unlikely(tx_bug)) {
4860 tg3_tx_recover(tp);
4861 return;
4862 }
4863 }
4864
4865 tnapi->tx_cons = sw_idx;
4866
4867 /* Need to make the tx_cons update visible to tg3_start_xmit()
4868 * before checking for netif_queue_stopped(). Without the
4869 * memory barrier, there is a small possibility that tg3_start_xmit()
4870 * will miss it and cause the queue to be stopped forever.
4871 */
4872 smp_mb();
4873
4874 if (unlikely(netif_tx_queue_stopped(txq) &&
4875 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4876 __netif_tx_lock(txq, smp_processor_id());
4877 if (netif_tx_queue_stopped(txq) &&
4878 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4879 netif_tx_wake_queue(txq);
4880 __netif_tx_unlock(txq);
4881 }
4882 }
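/* An informal sketch of the lost-wakeup race the smp_mb() in tg3_tx()
 * above guards against (not an exhaustive ordering argument):
 *
 *   tg3_start_xmit()                  tg3_tx()
 *   ----------------                  --------
 *   sees ring full, stops queue       updates tnapi->tx_cons
 *   re-checks tg3_tx_avail()          checks netif_tx_queue_stopped()
 *
 * Without the barrier, tg3_tx() could observe the queue as not yet
 * stopped while tg3_start_xmit() still sees the stale tx_cons:
 * neither side wakes the queue and it stays stopped forever.
 */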
4883
4884 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4885 {
4886 if (!ri->skb)
4887 return;
4888
4889 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4890 map_sz, PCI_DMA_FROMDEVICE);
4891 dev_kfree_skb_any(ri->skb);
4892 ri->skb = NULL;
4893 }
4894
4895 /* Returns size of skb allocated or < 0 on error.
4896 *
4897 * We only need to fill in the address because the other members
4898 * of the RX descriptor are invariant, see tg3_init_rings.
4899 *
4900  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4901 * posting buffers we only dirty the first cache line of the RX
4902 * descriptor (containing the address). Whereas for the RX status
4903 * buffers the cpu only reads the last cacheline of the RX descriptor
4904 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4905 */
4906 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4907 u32 opaque_key, u32 dest_idx_unmasked)
4908 {
4909 struct tg3_rx_buffer_desc *desc;
4910 struct ring_info *map;
4911 struct sk_buff *skb;
4912 dma_addr_t mapping;
4913 int skb_size, dest_idx;
4914
4915 switch (opaque_key) {
4916 case RXD_OPAQUE_RING_STD:
4917 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4918 desc = &tpr->rx_std[dest_idx];
4919 map = &tpr->rx_std_buffers[dest_idx];
4920 skb_size = tp->rx_pkt_map_sz;
4921 break;
4922
4923 case RXD_OPAQUE_RING_JUMBO:
4924 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4925 desc = &tpr->rx_jmb[dest_idx].std;
4926 map = &tpr->rx_jmb_buffers[dest_idx];
4927 skb_size = TG3_RX_JMB_MAP_SZ;
4928 break;
4929
4930 default:
4931 return -EINVAL;
4932 }
4933
4934 /* Do not overwrite any of the map or rp information
4935 * until we are sure we can commit to a new buffer.
4936 *
4937 * Callers depend upon this behavior and assume that
4938 * we leave everything unchanged if we fail.
4939 */
4940 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4941 if (skb == NULL)
4942 return -ENOMEM;
4943
4944 skb_reserve(skb, tp->rx_offset);
4945
4946 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4947 PCI_DMA_FROMDEVICE);
4948 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4949 dev_kfree_skb(skb);
4950 return -EIO;
4951 }
4952
4953 map->skb = skb;
4954 dma_unmap_addr_set(map, mapping, mapping);
4955
4956 desc->addr_hi = ((u64)mapping >> 32);
4957 desc->addr_lo = ((u64)mapping & 0xffffffff);
4958
4959 return skb_size;
4960 }
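/* Example of the 64-bit DMA address split above: a mapping of
 * 0x00000001fffff000 is posted as addr_hi = 0x00000001 and
 * addr_lo = 0xfffff000; the hardware reassembles the two halves.
 */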
4961
4962 /* We only need to move over in the address because the other
4963 * members of the RX descriptor are invariant. See notes above
4964 * tg3_alloc_rx_skb for full details.
4965 */
4966 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4967 struct tg3_rx_prodring_set *dpr,
4968 u32 opaque_key, int src_idx,
4969 u32 dest_idx_unmasked)
4970 {
4971 struct tg3 *tp = tnapi->tp;
4972 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4973 struct ring_info *src_map, *dest_map;
4974 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4975 int dest_idx;
4976
4977 switch (opaque_key) {
4978 case RXD_OPAQUE_RING_STD:
4979 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4980 dest_desc = &dpr->rx_std[dest_idx];
4981 dest_map = &dpr->rx_std_buffers[dest_idx];
4982 src_desc = &spr->rx_std[src_idx];
4983 src_map = &spr->rx_std_buffers[src_idx];
4984 break;
4985
4986 case RXD_OPAQUE_RING_JUMBO:
4987 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4988 dest_desc = &dpr->rx_jmb[dest_idx].std;
4989 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4990 src_desc = &spr->rx_jmb[src_idx].std;
4991 src_map = &spr->rx_jmb_buffers[src_idx];
4992 break;
4993
4994 default:
4995 return;
4996 }
4997
4998 dest_map->skb = src_map->skb;
4999 dma_unmap_addr_set(dest_map, mapping,
5000 dma_unmap_addr(src_map, mapping));
5001 dest_desc->addr_hi = src_desc->addr_hi;
5002 dest_desc->addr_lo = src_desc->addr_lo;
5003
5004 /* Ensure that the update to the skb happens after the physical
5005 * addresses have been transferred to the new BD location.
5006 */
5007 smp_wmb();
5008
5009 src_map->skb = NULL;
5010 }
5011
5012 /* The RX ring scheme is composed of multiple rings which post fresh
5013 * buffers to the chip, and one special ring the chip uses to report
5014 * status back to the host.
5015 *
5016 * The special ring reports the status of received packets to the
5017 * host. The chip does not write into the original descriptor the
5018 * RX buffer was obtained from. The chip simply takes the original
5019 * descriptor as provided by the host, updates the status and length
5020 * field, then writes this into the next status ring entry.
5021 *
5022 * Each ring the host uses to post buffers to the chip is described
5023  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5024 * it is first placed into the on-chip ram. When the packet's length
5025 * is known, it walks down the TG3_BDINFO entries to select the ring.
5026  * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
5027  * whose MAXLEN covers the new packet's length is chosen.
5028 *
5029 * The "separate ring for rx status" scheme may sound queer, but it makes
5030 * sense from a cache coherency perspective. If only the host writes
5031 * to the buffer post rings, and only the chip writes to the rx status
5032 * rings, then cache lines never move beyond shared-modified state.
5033 * If both the host and chip were to write into the same ring, cache line
5034 * eviction could occur since both entities want it in an exclusive state.
5035 */
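/* Illustration of the TG3_BDINFO selection described above (MAXLEN
 * values are illustrative, not the actual register programming):
 * with a standard ring MAXLEN of 1536 and a jumbo ring MAXLEN of
 * 9046, a 300-byte frame is satisfied from the standard ring, while
 * a 4000-byte frame falls through to the jumbo ring because 4000
 * exceeds the first ring's MAXLEN.
 */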
5036 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5037 {
5038 struct tg3 *tp = tnapi->tp;
5039 u32 work_mask, rx_std_posted = 0;
5040 u32 std_prod_idx, jmb_prod_idx;
5041 u32 sw_idx = tnapi->rx_rcb_ptr;
5042 u16 hw_idx;
5043 int received;
5044 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5045
5046 hw_idx = *(tnapi->rx_rcb_prod_idx);
5047 /*
5048 * We need to order the read of hw_idx and the read of
5049 * the opaque cookie.
5050 */
5051 rmb();
5052 work_mask = 0;
5053 received = 0;
5054 std_prod_idx = tpr->rx_std_prod_idx;
5055 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5056 while (sw_idx != hw_idx && budget > 0) {
5057 struct ring_info *ri;
5058 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5059 unsigned int len;
5060 struct sk_buff *skb;
5061 dma_addr_t dma_addr;
5062 u32 opaque_key, desc_idx, *post_ptr;
5063
5064 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5065 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5066 if (opaque_key == RXD_OPAQUE_RING_STD) {
5067 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5068 dma_addr = dma_unmap_addr(ri, mapping);
5069 skb = ri->skb;
5070 post_ptr = &std_prod_idx;
5071 rx_std_posted++;
5072 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5073 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5074 dma_addr = dma_unmap_addr(ri, mapping);
5075 skb = ri->skb;
5076 post_ptr = &jmb_prod_idx;
5077 } else
5078 goto next_pkt_nopost;
5079
5080 work_mask |= opaque_key;
5081
5082 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5083 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5084 drop_it:
5085 tg3_recycle_rx(tnapi, tpr, opaque_key,
5086 desc_idx, *post_ptr);
5087 drop_it_no_recycle:
5088 			/* Other statistics are tracked by the card. */
5089 tp->rx_dropped++;
5090 goto next_pkt;
5091 }
5092
5093 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5094 ETH_FCS_LEN;
5095
5096 if (len > TG3_RX_COPY_THRESH(tp)) {
5097 int skb_size;
5098
5099 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5100 *post_ptr);
5101 if (skb_size < 0)
5102 goto drop_it;
5103
5104 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5105 PCI_DMA_FROMDEVICE);
5106
5107 /* Ensure that the update to the skb happens
5108 * after the usage of the old DMA mapping.
5109 */
5110 smp_wmb();
5111
5112 ri->skb = NULL;
5113
5114 skb_put(skb, len);
5115 } else {
5116 struct sk_buff *copy_skb;
5117
5118 tg3_recycle_rx(tnapi, tpr, opaque_key,
5119 desc_idx, *post_ptr);
5120
5121 copy_skb = netdev_alloc_skb(tp->dev, len +
5122 TG3_RAW_IP_ALIGN);
5123 if (copy_skb == NULL)
5124 goto drop_it_no_recycle;
5125
5126 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5127 skb_put(copy_skb, len);
5128 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5129 skb_copy_from_linear_data(skb, copy_skb->data, len);
5130 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5131
5132 /* We'll reuse the original ring buffer. */
5133 skb = copy_skb;
5134 }
5135
5136 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5137 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5138 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5139 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5140 skb->ip_summed = CHECKSUM_UNNECESSARY;
5141 else
5142 skb_checksum_none_assert(skb);
5143
5144 skb->protocol = eth_type_trans(skb, tp->dev);
5145
5146 if (len > (tp->dev->mtu + ETH_HLEN) &&
5147 skb->protocol != htons(ETH_P_8021Q)) {
5148 dev_kfree_skb(skb);
5149 goto drop_it_no_recycle;
5150 }
5151
5152 if (desc->type_flags & RXD_FLAG_VLAN &&
5153 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5154 __vlan_hwaccel_put_tag(skb,
5155 desc->err_vlan & RXD_VLAN_MASK);
5156
5157 napi_gro_receive(&tnapi->napi, skb);
5158
5159 received++;
5160 budget--;
5161
5162 next_pkt:
5163 (*post_ptr)++;
5164
5165 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5166 tpr->rx_std_prod_idx = std_prod_idx &
5167 tp->rx_std_ring_mask;
5168 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5169 tpr->rx_std_prod_idx);
5170 work_mask &= ~RXD_OPAQUE_RING_STD;
5171 rx_std_posted = 0;
5172 }
5173 next_pkt_nopost:
5174 sw_idx++;
5175 sw_idx &= tp->rx_ret_ring_mask;
5176
5177 /* Refresh hw_idx to see if there is new work */
5178 if (sw_idx == hw_idx) {
5179 hw_idx = *(tnapi->rx_rcb_prod_idx);
5180 rmb();
5181 }
5182 }
5183
5184 /* ACK the status ring. */
5185 tnapi->rx_rcb_ptr = sw_idx;
5186 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5187
5188 /* Refill RX ring(s). */
5189 if (!tg3_flag(tp, ENABLE_RSS)) {
5190 if (work_mask & RXD_OPAQUE_RING_STD) {
5191 tpr->rx_std_prod_idx = std_prod_idx &
5192 tp->rx_std_ring_mask;
5193 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5194 tpr->rx_std_prod_idx);
5195 }
5196 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5197 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5198 tp->rx_jmb_ring_mask;
5199 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5200 tpr->rx_jmb_prod_idx);
5201 }
5202 mmiowb();
5203 } else if (work_mask) {
5204 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5205 * updated before the producer indices can be updated.
5206 */
5207 smp_wmb();
5208
5209 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5210 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5211
5212 if (tnapi != &tp->napi[1])
5213 napi_schedule(&tp->napi[1].napi);
5214 }
5215
5216 return received;
5217 }
5218
5219 static void tg3_poll_link(struct tg3 *tp)
5220 {
5221 /* handle link change and other phy events */
5222 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5223 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5224
5225 if (sblk->status & SD_STATUS_LINK_CHG) {
5226 sblk->status = SD_STATUS_UPDATED |
5227 (sblk->status & ~SD_STATUS_LINK_CHG);
5228 spin_lock(&tp->lock);
5229 if (tg3_flag(tp, USE_PHYLIB)) {
5230 tw32_f(MAC_STATUS,
5231 (MAC_STATUS_SYNC_CHANGED |
5232 MAC_STATUS_CFG_CHANGED |
5233 MAC_STATUS_MI_COMPLETION |
5234 MAC_STATUS_LNKSTATE_CHANGED));
5235 udelay(40);
5236 } else
5237 tg3_setup_phy(tp, 0);
5238 spin_unlock(&tp->lock);
5239 }
5240 }
5241 }
5242
5243 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5244 struct tg3_rx_prodring_set *dpr,
5245 struct tg3_rx_prodring_set *spr)
5246 {
5247 u32 si, di, cpycnt, src_prod_idx;
5248 int i, err = 0;
5249
5250 while (1) {
5251 src_prod_idx = spr->rx_std_prod_idx;
5252
5253 /* Make sure updates to the rx_std_buffers[] entries and the
5254 * standard producer index are seen in the correct order.
5255 */
5256 smp_rmb();
5257
5258 if (spr->rx_std_cons_idx == src_prod_idx)
5259 break;
5260
5261 if (spr->rx_std_cons_idx < src_prod_idx)
5262 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5263 else
5264 cpycnt = tp->rx_std_ring_mask + 1 -
5265 spr->rx_std_cons_idx;
5266
5267 cpycnt = min(cpycnt,
5268 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5269
5270 si = spr->rx_std_cons_idx;
5271 di = dpr->rx_std_prod_idx;
5272
5273 for (i = di; i < di + cpycnt; i++) {
5274 if (dpr->rx_std_buffers[i].skb) {
5275 cpycnt = i - di;
5276 err = -ENOSPC;
5277 break;
5278 }
5279 }
5280
5281 if (!cpycnt)
5282 break;
5283
5284 /* Ensure that updates to the rx_std_buffers ring and the
5285 * shadowed hardware producer ring from tg3_recycle_skb() are
5286 * ordered correctly WRT the skb check above.
5287 */
5288 smp_rmb();
5289
5290 memcpy(&dpr->rx_std_buffers[di],
5291 &spr->rx_std_buffers[si],
5292 cpycnt * sizeof(struct ring_info));
5293
5294 for (i = 0; i < cpycnt; i++, di++, si++) {
5295 struct tg3_rx_buffer_desc *sbd, *dbd;
5296 sbd = &spr->rx_std[si];
5297 dbd = &dpr->rx_std[di];
5298 dbd->addr_hi = sbd->addr_hi;
5299 dbd->addr_lo = sbd->addr_lo;
5300 }
5301
5302 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5303 tp->rx_std_ring_mask;
5304 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5305 tp->rx_std_ring_mask;
5306 }
5307
5308 while (1) {
5309 src_prod_idx = spr->rx_jmb_prod_idx;
5310
5311 /* Make sure updates to the rx_jmb_buffers[] entries and
5312 * the jumbo producer index are seen in the correct order.
5313 */
5314 smp_rmb();
5315
5316 if (spr->rx_jmb_cons_idx == src_prod_idx)
5317 break;
5318
5319 if (spr->rx_jmb_cons_idx < src_prod_idx)
5320 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5321 else
5322 cpycnt = tp->rx_jmb_ring_mask + 1 -
5323 spr->rx_jmb_cons_idx;
5324
5325 cpycnt = min(cpycnt,
5326 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5327
5328 si = spr->rx_jmb_cons_idx;
5329 di = dpr->rx_jmb_prod_idx;
5330
5331 for (i = di; i < di + cpycnt; i++) {
5332 if (dpr->rx_jmb_buffers[i].skb) {
5333 cpycnt = i - di;
5334 err = -ENOSPC;
5335 break;
5336 }
5337 }
5338
5339 if (!cpycnt)
5340 break;
5341
5342 /* Ensure that updates to the rx_jmb_buffers ring and the
5343 * shadowed hardware producer ring from tg3_recycle_skb() are
5344 * ordered correctly WRT the skb check above.
5345 */
5346 smp_rmb();
5347
5348 memcpy(&dpr->rx_jmb_buffers[di],
5349 &spr->rx_jmb_buffers[si],
5350 cpycnt * sizeof(struct ring_info));
5351
5352 for (i = 0; i < cpycnt; i++, di++, si++) {
5353 struct tg3_rx_buffer_desc *sbd, *dbd;
5354 sbd = &spr->rx_jmb[si].std;
5355 dbd = &dpr->rx_jmb[di].std;
5356 dbd->addr_hi = sbd->addr_hi;
5357 dbd->addr_lo = sbd->addr_lo;
5358 }
5359
5360 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5361 tp->rx_jmb_ring_mask;
5362 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5363 tp->rx_jmb_ring_mask;
5364 }
5365
5366 return err;
5367 }
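/* Worked example of the cpycnt computation above (assuming a
 * 512-entry ring, mask 511, with room in the destination ring):
 * with spr->rx_std_cons_idx = 500 and src_prod_idx = 10, the
 * consumer index is numerically ahead of the producer, so the first
 * pass copies 512 - 500 = 12 entries up to the end of the ring; the
 * indices then wrap and a second pass copies the remaining 10
 * entries starting from index 0.
 */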
5368
5369 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5370 {
5371 struct tg3 *tp = tnapi->tp;
5372
5373 /* run TX completion thread */
5374 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5375 tg3_tx(tnapi);
5376 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5377 return work_done;
5378 }
5379
5380 /* run RX thread, within the bounds set by NAPI.
5381 * All RX "locking" is done by ensuring outside
5382 * code synchronizes with tg3->napi.poll()
5383 */
5384 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5385 work_done += tg3_rx(tnapi, budget - work_done);
5386
5387 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5388 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5389 int i, err = 0;
5390 u32 std_prod_idx = dpr->rx_std_prod_idx;
5391 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5392
5393 for (i = 1; i < tp->irq_cnt; i++)
5394 err |= tg3_rx_prodring_xfer(tp, dpr,
5395 &tp->napi[i].prodring);
5396
5397 wmb();
5398
5399 if (std_prod_idx != dpr->rx_std_prod_idx)
5400 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5401 dpr->rx_std_prod_idx);
5402
5403 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5404 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5405 dpr->rx_jmb_prod_idx);
5406
5407 mmiowb();
5408
5409 if (err)
5410 tw32_f(HOSTCC_MODE, tp->coal_now);
5411 }
5412
5413 return work_done;
5414 }
5415
5416 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5417 {
5418 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5419 struct tg3 *tp = tnapi->tp;
5420 int work_done = 0;
5421 struct tg3_hw_status *sblk = tnapi->hw_status;
5422
5423 while (1) {
5424 work_done = tg3_poll_work(tnapi, work_done, budget);
5425
5426 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5427 goto tx_recovery;
5428
5429 if (unlikely(work_done >= budget))
5430 break;
5431
5432 /* tp->last_tag is used in tg3_int_reenable() below
5433 * to tell the hw how much work has been processed,
5434 * so we must read it before checking for more work.
5435 */
5436 tnapi->last_tag = sblk->status_tag;
5437 tnapi->last_irq_tag = tnapi->last_tag;
5438 rmb();
5439
5440 /* check for RX/TX work to do */
5441 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5442 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5443 napi_complete(napi);
5444 /* Reenable interrupts. */
5445 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5446 mmiowb();
5447 break;
5448 }
5449 }
5450
5451 return work_done;
5452
5453 tx_recovery:
5454 /* work_done is guaranteed to be less than budget. */
5455 napi_complete(napi);
5456 schedule_work(&tp->reset_task);
5457 return work_done;
5458 }
5459
5460 static void tg3_process_error(struct tg3 *tp)
5461 {
5462 u32 val;
5463 bool real_error = false;
5464
5465 if (tg3_flag(tp, ERROR_PROCESSED))
5466 return;
5467
5468 /* Check Flow Attention register */
5469 val = tr32(HOSTCC_FLOW_ATTN);
5470 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5471 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5472 real_error = true;
5473 }
5474
5475 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5476 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5477 real_error = true;
5478 }
5479
5480 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5481 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5482 real_error = true;
5483 }
5484
5485 if (!real_error)
5486 return;
5487
5488 tg3_dump_state(tp);
5489
5490 tg3_flag_set(tp, ERROR_PROCESSED);
5491 schedule_work(&tp->reset_task);
5492 }
5493
5494 static int tg3_poll(struct napi_struct *napi, int budget)
5495 {
5496 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5497 struct tg3 *tp = tnapi->tp;
5498 int work_done = 0;
5499 struct tg3_hw_status *sblk = tnapi->hw_status;
5500
5501 while (1) {
5502 if (sblk->status & SD_STATUS_ERROR)
5503 tg3_process_error(tp);
5504
5505 tg3_poll_link(tp);
5506
5507 work_done = tg3_poll_work(tnapi, work_done, budget);
5508
5509 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5510 goto tx_recovery;
5511
5512 if (unlikely(work_done >= budget))
5513 break;
5514
5515 if (tg3_flag(tp, TAGGED_STATUS)) {
5516 /* tp->last_tag is used in tg3_int_reenable() below
5517 * to tell the hw how much work has been processed,
5518 * so we must read it before checking for more work.
5519 */
5520 tnapi->last_tag = sblk->status_tag;
5521 tnapi->last_irq_tag = tnapi->last_tag;
5522 rmb();
5523 } else
5524 sblk->status &= ~SD_STATUS_UPDATED;
5525
5526 if (likely(!tg3_has_work(tnapi))) {
5527 napi_complete(napi);
5528 tg3_int_reenable(tnapi);
5529 break;
5530 }
5531 }
5532
5533 return work_done;
5534
5535 tx_recovery:
5536 /* work_done is guaranteed to be less than budget. */
5537 napi_complete(napi);
5538 schedule_work(&tp->reset_task);
5539 return work_done;
5540 }
5541
5542 static void tg3_napi_disable(struct tg3 *tp)
5543 {
5544 int i;
5545
5546 for (i = tp->irq_cnt - 1; i >= 0; i--)
5547 napi_disable(&tp->napi[i].napi);
5548 }
5549
5550 static void tg3_napi_enable(struct tg3 *tp)
5551 {
5552 int i;
5553
5554 for (i = 0; i < tp->irq_cnt; i++)
5555 napi_enable(&tp->napi[i].napi);
5556 }
5557
5558 static void tg3_napi_init(struct tg3 *tp)
5559 {
5560 int i;
5561
5562 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5563 for (i = 1; i < tp->irq_cnt; i++)
5564 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5565 }
5566
5567 static void tg3_napi_fini(struct tg3 *tp)
5568 {
5569 int i;
5570
5571 for (i = 0; i < tp->irq_cnt; i++)
5572 netif_napi_del(&tp->napi[i].napi);
5573 }
5574
5575 static inline void tg3_netif_stop(struct tg3 *tp)
5576 {
5577 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5578 tg3_napi_disable(tp);
5579 netif_tx_disable(tp->dev);
5580 }
5581
5582 static inline void tg3_netif_start(struct tg3 *tp)
5583 {
5584 /* NOTE: unconditional netif_tx_wake_all_queues is only
5585 * appropriate so long as all callers are assured to
5586 * have free tx slots (such as after tg3_init_hw)
5587 */
5588 netif_tx_wake_all_queues(tp->dev);
5589
5590 tg3_napi_enable(tp);
5591 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5592 tg3_enable_ints(tp);
5593 }
5594
5595 static void tg3_irq_quiesce(struct tg3 *tp)
5596 {
5597 int i;
5598
5599 BUG_ON(tp->irq_sync);
5600
5601 tp->irq_sync = 1;
5602 smp_mb();
5603
5604 for (i = 0; i < tp->irq_cnt; i++)
5605 synchronize_irq(tp->napi[i].irq_vec);
5606 }
5607
5608 /* Fully shut down all tg3 driver activity elsewhere in the system.
5609  * If irq_sync is non-zero, we must also synchronize with the IRQ
5610  * handlers.  Most of the time this is only necessary when shutting
5611  * down the device.
5612  */
5612 */
5613 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5614 {
5615 spin_lock_bh(&tp->lock);
5616 if (irq_sync)
5617 tg3_irq_quiesce(tp);
5618 }
5619
5620 static inline void tg3_full_unlock(struct tg3 *tp)
5621 {
5622 spin_unlock_bh(&tp->lock);
5623 }
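/* Typical usage of the pair above (see tg3_reset_task() below for a
 * real caller): pass a non-zero irq_sync when the hardware is about
 * to be reconfigured, so no IRQ handler can observe the intermediate
 * state:
 *
 *	tg3_full_lock(tp, 1);
 *	... halt and reprogram the chip ...
 *	tg3_full_unlock(tp);
 */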
5624
5625 /* One-shot MSI handler - Chip automatically disables interrupt
5626 * after sending MSI so driver doesn't have to do it.
5627 */
5628 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5629 {
5630 struct tg3_napi *tnapi = dev_id;
5631 struct tg3 *tp = tnapi->tp;
5632
5633 prefetch(tnapi->hw_status);
5634 if (tnapi->rx_rcb)
5635 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5636
5637 if (likely(!tg3_irq_sync(tp)))
5638 napi_schedule(&tnapi->napi);
5639
5640 return IRQ_HANDLED;
5641 }
5642
5643 /* MSI ISR - No need to check for interrupt sharing and no need to
5644 * flush status block and interrupt mailbox. PCI ordering rules
5645 * guarantee that MSI will arrive after the status block.
5646 */
5647 static irqreturn_t tg3_msi(int irq, void *dev_id)
5648 {
5649 struct tg3_napi *tnapi = dev_id;
5650 struct tg3 *tp = tnapi->tp;
5651
5652 prefetch(tnapi->hw_status);
5653 if (tnapi->rx_rcb)
5654 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5655 /*
5656 * Writing any value to intr-mbox-0 clears PCI INTA# and
5657 * chip-internal interrupt pending events.
5658 	 * Writing non-zero to intr-mbox-0 additionally tells the
5659 * NIC to stop sending us irqs, engaging "in-intr-handler"
5660 * event coalescing.
5661 */
5662 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5663 if (likely(!tg3_irq_sync(tp)))
5664 napi_schedule(&tnapi->napi);
5665
5666 return IRQ_RETVAL(1);
5667 }
5668
5669 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5670 {
5671 struct tg3_napi *tnapi = dev_id;
5672 struct tg3 *tp = tnapi->tp;
5673 struct tg3_hw_status *sblk = tnapi->hw_status;
5674 unsigned int handled = 1;
5675
5676 /* In INTx mode, it is possible for the interrupt to arrive at
5677 * the CPU before the status block posted prior to the interrupt.
5678 * Reading the PCI State register will confirm whether the
5679 * interrupt is ours and will flush the status block.
5680 */
5681 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5682 if (tg3_flag(tp, CHIP_RESETTING) ||
5683 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5684 handled = 0;
5685 goto out;
5686 }
5687 }
5688
5689 /*
5690 * Writing any value to intr-mbox-0 clears PCI INTA# and
5691 * chip-internal interrupt pending events.
5692 	 * Writing non-zero to intr-mbox-0 additionally tells the
5693 * NIC to stop sending us irqs, engaging "in-intr-handler"
5694 * event coalescing.
5695 *
5696 * Flush the mailbox to de-assert the IRQ immediately to prevent
5697 * spurious interrupts. The flush impacts performance but
5698 * excessive spurious interrupts can be worse in some cases.
5699 */
5700 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5701 if (tg3_irq_sync(tp))
5702 goto out;
5703 sblk->status &= ~SD_STATUS_UPDATED;
5704 if (likely(tg3_has_work(tnapi))) {
5705 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5706 napi_schedule(&tnapi->napi);
5707 } else {
5708 /* No work, shared interrupt perhaps? re-enable
5709 * interrupts, and flush that PCI write
5710 */
5711 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5712 0x00000000);
5713 }
5714 out:
5715 return IRQ_RETVAL(handled);
5716 }
5717
5718 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5719 {
5720 struct tg3_napi *tnapi = dev_id;
5721 struct tg3 *tp = tnapi->tp;
5722 struct tg3_hw_status *sblk = tnapi->hw_status;
5723 unsigned int handled = 1;
5724
5725 /* In INTx mode, it is possible for the interrupt to arrive at
5726 * the CPU before the status block posted prior to the interrupt.
5727 * Reading the PCI State register will confirm whether the
5728 * interrupt is ours and will flush the status block.
5729 */
5730 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5731 if (tg3_flag(tp, CHIP_RESETTING) ||
5732 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5733 handled = 0;
5734 goto out;
5735 }
5736 }
5737
5738 /*
5739 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
5740 	 * chip-internal interrupt pending events.
5741 	 * Writing non-zero to intr-mbox-0 additionally tells the
5742 * NIC to stop sending us irqs, engaging "in-intr-handler"
5743 * event coalescing.
5744 *
5745 * Flush the mailbox to de-assert the IRQ immediately to prevent
5746 * spurious interrupts. The flush impacts performance but
5747 * excessive spurious interrupts can be worse in some cases.
5748 */
5749 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5750
5751 /*
5752 * In a shared interrupt configuration, sometimes other devices'
5753 * interrupts will scream. We record the current status tag here
5754 * so that the above check can report that the screaming interrupts
5755 * are unhandled. Eventually they will be silenced.
5756 */
5757 tnapi->last_irq_tag = sblk->status_tag;
5758
5759 if (tg3_irq_sync(tp))
5760 goto out;
5761
5762 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5763
5764 napi_schedule(&tnapi->napi);
5765
5766 out:
5767 return IRQ_RETVAL(handled);
5768 }
5769
5770 /* ISR for interrupt test */
5771 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5772 {
5773 struct tg3_napi *tnapi = dev_id;
5774 struct tg3 *tp = tnapi->tp;
5775 struct tg3_hw_status *sblk = tnapi->hw_status;
5776
5777 if ((sblk->status & SD_STATUS_UPDATED) ||
5778 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5779 tg3_disable_ints(tp);
5780 return IRQ_RETVAL(1);
5781 }
5782 return IRQ_RETVAL(0);
5783 }
5784
5785 static int tg3_init_hw(struct tg3 *, int);
5786 static int tg3_halt(struct tg3 *, int, int);
5787
5788 /* Restart hardware after configuration changes, self-test, etc.
5789 * Invoked with tp->lock held.
5790 */
5791 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5792 __releases(tp->lock)
5793 __acquires(tp->lock)
5794 {
5795 int err;
5796
5797 err = tg3_init_hw(tp, reset_phy);
5798 if (err) {
5799 netdev_err(tp->dev,
5800 "Failed to re-initialize device, aborting\n");
5801 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5802 tg3_full_unlock(tp);
5803 del_timer_sync(&tp->timer);
5804 tp->irq_sync = 0;
5805 tg3_napi_enable(tp);
5806 dev_close(tp->dev);
5807 tg3_full_lock(tp, 0);
5808 }
5809 return err;
5810 }
5811
5812 #ifdef CONFIG_NET_POLL_CONTROLLER
5813 static void tg3_poll_controller(struct net_device *dev)
5814 {
5815 int i;
5816 struct tg3 *tp = netdev_priv(dev);
5817
5818 for (i = 0; i < tp->irq_cnt; i++)
5819 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5820 }
5821 #endif
5822
5823 static void tg3_reset_task(struct work_struct *work)
5824 {
5825 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5826 int err;
5827 unsigned int restart_timer;
5828
5829 tg3_full_lock(tp, 0);
5830
5831 if (!netif_running(tp->dev)) {
5832 tg3_full_unlock(tp);
5833 return;
5834 }
5835
5836 tg3_full_unlock(tp);
5837
5838 tg3_phy_stop(tp);
5839
5840 tg3_netif_stop(tp);
5841
5842 tg3_full_lock(tp, 1);
5843
5844 restart_timer = tg3_flag(tp, RESTART_TIMER);
5845 tg3_flag_clear(tp, RESTART_TIMER);
5846
5847 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5848 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5849 tp->write32_rx_mbox = tg3_write_flush_reg32;
5850 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5851 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5852 }
5853
5854 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5855 err = tg3_init_hw(tp, 1);
5856 if (err)
5857 goto out;
5858
5859 tg3_netif_start(tp);
5860
5861 if (restart_timer)
5862 mod_timer(&tp->timer, jiffies + 1);
5863
5864 out:
5865 tg3_full_unlock(tp);
5866
5867 if (!err)
5868 tg3_phy_start(tp);
5869 }
5870
5871 static void tg3_tx_timeout(struct net_device *dev)
5872 {
5873 struct tg3 *tp = netdev_priv(dev);
5874
5875 if (netif_msg_tx_err(tp)) {
5876 netdev_err(dev, "transmit timed out, resetting\n");
5877 tg3_dump_state(tp);
5878 }
5879
5880 schedule_work(&tp->reset_task);
5881 }
5882
5883 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5884 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5885 {
5886 u32 base = (u32) mapping & 0xffffffff;
5887
5888 return (base > 0xffffdcc0) && (base + len + 8 < base);
5889 }
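/* Editorial note with a worked example (not part of the original source):
 * in tg3_4g_overflow_test() above, the second clause does the real work,
 * detecting 32-bit wraparound of base + len + 8. The first clause is a
 * cheap pre-filter: 0x100000000 - 0xffffdcc0 = 0x2340 (9024), so a buffer
 * whose low 32 bits start at or below 0xffffdcc0 cannot wrap for any
 * length up to roughly a jumbo frame and is rejected without the addition.
 * For a hypothetical mapping of 0xfffff000 with len = 0x2000, base + len
 * + 8 truncates to 0x1008 < base, so the buffer straddles the 4GB
 * boundary and the caller falls back to tigon3_dma_hwbug_workaround().
 */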
5890
5891 /* Test for DMA addresses > 40-bit */
5892 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5893 int len)
5894 {
5895 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5896 if (tg3_flag(tp, 40BIT_DMA_BUG))
5897 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5898 return 0;
5899 #else
5900 return 0;
5901 #endif
5902 }
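/* Editorial note (not part of the original source): DMA_BIT_MASK(40) is
 * 0xffffffffff, so the test above fires only when a buffer extends past
 * the 1TB line, e.g. a hypothetical mapping of 0xfffffff000 with
 * len = 0x2000 ends at 0x10000001000. The check is compiled in only for
 * 64-bit HIGHMEM kernels, the configuration where such bus addresses can
 * realistically be handed to the chip.
 */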
5903
5904 static inline void tg3_tx_set_bd(struct tg3_napi *tnapi, u32 entry,
5905 dma_addr_t mapping, u32 len, u32 flags,
5906 u32 mss, u32 vlan)
5907 {
5908 struct tg3_tx_buffer_desc *txbd = &tnapi->tx_ring[entry];
5909
5910 txbd->addr_hi = ((u64) mapping >> 32);
5911 txbd->addr_lo = ((u64) mapping & 0xffffffff);
5912 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5913 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
5914 }
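/* Editorial example (not part of the original source): a tg3 send BD is
 * four 32-bit words. For a hypothetical mapping of 0x0000000122334455
 * with len = 1514 (0x5ea) and flags = TXD_FLAG_END, the helper above
 * writes addr_hi = 0x00000001, addr_lo = 0x22334455, and len_flags =
 * 0x05ea0000 | TXD_FLAG_END -- the length lands in the upper 16 bits and
 * the flags are masked into the lower 16.
 */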
5915
5916 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5917 struct sk_buff *skb, int last)
5918 {
5919 int i;
5920 u32 entry = tnapi->tx_prod;
5921 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
5922
5923 pci_unmap_single(tnapi->tp->pdev,
5924 dma_unmap_addr(txb, mapping),
5925 skb_headlen(skb),
5926 PCI_DMA_TODEVICE);
5927 for (i = 0; i < last; i++) {
5928 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5929
5930 entry = NEXT_TX(entry);
5931 txb = &tnapi->tx_buffers[entry];
5932
5933 pci_unmap_page(tnapi->tp->pdev,
5934 dma_unmap_addr(txb, mapping),
5935 frag->size, PCI_DMA_TODEVICE);
5936 }
5937 }
5938
5939 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5940 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5941 struct sk_buff *skb,
5942 u32 base_flags, u32 mss, u32 vlan)
5943 {
5944 struct tg3 *tp = tnapi->tp;
5945 struct sk_buff *new_skb;
5946 dma_addr_t new_addr = 0;
5947 u32 entry = tnapi->tx_prod;
5948 int ret = 0;
5949
5950 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
5951 new_skb = skb_copy(skb, GFP_ATOMIC);
5952 } else {
5953 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5954
5955 new_skb = skb_copy_expand(skb,
5956 skb_headroom(skb) + more_headroom,
5957 skb_tailroom(skb), GFP_ATOMIC);
5958 }
5959
5960 if (!new_skb) {
5961 ret = -1;
5962 } else {
5963 /* New SKB is guaranteed to be linear. */
5964 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5965 PCI_DMA_TODEVICE);
5966 /* Make sure the mapping succeeded */
5967 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5968 ret = -1;
5969 dev_kfree_skb(new_skb);
5970
5971 /* Make sure new skb does not cross any 4G boundaries.
5972 * Drop the packet if it does.
5973 */
5974 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5975 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5976 PCI_DMA_TODEVICE);
5977 ret = -1;
5978 dev_kfree_skb(new_skb);
5979 } else {
5980 base_flags |= TXD_FLAG_END;
5981
5982 tnapi->tx_buffers[entry].skb = new_skb;
5983 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5984 mapping, new_addr);
5985
5986 tg3_tx_set_bd(tnapi, entry, new_addr, new_skb->len,
5987 base_flags, mss, vlan);
5988 }
5989 }
5990
5991 dev_kfree_skb(skb);
5992
5993 return ret;
5994 }
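/* Editorial note (not part of the original source): the workaround above
 * linearizes the offending skb -- one flat copy replaces the fragment
 * list -- so the single new mapping can be re-tested against the 4GB
 * boundary. The 5701 branch requests up to 4 extra bytes of headroom,
 * presumably so the copied data can start on a 4-byte boundary for that
 * chip's DMA engine; the exact requirement is not spelled out here.
 */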
5995
5996 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5997
5998 /* Use GSO to work around a rare TSO bug that may be triggered when the
5999 * TSO header is greater than 80 bytes.
6000 */
6001 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6002 {
6003 struct sk_buff *segs, *nskb;
6004 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6005
6006 /* Estimate the number of fragments in the worst case */
6007 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6008 netif_stop_queue(tp->dev);
6009
6010 /* netif_tx_stop_queue() must be done before checking
6011 * tx index in tg3_tx_avail() below, because in
6012 * tg3_tx(), we update tx index before checking for
6013 * netif_tx_queue_stopped().
6014 */
6015 smp_mb();
6016 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6017 return NETDEV_TX_BUSY;
6018
6019 netif_wake_queue(tp->dev);
6020 }
6021
6022 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6023 if (IS_ERR(segs))
6024 goto tg3_tso_bug_end;
6025
6026 do {
6027 nskb = segs;
6028 segs = segs->next;
6029 nskb->next = NULL;
6030 tg3_start_xmit(nskb, tp->dev);
6031 } while (segs);
6032
6033 tg3_tso_bug_end:
6034 dev_kfree_skb(skb);
6035
6036 return NETDEV_TX_OK;
6037 }
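/* Editorial note (not part of the original source): frag_cnt_est above is
 * a deliberately pessimistic bound -- each segment that skb_gso_segment()
 * can produce is budgeted roughly three descriptors (linear header area
 * plus a couple of page fragments) -- so the queue is stopped up front
 * rather than risk tg3_start_xmit() hitting a full ring halfway through
 * the segment burst.
 */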
6038
6039 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6040 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6041 */
6042 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6043 {
6044 struct tg3 *tp = netdev_priv(dev);
6045 u32 len, entry, base_flags, mss, vlan = 0;
6046 int i = -1, would_hit_hwbug;
6047 dma_addr_t mapping;
6048 struct tg3_napi *tnapi;
6049 struct netdev_queue *txq;
6050 unsigned int last;
6051
6052 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6053 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6054 if (tg3_flag(tp, ENABLE_TSS))
6055 tnapi++;
6056
6057 /* We are running in BH disabled context with netif_tx_lock
6058 * and TX reclaim runs via tp->napi.poll inside of a software
6059 * interrupt. Furthermore, IRQ processing runs lockless so we have
6060 * no IRQ context deadlocks to worry about either. Rejoice!
6061 */
6062 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6063 if (!netif_tx_queue_stopped(txq)) {
6064 netif_tx_stop_queue(txq);
6065
6066 /* This is a hard error, log it. */
6067 netdev_err(dev,
6068 "BUG! Tx Ring full when queue awake!\n");
6069 }
6070 return NETDEV_TX_BUSY;
6071 }
6072
6073 entry = tnapi->tx_prod;
6074 base_flags = 0;
6075 if (skb->ip_summed == CHECKSUM_PARTIAL)
6076 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6077
6078 mss = skb_shinfo(skb)->gso_size;
6079 if (mss) {
6080 struct iphdr *iph;
6081 u32 tcp_opt_len, hdr_len;
6082
6083 if (skb_header_cloned(skb) &&
6084 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6085 dev_kfree_skb(skb);
6086 goto out_unlock;
6087 }
6088
6089 iph = ip_hdr(skb);
6090 tcp_opt_len = tcp_optlen(skb);
6091
6092 if (skb_is_gso_v6(skb)) {
6093 hdr_len = skb_headlen(skb) - ETH_HLEN;
6094 } else {
6095 u32 ip_tcp_len;
6096
6097 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6098 hdr_len = ip_tcp_len + tcp_opt_len;
6099
6100 iph->check = 0;
6101 iph->tot_len = htons(mss + hdr_len);
6102 }
6103
6104 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6105 tg3_flag(tp, TSO_BUG))
6106 return tg3_tso_bug(tp, skb);
6107
6108 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6109 TXD_FLAG_CPU_POST_DMA);
6110
6111 if (tg3_flag(tp, HW_TSO_1) ||
6112 tg3_flag(tp, HW_TSO_2) ||
6113 tg3_flag(tp, HW_TSO_3)) {
6114 tcp_hdr(skb)->check = 0;
6115 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6116 } else
6117 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6118 iph->daddr, 0,
6119 IPPROTO_TCP,
6120 0);
6121
6122 if (tg3_flag(tp, HW_TSO_3)) {
6123 mss |= (hdr_len & 0xc) << 12;
6124 if (hdr_len & 0x10)
6125 base_flags |= 0x00000010;
6126 base_flags |= (hdr_len & 0x3e0) << 5;
6127 } else if (tg3_flag(tp, HW_TSO_2))
6128 mss |= hdr_len << 9;
6129 else if (tg3_flag(tp, HW_TSO_1) ||
6130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6131 if (tcp_opt_len || iph->ihl > 5) {
6132 int tsflags;
6133
6134 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6135 mss |= (tsflags << 11);
6136 }
6137 } else {
6138 if (tcp_opt_len || iph->ihl > 5) {
6139 int tsflags;
6140
6141 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6142 base_flags |= tsflags << 12;
6143 }
6144 }
6145 }
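/* Editorial example (not part of the original source): for HW_TSO_3 the
 * IP+TCP header length is scattered across the descriptor fields above.
 * Take a hypothetical hdr_len of 60 (0x3c: 20-byte IP header plus 40-byte
 * TCP header with options): (0x3c & 0xc) << 12 puts 0xc000 into mss,
 * 0x3c & 0x10 sets bit 4 of base_flags, and (0x3c & 0x3e0) << 5 adds
 * 0x400 -- bits 2-3, 4, and 5-9 of hdr_len land in three different
 * spots, while bits 0-1 are assumed zero since headers are 4-byte
 * multiples.
 */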
6146
6147 #ifdef BCM_KERNEL_SUPPORTS_8021Q
6148 if (vlan_tx_tag_present(skb)) {
6149 base_flags |= TXD_FLAG_VLAN;
6150 vlan = vlan_tx_tag_get(skb);
6151 }
6152 #endif
6153
6154 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6155 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6156 base_flags |= TXD_FLAG_JMB_PKT;
6157
6158 len = skb_headlen(skb);
6159
6160 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6161 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6162 dev_kfree_skb(skb);
6163 goto out_unlock;
6164 }
6165
6166 tnapi->tx_buffers[entry].skb = skb;
6167 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6168
6169 would_hit_hwbug = 0;
6170
6171 if (tg3_4g_overflow_test(mapping, len))
6172 would_hit_hwbug = 1;
6173
6174 if (tg3_40bit_overflow_test(tp, mapping, len))
6175 would_hit_hwbug = 1;
6176
6177 if (tg3_flag(tp, 5701_DMA_BUG))
6178 would_hit_hwbug = 1;
6179
6180 tg3_tx_set_bd(tnapi, entry, mapping, len, base_flags |
6181 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6182 mss, vlan);
6183
6184 entry = NEXT_TX(entry);
6185
6186 /* Now loop through additional data fragments, and queue them. */
6187 if (skb_shinfo(skb)->nr_frags > 0) {
6188 u32 tmp_mss = mss;
6189
6190 if (!tg3_flag(tp, HW_TSO_1) &&
6191 !tg3_flag(tp, HW_TSO_2) &&
6192 !tg3_flag(tp, HW_TSO_3))
6193 tmp_mss = 0;
6194
6195 last = skb_shinfo(skb)->nr_frags - 1;
6196 for (i = 0; i <= last; i++) {
6197 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6198
6199 len = frag->size;
6200 mapping = pci_map_page(tp->pdev,
6201 frag->page,
6202 frag->page_offset,
6203 len, PCI_DMA_TODEVICE);
6204
6205 tnapi->tx_buffers[entry].skb = NULL;
6206 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6207 mapping);
6208 if (pci_dma_mapping_error(tp->pdev, mapping))
6209 goto dma_error;
6210
6211 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6212 len <= 8)
6213 would_hit_hwbug = 1;
6214
6215 if (tg3_4g_overflow_test(mapping, len))
6216 would_hit_hwbug = 1;
6217
6218 if (tg3_40bit_overflow_test(tp, mapping, len))
6219 would_hit_hwbug = 1;
6220
6221 tg3_tx_set_bd(tnapi, entry, mapping, len, base_flags |
6222 ((i == last) ? TXD_FLAG_END : 0),
6223 tmp_mss, vlan);
6224
6225 entry = NEXT_TX(entry);
6226 }
6227 }
6228
6229 if (would_hit_hwbug) {
6230 tg3_skb_error_unmap(tnapi, skb, i);
6231
6232 /* If the workaround fails due to memory/mapping
6233 * failure, silently drop this packet.
6234 */
6235 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags,
6236 mss, vlan))
6237 goto out_unlock;
6238
6239 entry = NEXT_TX(tnapi->tx_prod);
6240 }
6241
6242 skb_tx_timestamp(skb);
6243
6244 /* Packets are ready, update Tx producer idx local and on card. */
6245 tw32_tx_mbox(tnapi->prodmbox, entry);
6246
6247 tnapi->tx_prod = entry;
6248 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6249 netif_tx_stop_queue(txq);
6250
6251 /* netif_tx_stop_queue() must be done before checking
6252 * tx index in tg3_tx_avail() below, because in
6253 * tg3_tx(), we update tx index before checking for
6254 * netif_tx_queue_stopped().
6255 */
6256 smp_mb();
6257 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6258 netif_tx_wake_queue(txq);
6259 }
6260
6261 out_unlock:
6262 mmiowb();
6263
6264 return NETDEV_TX_OK;
6265
6266 dma_error:
6267 tg3_skb_error_unmap(tnapi, skb, i);
6268 dev_kfree_skb(skb);
6269 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6270 return NETDEV_TX_OK;
6271 }
6272
6273 static void tg3_set_loopback(struct net_device *dev, u32 features)
6274 {
6275 struct tg3 *tp = netdev_priv(dev);
6276
6277 if (features & NETIF_F_LOOPBACK) {
6278 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6279 return;
6280
6281 /*
6282 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6283 * loopback mode if Half-Duplex mode was negotiated earlier.
6284 */
6285 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6286
6287 /* Enable internal MAC loopback mode */
6288 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6289 spin_lock_bh(&tp->lock);
6290 tw32(MAC_MODE, tp->mac_mode);
6291 netif_carrier_on(tp->dev);
6292 spin_unlock_bh(&tp->lock);
6293 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6294 } else {
6295 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6296 return;
6297
6298 /* Disable internal MAC loopback mode */
6299 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6300 spin_lock_bh(&tp->lock);
6301 tw32(MAC_MODE, tp->mac_mode);
6302 /* Force link status check */
6303 tg3_setup_phy(tp, 1);
6304 spin_unlock_bh(&tp->lock);
6305 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6306 }
6307 }
6308
6309 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6310 {
6311 struct tg3 *tp = netdev_priv(dev);
6312
6313 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6314 features &= ~NETIF_F_ALL_TSO;
6315
6316 return features;
6317 }
6318
6319 static int tg3_set_features(struct net_device *dev, u32 features)
6320 {
6321 u32 changed = dev->features ^ features;
6322
6323 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6324 tg3_set_loopback(dev, features);
6325
6326 return 0;
6327 }
6328
6329 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6330 int new_mtu)
6331 {
6332 dev->mtu = new_mtu;
6333
6334 if (new_mtu > ETH_DATA_LEN) {
6335 if (tg3_flag(tp, 5780_CLASS)) {
6336 netdev_update_features(dev);
6337 tg3_flag_clear(tp, TSO_CAPABLE);
6338 } else {
6339 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6340 }
6341 } else {
6342 if (tg3_flag(tp, 5780_CLASS)) {
6343 tg3_flag_set(tp, TSO_CAPABLE);
6344 netdev_update_features(dev);
6345 }
6346 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6347 }
6348 }
6349
6350 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6351 {
6352 struct tg3 *tp = netdev_priv(dev);
6353 int err;
6354
6355 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6356 return -EINVAL;
6357
6358 if (!netif_running(dev)) {
6359 /* We'll just catch it later when the
6360 * device is brought up.
6361 */
6362 tg3_set_mtu(dev, tp, new_mtu);
6363 return 0;
6364 }
6365
6366 tg3_phy_stop(tp);
6367
6368 tg3_netif_stop(tp);
6369
6370 tg3_full_lock(tp, 1);
6371
6372 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6373
6374 tg3_set_mtu(dev, tp, new_mtu);
6375
6376 err = tg3_restart_hw(tp, 0);
6377
6378 if (!err)
6379 tg3_netif_start(tp);
6380
6381 tg3_full_unlock(tp);
6382
6383 if (!err)
6384 tg3_phy_start(tp);
6385
6386 return err;
6387 }
6388
6389 static void tg3_rx_prodring_free(struct tg3 *tp,
6390 struct tg3_rx_prodring_set *tpr)
6391 {
6392 int i;
6393
6394 if (tpr != &tp->napi[0].prodring) {
6395 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6396 i = (i + 1) & tp->rx_std_ring_mask)
6397 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6398 tp->rx_pkt_map_sz);
6399
6400 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6401 for (i = tpr->rx_jmb_cons_idx;
6402 i != tpr->rx_jmb_prod_idx;
6403 i = (i + 1) & tp->rx_jmb_ring_mask) {
6404 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6405 TG3_RX_JMB_MAP_SZ);
6406 }
6407 }
6408
6409 return;
6410 }
6411
6412 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6413 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6414 tp->rx_pkt_map_sz);
6415
6416 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6417 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6418 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6419 TG3_RX_JMB_MAP_SZ);
6420 }
6421 }
6422
6423 /* Initialize rx rings for packet processing.
6424 *
6425 * The chip has been shut down and the driver detached from
6426 * the networking stack, so no interrupts or new tx packets will
6427 * end up in the driver. tp->{tx,}lock are held and thus
6428 * we may not sleep.
6429 */
6430 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6431 struct tg3_rx_prodring_set *tpr)
6432 {
6433 u32 i, rx_pkt_dma_sz;
6434
6435 tpr->rx_std_cons_idx = 0;
6436 tpr->rx_std_prod_idx = 0;
6437 tpr->rx_jmb_cons_idx = 0;
6438 tpr->rx_jmb_prod_idx = 0;
6439
6440 if (tpr != &tp->napi[0].prodring) {
6441 memset(&tpr->rx_std_buffers[0], 0,
6442 TG3_RX_STD_BUFF_RING_SIZE(tp));
6443 if (tpr->rx_jmb_buffers)
6444 memset(&tpr->rx_jmb_buffers[0], 0,
6445 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6446 goto done;
6447 }
6448
6449 /* Zero out all descriptors. */
6450 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6451
6452 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6453 if (tg3_flag(tp, 5780_CLASS) &&
6454 tp->dev->mtu > ETH_DATA_LEN)
6455 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6456 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6457
6458 /* Initialize invariants of the rings; we only set this
6459 * stuff once. This works because the card does not
6460 * write into the rx buffer posting rings.
6461 */
6462 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6463 struct tg3_rx_buffer_desc *rxd;
6464
6465 rxd = &tpr->rx_std[i];
6466 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6467 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6468 rxd->opaque = (RXD_OPAQUE_RING_STD |
6469 (i << RXD_OPAQUE_INDEX_SHIFT));
6470 }
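/* Editorial note (not part of the original source): the opaque word is
 * echoed back by the chip in the rx return ring, e.g. standard-ring
 * entry 5 carries RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT),
 * so the completion path can recover both the producer ring and the
 * buffer index with no extra lookup state.
 */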
6471
6472 /* Now allocate fresh SKBs for each rx ring. */
6473 for (i = 0; i < tp->rx_pending; i++) {
6474 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6475 netdev_warn(tp->dev,
6476 "Using a smaller RX standard ring. Only "
6477 "%d out of %d buffers were allocated "
6478 "successfully\n", i, tp->rx_pending);
6479 if (i == 0)
6480 goto initfail;
6481 tp->rx_pending = i;
6482 break;
6483 }
6484 }
6485
6486 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6487 goto done;
6488
6489 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6490
6491 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6492 goto done;
6493
6494 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6495 struct tg3_rx_buffer_desc *rxd;
6496
6497 rxd = &tpr->rx_jmb[i].std;
6498 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6499 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6500 RXD_FLAG_JUMBO;
6501 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6502 (i << RXD_OPAQUE_INDEX_SHIFT));
6503 }
6504
6505 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6506 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6507 netdev_warn(tp->dev,
6508 "Using a smaller RX jumbo ring. Only %d "
6509 "out of %d buffers were allocated "
6510 "successfully\n", i, tp->rx_jumbo_pending);
6511 if (i == 0)
6512 goto initfail;
6513 tp->rx_jumbo_pending = i;
6514 break;
6515 }
6516 }
6517
6518 done:
6519 return 0;
6520
6521 initfail:
6522 tg3_rx_prodring_free(tp, tpr);
6523 return -ENOMEM;
6524 }
6525
6526 static void tg3_rx_prodring_fini(struct tg3 *tp,
6527 struct tg3_rx_prodring_set *tpr)
6528 {
6529 kfree(tpr->rx_std_buffers);
6530 tpr->rx_std_buffers = NULL;
6531 kfree(tpr->rx_jmb_buffers);
6532 tpr->rx_jmb_buffers = NULL;
6533 if (tpr->rx_std) {
6534 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6535 tpr->rx_std, tpr->rx_std_mapping);
6536 tpr->rx_std = NULL;
6537 }
6538 if (tpr->rx_jmb) {
6539 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6540 tpr->rx_jmb, tpr->rx_jmb_mapping);
6541 tpr->rx_jmb = NULL;
6542 }
6543 }
6544
6545 static int tg3_rx_prodring_init(struct tg3 *tp,
6546 struct tg3_rx_prodring_set *tpr)
6547 {
6548 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6549 GFP_KERNEL);
6550 if (!tpr->rx_std_buffers)
6551 return -ENOMEM;
6552
6553 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6554 TG3_RX_STD_RING_BYTES(tp),
6555 &tpr->rx_std_mapping,
6556 GFP_KERNEL);
6557 if (!tpr->rx_std)
6558 goto err_out;
6559
6560 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6561 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6562 GFP_KERNEL);
6563 if (!tpr->rx_jmb_buffers)
6564 goto err_out;
6565
6566 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6567 TG3_RX_JMB_RING_BYTES(tp),
6568 &tpr->rx_jmb_mapping,
6569 GFP_KERNEL);
6570 if (!tpr->rx_jmb)
6571 goto err_out;
6572 }
6573
6574 return 0;
6575
6576 err_out:
6577 tg3_rx_prodring_fini(tp, tpr);
6578 return -ENOMEM;
6579 }
6580
6581 /* Free up pending packets in all rx/tx rings.
6582 *
6583 * The chip has been shut down and the driver detached from
6584 * the networking stack, so no interrupts or new tx packets will
6585 * end up in the driver. tp->{tx,}lock is not held and we are not
6586 * in an interrupt context and thus may sleep.
6587 */
6588 static void tg3_free_rings(struct tg3 *tp)
6589 {
6590 int i, j;
6591
6592 for (j = 0; j < tp->irq_cnt; j++) {
6593 struct tg3_napi *tnapi = &tp->napi[j];
6594
6595 tg3_rx_prodring_free(tp, &tnapi->prodring);
6596
6597 if (!tnapi->tx_buffers)
6598 continue;
6599
6600 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6601 struct tg3_tx_ring_info *txp;
6602 struct sk_buff *skb;
6603 unsigned int k;
6604
6605 txp = &tnapi->tx_buffers[i];
6606 skb = txp->skb;
6607
6608 if (skb == NULL) {
6609 i++;
6610 continue;
6611 }
6612
6613 pci_unmap_single(tp->pdev,
6614 dma_unmap_addr(txp, mapping),
6615 skb_headlen(skb),
6616 PCI_DMA_TODEVICE);
6617 txp->skb = NULL;
6618
6619 i++;
6620
6621 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6622 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6623 pci_unmap_page(tp->pdev,
6624 dma_unmap_addr(txp, mapping),
6625 skb_shinfo(skb)->frags[k].size,
6626 PCI_DMA_TODEVICE);
6627 i++;
6628 }
6629
6630 dev_kfree_skb_any(skb);
6631 }
6632 }
6633 }
6634
6635 /* Initialize tx/rx rings for packet processing.
6636 *
6637 * The chip has been shut down and the driver detached from
6638 * the networking stack, so no interrupts or new tx packets will
6639 * end up in the driver. tp->{tx,}lock are held and thus
6640 * we may not sleep.
6641 */
6642 static int tg3_init_rings(struct tg3 *tp)
6643 {
6644 int i;
6645
6646 /* Free up all the SKBs. */
6647 tg3_free_rings(tp);
6648
6649 for (i = 0; i < tp->irq_cnt; i++) {
6650 struct tg3_napi *tnapi = &tp->napi[i];
6651
6652 tnapi->last_tag = 0;
6653 tnapi->last_irq_tag = 0;
6654 tnapi->hw_status->status = 0;
6655 tnapi->hw_status->status_tag = 0;
6656 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6657
6658 tnapi->tx_prod = 0;
6659 tnapi->tx_cons = 0;
6660 if (tnapi->tx_ring)
6661 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6662
6663 tnapi->rx_rcb_ptr = 0;
6664 if (tnapi->rx_rcb)
6665 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6666
6667 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6668 tg3_free_rings(tp);
6669 return -ENOMEM;
6670 }
6671 }
6672
6673 return 0;
6674 }
6675
6676 /*
6677 * Must not be invoked with interrupt sources disabled and
6678 * the hardware shut down.
6679 */
6680 static void tg3_free_consistent(struct tg3 *tp)
6681 {
6682 int i;
6683
6684 for (i = 0; i < tp->irq_cnt; i++) {
6685 struct tg3_napi *tnapi = &tp->napi[i];
6686
6687 if (tnapi->tx_ring) {
6688 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6689 tnapi->tx_ring, tnapi->tx_desc_mapping);
6690 tnapi->tx_ring = NULL;
6691 }
6692
6693 kfree(tnapi->tx_buffers);
6694 tnapi->tx_buffers = NULL;
6695
6696 if (tnapi->rx_rcb) {
6697 dma_free_coherent(&tp->pdev->dev,
6698 TG3_RX_RCB_RING_BYTES(tp),
6699 tnapi->rx_rcb,
6700 tnapi->rx_rcb_mapping);
6701 tnapi->rx_rcb = NULL;
6702 }
6703
6704 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6705
6706 if (tnapi->hw_status) {
6707 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6708 tnapi->hw_status,
6709 tnapi->status_mapping);
6710 tnapi->hw_status = NULL;
6711 }
6712 }
6713
6714 if (tp->hw_stats) {
6715 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6716 tp->hw_stats, tp->stats_mapping);
6717 tp->hw_stats = NULL;
6718 }
6719 }
6720
6721 /*
6722 * Must not be invoked with interrupt sources disabled and
6723 * the hardware shut down. Can sleep.
6724 */
6725 static int tg3_alloc_consistent(struct tg3 *tp)
6726 {
6727 int i;
6728
6729 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6730 sizeof(struct tg3_hw_stats),
6731 &tp->stats_mapping,
6732 GFP_KERNEL);
6733 if (!tp->hw_stats)
6734 goto err_out;
6735
6736 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6737
6738 for (i = 0; i < tp->irq_cnt; i++) {
6739 struct tg3_napi *tnapi = &tp->napi[i];
6740 struct tg3_hw_status *sblk;
6741
6742 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6743 TG3_HW_STATUS_SIZE,
6744 &tnapi->status_mapping,
6745 GFP_KERNEL);
6746 if (!tnapi->hw_status)
6747 goto err_out;
6748
6749 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6750 sblk = tnapi->hw_status;
6751
6752 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6753 goto err_out;
6754
6755 /* If multivector TSS is enabled, vector 0 does not handle
6756 * tx interrupts. Don't allocate any resources for it.
6757 */
6758 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6759 (i && tg3_flag(tp, ENABLE_TSS))) {
6760 tnapi->tx_buffers = kzalloc(
6761 sizeof(struct tg3_tx_ring_info) *
6762 TG3_TX_RING_SIZE, GFP_KERNEL);
6763 if (!tnapi->tx_buffers)
6764 goto err_out;
6765
6766 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6767 TG3_TX_RING_BYTES,
6768 &tnapi->tx_desc_mapping,
6769 GFP_KERNEL);
6770 if (!tnapi->tx_ring)
6771 goto err_out;
6772 }
6773
6774 /*
6775 * When RSS is enabled, the status block format changes
6776 * slightly. The "rx_jumbo_consumer", "reserved",
6777 * and "rx_mini_consumer" members get mapped to the
6778 * other three rx return ring producer indexes.
6779 */
6780 switch (i) {
6781 default:
6782 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6783 break;
6784 case 2:
6785 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6786 break;
6787 case 3:
6788 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6789 break;
6790 case 4:
6791 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6792 break;
6793 }
6794
6795 /*
6796 * If multivector RSS is enabled, vector 0 does not handle
6797 * rx or tx interrupts. Don't allocate any resources for it.
6798 */
6799 if (!i && tg3_flag(tp, ENABLE_RSS))
6800 continue;
6801
6802 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6803 TG3_RX_RCB_RING_BYTES(tp),
6804 &tnapi->rx_rcb_mapping,
6805 GFP_KERNEL);
6806 if (!tnapi->rx_rcb)
6807 goto err_out;
6808
6809 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6810 }
6811
6812 return 0;
6813
6814 err_out:
6815 tg3_free_consistent(tp);
6816 return -ENOMEM;
6817 }
6818
6819 #define MAX_WAIT_CNT 1000
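/* Editorial note (not part of the original source): MAX_WAIT_CNT polls of
 * udelay(100) each give tg3_stop_block() -- and the MAC_TX_MODE wait in
 * tg3_abort_hw() below -- up to 100ms for a block to quiesce before the
 * driver gives up with -ENODEV.
 */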
6820
6821 /* To stop a block, clear the enable bit and poll till it
6822 * clears. tp->lock is held.
6823 */
6824 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6825 {
6826 unsigned int i;
6827 u32 val;
6828
6829 if (tg3_flag(tp, 5705_PLUS)) {
6830 switch (ofs) {
6831 case RCVLSC_MODE:
6832 case DMAC_MODE:
6833 case MBFREE_MODE:
6834 case BUFMGR_MODE:
6835 case MEMARB_MODE:
6836 /* We can't enable/disable these bits of the
6837 * 5705/5750, just say success.
6838 */
6839 return 0;
6840
6841 default:
6842 break;
6843 }
6844 }
6845
6846 val = tr32(ofs);
6847 val &= ~enable_bit;
6848 tw32_f(ofs, val);
6849
6850 for (i = 0; i < MAX_WAIT_CNT; i++) {
6851 udelay(100);
6852 val = tr32(ofs);
6853 if ((val & enable_bit) == 0)
6854 break;
6855 }
6856
6857 if (i == MAX_WAIT_CNT && !silent) {
6858 dev_err(&tp->pdev->dev,
6859 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6860 ofs, enable_bit);
6861 return -ENODEV;
6862 }
6863
6864 return 0;
6865 }
6866
6867 /* tp->lock is held. */
6868 static int tg3_abort_hw(struct tg3 *tp, int silent)
6869 {
6870 int i, err;
6871
6872 tg3_disable_ints(tp);
6873
6874 tp->rx_mode &= ~RX_MODE_ENABLE;
6875 tw32_f(MAC_RX_MODE, tp->rx_mode);
6876 udelay(10);
6877
6878 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6879 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6880 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6881 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6882 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6883 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6884
6885 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6886 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6887 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6888 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6889 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6890 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6891 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6892
6893 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6894 tw32_f(MAC_MODE, tp->mac_mode);
6895 udelay(40);
6896
6897 tp->tx_mode &= ~TX_MODE_ENABLE;
6898 tw32_f(MAC_TX_MODE, tp->tx_mode);
6899
6900 for (i = 0; i < MAX_WAIT_CNT; i++) {
6901 udelay(100);
6902 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6903 break;
6904 }
6905 if (i >= MAX_WAIT_CNT) {
6906 dev_err(&tp->pdev->dev,
6907 "%s timed out, TX_MODE_ENABLE will not clear "
6908 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6909 err |= -ENODEV;
6910 }
6911
6912 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6913 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6914 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6915
6916 tw32(FTQ_RESET, 0xffffffff);
6917 tw32(FTQ_RESET, 0x00000000);
6918
6919 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6920 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6921
6922 for (i = 0; i < tp->irq_cnt; i++) {
6923 struct tg3_napi *tnapi = &tp->napi[i];
6924 if (tnapi->hw_status)
6925 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6926 }
6927 if (tp->hw_stats)
6928 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6929
6930 return err;
6931 }
6932
6933 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6934 {
6935 int i;
6936 u32 apedata;
6937
6938 /* NCSI does not support APE events */
6939 if (tg3_flag(tp, APE_HAS_NCSI))
6940 return;
6941
6942 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6943 if (apedata != APE_SEG_SIG_MAGIC)
6944 return;
6945
6946 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6947 if (!(apedata & APE_FW_STATUS_READY))
6948 return;
6949
6950 /* Wait for up to 1 millisecond for APE to service previous event. */
6951 for (i = 0; i < 10; i++) {
6952 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6953 return;
6954
6955 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6956
6957 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6958 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6959 event | APE_EVENT_STATUS_EVENT_PENDING);
6960
6961 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6962
6963 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6964 break;
6965
6966 udelay(100);
6967 }
6968
6969 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6970 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6971 }
6972
6973 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6974 {
6975 u32 event;
6976 u32 apedata;
6977
6978 if (!tg3_flag(tp, ENABLE_APE))
6979 return;
6980
6981 switch (kind) {
6982 case RESET_KIND_INIT:
6983 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6984 APE_HOST_SEG_SIG_MAGIC);
6985 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6986 APE_HOST_SEG_LEN_MAGIC);
6987 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6988 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6989 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6990 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6991 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6992 APE_HOST_BEHAV_NO_PHYLOCK);
6993 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6994 TG3_APE_HOST_DRVR_STATE_START);
6995
6996 event = APE_EVENT_STATUS_STATE_START;
6997 break;
6998 case RESET_KIND_SHUTDOWN:
6999 /* With the interface we are currently using,
7000 * APE does not track driver state. Wiping
7001 * out the HOST SEGMENT SIGNATURE forces
7002 * the APE to assume OS absent status.
7003 */
7004 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7005
7006 if (device_may_wakeup(&tp->pdev->dev) &&
7007 tg3_flag(tp, WOL_ENABLE)) {
7008 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7009 TG3_APE_HOST_WOL_SPEED_AUTO);
7010 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7011 } else
7012 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7013
7014 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7015
7016 event = APE_EVENT_STATUS_STATE_UNLOAD;
7017 break;
7018 case RESET_KIND_SUSPEND:
7019 event = APE_EVENT_STATUS_STATE_SUSPEND;
7020 break;
7021 default:
7022 return;
7023 }
7024
7025 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7026
7027 tg3_ape_send_event(tp, event);
7028 }
7029
7030 /* tp->lock is held. */
7031 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7032 {
7033 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7034 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7035
7036 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7037 switch (kind) {
7038 case RESET_KIND_INIT:
7039 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7040 DRV_STATE_START);
7041 break;
7042
7043 case RESET_KIND_SHUTDOWN:
7044 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7045 DRV_STATE_UNLOAD);
7046 break;
7047
7048 case RESET_KIND_SUSPEND:
7049 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7050 DRV_STATE_SUSPEND);
7051 break;
7052
7053 default:
7054 break;
7055 }
7056 }
7057
7058 if (kind == RESET_KIND_INIT ||
7059 kind == RESET_KIND_SUSPEND)
7060 tg3_ape_driver_state_change(tp, kind);
7061 }
7062
7063 /* tp->lock is held. */
7064 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7065 {
7066 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7067 switch (kind) {
7068 case RESET_KIND_INIT:
7069 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7070 DRV_STATE_START_DONE);
7071 break;
7072
7073 case RESET_KIND_SHUTDOWN:
7074 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7075 DRV_STATE_UNLOAD_DONE);
7076 break;
7077
7078 default:
7079 break;
7080 }
7081 }
7082
7083 if (kind == RESET_KIND_SHUTDOWN)
7084 tg3_ape_driver_state_change(tp, kind);
7085 }
7086
7087 /* tp->lock is held. */
7088 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7089 {
7090 if (tg3_flag(tp, ENABLE_ASF)) {
7091 switch (kind) {
7092 case RESET_KIND_INIT:
7093 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7094 DRV_STATE_START);
7095 break;
7096
7097 case RESET_KIND_SHUTDOWN:
7098 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7099 DRV_STATE_UNLOAD);
7100 break;
7101
7102 case RESET_KIND_SUSPEND:
7103 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7104 DRV_STATE_SUSPEND);
7105 break;
7106
7107 default:
7108 break;
7109 }
7110 }
7111 }
7112
7113 static int tg3_poll_fw(struct tg3 *tp)
7114 {
7115 int i;
7116 u32 val;
7117
7118 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7119 /* Wait up to 20ms for init done. */
7120 for (i = 0; i < 200; i++) {
7121 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7122 return 0;
7123 udelay(100);
7124 }
7125 return -ENODEV;
7126 }
7127
7128 /* Wait for firmware initialization to complete. */
7129 for (i = 0; i < 100000; i++) {
7130 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7131 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7132 break;
7133 udelay(10);
7134 }
7135
7136 /* Chip might not be fitted with firmware. Some Sun onboard
7137 * parts are configured like that. So don't signal the timeout
7138 * of the above loop as an error, but do report the lack of
7139 * running firmware once.
7140 */
7141 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7142 tg3_flag_set(tp, NO_FWARE_REPORTED);
7143
7144 netdev_info(tp->dev, "No firmware running\n");
7145 }
7146
7147 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7148 /* The 57765 A0 needs a little more
7149 * time to do some important work.
7150 */
7151 mdelay(10);
7152 }
7153
7154 return 0;
7155 }
7156
7157 /* Save PCI command register before chip reset */
7158 static void tg3_save_pci_state(struct tg3 *tp)
7159 {
7160 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7161 }
7162
7163 /* Restore PCI state after chip reset */
7164 static void tg3_restore_pci_state(struct tg3 *tp)
7165 {
7166 u32 val;
7167
7168 /* Re-enable indirect register accesses. */
7169 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7170 tp->misc_host_ctrl);
7171
7172 /* Set MAX PCI retry to zero. */
7173 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7174 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7175 tg3_flag(tp, PCIX_MODE))
7176 val |= PCISTATE_RETRY_SAME_DMA;
7177 /* Allow reads and writes to the APE register and memory space. */
7178 if (tg3_flag(tp, ENABLE_APE))
7179 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7180 PCISTATE_ALLOW_APE_SHMEM_WR |
7181 PCISTATE_ALLOW_APE_PSPACE_WR;
7182 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7183
7184 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7185
7186 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7187 if (tg3_flag(tp, PCI_EXPRESS)) {
7188 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7189 } else {
7190 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7191 tp->pci_cacheline_sz);
7192 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7193 tp->pci_lat_timer);
7194 }
7195 }
7196
7197 /* Make sure PCI-X relaxed ordering bit is clear. */
7198 if (tg3_flag(tp, PCIX_MODE)) {
7199 u16 pcix_cmd;
7200
7201 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7202 &pcix_cmd);
7203 pcix_cmd &= ~PCI_X_CMD_ERO;
7204 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7205 pcix_cmd);
7206 }
7207
7208 if (tg3_flag(tp, 5780_CLASS)) {
7209
7210 /* Chip reset on 5780 will reset MSI enable bit,
7211 * so we need to restore it.
7212 */
7213 if (tg3_flag(tp, USING_MSI)) {
7214 u16 ctrl;
7215
7216 pci_read_config_word(tp->pdev,
7217 tp->msi_cap + PCI_MSI_FLAGS,
7218 &ctrl);
7219 pci_write_config_word(tp->pdev,
7220 tp->msi_cap + PCI_MSI_FLAGS,
7221 ctrl | PCI_MSI_FLAGS_ENABLE);
7222 val = tr32(MSGINT_MODE);
7223 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7224 }
7225 }
7226 }
7227
7228 static void tg3_stop_fw(struct tg3 *);
7229
7230 /* tp->lock is held. */
7231 static int tg3_chip_reset(struct tg3 *tp)
7232 {
7233 u32 val;
7234 void (*write_op)(struct tg3 *, u32, u32);
7235 int i, err;
7236
7237 tg3_nvram_lock(tp);
7238
7239 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7240
7241 /* No matching tg3_nvram_unlock() after this because
7242 * chip reset below will undo the nvram lock.
7243 */
7244 tp->nvram_lock_cnt = 0;
7245
7246 /* GRC_MISC_CFG core clock reset will clear the memory
7247 * enable bit in PCI register 4 and the MSI enable bit
7248 * on some chips, so we save relevant registers here.
7249 */
7250 tg3_save_pci_state(tp);
7251
7252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7253 tg3_flag(tp, 5755_PLUS))
7254 tw32(GRC_FASTBOOT_PC, 0);
7255
7256 /*
7257 * We must avoid the readl() that normally takes place.
7258 * It locks machines, causes machine checks, and other
7259 * fun things. So, temporarily disable the 5701
7260 * hardware workaround, while we do the reset.
7261 */
7262 write_op = tp->write32;
7263 if (write_op == tg3_write_flush_reg32)
7264 tp->write32 = tg3_write32;
7265
7266 /* Prevent the irq handler from reading or writing PCI registers
7267 * during chip reset when the memory enable bit in the PCI command
7268 * register may be cleared. The chip does not generate interrupts
7269 * at this time, but the irq handler may still be called due to irq
7270 * sharing or irqpoll.
7271 */
7272 tg3_flag_set(tp, CHIP_RESETTING);
7273 for (i = 0; i < tp->irq_cnt; i++) {
7274 struct tg3_napi *tnapi = &tp->napi[i];
7275 if (tnapi->hw_status) {
7276 tnapi->hw_status->status = 0;
7277 tnapi->hw_status->status_tag = 0;
7278 }
7279 tnapi->last_tag = 0;
7280 tnapi->last_irq_tag = 0;
7281 }
7282 smp_mb();
7283
7284 for (i = 0; i < tp->irq_cnt; i++)
7285 synchronize_irq(tp->napi[i].irq_vec);
7286
7287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7288 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7289 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7290 }
7291
7292 /* do the reset */
7293 val = GRC_MISC_CFG_CORECLK_RESET;
7294
7295 if (tg3_flag(tp, PCI_EXPRESS)) {
7296 /* Force PCIe 1.0a mode */
7297 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7298 !tg3_flag(tp, 57765_PLUS) &&
7299 tr32(TG3_PCIE_PHY_TSTCTL) ==
7300 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7301 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7302
7303 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7304 tw32(GRC_MISC_CFG, (1 << 29));
7305 val |= (1 << 29);
7306 }
7307 }
7308
7309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7310 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7311 tw32(GRC_VCPU_EXT_CTRL,
7312 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7313 }
7314
7315 /* Manage gphy power for all CPMU absent PCIe devices. */
7316 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7317 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7318
7319 tw32(GRC_MISC_CFG, val);
7320
7321 /* restore 5701 hardware bug workaround write method */
7322 tp->write32 = write_op;
7323
7324 /* Unfortunately, we have to delay before the PCI read back.
7325 * Some 575X chips will not even respond to a PCI cfg access
7326 * when the reset command is given to the chip.
7327 *
7328 * How do these hardware designers expect things to work
7329 * properly if the PCI write is posted for a long period
7330 * of time? It is always necessary to have some method by
7331 * which a register read back can occur to push the write
7332 * out which does the reset.
7333 *
7334 * For most tg3 variants the trick below was working.
7335 * Ho hum...
7336 */
7337 udelay(120);
7338
7339 /* Flush PCI posted writes. The normal MMIO registers
7340 * are inaccessible at this time so this is the only
7341 * way to do this reliably (actually, this is no longer
7342 * the case, see above). I tried to use indirect
7343 * register read/write but this upset some 5701 variants.
7344 */
7345 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7346
7347 udelay(120);
7348
7349 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7350 u16 val16;
7351
7352 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7353 int i;
7354 u32 cfg_val;
7355
7356 /* Wait for link training to complete. */
7357 for (i = 0; i < 5000; i++)
7358 udelay(100);
7359
7360 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7361 pci_write_config_dword(tp->pdev, 0xc4,
7362 cfg_val | (1 << 15));
7363 }
7364
7365 /* Clear the "no snoop" and "relaxed ordering" bits. */
7366 pci_read_config_word(tp->pdev,
7367 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7368 &val16);
7369 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7370 PCI_EXP_DEVCTL_NOSNOOP_EN);
7371 /*
7372 * Older PCIe devices only support the 128 byte
7373 * MPS setting. Enforce the restriction.
7374 */
7375 if (!tg3_flag(tp, CPMU_PRESENT))
7376 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7377 pci_write_config_word(tp->pdev,
7378 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7379 val16);
7380
7381 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7382
7383 /* Clear error status */
7384 pci_write_config_word(tp->pdev,
7385 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7386 PCI_EXP_DEVSTA_CED |
7387 PCI_EXP_DEVSTA_NFED |
7388 PCI_EXP_DEVSTA_FED |
7389 PCI_EXP_DEVSTA_URD);
7390 }
7391
7392 tg3_restore_pci_state(tp);
7393
7394 tg3_flag_clear(tp, CHIP_RESETTING);
7395 tg3_flag_clear(tp, ERROR_PROCESSED);
7396
7397 val = 0;
7398 if (tg3_flag(tp, 5780_CLASS))
7399 val = tr32(MEMARB_MODE);
7400 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7401
7402 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7403 tg3_stop_fw(tp);
7404 tw32(0x5000, 0x400);
7405 }
7406
7407 tw32(GRC_MODE, tp->grc_mode);
7408
7409 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7410 val = tr32(0xc4);
7411
7412 tw32(0xc4, val | (1 << 15));
7413 }
7414
7415 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7417 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7418 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7419 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7420 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7421 }
7422
7423 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7424 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7425 val = tp->mac_mode;
7426 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7427 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7428 val = tp->mac_mode;
7429 } else
7430 val = 0;
7431
7432 tw32_f(MAC_MODE, val);
7433 udelay(40);
7434
7435 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7436
7437 err = tg3_poll_fw(tp);
7438 if (err)
7439 return err;
7440
7441 tg3_mdio_start(tp);
7442
7443 if (tg3_flag(tp, PCI_EXPRESS) &&
7444 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7445 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7446 !tg3_flag(tp, 57765_PLUS)) {
7447 val = tr32(0x7c00);
7448
7449 tw32(0x7c00, val | (1 << 25));
7450 }
7451
7452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7453 val = tr32(TG3_CPMU_CLCK_ORIDE);
7454 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7455 }
7456
7457 /* Reprobe ASF enable state. */
7458 tg3_flag_clear(tp, ENABLE_ASF);
7459 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7460 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7461 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7462 u32 nic_cfg;
7463
7464 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7465 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7466 tg3_flag_set(tp, ENABLE_ASF);
7467 tp->last_event_jiffies = jiffies;
7468 if (tg3_flag(tp, 5750_PLUS))
7469 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7470 }
7471 }
7472
7473 return 0;
7474 }
7475
7476 /* tp->lock is held. */
7477 static void tg3_stop_fw(struct tg3 *tp)
7478 {
7479 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7480 /* Wait for RX cpu to ACK the previous event. */
7481 tg3_wait_for_event_ack(tp);
7482
7483 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7484
7485 tg3_generate_fw_event(tp);
7486
7487 /* Wait for RX cpu to ACK this event. */
7488 tg3_wait_for_event_ack(tp);
7489 }
7490 }
7491
7492 /* tp->lock is held. */
7493 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7494 {
7495 int err;
7496
7497 tg3_stop_fw(tp);
7498
7499 tg3_write_sig_pre_reset(tp, kind);
7500
7501 tg3_abort_hw(tp, silent);
7502 err = tg3_chip_reset(tp);
7503
7504 __tg3_set_mac_addr(tp, 0);
7505
7506 tg3_write_sig_legacy(tp, kind);
7507 tg3_write_sig_post_reset(tp, kind);
7508
7509 if (err)
7510 return err;
7511
7512 return 0;
7513 }
7514
7515 #define RX_CPU_SCRATCH_BASE 0x30000
7516 #define RX_CPU_SCRATCH_SIZE 0x04000
7517 #define TX_CPU_SCRATCH_BASE 0x34000
7518 #define TX_CPU_SCRATCH_SIZE 0x04000
7519
7520 /* tp->lock is held. */
7521 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7522 {
7523 int i;
7524
7525 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7526
7527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7528 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7529
7530 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7531 return 0;
7532 }
7533 if (offset == RX_CPU_BASE) {
7534 for (i = 0; i < 10000; i++) {
7535 tw32(offset + CPU_STATE, 0xffffffff);
7536 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7537 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7538 break;
7539 }
7540
7541 tw32(offset + CPU_STATE, 0xffffffff);
7542 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7543 udelay(10);
7544 } else {
7545 for (i = 0; i < 10000; i++) {
7546 tw32(offset + CPU_STATE, 0xffffffff);
7547 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7548 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7549 break;
7550 }
7551 }
7552
7553 if (i >= 10000) {
7554 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7555 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7556 return -ENODEV;
7557 }
7558
7559 /* Clear firmware's nvram arbitration. */
7560 if (tg3_flag(tp, NVRAM))
7561 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7562 return 0;
7563 }
7564
7565 struct fw_info {
7566 unsigned int fw_base;
7567 unsigned int fw_len;
7568 const __be32 *fw_data;
7569 };
7570
7571 /* tp->lock is held. */
7572 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7573 int cpu_scratch_size, struct fw_info *info)
7574 {
7575 int err, lock_err, i;
7576 void (*write_op)(struct tg3 *, u32, u32);
7577
7578 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7579 netdev_err(tp->dev,
7580 "%s: Trying to load TX cpu firmware which is 5705\n",
7581 __func__);
7582 return -EINVAL;
7583 }
7584
7585 if (tg3_flag(tp, 5705_PLUS))
7586 write_op = tg3_write_mem;
7587 else
7588 write_op = tg3_write_indirect_reg32;
7589
7590 /* It is possible that bootcode is still loading at this point.
7591 * Get the nvram lock first before halting the cpu.
7592 */
7593 lock_err = tg3_nvram_lock(tp);
7594 err = tg3_halt_cpu(tp, cpu_base);
7595 if (!lock_err)
7596 tg3_nvram_unlock(tp);
7597 if (err)
7598 goto out;
7599
7600 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7601 write_op(tp, cpu_scratch_base + i, 0);
7602 tw32(cpu_base + CPU_STATE, 0xffffffff);
7603 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7604 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7605 write_op(tp, (cpu_scratch_base +
7606 (info->fw_base & 0xffff) +
7607 (i * sizeof(u32))),
7608 be32_to_cpu(info->fw_data[i]));
7609
7610 err = 0;
7611
7612 out:
7613 return err;
7614 }
7615
7616 /* tp->lock is held. */
7617 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7618 {
7619 struct fw_info info;
7620 const __be32 *fw_data;
7621 int err, i;
7622
7623 fw_data = (void *)tp->fw->data;
7624
7625 /* Firmware blob starts with version numbers, followed by
7626 * start address and length. We are setting complete length.
7627 * length = end_address_of_bss - start_address_of_text.
7628 * Remainder is the blob to be loaded contiguously
7629 * from start address. */
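/* Editorial note (not part of the original source): concretely, fw_data[0]
 * holds the version words, fw_data[1] the load address, fw_data[2] a
 * length word (the driver recomputes the length from the file size
 * instead), and the image proper begins at &fw_data[3] -- hence
 * fw_len = tp->fw->size - 12 skips the three 4-byte header words.
 */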
7630
7631 info.fw_base = be32_to_cpu(fw_data[1]);
7632 info.fw_len = tp->fw->size - 12;
7633 info.fw_data = &fw_data[3];
7634
7635 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7636 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7637 &info);
7638 if (err)
7639 return err;
7640
7641 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7642 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7643 &info);
7644 if (err)
7645 return err;
7646
7647 /* Now start up only the RX cpu. */
7648 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7649 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7650
7651 for (i = 0; i < 5; i++) {
7652 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7653 break;
7654 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7655 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7656 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7657 udelay(1000);
7658 }
7659 if (i >= 5) {
7660 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7661 "should be %08x\n", __func__,
7662 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7663 return -ENODEV;
7664 }
7665 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7666 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7667
7668 return 0;
7669 }
7670
7671 /* tp->lock is held. */
7672 static int tg3_load_tso_firmware(struct tg3 *tp)
7673 {
7674 struct fw_info info;
7675 const __be32 *fw_data;
7676 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7677 int err, i;
7678
7679 if (tg3_flag(tp, HW_TSO_1) ||
7680 tg3_flag(tp, HW_TSO_2) ||
7681 tg3_flag(tp, HW_TSO_3))
7682 return 0;
7683
7684 fw_data = (void *)tp->fw->data;
7685
7686 /* Firmware blob starts with version numbers, followed by
7687 * start address and length. We are setting complete length.
7688 * length = end_address_of_bss - start_address_of_text.
7689 * Remainder is the blob to be loaded contiguously
7690 * from start address. */
7691
7692 info.fw_base = be32_to_cpu(fw_data[1]);
7693 cpu_scratch_size = tp->fw_len;
7694 info.fw_len = tp->fw->size - 12;
7695 info.fw_data = &fw_data[3];
7696
7697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7698 cpu_base = RX_CPU_BASE;
7699 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7700 } else {
7701 cpu_base = TX_CPU_BASE;
7702 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7703 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7704 }
7705
7706 err = tg3_load_firmware_cpu(tp, cpu_base,
7707 cpu_scratch_base, cpu_scratch_size,
7708 &info);
7709 if (err)
7710 return err;
7711
7712 /* Now start up the cpu. */
7713 tw32(cpu_base + CPU_STATE, 0xffffffff);
7714 tw32_f(cpu_base + CPU_PC, info.fw_base);
7715
7716 for (i = 0; i < 5; i++) {
7717 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7718 break;
7719 tw32(cpu_base + CPU_STATE, 0xffffffff);
7720 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7721 tw32_f(cpu_base + CPU_PC, info.fw_base);
7722 udelay(1000);
7723 }
7724 if (i >= 5) {
7725 netdev_err(tp->dev,
7726 "%s fails to set CPU PC, is %08x should be %08x\n",
7727 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7728 return -ENODEV;
7729 }
7730 tw32(cpu_base + CPU_STATE, 0xffffffff);
7731 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7732 return 0;
7733 }
7734
7735
7736 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7737 {
7738 struct tg3 *tp = netdev_priv(dev);
7739 struct sockaddr *addr = p;
7740 int err = 0, skip_mac_1 = 0;
7741
7742 if (!is_valid_ether_addr(addr->sa_data))
7743 return -EINVAL;
7744
7745 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7746
7747 if (!netif_running(dev))
7748 return 0;
7749
7750 if (tg3_flag(tp, ENABLE_ASF)) {
7751 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7752
7753 addr0_high = tr32(MAC_ADDR_0_HIGH);
7754 addr0_low = tr32(MAC_ADDR_0_LOW);
7755 addr1_high = tr32(MAC_ADDR_1_HIGH);
7756 addr1_low = tr32(MAC_ADDR_1_LOW);
7757
7758 /* Skip MAC addr 1 if ASF is using it. */
7759 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7760 !(addr1_high == 0 && addr1_low == 0))
7761 skip_mac_1 = 1;
7762 }
7763 spin_lock_bh(&tp->lock);
7764 __tg3_set_mac_addr(tp, skip_mac_1);
7765 spin_unlock_bh(&tp->lock);
7766
7767 return err;
7768 }
7769
7770 /* tp->lock is held. */
7771 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7772 dma_addr_t mapping, u32 maxlen_flags,
7773 u32 nic_addr)
7774 {
7775 tg3_write_mem(tp,
7776 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7777 ((u64) mapping >> 32));
7778 tg3_write_mem(tp,
7779 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7780 ((u64) mapping & 0xffffffff));
7781 tg3_write_mem(tp,
7782 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7783 maxlen_flags);
7784
7785 if (!tg3_flag(tp, 5705_PLUS))
7786 tg3_write_mem(tp,
7787 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7788 nic_addr);
7789 }
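/* Editorial note (not part of the original source): a BDINFO block in NIC
 * SRAM is four 32-bit words -- host ring DMA address high and low, a
 * maxlen/flags word, and a NIC-local ring address (skipped on 5705+).
 * Disabling a ring, as tg3_rings_reset() does below, is therefore just a
 * write of BDINFO_FLAGS_DISABLED into the maxlen/flags word.
 */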
7790
7791 static void __tg3_set_rx_mode(struct net_device *);
7792 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7793 {
7794 int i;
7795
7796 if (!tg3_flag(tp, ENABLE_TSS)) {
7797 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7798 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7799 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7800 } else {
7801 tw32(HOSTCC_TXCOL_TICKS, 0);
7802 tw32(HOSTCC_TXMAX_FRAMES, 0);
7803 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7804 }
7805
7806 if (!tg3_flag(tp, ENABLE_RSS)) {
7807 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7808 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7809 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7810 } else {
7811 tw32(HOSTCC_RXCOL_TICKS, 0);
7812 tw32(HOSTCC_RXMAX_FRAMES, 0);
7813 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7814 }
7815
7816 if (!tg3_flag(tp, 5705_PLUS)) {
7817 u32 val = ec->stats_block_coalesce_usecs;
7818
7819 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7820 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7821
7822 if (!netif_carrier_ok(tp->dev))
7823 val = 0;
7824
7825 tw32(HOSTCC_STAT_COAL_TICKS, val);
7826 }
7827
7828 for (i = 0; i < tp->irq_cnt - 1; i++) {
7829 u32 reg;
7830
7831 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7832 tw32(reg, ec->rx_coalesce_usecs);
7833 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7834 tw32(reg, ec->rx_max_coalesced_frames);
7835 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7836 tw32(reg, ec->rx_max_coalesced_frames_irq);
7837
7838 if (tg3_flag(tp, ENABLE_TSS)) {
7839 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7840 tw32(reg, ec->tx_coalesce_usecs);
7841 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7842 tw32(reg, ec->tx_max_coalesced_frames);
7843 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7844 tw32(reg, ec->tx_max_coalesced_frames_irq);
7845 }
7846 }
7847
7848 for (; i < tp->irq_max - 1; i++) {
7849 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7850 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7851 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7852
7853 if (tg3_flag(tp, ENABLE_TSS)) {
7854 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7855 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7856 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7857 }
7858 }
7859 }
7860
7861 /* tp->lock is held. */
7862 static void tg3_rings_reset(struct tg3 *tp)
7863 {
7864 int i;
7865 u32 stblk, txrcb, rxrcb, limit;
7866 struct tg3_napi *tnapi = &tp->napi[0];
7867
7868 /* Disable all transmit rings but the first. */
7869 if (!tg3_flag(tp, 5705_PLUS))
7870 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7871 else if (tg3_flag(tp, 5717_PLUS))
7872 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7873 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7874 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7875 else
7876 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7877
7878 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7879 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7880 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7881 BDINFO_FLAGS_DISABLED);
7882
7884 /* Disable all receive return rings but the first. */
7885 if (tg3_flag(tp, 5717_PLUS))
7886 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7887 else if (!tg3_flag(tp, 5705_PLUS))
7888 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7889 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7891 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7892 else
7893 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7894
7895 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7896 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7897 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7898 BDINFO_FLAGS_DISABLED);
7899
7900 /* Disable interrupts */
7901 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7902 tp->napi[0].chk_msi_cnt = 0;
7903 tp->napi[0].last_rx_cons = 0;
7904 tp->napi[0].last_tx_cons = 0;
7905
7906 /* Zero mailbox registers. */
7907 if (tg3_flag(tp, SUPPORT_MSIX)) {
7908 for (i = 1; i < tp->irq_max; i++) {
7909 tp->napi[i].tx_prod = 0;
7910 tp->napi[i].tx_cons = 0;
7911 if (tg3_flag(tp, ENABLE_TSS))
7912 tw32_mailbox(tp->napi[i].prodmbox, 0);
7913 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7914 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7915 				tp->napi[i].chk_msi_cnt = 0;
7916 tp->napi[i].last_rx_cons = 0;
7917 tp->napi[i].last_tx_cons = 0;
7918 }
7919 if (!tg3_flag(tp, ENABLE_TSS))
7920 tw32_mailbox(tp->napi[0].prodmbox, 0);
7921 } else {
7922 tp->napi[0].tx_prod = 0;
7923 tp->napi[0].tx_cons = 0;
7924 tw32_mailbox(tp->napi[0].prodmbox, 0);
7925 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7926 }
7927
7928 /* Make sure the NIC-based send BD rings are disabled. */
7929 if (!tg3_flag(tp, 5705_PLUS)) {
7930 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7931 for (i = 0; i < 16; i++)
7932 tw32_tx_mbox(mbox + i * 8, 0);
7933 }
7934
7935 txrcb = NIC_SRAM_SEND_RCB;
7936 rxrcb = NIC_SRAM_RCV_RET_RCB;
7937
7938 /* Clear status block in ram. */
7939 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7940
7941 /* Set status block DMA address */
7942 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7943 ((u64) tnapi->status_mapping >> 32));
7944 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7945 ((u64) tnapi->status_mapping & 0xffffffff));
7946
7947 if (tnapi->tx_ring) {
7948 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7949 (TG3_TX_RING_SIZE <<
7950 BDINFO_FLAGS_MAXLEN_SHIFT),
7951 NIC_SRAM_TX_BUFFER_DESC);
7952 txrcb += TG3_BDINFO_SIZE;
7953 }
7954
7955 if (tnapi->rx_rcb) {
7956 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7957 (tp->rx_ret_ring_mask + 1) <<
7958 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7959 rxrcb += TG3_BDINFO_SIZE;
7960 }
7961
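	/* Now program the per-vector state: each extra MSI-X vector
	 * has its own status block DMA address register pair (8 bytes
	 * apart, starting at HOSTCC_STATBLCK_RING1) and its own send
	 * and receive return ring control blocks.
	 */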
7962 stblk = HOSTCC_STATBLCK_RING1;
7963
7964 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7965 u64 mapping = (u64)tnapi->status_mapping;
7966 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7967 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7968
7969 /* Clear status block in ram. */
7970 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7971
7972 if (tnapi->tx_ring) {
7973 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7974 (TG3_TX_RING_SIZE <<
7975 BDINFO_FLAGS_MAXLEN_SHIFT),
7976 NIC_SRAM_TX_BUFFER_DESC);
7977 txrcb += TG3_BDINFO_SIZE;
7978 }
7979
7980 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7981 ((tp->rx_ret_ring_mask + 1) <<
7982 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7983
7984 stblk += 8;
7985 rxrcb += TG3_BDINFO_SIZE;
7986 }
7987 }
7988
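/* Program the RX buffer descriptor replenish thresholds.  The NIC
 * keeps a small per-chip cache of standard (and jumbo) ring BDs on
 * board; the thresholds written here control when it DMAs a fresh
 * batch of descriptors from host memory.
 */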
7989 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7990 {
7991 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7992
7993 if (!tg3_flag(tp, 5750_PLUS) ||
7994 tg3_flag(tp, 5780_CLASS) ||
7995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7996 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7997 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7998 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8000 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8001 else
8002 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8003
8004 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8005 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8006
8007 val = min(nic_rep_thresh, host_rep_thresh);
8008 tw32(RCVBDI_STD_THRESH, val);
8009
8010 if (tg3_flag(tp, 57765_PLUS))
8011 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8012
8013 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8014 return;
8015
8016 if (!tg3_flag(tp, 5705_PLUS))
8017 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8018 else
8019 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8020
8021 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8022
8023 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8024 tw32(RCVBDI_JUMBO_THRESH, val);
8025
8026 if (tg3_flag(tp, 57765_PLUS))
8027 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8028 }
8029
8030 /* tp->lock is held. */
8031 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8032 {
8033 u32 val, rdmac_mode;
8034 int i, err, limit;
8035 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8036
8037 tg3_disable_ints(tp);
8038
8039 tg3_stop_fw(tp);
8040
8041 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8042
8043 if (tg3_flag(tp, INIT_COMPLETE))
8044 tg3_abort_hw(tp, 1);
8045
8046 /* Enable MAC control of LPI */
8047 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8048 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8049 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8050 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8051
8052 tw32_f(TG3_CPMU_EEE_CTRL,
8053 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8054
8055 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8056 TG3_CPMU_EEEMD_LPI_IN_TX |
8057 TG3_CPMU_EEEMD_LPI_IN_RX |
8058 TG3_CPMU_EEEMD_EEE_ENABLE;
8059
8060 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8061 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8062
8063 if (tg3_flag(tp, ENABLE_APE))
8064 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8065
8066 tw32_f(TG3_CPMU_EEE_MODE, val);
8067
8068 tw32_f(TG3_CPMU_EEE_DBTMR1,
8069 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8070 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8071
8072 tw32_f(TG3_CPMU_EEE_DBTMR2,
8073 TG3_CPMU_DBTMR2_APE_TX_2047US |
8074 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8075 }
8076
8077 if (reset_phy)
8078 tg3_phy_reset(tp);
8079
8080 err = tg3_chip_reset(tp);
8081 if (err)
8082 return err;
8083
8084 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8085
8086 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8087 val = tr32(TG3_CPMU_CTRL);
8088 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8089 tw32(TG3_CPMU_CTRL, val);
8090
8091 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8092 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8093 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8094 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8095
8096 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8097 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8098 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8099 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8100
8101 val = tr32(TG3_CPMU_HST_ACC);
8102 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8103 val |= CPMU_HST_ACC_MACCLK_6_25;
8104 tw32(TG3_CPMU_HST_ACC, val);
8105 }
8106
8107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8108 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8109 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8110 PCIE_PWR_MGMT_L1_THRESH_4MS;
8111 tw32(PCIE_PWR_MGMT_THRESH, val);
8112
8113 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8114 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8115
8116 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8117
8118 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8119 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8120 }
8121
8122 if (tg3_flag(tp, L1PLLPD_EN)) {
8123 u32 grc_mode = tr32(GRC_MODE);
8124
8125 /* Access the lower 1K of PL PCIE block registers. */
8126 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8127 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8128
8129 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8130 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8131 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8132
8133 tw32(GRC_MODE, grc_mode);
8134 }
8135
8136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8137 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8138 u32 grc_mode = tr32(GRC_MODE);
8139
8140 /* Access the lower 1K of PL PCIE block registers. */
8141 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8142 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8143
8144 val = tr32(TG3_PCIE_TLDLPL_PORT +
8145 TG3_PCIE_PL_LO_PHYCTL5);
8146 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8147 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8148
8149 tw32(GRC_MODE, grc_mode);
8150 }
8151
8152 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8153 u32 grc_mode = tr32(GRC_MODE);
8154
8155 /* Access the lower 1K of DL PCIE block registers. */
8156 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8157 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8158
8159 val = tr32(TG3_PCIE_TLDLPL_PORT +
8160 TG3_PCIE_DL_LO_FTSMAX);
8161 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8162 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8163 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8164
8165 tw32(GRC_MODE, grc_mode);
8166 }
8167
8168 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8169 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8170 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8171 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8172 }
8173
8174 /* This works around an issue with Athlon chipsets on
8175 * B3 tigon3 silicon. This bit has no effect on any
8176 * other revision. But do not set this on PCI Express
8177 * chips and don't even touch the clocks if the CPMU is present.
8178 */
8179 if (!tg3_flag(tp, CPMU_PRESENT)) {
8180 if (!tg3_flag(tp, PCI_EXPRESS))
8181 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8182 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8183 }
8184
8185 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8186 tg3_flag(tp, PCIX_MODE)) {
8187 val = tr32(TG3PCI_PCISTATE);
8188 val |= PCISTATE_RETRY_SAME_DMA;
8189 tw32(TG3PCI_PCISTATE, val);
8190 }
8191
8192 if (tg3_flag(tp, ENABLE_APE)) {
8193 /* Allow reads and writes to the
8194 * APE register and memory space.
8195 */
8196 val = tr32(TG3PCI_PCISTATE);
8197 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8198 PCISTATE_ALLOW_APE_SHMEM_WR |
8199 PCISTATE_ALLOW_APE_PSPACE_WR;
8200 tw32(TG3PCI_PCISTATE, val);
8201 }
8202
8203 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8204 /* Enable some hw fixes. */
8205 val = tr32(TG3PCI_MSI_DATA);
8206 val |= (1 << 26) | (1 << 28) | (1 << 29);
8207 tw32(TG3PCI_MSI_DATA, val);
8208 }
8209
8210 /* Descriptor ring init may make accesses to the
8211 * NIC SRAM area to setup the TX descriptors, so we
8212 * can only do this after the hardware has been
8213 * successfully reset.
8214 */
8215 err = tg3_init_rings(tp);
8216 if (err)
8217 return err;
8218
8219 if (tg3_flag(tp, 57765_PLUS)) {
8220 val = tr32(TG3PCI_DMA_RW_CTRL) &
8221 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8222 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8223 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8224 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8225 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8226 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8227 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8228 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8229 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8230 /* This value is determined during the probe time DMA
8231 * engine test, tg3_test_dma.
8232 */
8233 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8234 }
8235
8236 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8237 GRC_MODE_4X_NIC_SEND_RINGS |
8238 GRC_MODE_NO_TX_PHDR_CSUM |
8239 GRC_MODE_NO_RX_PHDR_CSUM);
8240 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8241
8242 /* Pseudo-header checksum is done by hardware logic and not
8243 	 * the offload processors, so make the chip do the pseudo-
8244 * header checksums on receive. For transmit it is more
8245 * convenient to do the pseudo-header checksum in software
8246 * as Linux does that on transmit for us in all cases.
8247 */
8248 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8249
8250 tw32(GRC_MODE,
8251 tp->grc_mode |
8252 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8253
8254 	/* Set up the timer prescaler register. Clock is always 66 MHz; 65 + 1 divides it down to a 1 MHz timer tick. */
8255 val = tr32(GRC_MISC_CFG);
8256 val &= ~0xff;
8257 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8258 tw32(GRC_MISC_CFG, val);
8259
8260 /* Initialize MBUF/DESC pool. */
8261 if (tg3_flag(tp, 5750_PLUS)) {
8262 /* Do nothing. */
8263 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8264 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8266 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8267 else
8268 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8269 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8270 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8271 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8272 int fw_len;
8273
8274 fw_len = tp->fw_len;
8275 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8276 tw32(BUFMGR_MB_POOL_ADDR,
8277 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8278 tw32(BUFMGR_MB_POOL_SIZE,
8279 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8280 }
8281
8282 if (tp->dev->mtu <= ETH_DATA_LEN) {
8283 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8284 tp->bufmgr_config.mbuf_read_dma_low_water);
8285 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8286 tp->bufmgr_config.mbuf_mac_rx_low_water);
8287 tw32(BUFMGR_MB_HIGH_WATER,
8288 tp->bufmgr_config.mbuf_high_water);
8289 } else {
8290 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8291 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8292 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8293 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8294 tw32(BUFMGR_MB_HIGH_WATER,
8295 tp->bufmgr_config.mbuf_high_water_jumbo);
8296 }
8297 tw32(BUFMGR_DMA_LOW_WATER,
8298 tp->bufmgr_config.dma_low_water);
8299 tw32(BUFMGR_DMA_HIGH_WATER,
8300 tp->bufmgr_config.dma_high_water);
8301
8302 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8304 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8305 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8306 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8307 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8308 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8309 tw32(BUFMGR_MODE, val);
8310 for (i = 0; i < 2000; i++) {
8311 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8312 break;
8313 udelay(10);
8314 }
8315 if (i >= 2000) {
8316 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8317 return -ENODEV;
8318 }
8319
8320 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8321 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8322
8323 tg3_setup_rxbd_thresholds(tp);
8324
8325 /* Initialize TG3_BDINFO's at:
8326 * RCVDBDI_STD_BD: standard eth size rx ring
8327 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8328 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8329 *
8330 * like so:
8331 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8332 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8333 * ring attribute flags
8334 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8335 *
8336 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8337 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8338 *
8339 * The size of each ring is fixed in the firmware, but the location is
8340 * configurable.
8341 */
8342 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8343 ((u64) tpr->rx_std_mapping >> 32));
8344 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8345 ((u64) tpr->rx_std_mapping & 0xffffffff));
8346 if (!tg3_flag(tp, 5717_PLUS))
8347 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8348 NIC_SRAM_RX_BUFFER_DESC);
8349
8350 /* Disable the mini ring */
8351 if (!tg3_flag(tp, 5705_PLUS))
8352 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8353 BDINFO_FLAGS_DISABLED);
8354
8355 /* Program the jumbo buffer descriptor ring control
8356 * blocks on those devices that have them.
8357 */
8358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8359 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8360
8361 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8362 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8363 ((u64) tpr->rx_jmb_mapping >> 32));
8364 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8365 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8366 val = TG3_RX_JMB_RING_SIZE(tp) <<
8367 BDINFO_FLAGS_MAXLEN_SHIFT;
8368 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8369 val | BDINFO_FLAGS_USE_EXT_RECV);
8370 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8372 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8373 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8374 } else {
8375 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8376 BDINFO_FLAGS_DISABLED);
8377 }
8378
8379 if (tg3_flag(tp, 57765_PLUS)) {
8380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8381 val = TG3_RX_STD_MAX_SIZE_5700;
8382 else
8383 val = TG3_RX_STD_MAX_SIZE_5717;
8384 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8385 val |= (TG3_RX_STD_DMA_SZ << 2);
8386 } else
8387 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8388 } else
8389 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8390
8391 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8392
8393 tpr->rx_std_prod_idx = tp->rx_pending;
8394 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8395
8396 tpr->rx_jmb_prod_idx =
8397 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8398 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8399
8400 tg3_rings_reset(tp);
8401
8402 /* Initialize MAC address and backoff seed. */
8403 __tg3_set_mac_addr(tp, 0);
8404
8405 /* MTU + ethernet header + FCS + optional VLAN tag */
8406 tw32(MAC_RX_MTU_SIZE,
8407 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8408
8409 /* The slot time is changed by tg3_setup_phy if we
8410 * run at gigabit with half duplex.
8411 */
8412 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8413 (6 << TX_LENGTHS_IPG_SHIFT) |
8414 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8415
8416 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8417 val |= tr32(MAC_TX_LENGTHS) &
8418 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8419 TX_LENGTHS_CNT_DWN_VAL_MSK);
8420
8421 tw32(MAC_TX_LENGTHS, val);
8422
8423 /* Receive rules. */
8424 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8425 tw32(RCVLPC_CONFIG, 0x0181);
8426
8427 	/* Calculate RDMAC_MODE setting early; we need it to determine
8428 	 * the RCVLPC_STATS_ENABLE mask.
8429 */
8430 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8431 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8432 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8433 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8434 RDMAC_MODE_LNGREAD_ENAB);
8435
8436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8437 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8438
8439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8440 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8442 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8443 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8444 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8445
8446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8447 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8448 if (tg3_flag(tp, TSO_CAPABLE) &&
8449 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8450 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8451 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8452 !tg3_flag(tp, IS_5788)) {
8453 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8454 }
8455 }
8456
8457 if (tg3_flag(tp, PCI_EXPRESS))
8458 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8459
8460 if (tg3_flag(tp, HW_TSO_1) ||
8461 tg3_flag(tp, HW_TSO_2) ||
8462 tg3_flag(tp, HW_TSO_3))
8463 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8464
8465 if (tg3_flag(tp, 57765_PLUS) ||
8466 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8467 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8468 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8469
8470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8471 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8472
8473 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8477 tg3_flag(tp, 57765_PLUS)) {
8478 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8479 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8480 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8481 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8482 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8483 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8484 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8485 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8486 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8487 }
8488 tw32(TG3_RDMA_RSRVCTRL_REG,
8489 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8490 }
8491
8492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8494 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8495 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8496 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8497 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8498 }
8499
8500 /* Receive/send statistics. */
8501 if (tg3_flag(tp, 5750_PLUS)) {
8502 val = tr32(RCVLPC_STATS_ENABLE);
8503 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8504 tw32(RCVLPC_STATS_ENABLE, val);
8505 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8506 tg3_flag(tp, TSO_CAPABLE)) {
8507 val = tr32(RCVLPC_STATS_ENABLE);
8508 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8509 tw32(RCVLPC_STATS_ENABLE, val);
8510 } else {
8511 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8512 }
8513 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8514 tw32(SNDDATAI_STATSENAB, 0xffffff);
8515 tw32(SNDDATAI_STATSCTRL,
8516 (SNDDATAI_SCTRL_ENABLE |
8517 SNDDATAI_SCTRL_FASTUPD));
8518
8519 /* Setup host coalescing engine. */
8520 tw32(HOSTCC_MODE, 0);
8521 for (i = 0; i < 2000; i++) {
8522 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8523 break;
8524 udelay(10);
8525 }
8526
8527 __tg3_set_coalesce(tp, &tp->coal);
8528
8529 if (!tg3_flag(tp, 5705_PLUS)) {
8530 /* Status/statistics block address. See tg3_timer,
8531 * the tg3_periodic_fetch_stats call there, and
8532 * tg3_get_stats to see how this works for 5705/5750 chips.
8533 */
8534 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8535 ((u64) tp->stats_mapping >> 32));
8536 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8537 ((u64) tp->stats_mapping & 0xffffffff));
8538 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8539
8540 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8541
8542 /* Clear statistics and status block memory areas */
8543 for (i = NIC_SRAM_STATS_BLK;
8544 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8545 i += sizeof(u32)) {
8546 tg3_write_mem(tp, i, 0);
8547 udelay(40);
8548 }
8549 }
8550
8551 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8552
8553 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8554 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8555 if (!tg3_flag(tp, 5705_PLUS))
8556 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8557
8558 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8559 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8560 /* reset to prevent losing 1st rx packet intermittently */
8561 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8562 udelay(10);
8563 }
8564
8565 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8566 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8567 MAC_MODE_FHDE_ENABLE;
8568 if (tg3_flag(tp, ENABLE_APE))
8569 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8570 if (!tg3_flag(tp, 5705_PLUS) &&
8571 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8572 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8573 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8574 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8575 udelay(40);
8576
8577 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8578 * If TG3_FLAG_IS_NIC is zero, we should read the
8579 * register to preserve the GPIO settings for LOMs. The GPIOs,
8580 * whether used as inputs or outputs, are set by boot code after
8581 * reset.
8582 */
8583 if (!tg3_flag(tp, IS_NIC)) {
8584 u32 gpio_mask;
8585
8586 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8587 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8588 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8589
8590 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8591 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8592 GRC_LCLCTRL_GPIO_OUTPUT3;
8593
8594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8595 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8596
8597 tp->grc_local_ctrl &= ~gpio_mask;
8598 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8599
8600 /* GPIO1 must be driven high for eeprom write protect */
8601 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8602 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8603 GRC_LCLCTRL_GPIO_OUTPUT1);
8604 }
8605 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8606 udelay(100);
8607
8608 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8609 val = tr32(MSGINT_MODE);
8610 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8611 tw32(MSGINT_MODE, val);
8612 }
8613
8614 if (!tg3_flag(tp, 5705_PLUS)) {
8615 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8616 udelay(40);
8617 }
8618
8619 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8620 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8621 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8622 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8623 WDMAC_MODE_LNGREAD_ENAB);
8624
8625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8626 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8627 if (tg3_flag(tp, TSO_CAPABLE) &&
8628 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8629 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8630 /* nothing */
8631 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8632 !tg3_flag(tp, IS_5788)) {
8633 val |= WDMAC_MODE_RX_ACCEL;
8634 }
8635 }
8636
8637 /* Enable host coalescing bug fix */
8638 if (tg3_flag(tp, 5755_PLUS))
8639 val |= WDMAC_MODE_STATUS_TAG_FIX;
8640
8641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8642 val |= WDMAC_MODE_BURST_ALL_DATA;
8643
8644 tw32_f(WDMAC_MODE, val);
8645 udelay(40);
8646
8647 if (tg3_flag(tp, PCIX_MODE)) {
8648 u16 pcix_cmd;
8649
8650 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8651 &pcix_cmd);
8652 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8653 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8654 pcix_cmd |= PCI_X_CMD_READ_2K;
8655 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8656 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8657 pcix_cmd |= PCI_X_CMD_READ_2K;
8658 }
8659 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8660 pcix_cmd);
8661 }
8662
8663 tw32_f(RDMAC_MODE, rdmac_mode);
8664 udelay(40);
8665
8666 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8667 if (!tg3_flag(tp, 5705_PLUS))
8668 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8669
8670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8671 tw32(SNDDATAC_MODE,
8672 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8673 else
8674 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8675
8676 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8677 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8678 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8679 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8680 val |= RCVDBDI_MODE_LRG_RING_SZ;
8681 tw32(RCVDBDI_MODE, val);
8682 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8683 if (tg3_flag(tp, HW_TSO_1) ||
8684 tg3_flag(tp, HW_TSO_2) ||
8685 tg3_flag(tp, HW_TSO_3))
8686 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8687 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8688 if (tg3_flag(tp, ENABLE_TSS))
8689 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8690 tw32(SNDBDI_MODE, val);
8691 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8692
8693 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8694 err = tg3_load_5701_a0_firmware_fix(tp);
8695 if (err)
8696 return err;
8697 }
8698
8699 if (tg3_flag(tp, TSO_CAPABLE)) {
8700 err = tg3_load_tso_firmware(tp);
8701 if (err)
8702 return err;
8703 }
8704
8705 tp->tx_mode = TX_MODE_ENABLE;
8706
8707 if (tg3_flag(tp, 5755_PLUS) ||
8708 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8709 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8710
8711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8712 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8713 tp->tx_mode &= ~val;
8714 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8715 }
8716
8717 tw32_f(MAC_TX_MODE, tp->tx_mode);
8718 udelay(100);
8719
8720 if (tg3_flag(tp, ENABLE_RSS)) {
8721 int i = 0;
8722 u32 reg = MAC_RSS_INDIR_TBL_0;
8723
8724 if (tp->irq_cnt == 2) {
8725 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8726 tw32(reg, 0x0);
8727 reg += 4;
8728 }
8729 } else {
8730 u32 val;
8731
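			/* Each 32-bit indirection table register packs
			 * eight 4-bit rx queue indices; distribute the
			 * irq_cnt - 1 rx queues across the table
			 * round-robin.
			 */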
8732 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8733 val = i % (tp->irq_cnt - 1);
8734 i++;
8735 for (; i % 8; i++) {
8736 val <<= 4;
8737 val |= (i % (tp->irq_cnt - 1));
8738 }
8739 tw32(reg, val);
8740 reg += 4;
8741 }
8742 }
8743
8744 /* Setup the "secret" hash key. */
8745 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8746 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8747 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8748 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8749 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8750 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8751 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8752 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8753 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8754 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8755 }
8756
8757 tp->rx_mode = RX_MODE_ENABLE;
8758 if (tg3_flag(tp, 5755_PLUS))
8759 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8760
8761 if (tg3_flag(tp, ENABLE_RSS))
8762 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8763 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8764 RX_MODE_RSS_IPV6_HASH_EN |
8765 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8766 RX_MODE_RSS_IPV4_HASH_EN |
8767 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8768
8769 tw32_f(MAC_RX_MODE, tp->rx_mode);
8770 udelay(10);
8771
8772 tw32(MAC_LED_CTRL, tp->led_ctrl);
8773
8774 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8775 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8776 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8777 udelay(10);
8778 }
8779 tw32_f(MAC_RX_MODE, tp->rx_mode);
8780 udelay(10);
8781
8782 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8783 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8784 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8785 /* Set drive transmission level to 1.2V */
8786 /* only if the signal pre-emphasis bit is not set */
8787 val = tr32(MAC_SERDES_CFG);
8788 val &= 0xfffff000;
8789 val |= 0x880;
8790 tw32(MAC_SERDES_CFG, val);
8791 }
8792 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8793 tw32(MAC_SERDES_CFG, 0x616000);
8794 }
8795
8796 /* Prevent chip from dropping frames when flow control
8797 * is enabled.
8798 */
8799 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8800 val = 1;
8801 else
8802 val = 2;
8803 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8804
8805 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8806 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8807 /* Use hardware link auto-negotiation */
8808 tg3_flag_set(tp, HW_AUTONEG);
8809 }
8810
8811 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8813 u32 tmp;
8814
8815 tmp = tr32(SERDES_RX_CTRL);
8816 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8817 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8818 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8819 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8820 }
8821
8822 if (!tg3_flag(tp, USE_PHYLIB)) {
8823 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8824 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8825 tp->link_config.speed = tp->link_config.orig_speed;
8826 tp->link_config.duplex = tp->link_config.orig_duplex;
8827 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8828 }
8829
8830 err = tg3_setup_phy(tp, 0);
8831 if (err)
8832 return err;
8833
8834 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8835 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8836 u32 tmp;
8837
8838 /* Clear CRC stats. */
8839 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8840 tg3_writephy(tp, MII_TG3_TEST1,
8841 tmp | MII_TG3_TEST1_CRC_EN);
8842 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8843 }
8844 }
8845 }
8846
8847 __tg3_set_rx_mode(tp->dev);
8848
8849 /* Initialize receive rules. */
8850 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8851 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8852 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8853 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8854
8855 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8856 limit = 8;
8857 else
8858 limit = 16;
8859 if (tg3_flag(tp, ENABLE_ASF))
8860 limit -= 4;
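	/* Disable every rule above the limit.  The case labels below
	 * fall through intentionally; rules 0 and 1 were programmed
	 * above, and rules 2 and 3 are deliberately left alone (note
	 * the commented-out cases).
	 */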
8861 switch (limit) {
8862 case 16:
8863 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8864 case 15:
8865 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8866 case 14:
8867 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8868 case 13:
8869 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8870 case 12:
8871 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8872 case 11:
8873 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8874 case 10:
8875 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8876 case 9:
8877 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8878 case 8:
8879 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8880 case 7:
8881 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8882 case 6:
8883 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8884 case 5:
8885 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8886 case 4:
8887 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8888 case 3:
8889 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8890 case 2:
8891 case 1:
8892
8893 default:
8894 break;
8895 }
8896
8897 if (tg3_flag(tp, ENABLE_APE))
8898 /* Write our heartbeat update interval to APE. */
8899 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8900 APE_HOST_HEARTBEAT_INT_DISABLE);
8901
8902 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8903
8904 return 0;
8905 }
8906
8907 /* Called at device open time to get the chip ready for
8908 * packet processing. Invoked with tp->lock held.
8909 */
8910 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8911 {
8912 tg3_switch_clocks(tp);
8913
8914 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8915
8916 return tg3_reset_hw(tp, reset_phy);
8917 }
8918
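/* Fold a 32-bit hardware statistics register into a 64-bit software
 * counter.  If adding the register value wraps the low word (caught
 * by the unsigned comparison), propagate a carry into the high word.
 */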
8919 #define TG3_STAT_ADD32(PSTAT, REG) \
8920 do { u32 __val = tr32(REG); \
8921 (PSTAT)->low += __val; \
8922 if ((PSTAT)->low < __val) \
8923 (PSTAT)->high += 1; \
8924 } while (0)
8925
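/* On 5705 and later chips the statistics block is not DMA'd to host
 * memory (see tg3_reset_hw), so the timer polls the 32-bit MAC
 * counters once a second and accumulates them into the 64-bit
 * software copies.
 */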
8926 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8927 {
8928 struct tg3_hw_stats *sp = tp->hw_stats;
8929
8930 if (!netif_carrier_ok(tp->dev))
8931 return;
8932
8933 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8934 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8935 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8936 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8937 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8938 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8939 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8940 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8941 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8942 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8943 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8944 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8945 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8946
8947 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8948 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8949 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8950 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8951 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8952 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8953 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8954 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8955 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8956 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8957 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8958 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8959 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8960 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8961
8962 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8963 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8964 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8965 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8966 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8967 } else {
8968 u32 val = tr32(HOSTCC_FLOW_ATTN);
8969 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8970 if (val) {
8971 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8972 sp->rx_discards.low += val;
8973 if (sp->rx_discards.low < val)
8974 sp->rx_discards.high += 1;
8975 }
8976 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8977 }
8978 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8979 }
8980
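/* Detect a lost MSI.  If a vector has work pending but its rx/tx
 * consumer indices have not moved since the last timer tick, assume
 * the interrupt went missing and rewrite the interrupt mailbox to
 * re-trigger it.  chk_msi_cnt allows one tick of grace first.
 */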
8981 static void tg3_chk_missed_msi(struct tg3 *tp)
8982 {
8983 u32 i;
8984
8985 for (i = 0; i < tp->irq_cnt; i++) {
8986 struct tg3_napi *tnapi = &tp->napi[i];
8987
8988 if (tg3_has_work(tnapi)) {
8989 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8990 tnapi->last_tx_cons == tnapi->tx_cons) {
8991 if (tnapi->chk_msi_cnt < 1) {
8992 tnapi->chk_msi_cnt++;
8993 return;
8994 }
8995 tw32_mailbox(tnapi->int_mbox,
8996 tnapi->last_tag << 24);
8997 }
8998 }
8999 tnapi->chk_msi_cnt = 0;
9000 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9001 tnapi->last_tx_cons = tnapi->tx_cons;
9002 }
9003 }
9004
9005 static void tg3_timer(unsigned long __opaque)
9006 {
9007 struct tg3 *tp = (struct tg3 *) __opaque;
9008
9009 if (tp->irq_sync)
9010 goto restart_timer;
9011
9012 spin_lock(&tp->lock);
9013
9014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9016 tg3_chk_missed_msi(tp);
9017
9018 if (!tg3_flag(tp, TAGGED_STATUS)) {
9019 		/* All of this garbage is because, when using non-tagged
9020 		 * IRQ status, the mailbox/status_block protocol the chip
9021 		 * uses with the cpu is race prone.
9022 */
9023 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9024 tw32(GRC_LOCAL_CTRL,
9025 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9026 } else {
9027 tw32(HOSTCC_MODE, tp->coalesce_mode |
9028 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9029 }
9030
9031 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9032 tg3_flag_set(tp, RESTART_TIMER);
9033 spin_unlock(&tp->lock);
9034 schedule_work(&tp->reset_task);
9035 return;
9036 }
9037 }
9038
9039 /* This part only runs once per second. */
9040 if (!--tp->timer_counter) {
9041 if (tg3_flag(tp, 5705_PLUS))
9042 tg3_periodic_fetch_stats(tp);
9043
9044 if (tp->setlpicnt && !--tp->setlpicnt)
9045 tg3_phy_eee_enable(tp);
9046
9047 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9048 u32 mac_stat;
9049 int phy_event;
9050
9051 mac_stat = tr32(MAC_STATUS);
9052
9053 phy_event = 0;
9054 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9055 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9056 phy_event = 1;
9057 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9058 phy_event = 1;
9059
9060 if (phy_event)
9061 tg3_setup_phy(tp, 0);
9062 } else if (tg3_flag(tp, POLL_SERDES)) {
9063 u32 mac_stat = tr32(MAC_STATUS);
9064 int need_setup = 0;
9065
9066 if (netif_carrier_ok(tp->dev) &&
9067 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9068 need_setup = 1;
9069 }
9070 if (!netif_carrier_ok(tp->dev) &&
9071 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9072 MAC_STATUS_SIGNAL_DET))) {
9073 need_setup = 1;
9074 }
9075 if (need_setup) {
9076 if (!tp->serdes_counter) {
9077 tw32_f(MAC_MODE,
9078 (tp->mac_mode &
9079 ~MAC_MODE_PORT_MODE_MASK));
9080 udelay(40);
9081 tw32_f(MAC_MODE, tp->mac_mode);
9082 udelay(40);
9083 }
9084 tg3_setup_phy(tp, 0);
9085 }
9086 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9087 tg3_flag(tp, 5780_CLASS)) {
9088 tg3_serdes_parallel_detect(tp);
9089 }
9090
9091 tp->timer_counter = tp->timer_multiplier;
9092 }
9093
9094 /* Heartbeat is only sent once every 2 seconds.
9095 *
9096 * The heartbeat is to tell the ASF firmware that the host
9097 * driver is still alive. In the event that the OS crashes,
9098 * ASF needs to reset the hardware to free up the FIFO space
9099 * that may be filled with rx packets destined for the host.
9100 * If the FIFO is full, ASF will no longer function properly.
9101 *
9102 * Unintended resets have been reported on real time kernels
9103 * where the timer doesn't run on time. Netpoll will also have
9104 	 * the same problem.
9105 *
9106 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9107 * to check the ring condition when the heartbeat is expiring
9108 * before doing the reset. This will prevent most unintended
9109 * resets.
9110 */
9111 if (!--tp->asf_counter) {
9112 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9113 tg3_wait_for_event_ack(tp);
9114
9115 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9116 FWCMD_NICDRV_ALIVE3);
9117 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9118 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9119 TG3_FW_UPDATE_TIMEOUT_SEC);
9120
9121 tg3_generate_fw_event(tp);
9122 }
9123 tp->asf_counter = tp->asf_multiplier;
9124 }
9125
9126 spin_unlock(&tp->lock);
9127
9128 restart_timer:
9129 tp->timer.expires = jiffies + tp->timer_offset;
9130 add_timer(&tp->timer);
9131 }
9132
9133 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9134 {
9135 irq_handler_t fn;
9136 unsigned long flags;
9137 char *name;
9138 struct tg3_napi *tnapi = &tp->napi[irq_num];
9139
9140 if (tp->irq_cnt == 1)
9141 name = tp->dev->name;
9142 else {
9143 name = &tnapi->irq_lbl[0];
9144 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9145 name[IFNAMSIZ-1] = 0;
9146 }
9147
9148 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9149 fn = tg3_msi;
9150 if (tg3_flag(tp, 1SHOT_MSI))
9151 fn = tg3_msi_1shot;
9152 flags = 0;
9153 } else {
9154 fn = tg3_interrupt;
9155 if (tg3_flag(tp, TAGGED_STATUS))
9156 fn = tg3_interrupt_tagged;
9157 flags = IRQF_SHARED;
9158 }
9159
9160 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9161 }
9162
9163 static int tg3_test_interrupt(struct tg3 *tp)
9164 {
9165 struct tg3_napi *tnapi = &tp->napi[0];
9166 struct net_device *dev = tp->dev;
9167 int err, i, intr_ok = 0;
9168 u32 val;
9169
9170 if (!netif_running(dev))
9171 return -ENODEV;
9172
9173 tg3_disable_ints(tp);
9174
9175 free_irq(tnapi->irq_vec, tnapi);
9176
9177 /*
9178 * Turn off MSI one shot mode. Otherwise this test has no
9179 * observable way to know whether the interrupt was delivered.
9180 */
9181 if (tg3_flag(tp, 57765_PLUS)) {
9182 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9183 tw32(MSGINT_MODE, val);
9184 }
9185
9186 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9187 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9188 if (err)
9189 return err;
9190
9191 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9192 tg3_enable_ints(tp);
9193
9194 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9195 tnapi->coal_now);
9196
9197 for (i = 0; i < 5; i++) {
9198 u32 int_mbox, misc_host_ctrl;
9199
9200 int_mbox = tr32_mailbox(tnapi->int_mbox);
9201 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9202
9203 if ((int_mbox != 0) ||
9204 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9205 intr_ok = 1;
9206 break;
9207 }
9208
9209 if (tg3_flag(tp, 57765_PLUS) &&
9210 tnapi->hw_status->status_tag != tnapi->last_tag)
9211 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9212
9213 msleep(10);
9214 }
9215
9216 tg3_disable_ints(tp);
9217
9218 free_irq(tnapi->irq_vec, tnapi);
9219
9220 err = tg3_request_irq(tp, 0);
9221
9222 if (err)
9223 return err;
9224
9225 if (intr_ok) {
9226 /* Reenable MSI one shot mode. */
9227 if (tg3_flag(tp, 57765_PLUS)) {
9228 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9229 tw32(MSGINT_MODE, val);
9230 }
9231 return 0;
9232 }
9233
9234 return -EIO;
9235 }
9236
9237 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
9238  * but INTx mode is successfully restored.
9239 */
9240 static int tg3_test_msi(struct tg3 *tp)
9241 {
9242 int err;
9243 u16 pci_cmd;
9244
9245 if (!tg3_flag(tp, USING_MSI))
9246 return 0;
9247
9248 /* Turn off SERR reporting in case MSI terminates with Master
9249 * Abort.
9250 */
9251 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9252 pci_write_config_word(tp->pdev, PCI_COMMAND,
9253 pci_cmd & ~PCI_COMMAND_SERR);
9254
9255 err = tg3_test_interrupt(tp);
9256
9257 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9258
9259 if (!err)
9260 return 0;
9261
9262 /* other failures */
9263 if (err != -EIO)
9264 return err;
9265
9266 /* MSI test failed, go back to INTx mode */
9267 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9268 "to INTx mode. Please report this failure to the PCI "
9269 "maintainer and include system chipset information\n");
9270
9271 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9272
9273 pci_disable_msi(tp->pdev);
9274
9275 tg3_flag_clear(tp, USING_MSI);
9276 tp->napi[0].irq_vec = tp->pdev->irq;
9277
9278 err = tg3_request_irq(tp, 0);
9279 if (err)
9280 return err;
9281
9282 /* Need to reset the chip because the MSI cycle may have terminated
9283 * with Master Abort.
9284 */
9285 tg3_full_lock(tp, 1);
9286
9287 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9288 err = tg3_init_hw(tp, 1);
9289
9290 tg3_full_unlock(tp);
9291
9292 if (err)
9293 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9294
9295 return err;
9296 }
9297
9298 static int tg3_request_firmware(struct tg3 *tp)
9299 {
9300 const __be32 *fw_data;
9301
9302 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9303 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9304 tp->fw_needed);
9305 return -ENOENT;
9306 }
9307
9308 fw_data = (void *)tp->fw->data;
9309
9310 /* Firmware blob starts with version numbers, followed by
9311 * start address and _full_ length including BSS sections
9312 	 * (which must be longer than the actual data, of course).
9313 */
9314
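	/* As interpreted here and by the firmware loaders, the header
	 * is three big-endian words:
	 *   fw_data[0]: firmware version
	 *   fw_data[1]: load address in NIC SRAM
	 *   fw_data[2]: image length in bytes, including BSS
	 */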
9315 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9316 if (tp->fw_len < (tp->fw->size - 12)) {
9317 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9318 tp->fw_len, tp->fw_needed);
9319 release_firmware(tp->fw);
9320 tp->fw = NULL;
9321 return -EINVAL;
9322 }
9323
9324 /* We no longer need firmware; we have it. */
9325 tp->fw_needed = NULL;
9326 return 0;
9327 }
9328
9329 static bool tg3_enable_msix(struct tg3 *tp)
9330 {
9331 int i, rc, cpus = num_online_cpus();
9332 struct msix_entry msix_ent[tp->irq_max];
9333
9334 if (cpus == 1)
9335 		/* Just fall back to the simpler MSI mode. */
9336 return false;
9337
9338 /*
9339 * We want as many rx rings enabled as there are cpus.
9340 * The first MSIX vector only deals with link interrupts, etc,
9341 * so we add one to the number of vectors we are requesting.
9342 */
9343 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9344
9345 for (i = 0; i < tp->irq_max; i++) {
9346 msix_ent[i].entry = i;
9347 msix_ent[i].vector = 0;
9348 }
9349
9350 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9351 if (rc < 0) {
9352 return false;
9353 } else if (rc != 0) {
9354 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9355 return false;
9356 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9357 tp->irq_cnt, rc);
9358 tp->irq_cnt = rc;
9359 }
9360
9361 for (i = 0; i < tp->irq_max; i++)
9362 tp->napi[i].irq_vec = msix_ent[i].vector;
9363
9364 netif_set_real_num_tx_queues(tp->dev, 1);
9365 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9366 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9367 pci_disable_msix(tp->pdev);
9368 return false;
9369 }
9370
9371 if (tp->irq_cnt > 1) {
9372 tg3_flag_set(tp, ENABLE_RSS);
9373
9374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9375 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9376 tg3_flag_set(tp, ENABLE_TSS);
9377 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9378 }
9379 }
9380
9381 return true;
9382 }
9383
9384 static void tg3_ints_init(struct tg3 *tp)
9385 {
9386 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9387 !tg3_flag(tp, TAGGED_STATUS)) {
9388 /* All MSI supporting chips should support tagged
9389 * status. Assert that this is the case.
9390 */
9391 netdev_warn(tp->dev,
9392 "MSI without TAGGED_STATUS? Not using MSI\n");
9393 goto defcfg;
9394 }
9395
9396 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9397 tg3_flag_set(tp, USING_MSIX);
9398 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9399 tg3_flag_set(tp, USING_MSI);
9400
9401 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9402 u32 msi_mode = tr32(MSGINT_MODE);
9403 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9404 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9405 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9406 }
9407 defcfg:
9408 if (!tg3_flag(tp, USING_MSIX)) {
9409 tp->irq_cnt = 1;
9410 tp->napi[0].irq_vec = tp->pdev->irq;
9411 netif_set_real_num_tx_queues(tp->dev, 1);
9412 netif_set_real_num_rx_queues(tp->dev, 1);
9413 }
9414 }
9415
9416 static void tg3_ints_fini(struct tg3 *tp)
9417 {
9418 if (tg3_flag(tp, USING_MSIX))
9419 pci_disable_msix(tp->pdev);
9420 else if (tg3_flag(tp, USING_MSI))
9421 pci_disable_msi(tp->pdev);
9422 tg3_flag_clear(tp, USING_MSI);
9423 tg3_flag_clear(tp, USING_MSIX);
9424 tg3_flag_clear(tp, ENABLE_RSS);
9425 tg3_flag_clear(tp, ENABLE_TSS);
9426 }
9427
9428 static int tg3_open(struct net_device *dev)
9429 {
9430 struct tg3 *tp = netdev_priv(dev);
9431 int i, err;
9432
9433 if (tp->fw_needed) {
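		/* The 5701 A0 workaround firmware is mandatory, so a
		 * load failure there is fatal.  For all other chips
		 * only the TSO firmware is at stake, so failure just
		 * means running with TSO disabled.
		 */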
9434 err = tg3_request_firmware(tp);
9435 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9436 if (err)
9437 return err;
9438 } else if (err) {
9439 netdev_warn(tp->dev, "TSO capability disabled\n");
9440 tg3_flag_clear(tp, TSO_CAPABLE);
9441 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9442 netdev_notice(tp->dev, "TSO capability restored\n");
9443 tg3_flag_set(tp, TSO_CAPABLE);
9444 }
9445 }
9446
9447 netif_carrier_off(tp->dev);
9448
9449 err = tg3_power_up(tp);
9450 if (err)
9451 return err;
9452
9453 tg3_full_lock(tp, 0);
9454
9455 tg3_disable_ints(tp);
9456 tg3_flag_clear(tp, INIT_COMPLETE);
9457
9458 tg3_full_unlock(tp);
9459
9460 /*
9461 * Setup interrupts first so we know how
9462 * many NAPI resources to allocate
9463 */
9464 tg3_ints_init(tp);
9465
9466 /* The placement of this call is tied
9467 * to the setup and use of Host TX descriptors.
9468 */
9469 err = tg3_alloc_consistent(tp);
9470 if (err)
9471 goto err_out1;
9472
9473 tg3_napi_init(tp);
9474
9475 tg3_napi_enable(tp);
9476
9477 for (i = 0; i < tp->irq_cnt; i++) {
9478 struct tg3_napi *tnapi = &tp->napi[i];
9479 err = tg3_request_irq(tp, i);
9480 if (err) {
9481 			for (i--; i >= 0; i--)
9482 				free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9483 break;
9484 }
9485 }
9486
9487 if (err)
9488 goto err_out2;
9489
9490 tg3_full_lock(tp, 0);
9491
9492 err = tg3_init_hw(tp, 1);
9493 if (err) {
9494 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9495 tg3_free_rings(tp);
9496 } else {
9497 if (tg3_flag(tp, TAGGED_STATUS) &&
9498 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9499 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9500 tp->timer_offset = HZ;
9501 else
9502 tp->timer_offset = HZ / 10;
9503
9504 BUG_ON(tp->timer_offset > HZ);
9505 tp->timer_counter = tp->timer_multiplier =
9506 (HZ / tp->timer_offset);
9507 tp->asf_counter = tp->asf_multiplier =
9508 ((HZ / tp->timer_offset) * 2);
9509
9510 init_timer(&tp->timer);
9511 tp->timer.expires = jiffies + tp->timer_offset;
9512 tp->timer.data = (unsigned long) tp;
9513 tp->timer.function = tg3_timer;
9514 }
9515
9516 tg3_full_unlock(tp);
9517
9518 if (err)
9519 goto err_out3;
9520
9521 if (tg3_flag(tp, USING_MSI)) {
9522 err = tg3_test_msi(tp);
9523
9524 if (err) {
9525 tg3_full_lock(tp, 0);
9526 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9527 tg3_free_rings(tp);
9528 tg3_full_unlock(tp);
9529
9530 goto err_out2;
9531 }
9532
9533 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9534 u32 val = tr32(PCIE_TRANSACTION_CFG);
9535
9536 tw32(PCIE_TRANSACTION_CFG,
9537 val | PCIE_TRANS_CFG_1SHOT_MSI);
9538 }
9539 }
9540
9541 tg3_phy_start(tp);
9542
9543 tg3_full_lock(tp, 0);
9544
9545 add_timer(&tp->timer);
9546 tg3_flag_set(tp, INIT_COMPLETE);
9547 tg3_enable_ints(tp);
9548
9549 tg3_full_unlock(tp);
9550
9551 netif_tx_start_all_queues(dev);
9552
9553 /*
9554 	 * Reset the loopback feature if it was turned on while the device
9555 	 * was down; make sure that it is installed properly now.
9556 */
9557 if (dev->features & NETIF_F_LOOPBACK)
9558 tg3_set_loopback(dev, dev->features);
9559
9560 return 0;
9561
9562 err_out3:
9563 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9564 struct tg3_napi *tnapi = &tp->napi[i];
9565 free_irq(tnapi->irq_vec, tnapi);
9566 }
9567
9568 err_out2:
9569 tg3_napi_disable(tp);
9570 tg3_napi_fini(tp);
9571 tg3_free_consistent(tp);
9572
9573 err_out1:
9574 tg3_ints_fini(tp);
9575 tg3_frob_aux_power(tp, false);
9576 pci_set_power_state(tp->pdev, PCI_D3hot);
9577 return err;
9578 }
9579
9580 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9581 struct rtnl_link_stats64 *);
9582 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9583
9584 static int tg3_close(struct net_device *dev)
9585 {
9586 int i;
9587 struct tg3 *tp = netdev_priv(dev);
9588
9589 tg3_napi_disable(tp);
9590 cancel_work_sync(&tp->reset_task);
9591
9592 netif_tx_stop_all_queues(dev);
9593
9594 del_timer_sync(&tp->timer);
9595
9596 tg3_phy_stop(tp);
9597
9598 tg3_full_lock(tp, 1);
9599
9600 tg3_disable_ints(tp);
9601
9602 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9603 tg3_free_rings(tp);
9604 tg3_flag_clear(tp, INIT_COMPLETE);
9605
9606 tg3_full_unlock(tp);
9607
9608 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9609 struct tg3_napi *tnapi = &tp->napi[i];
9610 free_irq(tnapi->irq_vec, tnapi);
9611 }
9612
9613 tg3_ints_fini(tp);
9614
9615 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9616
9617 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9618 sizeof(tp->estats_prev));
9619
9620 tg3_napi_fini(tp);
9621
9622 tg3_free_consistent(tp);
9623
9624 tg3_power_down(tp);
9625
9626 netif_carrier_off(tp->dev);
9627
9628 return 0;
9629 }
9630
9631 static inline u64 get_stat64(tg3_stat64_t *val)
9632 {
9633 return ((u64)val->high << 32) | ((u64)val->low);
9634 }
9635
9636 static u64 calc_crc_errors(struct tg3 *tp)
9637 {
9638 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9639
9640 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9641 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9643 u32 val;
9644
9645 spin_lock_bh(&tp->lock);
9646 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9647 tg3_writephy(tp, MII_TG3_TEST1,
9648 val | MII_TG3_TEST1_CRC_EN);
9649 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9650 } else
9651 val = 0;
9652 spin_unlock_bh(&tp->lock);
9653
9654 tp->phy_crc_errors += val;
9655
9656 return tp->phy_crc_errors;
9657 }
9658
9659 return get_stat64(&hw_stats->rx_fcs_errors);
9660 }
9661
9662 #define ESTAT_ADD(member) \
9663 estats->member = old_estats->member + \
9664 get_stat64(&hw_stats->member)
9665
9666 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9667 {
9668 struct tg3_ethtool_stats *estats = &tp->estats;
9669 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9670 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9671
9672 if (!hw_stats)
9673 return old_estats;
9674
9675 ESTAT_ADD(rx_octets);
9676 ESTAT_ADD(rx_fragments);
9677 ESTAT_ADD(rx_ucast_packets);
9678 ESTAT_ADD(rx_mcast_packets);
9679 ESTAT_ADD(rx_bcast_packets);
9680 ESTAT_ADD(rx_fcs_errors);
9681 ESTAT_ADD(rx_align_errors);
9682 ESTAT_ADD(rx_xon_pause_rcvd);
9683 ESTAT_ADD(rx_xoff_pause_rcvd);
9684 ESTAT_ADD(rx_mac_ctrl_rcvd);
9685 ESTAT_ADD(rx_xoff_entered);
9686 ESTAT_ADD(rx_frame_too_long_errors);
9687 ESTAT_ADD(rx_jabbers);
9688 ESTAT_ADD(rx_undersize_packets);
9689 ESTAT_ADD(rx_in_length_errors);
9690 ESTAT_ADD(rx_out_length_errors);
9691 ESTAT_ADD(rx_64_or_less_octet_packets);
9692 ESTAT_ADD(rx_65_to_127_octet_packets);
9693 ESTAT_ADD(rx_128_to_255_octet_packets);
9694 ESTAT_ADD(rx_256_to_511_octet_packets);
9695 ESTAT_ADD(rx_512_to_1023_octet_packets);
9696 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9697 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9698 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9699 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9700 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9701
9702 ESTAT_ADD(tx_octets);
9703 ESTAT_ADD(tx_collisions);
9704 ESTAT_ADD(tx_xon_sent);
9705 ESTAT_ADD(tx_xoff_sent);
9706 ESTAT_ADD(tx_flow_control);
9707 ESTAT_ADD(tx_mac_errors);
9708 ESTAT_ADD(tx_single_collisions);
9709 ESTAT_ADD(tx_mult_collisions);
9710 ESTAT_ADD(tx_deferred);
9711 ESTAT_ADD(tx_excessive_collisions);
9712 ESTAT_ADD(tx_late_collisions);
9713 ESTAT_ADD(tx_collide_2times);
9714 ESTAT_ADD(tx_collide_3times);
9715 ESTAT_ADD(tx_collide_4times);
9716 ESTAT_ADD(tx_collide_5times);
9717 ESTAT_ADD(tx_collide_6times);
9718 ESTAT_ADD(tx_collide_7times);
9719 ESTAT_ADD(tx_collide_8times);
9720 ESTAT_ADD(tx_collide_9times);
9721 ESTAT_ADD(tx_collide_10times);
9722 ESTAT_ADD(tx_collide_11times);
9723 ESTAT_ADD(tx_collide_12times);
9724 ESTAT_ADD(tx_collide_13times);
9725 ESTAT_ADD(tx_collide_14times);
9726 ESTAT_ADD(tx_collide_15times);
9727 ESTAT_ADD(tx_ucast_packets);
9728 ESTAT_ADD(tx_mcast_packets);
9729 ESTAT_ADD(tx_bcast_packets);
9730 ESTAT_ADD(tx_carrier_sense_errors);
9731 ESTAT_ADD(tx_discards);
9732 ESTAT_ADD(tx_errors);
9733
9734 ESTAT_ADD(dma_writeq_full);
9735 ESTAT_ADD(dma_write_prioq_full);
9736 ESTAT_ADD(rxbds_empty);
9737 ESTAT_ADD(rx_discards);
9738 ESTAT_ADD(rx_errors);
9739 ESTAT_ADD(rx_threshold_hit);
9740
9741 ESTAT_ADD(dma_readq_full);
9742 ESTAT_ADD(dma_read_prioq_full);
9743 ESTAT_ADD(tx_comp_queue_full);
9744
9745 ESTAT_ADD(ring_set_send_prod_index);
9746 ESTAT_ADD(ring_status_update);
9747 ESTAT_ADD(nic_irqs);
9748 ESTAT_ADD(nic_avoided_irqs);
9749 ESTAT_ADD(nic_tx_threshold_hit);
9750
9751 ESTAT_ADD(mbuf_lwm_thresh_hit);
9752
9753 return estats;
9754 }
9755
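/*
 * The hardware counters reset whenever the chip does, so the totals
 * accumulated before the last close/reset (net_stats_prev, saved in
 * tg3_close()) are added to the live counters.
 */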
9756 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9757 struct rtnl_link_stats64 *stats)
9758 {
9759 struct tg3 *tp = netdev_priv(dev);
9760 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9761 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9762
9763 if (!hw_stats)
9764 return old_stats;
9765
9766 stats->rx_packets = old_stats->rx_packets +
9767 get_stat64(&hw_stats->rx_ucast_packets) +
9768 get_stat64(&hw_stats->rx_mcast_packets) +
9769 get_stat64(&hw_stats->rx_bcast_packets);
9770
9771 stats->tx_packets = old_stats->tx_packets +
9772 get_stat64(&hw_stats->tx_ucast_packets) +
9773 get_stat64(&hw_stats->tx_mcast_packets) +
9774 get_stat64(&hw_stats->tx_bcast_packets);
9775
9776 stats->rx_bytes = old_stats->rx_bytes +
9777 get_stat64(&hw_stats->rx_octets);
9778 stats->tx_bytes = old_stats->tx_bytes +
9779 get_stat64(&hw_stats->tx_octets);
9780
9781 stats->rx_errors = old_stats->rx_errors +
9782 get_stat64(&hw_stats->rx_errors);
9783 stats->tx_errors = old_stats->tx_errors +
9784 get_stat64(&hw_stats->tx_errors) +
9785 get_stat64(&hw_stats->tx_mac_errors) +
9786 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9787 get_stat64(&hw_stats->tx_discards);
9788
9789 stats->multicast = old_stats->multicast +
9790 get_stat64(&hw_stats->rx_mcast_packets);
9791 stats->collisions = old_stats->collisions +
9792 get_stat64(&hw_stats->tx_collisions);
9793
9794 stats->rx_length_errors = old_stats->rx_length_errors +
9795 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9796 get_stat64(&hw_stats->rx_undersize_packets);
9797
9798 stats->rx_over_errors = old_stats->rx_over_errors +
9799 get_stat64(&hw_stats->rxbds_empty);
9800 stats->rx_frame_errors = old_stats->rx_frame_errors +
9801 get_stat64(&hw_stats->rx_align_errors);
9802 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9803 get_stat64(&hw_stats->tx_discards);
9804 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9805 get_stat64(&hw_stats->tx_carrier_sense_errors);
9806
9807 stats->rx_crc_errors = old_stats->rx_crc_errors +
9808 calc_crc_errors(tp);
9809
9810 stats->rx_missed_errors = old_stats->rx_missed_errors +
9811 get_stat64(&hw_stats->rx_discards);
9812
9813 stats->rx_dropped = tp->rx_dropped;
9814
9815 return stats;
9816 }
9817
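/*
 * Bitwise, LSB-first CRC-32 over the buffer: initialize to all ones,
 * shift one bit at a time against the reflected polynomial
 * 0xedb88320, and complement the result.  This is the same CRC-32 the
 * Ethernet FCS uses; __tg3_set_rx_mode() hashes multicast addresses
 * with it.
 */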
9818 static inline u32 calc_crc(unsigned char *buf, int len)
9819 {
9820 u32 reg;
9821 u32 tmp;
9822 int j, k;
9823
9824 reg = 0xffffffff;
9825
9826 for (j = 0; j < len; j++) {
9827 reg ^= buf[j];
9828
9829 for (k = 0; k < 8; k++) {
9830 tmp = reg & 0x01;
9831
9832 reg >>= 1;
9833
9834 if (tmp)
9835 reg ^= 0xedb88320;
9836 }
9837 }
9838
9839 return ~reg;
9840 }
9841
9842 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9843 {
9844 /* accept or reject all multicast frames */
9845 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9846 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9847 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9848 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9849 }
9850
9851 static void __tg3_set_rx_mode(struct net_device *dev)
9852 {
9853 struct tg3 *tp = netdev_priv(dev);
9854 u32 rx_mode;
9855
9856 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9857 RX_MODE_KEEP_VLAN_TAG);
9858
9859 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9860 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9861 * flag clear.
9862 */
9863 if (!tg3_flag(tp, ENABLE_ASF))
9864 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9865 #endif
9866
9867 if (dev->flags & IFF_PROMISC) {
9868 /* Promiscuous mode. */
9869 rx_mode |= RX_MODE_PROMISC;
9870 } else if (dev->flags & IFF_ALLMULTI) {
9871 /* Accept all multicast. */
9872 tg3_set_multi(tp, 1);
9873 } else if (netdev_mc_empty(dev)) {
9874 /* Reject all multicast. */
9875 tg3_set_multi(tp, 0);
9876 } else {
9877 /* Accept one or more multicast(s). */
9878 struct netdev_hw_addr *ha;
9879 u32 mc_filter[4] = { 0, };
9880 u32 regidx;
9881 u32 bit;
9882 u32 crc;
9883
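/*
 * The low 7 bits of the complemented CRC pick one of the 128
 * hash-filter bits: bits 6:5 select one of the four 32-bit
 * MAC_HASH_REG registers and bits 4:0 the bit within it.  For
 * example, ~crc & 0x7f == 0x47 sets bit 7 of MAC_HASH_REG_2.
 */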
9884 netdev_for_each_mc_addr(ha, dev) {
9885 crc = calc_crc(ha->addr, ETH_ALEN);
9886 bit = ~crc & 0x7f;
9887 regidx = (bit & 0x60) >> 5;
9888 bit &= 0x1f;
9889 mc_filter[regidx] |= (1 << bit);
9890 }
9891
9892 tw32(MAC_HASH_REG_0, mc_filter[0]);
9893 tw32(MAC_HASH_REG_1, mc_filter[1]);
9894 tw32(MAC_HASH_REG_2, mc_filter[2]);
9895 tw32(MAC_HASH_REG_3, mc_filter[3]);
9896 }
9897
9898 if (rx_mode != tp->rx_mode) {
9899 tp->rx_mode = rx_mode;
9900 tw32_f(MAC_RX_MODE, rx_mode);
9901 udelay(10);
9902 }
9903 }
9904
9905 static void tg3_set_rx_mode(struct net_device *dev)
9906 {
9907 struct tg3 *tp = netdev_priv(dev);
9908
9909 if (!netif_running(dev))
9910 return;
9911
9912 tg3_full_lock(tp, 0);
9913 __tg3_set_rx_mode(dev);
9914 tg3_full_unlock(tp);
9915 }
9916
9917 static int tg3_get_regs_len(struct net_device *dev)
9918 {
9919 return TG3_REG_BLK_SIZE;
9920 }
9921
9922 static void tg3_get_regs(struct net_device *dev,
9923 struct ethtool_regs *regs, void *_p)
9924 {
9925 struct tg3 *tp = netdev_priv(dev);
9926
9927 regs->version = 0;
9928
9929 memset(_p, 0, TG3_REG_BLK_SIZE);
9930
9931 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9932 return;
9933
9934 tg3_full_lock(tp, 0);
9935
9936 tg3_dump_legacy_regs(tp, (u32 *)_p);
9937
9938 tg3_full_unlock(tp);
9939 }
9940
9941 static int tg3_get_eeprom_len(struct net_device *dev)
9942 {
9943 struct tg3 *tp = netdev_priv(dev);
9944
9945 return tp->nvram_size;
9946 }
9947
9948 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9949 {
9950 struct tg3 *tp = netdev_priv(dev);
9951 int ret;
9952 u8 *pd;
9953 u32 i, offset, len, b_offset, b_count;
9954 __be32 val;
9955
9956 if (tg3_flag(tp, NO_NVRAM))
9957 return -EINVAL;
9958
9959 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9960 return -EAGAIN;
9961
9962 offset = eeprom->offset;
9963 len = eeprom->len;
9964 eeprom->len = 0;
9965
9966 eeprom->magic = TG3_EEPROM_MAGIC;
9967
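/*
 * NVRAM reads are 32-bit aligned.  Handle an arbitrary (offset, len)
 * request in three phases: a partial word at the head, whole words in
 * the middle, and a partial word at the tail.
 */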
9968 if (offset & 3) {
9969 /* adjustments to start on required 4 byte boundary */
9970 b_offset = offset & 3;
9971 b_count = 4 - b_offset;
9972 if (b_count > len) {
9973 /* i.e. offset=1 len=2 */
9974 b_count = len;
9975 }
9976 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9977 if (ret)
9978 return ret;
9979 memcpy(data, ((char *)&val) + b_offset, b_count);
9980 len -= b_count;
9981 offset += b_count;
9982 eeprom->len += b_count;
9983 }
9984
9985 /* read bytes up to the last 4 byte boundary */
9986 pd = &data[eeprom->len];
9987 for (i = 0; i < (len - (len & 3)); i += 4) {
9988 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9989 if (ret) {
9990 eeprom->len += i;
9991 return ret;
9992 }
9993 memcpy(pd + i, &val, 4);
9994 }
9995 eeprom->len += i;
9996
9997 if (len & 3) {
9998 /* read last bytes not ending on 4 byte boundary */
9999 pd = &data[eeprom->len];
10000 b_count = len & 3;
10001 b_offset = offset + len - b_count;
10002 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10003 if (ret)
10004 return ret;
10005 memcpy(pd, &val, b_count);
10006 eeprom->len += b_count;
10007 }
10008 return 0;
10009 }
10010
10011 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10012
10013 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10014 {
10015 struct tg3 *tp = netdev_priv(dev);
10016 int ret;
10017 u32 offset, len, b_offset, odd_len;
10018 u8 *buf;
10019 __be32 start, end;
10020
10021 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10022 return -EAGAIN;
10023
10024 if (tg3_flag(tp, NO_NVRAM) ||
10025 eeprom->magic != TG3_EEPROM_MAGIC)
10026 return -EINVAL;
10027
10028 offset = eeprom->offset;
10029 len = eeprom->len;
10030
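/*
 * NVRAM writes are word-based too.  If the request is not 4-byte
 * aligned at either end, read the bordering words first and splice
 * the user data into a scratch buffer (read-modify-write).
 */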
10031 if ((b_offset = (offset & 3))) {
10032 /* adjustments to start on required 4 byte boundary */
10033 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10034 if (ret)
10035 return ret;
10036 len += b_offset;
10037 offset &= ~3;
10038 if (len < 4)
10039 len = 4;
10040 }
10041
10042 odd_len = 0;
10043 if (len & 3) {
10044 /* adjustments to end on required 4 byte boundary */
10045 odd_len = 1;
10046 len = (len + 3) & ~3;
10047 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10048 if (ret)
10049 return ret;
10050 }
10051
10052 buf = data;
10053 if (b_offset || odd_len) {
10054 buf = kmalloc(len, GFP_KERNEL);
10055 if (!buf)
10056 return -ENOMEM;
10057 if (b_offset)
10058 memcpy(buf, &start, 4);
10059 if (odd_len)
10060 memcpy(buf+len-4, &end, 4);
10061 memcpy(buf + b_offset, data, eeprom->len);
10062 }
10063
10064 ret = tg3_nvram_write_block(tp, offset, len, buf);
10065
10066 if (buf != data)
10067 kfree(buf);
10068
10069 return ret;
10070 }
10071
10072 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10073 {
10074 struct tg3 *tp = netdev_priv(dev);
10075
10076 if (tg3_flag(tp, USE_PHYLIB)) {
10077 struct phy_device *phydev;
10078 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10079 return -EAGAIN;
10080 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10081 return phy_ethtool_gset(phydev, cmd);
10082 }
10083
10084 cmd->supported = (SUPPORTED_Autoneg);
10085
10086 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10087 cmd->supported |= (SUPPORTED_1000baseT_Half |
10088 SUPPORTED_1000baseT_Full);
10089
10090 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10091 cmd->supported |= (SUPPORTED_100baseT_Half |
10092 SUPPORTED_100baseT_Full |
10093 SUPPORTED_10baseT_Half |
10094 SUPPORTED_10baseT_Full |
10095 SUPPORTED_TP);
10096 cmd->port = PORT_TP;
10097 } else {
10098 cmd->supported |= SUPPORTED_FIBRE;
10099 cmd->port = PORT_FIBRE;
10100 }
10101
10102 cmd->advertising = tp->link_config.advertising;
10103 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10104 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10105 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10106 cmd->advertising |= ADVERTISED_Pause;
10107 } else {
10108 cmd->advertising |= ADVERTISED_Pause |
10109 ADVERTISED_Asym_Pause;
10110 }
10111 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10112 cmd->advertising |= ADVERTISED_Asym_Pause;
10113 }
10114 }
10115 if (netif_running(dev)) {
10116 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10117 cmd->duplex = tp->link_config.active_duplex;
10118 } else {
10119 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10120 cmd->duplex = DUPLEX_INVALID;
10121 }
10122 cmd->phy_address = tp->phy_addr;
10123 cmd->transceiver = XCVR_INTERNAL;
10124 cmd->autoneg = tp->link_config.autoneg;
10125 cmd->maxtxpkt = 0;
10126 cmd->maxrxpkt = 0;
10127 return 0;
10128 }
10129
10130 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10131 {
10132 struct tg3 *tp = netdev_priv(dev);
10133 u32 speed = ethtool_cmd_speed(cmd);
10134
10135 if (tg3_flag(tp, USE_PHYLIB)) {
10136 struct phy_device *phydev;
10137 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10138 return -EAGAIN;
10139 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10140 return phy_ethtool_sset(phydev, cmd);
10141 }
10142
10143 if (cmd->autoneg != AUTONEG_ENABLE &&
10144 cmd->autoneg != AUTONEG_DISABLE)
10145 return -EINVAL;
10146
10147 if (cmd->autoneg == AUTONEG_DISABLE &&
10148 cmd->duplex != DUPLEX_FULL &&
10149 cmd->duplex != DUPLEX_HALF)
10150 return -EINVAL;
10151
10152 if (cmd->autoneg == AUTONEG_ENABLE) {
10153 u32 mask = ADVERTISED_Autoneg |
10154 ADVERTISED_Pause |
10155 ADVERTISED_Asym_Pause;
10156
10157 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10158 mask |= ADVERTISED_1000baseT_Half |
10159 ADVERTISED_1000baseT_Full;
10160
10161 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10162 mask |= ADVERTISED_100baseT_Half |
10163 ADVERTISED_100baseT_Full |
10164 ADVERTISED_10baseT_Half |
10165 ADVERTISED_10baseT_Full |
10166 ADVERTISED_TP;
10167 else
10168 mask |= ADVERTISED_FIBRE;
10169
10170 if (cmd->advertising & ~mask)
10171 return -EINVAL;
10172
10173 mask &= (ADVERTISED_1000baseT_Half |
10174 ADVERTISED_1000baseT_Full |
10175 ADVERTISED_100baseT_Half |
10176 ADVERTISED_100baseT_Full |
10177 ADVERTISED_10baseT_Half |
10178 ADVERTISED_10baseT_Full);
10179
10180 cmd->advertising &= mask;
10181 } else {
10182 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10183 if (speed != SPEED_1000)
10184 return -EINVAL;
10185
10186 if (cmd->duplex != DUPLEX_FULL)
10187 return -EINVAL;
10188 } else {
10189 if (speed != SPEED_100 &&
10190 speed != SPEED_10)
10191 return -EINVAL;
10192 }
10193 }
10194
10195 tg3_full_lock(tp, 0);
10196
10197 tp->link_config.autoneg = cmd->autoneg;
10198 if (cmd->autoneg == AUTONEG_ENABLE) {
10199 tp->link_config.advertising = (cmd->advertising |
10200 ADVERTISED_Autoneg);
10201 tp->link_config.speed = SPEED_INVALID;
10202 tp->link_config.duplex = DUPLEX_INVALID;
10203 } else {
10204 tp->link_config.advertising = 0;
10205 tp->link_config.speed = speed;
10206 tp->link_config.duplex = cmd->duplex;
10207 }
10208
10209 tp->link_config.orig_speed = tp->link_config.speed;
10210 tp->link_config.orig_duplex = tp->link_config.duplex;
10211 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10212
10213 if (netif_running(dev))
10214 tg3_setup_phy(tp, 1);
10215
10216 tg3_full_unlock(tp);
10217
10218 return 0;
10219 }
10220
10221 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10222 {
10223 struct tg3 *tp = netdev_priv(dev);
10224
10225 strcpy(info->driver, DRV_MODULE_NAME);
10226 strcpy(info->version, DRV_MODULE_VERSION);
10227 strcpy(info->fw_version, tp->fw_ver);
10228 strcpy(info->bus_info, pci_name(tp->pdev));
10229 }
10230
10231 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10232 {
10233 struct tg3 *tp = netdev_priv(dev);
10234
10235 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10236 wol->supported = WAKE_MAGIC;
10237 else
10238 wol->supported = 0;
10239 wol->wolopts = 0;
10240 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10241 wol->wolopts = WAKE_MAGIC;
10242 memset(&wol->sopass, 0, sizeof(wol->sopass));
10243 }
10244
10245 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10246 {
10247 struct tg3 *tp = netdev_priv(dev);
10248 struct device *dp = &tp->pdev->dev;
10249
10250 if (wol->wolopts & ~WAKE_MAGIC)
10251 return -EINVAL;
10252 if ((wol->wolopts & WAKE_MAGIC) &&
10253 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10254 return -EINVAL;
10255
10256 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10257
10258 spin_lock_bh(&tp->lock);
10259 if (device_may_wakeup(dp))
10260 tg3_flag_set(tp, WOL_ENABLE);
10261 else
10262 tg3_flag_clear(tp, WOL_ENABLE);
10263 spin_unlock_bh(&tp->lock);
10264
10265 return 0;
10266 }
10267
10268 static u32 tg3_get_msglevel(struct net_device *dev)
10269 {
10270 struct tg3 *tp = netdev_priv(dev);
10271 return tp->msg_enable;
10272 }
10273
10274 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10275 {
10276 struct tg3 *tp = netdev_priv(dev);
10277 tp->msg_enable = value;
10278 }
10279
10280 static int tg3_nway_reset(struct net_device *dev)
10281 {
10282 struct tg3 *tp = netdev_priv(dev);
10283 int r;
10284
10285 if (!netif_running(dev))
10286 return -EAGAIN;
10287
10288 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10289 return -EINVAL;
10290
10291 if (tg3_flag(tp, USE_PHYLIB)) {
10292 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10293 return -EAGAIN;
10294 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10295 } else {
10296 u32 bmcr;
10297
10298 spin_lock_bh(&tp->lock);
10299 r = -EINVAL;
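/* Note: BMCR is read twice here; only the second read's result is used. */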
10300 tg3_readphy(tp, MII_BMCR, &bmcr);
10301 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10302 ((bmcr & BMCR_ANENABLE) ||
10303 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10304 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10305 BMCR_ANENABLE);
10306 r = 0;
10307 }
10308 spin_unlock_bh(&tp->lock);
10309 }
10310
10311 return r;
10312 }
10313
10314 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10315 {
10316 struct tg3 *tp = netdev_priv(dev);
10317
10318 ering->rx_max_pending = tp->rx_std_ring_mask;
10319 ering->rx_mini_max_pending = 0;
10320 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10321 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10322 else
10323 ering->rx_jumbo_max_pending = 0;
10324
10325 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10326
10327 ering->rx_pending = tp->rx_pending;
10328 ering->rx_mini_pending = 0;
10329 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10330 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10331 else
10332 ering->rx_jumbo_pending = 0;
10333
10334 ering->tx_pending = tp->napi[0].tx_pending;
10335 }
10336
10337 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10338 {
10339 struct tg3 *tp = netdev_priv(dev);
10340 int i, irq_sync = 0, err = 0;
10341
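/*
 * The tx ring must always hold one maximally-fragmented skb
 * (MAX_SKB_FRAGS + 1 descriptors); chips flagged TSO_BUG need roughly
 * three times that headroom because the workaround path resubmits a
 * GSO packet as several smaller ones.
 */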
10342 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10343 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10344 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10345 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10346 (tg3_flag(tp, TSO_BUG) &&
10347 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10348 return -EINVAL;
10349
10350 if (netif_running(dev)) {
10351 tg3_phy_stop(tp);
10352 tg3_netif_stop(tp);
10353 irq_sync = 1;
10354 }
10355
10356 tg3_full_lock(tp, irq_sync);
10357
10358 tp->rx_pending = ering->rx_pending;
10359
10360 if (tg3_flag(tp, MAX_RXPEND_64) &&
10361 tp->rx_pending > 63)
10362 tp->rx_pending = 63;
10363 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10364
10365 for (i = 0; i < tp->irq_max; i++)
10366 tp->napi[i].tx_pending = ering->tx_pending;
10367
10368 if (netif_running(dev)) {
10369 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10370 err = tg3_restart_hw(tp, 1);
10371 if (!err)
10372 tg3_netif_start(tp);
10373 }
10374
10375 tg3_full_unlock(tp);
10376
10377 if (irq_sync && !err)
10378 tg3_phy_start(tp);
10379
10380 return err;
10381 }
10382
10383 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10384 {
10385 struct tg3 *tp = netdev_priv(dev);
10386
10387 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10388
10389 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10390 epause->rx_pause = 1;
10391 else
10392 epause->rx_pause = 0;
10393
10394 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10395 epause->tx_pause = 1;
10396 else
10397 epause->tx_pause = 0;
10398 }
10399
10400 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10401 {
10402 struct tg3 *tp = netdev_priv(dev);
10403 int err = 0;
10404
10405 if (tg3_flag(tp, USE_PHYLIB)) {
10406 u32 newadv;
10407 struct phy_device *phydev;
10408
10409 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10410
10411 if (!(phydev->supported & SUPPORTED_Pause) ||
10412 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10413 (epause->rx_pause != epause->tx_pause)))
10414 return -EINVAL;
10415
10416 tp->link_config.flowctrl = 0;
10417 if (epause->rx_pause) {
10418 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10419
10420 if (epause->tx_pause) {
10421 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10422 newadv = ADVERTISED_Pause;
10423 } else
10424 newadv = ADVERTISED_Pause |
10425 ADVERTISED_Asym_Pause;
10426 } else if (epause->tx_pause) {
10427 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10428 newadv = ADVERTISED_Asym_Pause;
10429 } else
10430 newadv = 0;
10431
10432 if (epause->autoneg)
10433 tg3_flag_set(tp, PAUSE_AUTONEG);
10434 else
10435 tg3_flag_clear(tp, PAUSE_AUTONEG);
10436
10437 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10438 u32 oldadv = phydev->advertising &
10439 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10440 if (oldadv != newadv) {
10441 phydev->advertising &=
10442 ~(ADVERTISED_Pause |
10443 ADVERTISED_Asym_Pause);
10444 phydev->advertising |= newadv;
10445 if (phydev->autoneg) {
10446 /*
10447 * Always renegotiate the link to
10448 * inform our link partner of our
10449 * flow control settings, even if the
10450 * flow control is forced. Let
10451 * tg3_adjust_link() do the final
10452 * flow control setup.
10453 */
10454 return phy_start_aneg(phydev);
10455 }
10456 }
10457
10458 if (!epause->autoneg)
10459 tg3_setup_flow_control(tp, 0, 0);
10460 } else {
10461 tp->link_config.orig_advertising &=
10462 ~(ADVERTISED_Pause |
10463 ADVERTISED_Asym_Pause);
10464 tp->link_config.orig_advertising |= newadv;
10465 }
10466 } else {
10467 int irq_sync = 0;
10468
10469 if (netif_running(dev)) {
10470 tg3_netif_stop(tp);
10471 irq_sync = 1;
10472 }
10473
10474 tg3_full_lock(tp, irq_sync);
10475
10476 if (epause->autoneg)
10477 tg3_flag_set(tp, PAUSE_AUTONEG);
10478 else
10479 tg3_flag_clear(tp, PAUSE_AUTONEG);
10480 if (epause->rx_pause)
10481 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10482 else
10483 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10484 if (epause->tx_pause)
10485 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10486 else
10487 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10488
10489 if (netif_running(dev)) {
10490 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10491 err = tg3_restart_hw(tp, 1);
10492 if (!err)
10493 tg3_netif_start(tp);
10494 }
10495
10496 tg3_full_unlock(tp);
10497 }
10498
10499 return err;
10500 }
10501
10502 static int tg3_get_sset_count(struct net_device *dev, int sset)
10503 {
10504 switch (sset) {
10505 case ETH_SS_TEST:
10506 return TG3_NUM_TEST;
10507 case ETH_SS_STATS:
10508 return TG3_NUM_STATS;
10509 default:
10510 return -EOPNOTSUPP;
10511 }
10512 }
10513
10514 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10515 {
10516 switch (stringset) {
10517 case ETH_SS_STATS:
10518 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10519 break;
10520 case ETH_SS_TEST:
10521 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10522 break;
10523 default:
10524 WARN_ON(1); /* we need a WARN() */
10525 break;
10526 }
10527 }
10528
10529 static int tg3_set_phys_id(struct net_device *dev,
10530 enum ethtool_phys_id_state state)
10531 {
10532 struct tg3 *tp = netdev_priv(dev);
10533
10534 if (!netif_running(tp->dev))
10535 return -EAGAIN;
10536
10537 switch (state) {
10538 case ETHTOOL_ID_ACTIVE:
10539 return 1; /* cycle on/off once per second */
10540
10541 case ETHTOOL_ID_ON:
10542 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10543 LED_CTRL_1000MBPS_ON |
10544 LED_CTRL_100MBPS_ON |
10545 LED_CTRL_10MBPS_ON |
10546 LED_CTRL_TRAFFIC_OVERRIDE |
10547 LED_CTRL_TRAFFIC_BLINK |
10548 LED_CTRL_TRAFFIC_LED);
10549 break;
10550
10551 case ETHTOOL_ID_OFF:
10552 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10553 LED_CTRL_TRAFFIC_OVERRIDE);
10554 break;
10555
10556 case ETHTOOL_ID_INACTIVE:
10557 tw32(MAC_LED_CTRL, tp->led_ctrl);
10558 break;
10559 }
10560
10561 return 0;
10562 }
10563
10564 static void tg3_get_ethtool_stats(struct net_device *dev,
10565 struct ethtool_stats *estats, u64 *tmp_stats)
10566 {
10567 struct tg3 *tp = netdev_priv(dev);
10568 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10569 }
10570
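/*
 * Locate and read the VPD block.  Images with the standard EEPROM
 * magic are read through the NVRAM interface, preferring an
 * extended-VPD block found via the NVRAM directory; anything else is
 * read through the PCI VPD capability, retrying interrupted or
 * timed-out reads up to three times.
 */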
10571 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10572 {
10573 int i;
10574 __be32 *buf;
10575 u32 offset = 0, len = 0;
10576 u32 magic, val;
10577
10578 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10579 return NULL;
10580
10581 if (magic == TG3_EEPROM_MAGIC) {
10582 for (offset = TG3_NVM_DIR_START;
10583 offset < TG3_NVM_DIR_END;
10584 offset += TG3_NVM_DIRENT_SIZE) {
10585 if (tg3_nvram_read(tp, offset, &val))
10586 return NULL;
10587
10588 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10589 TG3_NVM_DIRTYPE_EXTVPD)
10590 break;
10591 }
10592
10593 if (offset != TG3_NVM_DIR_END) {
10594 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10595 if (tg3_nvram_read(tp, offset + 4, &offset))
10596 return NULL;
10597
10598 offset = tg3_nvram_logical_addr(tp, offset);
10599 }
10600 }
10601
10602 if (!offset || !len) {
10603 offset = TG3_NVM_VPD_OFF;
10604 len = TG3_NVM_VPD_LEN;
10605 }
10606
10607 buf = kmalloc(len, GFP_KERNEL);
10608 if (buf == NULL)
10609 return NULL;
10610
10611 if (magic == TG3_EEPROM_MAGIC) {
10612 for (i = 0; i < len; i += 4) {
10613 /* The data is in little-endian format in NVRAM.
10614 * Use the big-endian read routines to preserve
10615 * the byte order as it exists in NVRAM.
10616 */
10617 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10618 goto error;
10619 }
10620 } else {
10621 u8 *ptr;
10622 ssize_t cnt;
10623 unsigned int pos = 0;
10624
10625 ptr = (u8 *)&buf[0];
10626 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10627 cnt = pci_read_vpd(tp->pdev, pos,
10628 len - pos, ptr);
10629 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10630 cnt = 0;
10631 else if (cnt < 0)
10632 goto error;
10633 }
10634 if (pos != len)
10635 goto error;
10636 }
10637
10638 *vpdlen = len;
10639
10640 return buf;
10641
10642 error:
10643 kfree(buf);
10644 return NULL;
10645 }
10646
10647 #define NVRAM_TEST_SIZE 0x100
10648 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10649 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10650 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10651 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10652 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10653 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10654 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10655 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10656
10657 static int tg3_test_nvram(struct tg3 *tp)
10658 {
10659 u32 csum, magic, len;
10660 __be32 *buf;
10661 int i, j, k, err = 0, size;
10662
10663 if (tg3_flag(tp, NO_NVRAM))
10664 return 0;
10665
10666 if (tg3_nvram_read(tp, 0, &magic) != 0)
10667 return -EIO;
10668
10669 if (magic == TG3_EEPROM_MAGIC)
10670 size = NVRAM_TEST_SIZE;
10671 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10672 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10673 TG3_EEPROM_SB_FORMAT_1) {
10674 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10675 case TG3_EEPROM_SB_REVISION_0:
10676 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10677 break;
10678 case TG3_EEPROM_SB_REVISION_2:
10679 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10680 break;
10681 case TG3_EEPROM_SB_REVISION_3:
10682 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10683 break;
10684 case TG3_EEPROM_SB_REVISION_4:
10685 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10686 break;
10687 case TG3_EEPROM_SB_REVISION_5:
10688 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10689 break;
10690 case TG3_EEPROM_SB_REVISION_6:
10691 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10692 break;
10693 default:
10694 return -EIO;
10695 }
10696 } else
10697 return 0;
10698 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10699 size = NVRAM_SELFBOOT_HW_SIZE;
10700 else
10701 return -EIO;
10702
10703 buf = kmalloc(size, GFP_KERNEL);
10704 if (buf == NULL)
10705 return -ENOMEM;
10706
10707 err = -EIO;
10708 for (i = 0, j = 0; i < size; i += 4, j++) {
10709 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10710 if (err)
10711 break;
10712 }
10713 if (i < size)
10714 goto out;
10715
10716 /* Selfboot format */
10717 magic = be32_to_cpu(buf[0]);
10718 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10719 TG3_EEPROM_MAGIC_FW) {
10720 u8 *buf8 = (u8 *) buf, csum8 = 0;
10721
10722 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10723 TG3_EEPROM_SB_REVISION_2) {
10724 /* For rev 2, the csum doesn't include the MBA. */
10725 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10726 csum8 += buf8[i];
10727 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10728 csum8 += buf8[i];
10729 } else {
10730 for (i = 0; i < size; i++)
10731 csum8 += buf8[i];
10732 }
10733
10734 if (csum8 == 0) {
10735 err = 0;
10736 goto out;
10737 }
10738
10739 err = -EIO;
10740 goto out;
10741 }
10742
10743 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10744 TG3_EEPROM_MAGIC_HW) {
10745 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10746 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10747 u8 *buf8 = (u8 *) buf;
10748
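/*
 * Selfboot HW-format images protect each of the 28 data bytes with
 * one parity bit.  The 28 parity bits are packed into bytes 0, 8, 16
 * and 17 of the 32-byte image, and each data byte plus its parity bit
 * must have odd parity overall.
 */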
10749 /* Separate the parity bits and the data bytes. */
10750 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10751 if ((i == 0) || (i == 8)) {
10752 int l;
10753 u8 msk;
10754
10755 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10756 parity[k++] = buf8[i] & msk;
10757 i++;
10758 } else if (i == 16) {
10759 int l;
10760 u8 msk;
10761
10762 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10763 parity[k++] = buf8[i] & msk;
10764 i++;
10765
10766 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10767 parity[k++] = buf8[i] & msk;
10768 i++;
10769 }
10770 data[j++] = buf8[i];
10771 }
10772
10773 err = -EIO;
10774 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10775 u8 hw8 = hweight8(data[i]);
10776
10777 if ((hw8 & 0x1) && parity[i])
10778 goto out;
10779 else if (!(hw8 & 0x1) && !parity[i])
10780 goto out;
10781 }
10782 err = 0;
10783 goto out;
10784 }
10785
10786 err = -EIO;
10787
10788 /* Bootstrap checksum at offset 0x10 */
10789 csum = calc_crc((unsigned char *) buf, 0x10);
10790 if (csum != le32_to_cpu(buf[0x10/4]))
10791 goto out;
10792
10793 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10794 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10795 if (csum != le32_to_cpu(buf[0xfc/4]))
10796 goto out;
10797
10798 kfree(buf);
10799
10800 buf = tg3_vpd_readblock(tp, &len);
10801 if (!buf)
10802 return -ENOMEM;
10803
10804 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
10805 if (i > 0) {
10806 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10807 if (j < 0)
10808 goto out;
10809
10810 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
10811 goto out;
10812
10813 i += PCI_VPD_LRDT_TAG_SIZE;
10814 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10815 PCI_VPD_RO_KEYWORD_CHKSUM);
10816 if (j > 0) {
10817 u8 csum8 = 0;
10818
10819 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10820
10821 for (i = 0; i <= j; i++)
10822 csum8 += ((u8 *)buf)[i];
10823
10824 if (csum8)
10825 goto out;
10826 }
10827 }
10828
10829 err = 0;
10830
10831 out:
10832 kfree(buf);
10833 return err;
10834 }
10835
10836 #define TG3_SERDES_TIMEOUT_SEC 2
10837 #define TG3_COPPER_TIMEOUT_SEC 6
10838
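/*
 * Poll once per second for link-up: serdes parts get 2 seconds,
 * copper up to 6, and the wait aborts early if a signal arrives.
 */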
10839 static int tg3_test_link(struct tg3 *tp)
10840 {
10841 int i, max;
10842
10843 if (!netif_running(tp->dev))
10844 return -ENODEV;
10845
10846 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10847 max = TG3_SERDES_TIMEOUT_SEC;
10848 else
10849 max = TG3_COPPER_TIMEOUT_SEC;
10850
10851 for (i = 0; i < max; i++) {
10852 if (netif_carrier_ok(tp->dev))
10853 return 0;
10854
10855 if (msleep_interruptible(1000))
10856 break;
10857 }
10858
10859 return -EIO;
10860 }
10861
10862 /* Only test the commonly used registers */
10863 static int tg3_test_registers(struct tg3 *tp)
10864 {
10865 int i, is_5705, is_5750;
10866 u32 offset, read_mask, write_mask, val, save_val, read_val;
10867 static struct {
10868 u16 offset;
10869 u16 flags;
10870 #define TG3_FL_5705 0x1
10871 #define TG3_FL_NOT_5705 0x2
10872 #define TG3_FL_NOT_5788 0x4
10873 #define TG3_FL_NOT_5750 0x8
10874 u32 read_mask;
10875 u32 write_mask;
10876 } reg_tbl[] = {
10877 /* MAC Control Registers */
10878 { MAC_MODE, TG3_FL_NOT_5705,
10879 0x00000000, 0x00ef6f8c },
10880 { MAC_MODE, TG3_FL_5705,
10881 0x00000000, 0x01ef6b8c },
10882 { MAC_STATUS, TG3_FL_NOT_5705,
10883 0x03800107, 0x00000000 },
10884 { MAC_STATUS, TG3_FL_5705,
10885 0x03800100, 0x00000000 },
10886 { MAC_ADDR_0_HIGH, 0x0000,
10887 0x00000000, 0x0000ffff },
10888 { MAC_ADDR_0_LOW, 0x0000,
10889 0x00000000, 0xffffffff },
10890 { MAC_RX_MTU_SIZE, 0x0000,
10891 0x00000000, 0x0000ffff },
10892 { MAC_TX_MODE, 0x0000,
10893 0x00000000, 0x00000070 },
10894 { MAC_TX_LENGTHS, 0x0000,
10895 0x00000000, 0x00003fff },
10896 { MAC_RX_MODE, TG3_FL_NOT_5705,
10897 0x00000000, 0x000007fc },
10898 { MAC_RX_MODE, TG3_FL_5705,
10899 0x00000000, 0x000007dc },
10900 { MAC_HASH_REG_0, 0x0000,
10901 0x00000000, 0xffffffff },
10902 { MAC_HASH_REG_1, 0x0000,
10903 0x00000000, 0xffffffff },
10904 { MAC_HASH_REG_2, 0x0000,
10905 0x00000000, 0xffffffff },
10906 { MAC_HASH_REG_3, 0x0000,
10907 0x00000000, 0xffffffff },
10908
10909 /* Receive Data and Receive BD Initiator Control Registers. */
10910 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10911 0x00000000, 0xffffffff },
10912 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10913 0x00000000, 0xffffffff },
10914 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10915 0x00000000, 0x00000003 },
10916 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10917 0x00000000, 0xffffffff },
10918 { RCVDBDI_STD_BD+0, 0x0000,
10919 0x00000000, 0xffffffff },
10920 { RCVDBDI_STD_BD+4, 0x0000,
10921 0x00000000, 0xffffffff },
10922 { RCVDBDI_STD_BD+8, 0x0000,
10923 0x00000000, 0xffff0002 },
10924 { RCVDBDI_STD_BD+0xc, 0x0000,
10925 0x00000000, 0xffffffff },
10926
10927 /* Receive BD Initiator Control Registers. */
10928 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10929 0x00000000, 0xffffffff },
10930 { RCVBDI_STD_THRESH, TG3_FL_5705,
10931 0x00000000, 0x000003ff },
10932 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10933 0x00000000, 0xffffffff },
10934
10935 /* Host Coalescing Control Registers. */
10936 { HOSTCC_MODE, TG3_FL_NOT_5705,
10937 0x00000000, 0x00000004 },
10938 { HOSTCC_MODE, TG3_FL_5705,
10939 0x00000000, 0x000000f6 },
10940 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10941 0x00000000, 0xffffffff },
10942 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10943 0x00000000, 0x000003ff },
10944 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10945 0x00000000, 0xffffffff },
10946 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10947 0x00000000, 0x000003ff },
10948 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10949 0x00000000, 0xffffffff },
10950 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10951 0x00000000, 0x000000ff },
10952 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10953 0x00000000, 0xffffffff },
10954 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10955 0x00000000, 0x000000ff },
10956 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10957 0x00000000, 0xffffffff },
10958 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10959 0x00000000, 0xffffffff },
10960 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10961 0x00000000, 0xffffffff },
10962 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10963 0x00000000, 0x000000ff },
10964 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10965 0x00000000, 0xffffffff },
10966 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10967 0x00000000, 0x000000ff },
10968 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10969 0x00000000, 0xffffffff },
10970 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10971 0x00000000, 0xffffffff },
10972 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10973 0x00000000, 0xffffffff },
10974 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10975 0x00000000, 0xffffffff },
10976 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10977 0x00000000, 0xffffffff },
10978 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10979 0xffffffff, 0x00000000 },
10980 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10981 0xffffffff, 0x00000000 },
10982
10983 /* Buffer Manager Control Registers. */
10984 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10985 0x00000000, 0x007fff80 },
10986 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10987 0x00000000, 0x007fffff },
10988 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10989 0x00000000, 0x0000003f },
10990 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10991 0x00000000, 0x000001ff },
10992 { BUFMGR_MB_HIGH_WATER, 0x0000,
10993 0x00000000, 0x000001ff },
10994 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10995 0xffffffff, 0x00000000 },
10996 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10997 0xffffffff, 0x00000000 },
10998
10999 /* Mailbox Registers */
11000 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11001 0x00000000, 0x000001ff },
11002 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11003 0x00000000, 0x000001ff },
11004 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11005 0x00000000, 0x000007ff },
11006 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11007 0x00000000, 0x000001ff },
11008
11009 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11010 };
11011
11012 is_5705 = is_5750 = 0;
11013 if (tg3_flag(tp, 5705_PLUS)) {
11014 is_5705 = 1;
11015 if (tg3_flag(tp, 5750_PLUS))
11016 is_5750 = 1;
11017 }
11018
11019 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11020 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11021 continue;
11022
11023 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11024 continue;
11025
11026 if (tg3_flag(tp, IS_5788) &&
11027 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11028 continue;
11029
11030 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11031 continue;
11032
11033 offset = (u32) reg_tbl[i].offset;
11034 read_mask = reg_tbl[i].read_mask;
11035 write_mask = reg_tbl[i].write_mask;
11036
11037 /* Save the original register content */
11038 save_val = tr32(offset);
11039
11040 /* Determine the read-only value. */
11041 read_val = save_val & read_mask;
11042
11043 /* Write zero to the register, then make sure the read-only bits
11044 * are not changed and the read/write bits are all zeros.
11045 */
11046 tw32(offset, 0);
11047
11048 val = tr32(offset);
11049
11050 /* Test the read-only and read/write bits. */
11051 if (((val & read_mask) != read_val) || (val & write_mask))
11052 goto out;
11053
11054 /* Write ones to all the bits defined by RdMask and WrMask, then
11055 * make sure the read-only bits are not changed and the
11056 * read/write bits are all ones.
11057 */
11058 tw32(offset, read_mask | write_mask);
11059
11060 val = tr32(offset);
11061
11062 /* Test the read-only bits. */
11063 if ((val & read_mask) != read_val)
11064 goto out;
11065
11066 /* Test the read/write bits. */
11067 if ((val & write_mask) != write_mask)
11068 goto out;
11069
11070 tw32(offset, save_val);
11071 }
11072
11073 return 0;
11074
11075 out:
11076 if (netif_msg_hw(tp))
11077 netdev_err(tp->dev,
11078 "Register test failed at offset %x\n", offset);
11079 tw32(offset, save_val);
11080 return -EIO;
11081 }
11082
11083 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11084 {
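/* Write each test pattern (all zeros, all ones, and the mixed value
 * 0xaa55a55a) across the whole region, reading each word back to
 * verify it.
 */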
11085 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11086 int i;
11087 u32 j;
11088
11089 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11090 for (j = 0; j < len; j += 4) {
11091 u32 val;
11092
11093 tg3_write_mem(tp, offset + j, test_pattern[i]);
11094 tg3_read_mem(tp, offset + j, &val);
11095 if (val != test_pattern[i])
11096 return -EIO;
11097 }
11098 }
11099 return 0;
11100 }
11101
11102 static int tg3_test_memory(struct tg3 *tp)
11103 {
11104 static struct mem_entry {
11105 u32 offset;
11106 u32 len;
11107 } mem_tbl_570x[] = {
11108 { 0x00000000, 0x00b50},
11109 { 0x00002000, 0x1c000},
11110 { 0xffffffff, 0x00000}
11111 }, mem_tbl_5705[] = {
11112 { 0x00000100, 0x0000c},
11113 { 0x00000200, 0x00008},
11114 { 0x00004000, 0x00800},
11115 { 0x00006000, 0x01000},
11116 { 0x00008000, 0x02000},
11117 { 0x00010000, 0x0e000},
11118 { 0xffffffff, 0x00000}
11119 }, mem_tbl_5755[] = {
11120 { 0x00000200, 0x00008},
11121 { 0x00004000, 0x00800},
11122 { 0x00006000, 0x00800},
11123 { 0x00008000, 0x02000},
11124 { 0x00010000, 0x0c000},
11125 { 0xffffffff, 0x00000}
11126 }, mem_tbl_5906[] = {
11127 { 0x00000200, 0x00008},
11128 { 0x00004000, 0x00400},
11129 { 0x00006000, 0x00400},
11130 { 0x00008000, 0x01000},
11131 { 0x00010000, 0x01000},
11132 { 0xffffffff, 0x00000}
11133 }, mem_tbl_5717[] = {
11134 { 0x00000200, 0x00008},
11135 { 0x00010000, 0x0a000},
11136 { 0x00020000, 0x13c00},
11137 { 0xffffffff, 0x00000}
11138 }, mem_tbl_57765[] = {
11139 { 0x00000200, 0x00008},
11140 { 0x00004000, 0x00800},
11141 { 0x00006000, 0x09800},
11142 { 0x00010000, 0x0a000},
11143 { 0xffffffff, 0x00000}
11144 };
11145 struct mem_entry *mem_tbl;
11146 int err = 0;
11147 int i;
11148
11149 if (tg3_flag(tp, 5717_PLUS))
11150 mem_tbl = mem_tbl_5717;
11151 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11152 mem_tbl = mem_tbl_57765;
11153 else if (tg3_flag(tp, 5755_PLUS))
11154 mem_tbl = mem_tbl_5755;
11155 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11156 mem_tbl = mem_tbl_5906;
11157 else if (tg3_flag(tp, 5705_PLUS))
11158 mem_tbl = mem_tbl_5705;
11159 else
11160 mem_tbl = mem_tbl_570x;
11161
11162 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11163 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11164 if (err)
11165 break;
11166 }
11167
11168 return err;
11169 }
11170
11171 #define TG3_MAC_LOOPBACK 0
11172 #define TG3_PHY_LOOPBACK 1
11173 #define TG3_TSO_LOOPBACK 2
11174
11175 #define TG3_TSO_MSS 500
11176
11177 #define TG3_TSO_IP_HDR_LEN 20
11178 #define TG3_TSO_TCP_HDR_LEN 20
11179 #define TG3_TSO_TCP_OPT_LEN 12
11180
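/*
 * Canned packet template for the TSO loopback test: EtherType 0x0800,
 * a 20-byte IPv4 header (DF set, protocol TCP, 10.0.0.1 -> 10.0.0.2)
 * and a 32-byte TCP header whose 12 bytes of options are two NOPs
 * plus a timestamp.
 */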
11181 static const u8 tg3_tso_header[] = {
11182 0x08, 0x00,
11183 0x45, 0x00, 0x00, 0x00,
11184 0x00, 0x00, 0x40, 0x00,
11185 0x40, 0x06, 0x00, 0x00,
11186 0x0a, 0x00, 0x00, 0x01,
11187 0x0a, 0x00, 0x00, 0x02,
11188 0x0d, 0x00, 0xe0, 0x00,
11189 0x00, 0x00, 0x01, 0x00,
11190 0x00, 0x00, 0x02, 0x00,
11191 0x80, 0x10, 0x10, 0x00,
11192 0x14, 0x09, 0x00, 0x00,
11193 0x01, 0x01, 0x08, 0x0a,
11194 0x11, 0x11, 0x11, 0x11,
11195 0x11, 0x11, 0x11, 0x11,
11196 };
11197
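/*
 * Send a single packet through the selected internal loopback path
 * (MAC, PHY, or PHY with TSO), poll the status block until the tx
 * descriptor is consumed and the rx producer advances, then compare
 * the received payload byte-for-byte against what was sent.
 */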
11198 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11199 {
11200 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11201 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11202 struct sk_buff *skb, *rx_skb;
11203 u8 *tx_data;
11204 dma_addr_t map;
11205 int num_pkts, tx_len, rx_len, i, err;
11206 struct tg3_rx_buffer_desc *desc;
11207 struct tg3_napi *tnapi, *rnapi;
11208 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11209
11210 tnapi = &tp->napi[0];
11211 rnapi = &tp->napi[0];
11212 if (tp->irq_cnt > 1) {
11213 if (tg3_flag(tp, ENABLE_RSS))
11214 rnapi = &tp->napi[1];
11215 if (tg3_flag(tp, ENABLE_TSS))
11216 tnapi = &tp->napi[1];
11217 }
11218 coal_now = tnapi->coal_now | rnapi->coal_now;
11219
11220 if (loopback_mode == TG3_MAC_LOOPBACK) {
11221 /* HW errata - mac loopback fails in some cases on 5780.
11222 * Normal traffic and PHY loopback are not affected by
11223 * errata. Also, the MAC loopback test is deprecated for
11224 * all newer ASIC revisions.
11225 */
11226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11227 tg3_flag(tp, CPMU_PRESENT))
11228 return 0;
11229
11230 mac_mode = tp->mac_mode &
11231 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11232 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11233 if (!tg3_flag(tp, 5705_PLUS))
11234 mac_mode |= MAC_MODE_LINK_POLARITY;
11235 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11236 mac_mode |= MAC_MODE_PORT_MODE_MII;
11237 else
11238 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11239 tw32(MAC_MODE, mac_mode);
11240 } else {
11241 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11242 tg3_phy_fet_toggle_apd(tp, false);
11243 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11244 } else
11245 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11246
11247 tg3_phy_toggle_automdix(tp, 0);
11248
11249 tg3_writephy(tp, MII_BMCR, val);
11250 udelay(40);
11251
11252 mac_mode = tp->mac_mode &
11253 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11254 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11255 tg3_writephy(tp, MII_TG3_FET_PTEST,
11256 MII_TG3_FET_PTEST_FRC_TX_LINK |
11257 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11258 /* The write needs to be flushed for the AC131 */
11259 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11260 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11261 mac_mode |= MAC_MODE_PORT_MODE_MII;
11262 } else
11263 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11264
11265 /* Reset to avoid intermittently losing the first rx packet. */
11266 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11267 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11268 udelay(10);
11269 tw32_f(MAC_RX_MODE, tp->rx_mode);
11270 }
11271 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11272 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11273 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11274 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11275 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11276 mac_mode |= MAC_MODE_LINK_POLARITY;
11277 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11278 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11279 }
11280 tw32(MAC_MODE, mac_mode);
11281
11282 /* Wait for link */
11283 for (i = 0; i < 100; i++) {
11284 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11285 break;
11286 mdelay(1);
11287 }
11288 }
11289
11290 err = -EIO;
11291
11292 tx_len = pktsz;
11293 skb = netdev_alloc_skb(tp->dev, tx_len);
11294 if (!skb)
11295 return -ENOMEM;
11296
11297 tx_data = skb_put(skb, tx_len);
11298 memcpy(tx_data, tp->dev->dev_addr, 6);
11299 memset(tx_data + 6, 0x0, 8);
11300
11301 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11302
11303 if (loopback_mode == TG3_TSO_LOOPBACK) {
11304 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11305
11306 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11307 TG3_TSO_TCP_OPT_LEN;
11308
11309 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11310 sizeof(tg3_tso_header));
11311 mss = TG3_TSO_MSS;
11312
11313 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11314 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11315
11316 /* Set the total length field in the IP header */
11317 iph->tot_len = htons((u16)(mss + hdr_len));
11318
11319 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11320 TXD_FLAG_CPU_POST_DMA);
11321
11322 if (tg3_flag(tp, HW_TSO_1) ||
11323 tg3_flag(tp, HW_TSO_2) ||
11324 tg3_flag(tp, HW_TSO_3)) {
11325 struct tcphdr *th;
11326 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11327 th = (struct tcphdr *)&tx_data[val];
11328 th->check = 0;
11329 } else
11330 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11331
11332 if (tg3_flag(tp, HW_TSO_3)) {
11333 mss |= (hdr_len & 0xc) << 12;
11334 if (hdr_len & 0x10)
11335 base_flags |= 0x00000010;
11336 base_flags |= (hdr_len & 0x3e0) << 5;
11337 } else if (tg3_flag(tp, HW_TSO_2))
11338 mss |= hdr_len << 9;
11339 else if (tg3_flag(tp, HW_TSO_1) ||
11340 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11341 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11342 } else {
11343 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11344 }
11345
11346 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11347 } else {
11348 num_pkts = 1;
11349 data_off = ETH_HLEN;
11350 }
11351
11352 for (i = data_off; i < tx_len; i++)
11353 tx_data[i] = (u8) (i & 0xff);
11354
11355 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11356 if (pci_dma_mapping_error(tp->pdev, map)) {
11357 dev_kfree_skb(skb);
11358 return -EIO;
11359 }
11360
11361 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11362 rnapi->coal_now);
11363
11364 udelay(10);
11365
11366 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11367
11368 tg3_tx_set_bd(tnapi, tnapi->tx_prod, map, tx_len,
11369 base_flags | TXD_FLAG_END, mss, 0);
11370
11371 tnapi->tx_prod++;
11372
11373 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11374 tr32_mailbox(tnapi->prodmbox);
11375
11376 udelay(10);
11377
11378 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11379 for (i = 0; i < 35; i++) {
11380 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11381 coal_now);
11382
11383 udelay(10);
11384
11385 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11386 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11387 if ((tx_idx == tnapi->tx_prod) &&
11388 (rx_idx == (rx_start_idx + num_pkts)))
11389 break;
11390 }
11391
11392 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11393 dev_kfree_skb(skb);
11394
11395 if (tx_idx != tnapi->tx_prod)
11396 goto out;
11397
11398 if (rx_idx != rx_start_idx + num_pkts)
11399 goto out;
11400
11401 val = data_off;
11402 while (rx_idx != rx_start_idx) {
11403 desc = &rnapi->rx_rcb[rx_start_idx++];
11404 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11405 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11406
11407 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11408 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11409 goto out;
11410
11411 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11412 - ETH_FCS_LEN;
11413
11414 if (loopback_mode != TG3_TSO_LOOPBACK) {
11415 if (rx_len != tx_len)
11416 goto out;
11417
11418 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11419 if (opaque_key != RXD_OPAQUE_RING_STD)
11420 goto out;
11421 } else {
11422 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11423 goto out;
11424 }
11425 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11426 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11427 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11428 goto out;
11429 }
11430
11431 if (opaque_key == RXD_OPAQUE_RING_STD) {
11432 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11433 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11434 mapping);
11435 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11436 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11437 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11438 mapping);
11439 } else
11440 goto out;
11441
11442 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11443 PCI_DMA_FROMDEVICE);
11444
11445 for (i = data_off; i < rx_len; i++, val++) {
11446 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11447 goto out;
11448 }
11449 }
11450
11451 err = 0;
11452
11453 /* tg3_free_rings will unmap and free the rx_skb */
11454 out:
11455 return err;
11456 }
11457
11458 #define TG3_STD_LOOPBACK_FAILED 1
11459 #define TG3_JMB_LOOPBACK_FAILED 2
11460 #define TG3_TSO_LOOPBACK_FAILED 4
11461
11462 #define TG3_MAC_LOOPBACK_SHIFT 0
11463 #define TG3_PHY_LOOPBACK_SHIFT 4
11464 #define TG3_LOOPBACK_FAILED 0x00000077
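/*
 * Loopback results form a bitmask: bits 0-2 report the standard,
 * jumbo and TSO tests through the MAC path, bits 4-6 the same tests
 * through the PHY path; 0x77 therefore means everything failed.
 */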
11465
11466 static int tg3_test_loopback(struct tg3 *tp)
11467 {
11468 int err = 0;
11469 u32 eee_cap, cpmuctrl = 0;
11470
11471 if (!netif_running(tp->dev))
11472 return TG3_LOOPBACK_FAILED;
11473
11474 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11475 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11476
11477 err = tg3_reset_hw(tp, 1);
11478 if (err) {
11479 err = TG3_LOOPBACK_FAILED;
11480 goto done;
11481 }
11482
11483 if (tg3_flag(tp, ENABLE_RSS)) {
11484 int i;
11485
11486 /* Reroute all rx packets to the 1st queue */
11487 for (i = MAC_RSS_INDIR_TBL_0;
11488 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11489 tw32(i, 0x0);
11490 }
11491
11492 /* Turn off gphy autopowerdown. */
11493 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11494 tg3_phy_toggle_apd(tp, false);
11495
11496 if (tg3_flag(tp, CPMU_PRESENT)) {
11497 int i;
11498 u32 status;
11499
11500 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11501
11502 /* Wait for up to 40 microseconds to acquire lock. */
11503 for (i = 0; i < 4; i++) {
11504 status = tr32(TG3_CPMU_MUTEX_GNT);
11505 if (status == CPMU_MUTEX_GNT_DRIVER)
11506 break;
11507 udelay(10);
11508 }
11509
11510 if (status != CPMU_MUTEX_GNT_DRIVER) {
11511 err = TG3_LOOPBACK_FAILED;
11512 goto done;
11513 }
11514
11515 /* Turn off link-based power management. */
11516 cpmuctrl = tr32(TG3_CPMU_CTRL);
11517 tw32(TG3_CPMU_CTRL,
11518 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11519 CPMU_CTRL_LINK_AWARE_MODE));
11520 }
11521
11522 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11523 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11524
11525 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11526 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11527 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11528
11529 if (tg3_flag(tp, CPMU_PRESENT)) {
11530 tw32(TG3_CPMU_CTRL, cpmuctrl);
11531
11532 /* Release the mutex */
11533 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11534 }
11535
11536 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11537 !tg3_flag(tp, USE_PHYLIB)) {
11538 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11539 err |= TG3_STD_LOOPBACK_FAILED <<
11540 TG3_PHY_LOOPBACK_SHIFT;
11541 if (tg3_flag(tp, TSO_CAPABLE) &&
11542 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11543 err |= TG3_TSO_LOOPBACK_FAILED <<
11544 TG3_PHY_LOOPBACK_SHIFT;
11545 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11546 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11547 err |= TG3_JMB_LOOPBACK_FAILED <<
11548 TG3_PHY_LOOPBACK_SHIFT;
11549 }
11550
11551 /* Re-enable gphy autopowerdown. */
11552 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11553 tg3_phy_toggle_apd(tp, true);
11554
11555 done:
11556 tp->phy_flags |= eee_cap;
11557
11558 return err;
11559 }
11560
11561 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11562 u64 *data)
11563 {
11564 struct tg3 *tp = netdev_priv(dev);
11565
11566 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11567 tg3_power_up(tp)) {
11568 etest->flags |= ETH_TEST_FL_FAILED;
11569 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11570 return;
11571 }
11572
11573 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11574
11575 if (tg3_test_nvram(tp) != 0) {
11576 etest->flags |= ETH_TEST_FL_FAILED;
11577 data[0] = 1;
11578 }
11579 if (tg3_test_link(tp) != 0) {
11580 etest->flags |= ETH_TEST_FL_FAILED;
11581 data[1] = 1;
11582 }
11583 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11584 int err, err2 = 0, irq_sync = 0;
11585
11586 if (netif_running(dev)) {
11587 tg3_phy_stop(tp);
11588 tg3_netif_stop(tp);
11589 irq_sync = 1;
11590 }
11591
11592 tg3_full_lock(tp, irq_sync);
11593
11594 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11595 err = tg3_nvram_lock(tp);
11596 tg3_halt_cpu(tp, RX_CPU_BASE);
11597 if (!tg3_flag(tp, 5705_PLUS))
11598 tg3_halt_cpu(tp, TX_CPU_BASE);
11599 if (!err)
11600 tg3_nvram_unlock(tp);
11601
11602 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11603 tg3_phy_reset(tp);
11604
11605 if (tg3_test_registers(tp) != 0) {
11606 etest->flags |= ETH_TEST_FL_FAILED;
11607 data[2] = 1;
11608 }
11609 if (tg3_test_memory(tp) != 0) {
11610 etest->flags |= ETH_TEST_FL_FAILED;
11611 data[3] = 1;
11612 }
11613 if ((data[4] = tg3_test_loopback(tp)) != 0)
11614 etest->flags |= ETH_TEST_FL_FAILED;
11615
11616 tg3_full_unlock(tp);
11617
11618 if (tg3_test_interrupt(tp) != 0) {
11619 etest->flags |= ETH_TEST_FL_FAILED;
11620 data[5] = 1;
11621 }
11622
11623 tg3_full_lock(tp, 0);
11624
11625 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11626 if (netif_running(dev)) {
11627 tg3_flag_set(tp, INIT_COMPLETE);
11628 err2 = tg3_restart_hw(tp, 1);
11629 if (!err2)
11630 tg3_netif_start(tp);
11631 }
11632
11633 tg3_full_unlock(tp);
11634
11635 if (irq_sync && !err2)
11636 tg3_phy_start(tp);
11637 }
11638 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11639 tg3_power_down(tp);
11640
11641 }
11642
11643 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11644 {
11645 struct mii_ioctl_data *data = if_mii(ifr);
11646 struct tg3 *tp = netdev_priv(dev);
11647 int err;
11648
11649 if (tg3_flag(tp, USE_PHYLIB)) {
11650 struct phy_device *phydev;
11651 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11652 return -EAGAIN;
11653 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11654 return phy_mii_ioctl(phydev, ifr, cmd);
11655 }
11656
11657 switch (cmd) {
11658 case SIOCGMIIPHY:
11659 data->phy_id = tp->phy_addr;
11660
11661 /* fallthru */
11662 case SIOCGMIIREG: {
11663 u32 mii_regval;
11664
11665 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11666 break; /* We have no PHY */
11667
11668 if (!netif_running(dev))
11669 return -EAGAIN;
11670
11671 spin_lock_bh(&tp->lock);
11672 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11673 spin_unlock_bh(&tp->lock);
11674
11675 data->val_out = mii_regval;
11676
11677 return err;
11678 }
11679
11680 case SIOCSMIIREG:
11681 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11682 break; /* We have no PHY */
11683
11684 if (!netif_running(dev))
11685 return -EAGAIN;
11686
11687 spin_lock_bh(&tp->lock);
11688 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11689 spin_unlock_bh(&tp->lock);
11690
11691 return err;
11692
11693 default:
11694 /* do nothing */
11695 break;
11696 }
11697 return -EOPNOTSUPP;
11698 }
11699
11700 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11701 {
11702 struct tg3 *tp = netdev_priv(dev);
11703
11704 memcpy(ec, &tp->coal, sizeof(*ec));
11705 return 0;
11706 }
11707
11708 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11709 {
11710 struct tg3 *tp = netdev_priv(dev);
11711 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11712 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11713
11714 if (!tg3_flag(tp, 5705_PLUS)) {
11715 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11716 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11717 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11718 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11719 }
11720
11721 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11722 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11723 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11724 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11725 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11726 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11727 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11728 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11729 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11730 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11731 return -EINVAL;
11732
11733 /* No rx interrupts will be generated if both are zero */
11734 if ((ec->rx_coalesce_usecs == 0) &&
11735 (ec->rx_max_coalesced_frames == 0))
11736 return -EINVAL;
11737
11738 /* No tx interrupts will be generated if both are zero */
11739 if ((ec->tx_coalesce_usecs == 0) &&
11740 (ec->tx_max_coalesced_frames == 0))
11741 return -EINVAL;
11742
11743 /* Only copy relevant parameters, ignore all others. */
11744 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11745 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11746 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11747 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11748 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11749 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11750 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11751 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11752 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11753
11754 if (netif_running(dev)) {
11755 tg3_full_lock(tp, 0);
11756 __tg3_set_coalesce(tp, &tp->coal);
11757 tg3_full_unlock(tp);
11758 }
11759 return 0;
11760 }
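/*
 * Illustrative note: on 5705-and-newer parts the max_*coal_tick_int and
 * stat_coal_ticks limits above stay zero, so any nonzero
 * rx/tx_coalesce_usecs_irq or stats_block_coalesce_usecs from user space
 * is rejected with -EINVAL there; only the plain usecs/max_frames knobs
 * apply.  These fields map to the standard ethtool coalescing options,
 * e.g. "ethtool -C ethX rx-usecs 18 rx-frames 12" feeds
 * rx_coalesce_usecs and rx_max_coalesced_frames respectively.
 */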
11761
11762 static const struct ethtool_ops tg3_ethtool_ops = {
11763 .get_settings = tg3_get_settings,
11764 .set_settings = tg3_set_settings,
11765 .get_drvinfo = tg3_get_drvinfo,
11766 .get_regs_len = tg3_get_regs_len,
11767 .get_regs = tg3_get_regs,
11768 .get_wol = tg3_get_wol,
11769 .set_wol = tg3_set_wol,
11770 .get_msglevel = tg3_get_msglevel,
11771 .set_msglevel = tg3_set_msglevel,
11772 .nway_reset = tg3_nway_reset,
11773 .get_link = ethtool_op_get_link,
11774 .get_eeprom_len = tg3_get_eeprom_len,
11775 .get_eeprom = tg3_get_eeprom,
11776 .set_eeprom = tg3_set_eeprom,
11777 .get_ringparam = tg3_get_ringparam,
11778 .set_ringparam = tg3_set_ringparam,
11779 .get_pauseparam = tg3_get_pauseparam,
11780 .set_pauseparam = tg3_set_pauseparam,
11781 .self_test = tg3_self_test,
11782 .get_strings = tg3_get_strings,
11783 .set_phys_id = tg3_set_phys_id,
11784 .get_ethtool_stats = tg3_get_ethtool_stats,
11785 .get_coalesce = tg3_get_coalesce,
11786 .set_coalesce = tg3_set_coalesce,
11787 .get_sset_count = tg3_get_sset_count,
11788 };
11789
11790 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11791 {
11792 u32 cursize, val, magic;
11793
11794 tp->nvram_size = EEPROM_CHIP_SIZE;
11795
11796 if (tg3_nvram_read(tp, 0, &magic) != 0)
11797 return;
11798
11799 if ((magic != TG3_EEPROM_MAGIC) &&
11800 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11801 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11802 return;
11803
11804 /*
11805 * Size the chip by reading offsets at increasing powers of two.
11806 * When we encounter our validation signature, we know the addressing
11807 * has wrapped around, and thus have our chip size.
11808 */
11809 cursize = 0x10;
11810
11811 while (cursize < tp->nvram_size) {
11812 if (tg3_nvram_read(tp, cursize, &val) != 0)
11813 return;
11814
11815 if (val == magic)
11816 break;
11817
11818 cursize <<= 1;
11819 }
11820
11821 tp->nvram_size = cursize;
11822 }
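/*
 * Illustrative sketch (not driver code; names below are hypothetical):
 * the probe loop above relies on EEPROM address wrap-around.  Reads at
 * doubling offsets return ordinary data until the offset equals the
 * device size, at which point the access aliases offset 0 and returns
 * the signature word.
 */
#if 0
static u32 probe_size_by_wraparound(u32 (*read32)(u32 off), u32 magic,
				    u32 limit)
{
	u32 cursize = 0x10;

	/* Double the offset until a read wraps back to the magic word. */
	while (cursize < limit && read32(cursize) != magic)
		cursize <<= 1;

	return cursize;
}
#endif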
11823
11824 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11825 {
11826 u32 val;
11827
11828 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11829 return;
11830
11831 /* Selfboot format */
11832 if (val != TG3_EEPROM_MAGIC) {
11833 tg3_get_eeprom_size(tp);
11834 return;
11835 }
11836
11837 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11838 if (val != 0) {
11839 /* This is confusing. We want to operate on the
11840 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11841 * call will read from NVRAM and byteswap the data
11842 * according to the byteswapping settings for all
11843 * other register accesses. This ensures the data we
11844 * want will always reside in the lower 16-bits.
11845 * However, the data in NVRAM is in LE format, which
11846 * means the data from the NVRAM read will always be
11847 * opposite the endianness of the CPU. The 16-bit
11848 * byteswap then brings the data to CPU endianness.
11849 */
11850 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11851 return;
11852 }
11853 }
11854 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11855 }
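/*
 * Worked example (illustrative) for the byte-order note above: if the
 * size halfword lands in the low 16 bits as 0x0002, then
 * swab16(0x0002) == 0x0200 == 512, and 512 * 1024 yields a 512 KB
 * tp->nvram_size.
 */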
11856
11857 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11858 {
11859 u32 nvcfg1;
11860
11861 nvcfg1 = tr32(NVRAM_CFG1);
11862 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11863 tg3_flag_set(tp, FLASH);
11864 } else {
11865 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11866 tw32(NVRAM_CFG1, nvcfg1);
11867 }
11868
11869 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11870 tg3_flag(tp, 5780_CLASS)) {
11871 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11872 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11873 tp->nvram_jedecnum = JEDEC_ATMEL;
11874 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11875 tg3_flag_set(tp, NVRAM_BUFFERED);
11876 break;
11877 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11878 tp->nvram_jedecnum = JEDEC_ATMEL;
11879 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11880 break;
11881 case FLASH_VENDOR_ATMEL_EEPROM:
11882 tp->nvram_jedecnum = JEDEC_ATMEL;
11883 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11884 tg3_flag_set(tp, NVRAM_BUFFERED);
11885 break;
11886 case FLASH_VENDOR_ST:
11887 tp->nvram_jedecnum = JEDEC_ST;
11888 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11889 tg3_flag_set(tp, NVRAM_BUFFERED);
11890 break;
11891 case FLASH_VENDOR_SAIFUN:
11892 tp->nvram_jedecnum = JEDEC_SAIFUN;
11893 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11894 break;
11895 case FLASH_VENDOR_SST_SMALL:
11896 case FLASH_VENDOR_SST_LARGE:
11897 tp->nvram_jedecnum = JEDEC_SST;
11898 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11899 break;
11900 }
11901 } else {
11902 tp->nvram_jedecnum = JEDEC_ATMEL;
11903 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11904 tg3_flag_set(tp, NVRAM_BUFFERED);
11905 }
11906 }
11907
11908 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11909 {
11910 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11911 case FLASH_5752PAGE_SIZE_256:
11912 tp->nvram_pagesize = 256;
11913 break;
11914 case FLASH_5752PAGE_SIZE_512:
11915 tp->nvram_pagesize = 512;
11916 break;
11917 case FLASH_5752PAGE_SIZE_1K:
11918 tp->nvram_pagesize = 1024;
11919 break;
11920 case FLASH_5752PAGE_SIZE_2K:
11921 tp->nvram_pagesize = 2048;
11922 break;
11923 case FLASH_5752PAGE_SIZE_4K:
11924 tp->nvram_pagesize = 4096;
11925 break;
11926 case FLASH_5752PAGE_SIZE_264:
11927 tp->nvram_pagesize = 264;
11928 break;
11929 case FLASH_5752PAGE_SIZE_528:
11930 tp->nvram_pagesize = 528;
11931 break;
11932 }
11933 }
11934
11935 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11936 {
11937 u32 nvcfg1;
11938
11939 nvcfg1 = tr32(NVRAM_CFG1);
11940
11941 /* NVRAM protection for TPM */
11942 if (nvcfg1 & (1 << 27))
11943 tg3_flag_set(tp, PROTECTED_NVRAM);
11944
11945 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11946 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11947 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11948 tp->nvram_jedecnum = JEDEC_ATMEL;
11949 tg3_flag_set(tp, NVRAM_BUFFERED);
11950 break;
11951 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11952 tp->nvram_jedecnum = JEDEC_ATMEL;
11953 tg3_flag_set(tp, NVRAM_BUFFERED);
11954 tg3_flag_set(tp, FLASH);
11955 break;
11956 case FLASH_5752VENDOR_ST_M45PE10:
11957 case FLASH_5752VENDOR_ST_M45PE20:
11958 case FLASH_5752VENDOR_ST_M45PE40:
11959 tp->nvram_jedecnum = JEDEC_ST;
11960 tg3_flag_set(tp, NVRAM_BUFFERED);
11961 tg3_flag_set(tp, FLASH);
11962 break;
11963 }
11964
11965 if (tg3_flag(tp, FLASH)) {
11966 tg3_nvram_get_pagesize(tp, nvcfg1);
11967 } else {
11968 /* For eeprom, set pagesize to maximum eeprom size */
11969 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11970
11971 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11972 tw32(NVRAM_CFG1, nvcfg1);
11973 }
11974 }
11975
11976 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11977 {
11978 u32 nvcfg1, protect = 0;
11979
11980 nvcfg1 = tr32(NVRAM_CFG1);
11981
11982 /* NVRAM protection for TPM */
11983 if (nvcfg1 & (1 << 27)) {
11984 tg3_flag_set(tp, PROTECTED_NVRAM);
11985 protect = 1;
11986 }
11987
11988 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11989 switch (nvcfg1) {
11990 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11991 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11992 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11993 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11994 tp->nvram_jedecnum = JEDEC_ATMEL;
11995 tg3_flag_set(tp, NVRAM_BUFFERED);
11996 tg3_flag_set(tp, FLASH);
11997 tp->nvram_pagesize = 264;
11998 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11999 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12000 tp->nvram_size = (protect ? 0x3e200 :
12001 TG3_NVRAM_SIZE_512KB);
12002 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12003 tp->nvram_size = (protect ? 0x1f200 :
12004 TG3_NVRAM_SIZE_256KB);
12005 else
12006 tp->nvram_size = (protect ? 0x1f200 :
12007 TG3_NVRAM_SIZE_128KB);
12008 break;
12009 case FLASH_5752VENDOR_ST_M45PE10:
12010 case FLASH_5752VENDOR_ST_M45PE20:
12011 case FLASH_5752VENDOR_ST_M45PE40:
12012 tp->nvram_jedecnum = JEDEC_ST;
12013 tg3_flag_set(tp, NVRAM_BUFFERED);
12014 tg3_flag_set(tp, FLASH);
12015 tp->nvram_pagesize = 256;
12016 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12017 tp->nvram_size = (protect ?
12018 TG3_NVRAM_SIZE_64KB :
12019 TG3_NVRAM_SIZE_128KB);
12020 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12021 tp->nvram_size = (protect ?
12022 TG3_NVRAM_SIZE_64KB :
12023 TG3_NVRAM_SIZE_256KB);
12024 else
12025 tp->nvram_size = (protect ?
12026 TG3_NVRAM_SIZE_128KB :
12027 TG3_NVRAM_SIZE_512KB);
12028 break;
12029 }
12030 }
12031
12032 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12033 {
12034 u32 nvcfg1;
12035
12036 nvcfg1 = tr32(NVRAM_CFG1);
12037
12038 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12039 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12040 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12041 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12042 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12043 tp->nvram_jedecnum = JEDEC_ATMEL;
12044 tg3_flag_set(tp, NVRAM_BUFFERED);
12045 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12046
12047 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12048 tw32(NVRAM_CFG1, nvcfg1);
12049 break;
12050 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12051 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12052 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12053 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12054 tp->nvram_jedecnum = JEDEC_ATMEL;
12055 tg3_flag_set(tp, NVRAM_BUFFERED);
12056 tg3_flag_set(tp, FLASH);
12057 tp->nvram_pagesize = 264;
12058 break;
12059 case FLASH_5752VENDOR_ST_M45PE10:
12060 case FLASH_5752VENDOR_ST_M45PE20:
12061 case FLASH_5752VENDOR_ST_M45PE40:
12062 tp->nvram_jedecnum = JEDEC_ST;
12063 tg3_flag_set(tp, NVRAM_BUFFERED);
12064 tg3_flag_set(tp, FLASH);
12065 tp->nvram_pagesize = 256;
12066 break;
12067 }
12068 }
12069
12070 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12071 {
12072 u32 nvcfg1, protect = 0;
12073
12074 nvcfg1 = tr32(NVRAM_CFG1);
12075
12076 /* NVRAM protection for TPM */
12077 if (nvcfg1 & (1 << 27)) {
12078 tg3_flag_set(tp, PROTECTED_NVRAM);
12079 protect = 1;
12080 }
12081
12082 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12083 switch (nvcfg1) {
12084 case FLASH_5761VENDOR_ATMEL_ADB021D:
12085 case FLASH_5761VENDOR_ATMEL_ADB041D:
12086 case FLASH_5761VENDOR_ATMEL_ADB081D:
12087 case FLASH_5761VENDOR_ATMEL_ADB161D:
12088 case FLASH_5761VENDOR_ATMEL_MDB021D:
12089 case FLASH_5761VENDOR_ATMEL_MDB041D:
12090 case FLASH_5761VENDOR_ATMEL_MDB081D:
12091 case FLASH_5761VENDOR_ATMEL_MDB161D:
12092 tp->nvram_jedecnum = JEDEC_ATMEL;
12093 tg3_flag_set(tp, NVRAM_BUFFERED);
12094 tg3_flag_set(tp, FLASH);
12095 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12096 tp->nvram_pagesize = 256;
12097 break;
12098 case FLASH_5761VENDOR_ST_A_M45PE20:
12099 case FLASH_5761VENDOR_ST_A_M45PE40:
12100 case FLASH_5761VENDOR_ST_A_M45PE80:
12101 case FLASH_5761VENDOR_ST_A_M45PE16:
12102 case FLASH_5761VENDOR_ST_M_M45PE20:
12103 case FLASH_5761VENDOR_ST_M_M45PE40:
12104 case FLASH_5761VENDOR_ST_M_M45PE80:
12105 case FLASH_5761VENDOR_ST_M_M45PE16:
12106 tp->nvram_jedecnum = JEDEC_ST;
12107 tg3_flag_set(tp, NVRAM_BUFFERED);
12108 tg3_flag_set(tp, FLASH);
12109 tp->nvram_pagesize = 256;
12110 break;
12111 }
12112
12113 if (protect) {
12114 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12115 } else {
12116 switch (nvcfg1) {
12117 case FLASH_5761VENDOR_ATMEL_ADB161D:
12118 case FLASH_5761VENDOR_ATMEL_MDB161D:
12119 case FLASH_5761VENDOR_ST_A_M45PE16:
12120 case FLASH_5761VENDOR_ST_M_M45PE16:
12121 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12122 break;
12123 case FLASH_5761VENDOR_ATMEL_ADB081D:
12124 case FLASH_5761VENDOR_ATMEL_MDB081D:
12125 case FLASH_5761VENDOR_ST_A_M45PE80:
12126 case FLASH_5761VENDOR_ST_M_M45PE80:
12127 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12128 break;
12129 case FLASH_5761VENDOR_ATMEL_ADB041D:
12130 case FLASH_5761VENDOR_ATMEL_MDB041D:
12131 case FLASH_5761VENDOR_ST_A_M45PE40:
12132 case FLASH_5761VENDOR_ST_M_M45PE40:
12133 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12134 break;
12135 case FLASH_5761VENDOR_ATMEL_ADB021D:
12136 case FLASH_5761VENDOR_ATMEL_MDB021D:
12137 case FLASH_5761VENDOR_ST_A_M45PE20:
12138 case FLASH_5761VENDOR_ST_M_M45PE20:
12139 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12140 break;
12141 }
12142 }
12143 }
12144
12145 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12146 {
12147 tp->nvram_jedecnum = JEDEC_ATMEL;
12148 tg3_flag_set(tp, NVRAM_BUFFERED);
12149 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12150 }
12151
12152 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12153 {
12154 u32 nvcfg1;
12155
12156 nvcfg1 = tr32(NVRAM_CFG1);
12157
12158 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12159 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12160 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12161 tp->nvram_jedecnum = JEDEC_ATMEL;
12162 tg3_flag_set(tp, NVRAM_BUFFERED);
12163 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12164
12165 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12166 tw32(NVRAM_CFG1, nvcfg1);
12167 return;
12168 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12169 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12170 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12171 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12172 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12173 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12174 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12175 tp->nvram_jedecnum = JEDEC_ATMEL;
12176 tg3_flag_set(tp, NVRAM_BUFFERED);
12177 tg3_flag_set(tp, FLASH);
12178
12179 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12180 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12181 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12182 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12183 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12184 break;
12185 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12186 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12187 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12188 break;
12189 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12190 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12191 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12192 break;
12193 }
12194 break;
12195 case FLASH_5752VENDOR_ST_M45PE10:
12196 case FLASH_5752VENDOR_ST_M45PE20:
12197 case FLASH_5752VENDOR_ST_M45PE40:
12198 tp->nvram_jedecnum = JEDEC_ST;
12199 tg3_flag_set(tp, NVRAM_BUFFERED);
12200 tg3_flag_set(tp, FLASH);
12201
12202 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12203 case FLASH_5752VENDOR_ST_M45PE10:
12204 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12205 break;
12206 case FLASH_5752VENDOR_ST_M45PE20:
12207 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12208 break;
12209 case FLASH_5752VENDOR_ST_M45PE40:
12210 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12211 break;
12212 }
12213 break;
12214 default:
12215 tg3_flag_set(tp, NO_NVRAM);
12216 return;
12217 }
12218
12219 tg3_nvram_get_pagesize(tp, nvcfg1);
12220 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12221 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12222 }
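/*
 * Illustrative note: 264 and 528 bytes are the Atmel DataFlash
 * power-of-two-plus-8 page sizes; judging from this check, they are the
 * only geometries that use the NVRAM address translation, so every
 * other page size sets NO_NVRAM_ADDR_TRANS.
 */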
12223
12224
12225 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12226 {
12227 u32 nvcfg1;
12228
12229 nvcfg1 = tr32(NVRAM_CFG1);
12230
12231 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12232 case FLASH_5717VENDOR_ATMEL_EEPROM:
12233 case FLASH_5717VENDOR_MICRO_EEPROM:
12234 tp->nvram_jedecnum = JEDEC_ATMEL;
12235 tg3_flag_set(tp, NVRAM_BUFFERED);
12236 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12237
12238 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12239 tw32(NVRAM_CFG1, nvcfg1);
12240 return;
12241 case FLASH_5717VENDOR_ATMEL_MDB011D:
12242 case FLASH_5717VENDOR_ATMEL_ADB011B:
12243 case FLASH_5717VENDOR_ATMEL_ADB011D:
12244 case FLASH_5717VENDOR_ATMEL_MDB021D:
12245 case FLASH_5717VENDOR_ATMEL_ADB021B:
12246 case FLASH_5717VENDOR_ATMEL_ADB021D:
12247 case FLASH_5717VENDOR_ATMEL_45USPT:
12248 tp->nvram_jedecnum = JEDEC_ATMEL;
12249 tg3_flag_set(tp, NVRAM_BUFFERED);
12250 tg3_flag_set(tp, FLASH);
12251
12252 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12253 case FLASH_5717VENDOR_ATMEL_MDB021D:
12254 /* Detect size with tg3_get_nvram_size() */
12255 break;
12256 case FLASH_5717VENDOR_ATMEL_ADB021B:
12257 case FLASH_5717VENDOR_ATMEL_ADB021D:
12258 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12259 break;
12260 default:
12261 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12262 break;
12263 }
12264 break;
12265 case FLASH_5717VENDOR_ST_M_M25PE10:
12266 case FLASH_5717VENDOR_ST_A_M25PE10:
12267 case FLASH_5717VENDOR_ST_M_M45PE10:
12268 case FLASH_5717VENDOR_ST_A_M45PE10:
12269 case FLASH_5717VENDOR_ST_M_M25PE20:
12270 case FLASH_5717VENDOR_ST_A_M25PE20:
12271 case FLASH_5717VENDOR_ST_M_M45PE20:
12272 case FLASH_5717VENDOR_ST_A_M45PE20:
12273 case FLASH_5717VENDOR_ST_25USPT:
12274 case FLASH_5717VENDOR_ST_45USPT:
12275 tp->nvram_jedecnum = JEDEC_ST;
12276 tg3_flag_set(tp, NVRAM_BUFFERED);
12277 tg3_flag_set(tp, FLASH);
12278
12279 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12280 case FLASH_5717VENDOR_ST_M_M25PE20:
12281 case FLASH_5717VENDOR_ST_M_M45PE20:
12282 /* Detect size with tg3_get_nvram_size() */
12283 break;
12284 case FLASH_5717VENDOR_ST_A_M25PE20:
12285 case FLASH_5717VENDOR_ST_A_M45PE20:
12286 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12287 break;
12288 default:
12289 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12290 break;
12291 }
12292 break;
12293 default:
12294 tg3_flag_set(tp, NO_NVRAM);
12295 return;
12296 }
12297
12298 tg3_nvram_get_pagesize(tp, nvcfg1);
12299 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12300 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12301 }
12302
12303 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12304 {
12305 u32 nvcfg1, nvmpinstrp;
12306
12307 nvcfg1 = tr32(NVRAM_CFG1);
12308 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12309
12310 switch (nvmpinstrp) {
12311 case FLASH_5720_EEPROM_HD:
12312 case FLASH_5720_EEPROM_LD:
12313 tp->nvram_jedecnum = JEDEC_ATMEL;
12314 tg3_flag_set(tp, NVRAM_BUFFERED);
12315
12316 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12317 tw32(NVRAM_CFG1, nvcfg1);
12318 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12319 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12320 else
12321 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12322 return;
12323 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12324 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12325 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12326 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12327 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12328 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12329 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12330 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12331 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12332 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12333 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12334 case FLASH_5720VENDOR_ATMEL_45USPT:
12335 tp->nvram_jedecnum = JEDEC_ATMEL;
12336 tg3_flag_set(tp, NVRAM_BUFFERED);
12337 tg3_flag_set(tp, FLASH);
12338
12339 switch (nvmpinstrp) {
12340 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12341 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12342 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12343 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12344 break;
12345 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12346 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12347 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12348 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12349 break;
12350 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12351 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12352 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12353 break;
12354 default:
12355 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12356 break;
12357 }
12358 break;
12359 case FLASH_5720VENDOR_M_ST_M25PE10:
12360 case FLASH_5720VENDOR_M_ST_M45PE10:
12361 case FLASH_5720VENDOR_A_ST_M25PE10:
12362 case FLASH_5720VENDOR_A_ST_M45PE10:
12363 case FLASH_5720VENDOR_M_ST_M25PE20:
12364 case FLASH_5720VENDOR_M_ST_M45PE20:
12365 case FLASH_5720VENDOR_A_ST_M25PE20:
12366 case FLASH_5720VENDOR_A_ST_M45PE20:
12367 case FLASH_5720VENDOR_M_ST_M25PE40:
12368 case FLASH_5720VENDOR_M_ST_M45PE40:
12369 case FLASH_5720VENDOR_A_ST_M25PE40:
12370 case FLASH_5720VENDOR_A_ST_M45PE40:
12371 case FLASH_5720VENDOR_M_ST_M25PE80:
12372 case FLASH_5720VENDOR_M_ST_M45PE80:
12373 case FLASH_5720VENDOR_A_ST_M25PE80:
12374 case FLASH_5720VENDOR_A_ST_M45PE80:
12375 case FLASH_5720VENDOR_ST_25USPT:
12376 case FLASH_5720VENDOR_ST_45USPT:
12377 tp->nvram_jedecnum = JEDEC_ST;
12378 tg3_flag_set(tp, NVRAM_BUFFERED);
12379 tg3_flag_set(tp, FLASH);
12380
12381 switch (nvmpinstrp) {
12382 case FLASH_5720VENDOR_M_ST_M25PE20:
12383 case FLASH_5720VENDOR_M_ST_M45PE20:
12384 case FLASH_5720VENDOR_A_ST_M25PE20:
12385 case FLASH_5720VENDOR_A_ST_M45PE20:
12386 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12387 break;
12388 case FLASH_5720VENDOR_M_ST_M25PE40:
12389 case FLASH_5720VENDOR_M_ST_M45PE40:
12390 case FLASH_5720VENDOR_A_ST_M25PE40:
12391 case FLASH_5720VENDOR_A_ST_M45PE40:
12392 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12393 break;
12394 case FLASH_5720VENDOR_M_ST_M25PE80:
12395 case FLASH_5720VENDOR_M_ST_M45PE80:
12396 case FLASH_5720VENDOR_A_ST_M25PE80:
12397 case FLASH_5720VENDOR_A_ST_M45PE80:
12398 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12399 break;
12400 default:
12401 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12402 break;
12403 }
12404 break;
12405 default:
12406 tg3_flag_set(tp, NO_NVRAM);
12407 return;
12408 }
12409
12410 tg3_nvram_get_pagesize(tp, nvcfg1);
12411 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12412 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12413 }
12414
12415 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12416 static void __devinit tg3_nvram_init(struct tg3 *tp)
12417 {
12418 tw32_f(GRC_EEPROM_ADDR,
12419 (EEPROM_ADDR_FSM_RESET |
12420 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12421 EEPROM_ADDR_CLKPERD_SHIFT)));
12422
12423 msleep(1);
12424
12425 /* Enable seeprom accesses. */
12426 tw32_f(GRC_LOCAL_CTRL,
12427 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12428 udelay(100);
12429
12430 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12431 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12432 tg3_flag_set(tp, NVRAM);
12433
12434 if (tg3_nvram_lock(tp)) {
12435 netdev_warn(tp->dev,
12436 "Cannot get nvram lock, %s failed\n",
12437 __func__);
12438 return;
12439 }
12440 tg3_enable_nvram_access(tp);
12441
12442 tp->nvram_size = 0;
12443
12444 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12445 tg3_get_5752_nvram_info(tp);
12446 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12447 tg3_get_5755_nvram_info(tp);
12448 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12449 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12451 tg3_get_5787_nvram_info(tp);
12452 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12453 tg3_get_5761_nvram_info(tp);
12454 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12455 tg3_get_5906_nvram_info(tp);
12456 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12457 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12458 tg3_get_57780_nvram_info(tp);
12459 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12460 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12461 tg3_get_5717_nvram_info(tp);
12462 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12463 tg3_get_5720_nvram_info(tp);
12464 else
12465 tg3_get_nvram_info(tp);
12466
12467 if (tp->nvram_size == 0)
12468 tg3_get_nvram_size(tp);
12469
12470 tg3_disable_nvram_access(tp);
12471 tg3_nvram_unlock(tp);
12472
12473 } else {
12474 tg3_flag_clear(tp, NVRAM);
12475 tg3_flag_clear(tp, NVRAM_BUFFERED);
12476
12477 tg3_get_eeprom_size(tp);
12478 }
12479 }
12480
12481 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12482 u32 offset, u32 len, u8 *buf)
12483 {
12484 int i, j, rc = 0;
12485 u32 val;
12486
12487 for (i = 0; i < len; i += 4) {
12488 u32 addr;
12489 __be32 data;
12490
12491 addr = offset + i;
12492
12493 memcpy(&data, buf + i, 4);
12494
12495 /*
12496 * The SEEPROM interface expects the data to always be opposite
12497 * the native endian format. We accomplish this by reversing
12498 * all the operations that would have been performed on the
12499 * data from a call to tg3_nvram_read_be32().
12500 */
12501 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12502
12503 val = tr32(GRC_EEPROM_ADDR);
12504 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12505
12506 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12507 EEPROM_ADDR_READ);
12508 tw32(GRC_EEPROM_ADDR, val |
12509 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12510 (addr & EEPROM_ADDR_ADDR_MASK) |
12511 EEPROM_ADDR_START |
12512 EEPROM_ADDR_WRITE);
12513
12514 for (j = 0; j < 1000; j++) {
12515 val = tr32(GRC_EEPROM_ADDR);
12516
12517 if (val & EEPROM_ADDR_COMPLETE)
12518 break;
12519 msleep(1);
12520 }
12521 if (!(val & EEPROM_ADDR_COMPLETE)) {
12522 rc = -EBUSY;
12523 break;
12524 }
12525 }
12526
12527 return rc;
12528 }
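/*
 * Illustrative sketch: the completion wait above is a bounded poll, a
 * pattern this file uses repeatedly.  Generic form (hypothetical names):
 */
#if 0
static int poll_bit_set(u32 (*read32)(u32 reg), u32 reg, u32 bit,
			int tries)
{
	while (tries--) {
		if (read32(reg) & bit)
			return 0;	/* operation completed */
		msleep(1);		/* matches the 1 ms step above */
	}
	return -EBUSY;			/* never completed */
}
#endif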
12529
12530 /* offset and length are dword aligned */
12531 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12532 u8 *buf)
12533 {
12534 int ret = 0;
12535 u32 pagesize = tp->nvram_pagesize;
12536 u32 pagemask = pagesize - 1;
12537 u32 nvram_cmd;
12538 u8 *tmp;
12539
12540 tmp = kmalloc(pagesize, GFP_KERNEL);
12541 if (tmp == NULL)
12542 return -ENOMEM;
12543
12544 while (len) {
12545 int j;
12546 u32 phy_addr, page_off, size;
12547
12548 phy_addr = offset & ~pagemask;
12549
12550 for (j = 0; j < pagesize; j += 4) {
12551 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12552 (__be32 *) (tmp + j));
12553 if (ret)
12554 break;
12555 }
12556 if (ret)
12557 break;
12558
12559 page_off = offset & pagemask;
12560 size = pagesize;
12561 if (len < size)
12562 size = len;
12563
12564 len -= size;
12565
12566 memcpy(tmp + page_off, buf, size);
12567
12568 offset = offset + (pagesize - page_off);
12569
12570 tg3_enable_nvram_access(tp);
12571
12572 /*
12573 * Before we can erase the flash page, we need
12574 * to issue a special "write enable" command.
12575 */
12576 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12577
12578 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12579 break;
12580
12581 /* Erase the target page */
12582 tw32(NVRAM_ADDR, phy_addr);
12583
12584 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12585 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12586
12587 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12588 break;
12589
12590 /* Issue another write enable to start the write. */
12591 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12592
12593 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12594 break;
12595
12596 for (j = 0; j < pagesize; j += 4) {
12597 __be32 data;
12598
12599 data = *((__be32 *) (tmp + j));
12600
12601 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12602
12603 tw32(NVRAM_ADDR, phy_addr + j);
12604
12605 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12606 NVRAM_CMD_WR;
12607
12608 if (j == 0)
12609 nvram_cmd |= NVRAM_CMD_FIRST;
12610 else if (j == (pagesize - 4))
12611 nvram_cmd |= NVRAM_CMD_LAST;
12612
12613 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12614 break;
12615 }
12616 if (ret)
12617 break;
12618 }
12619
12620 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12621 tg3_nvram_exec_cmd(tp, nvram_cmd);
12622
12623 kfree(tmp);
12624
12625 return ret;
12626 }
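/*
 * Illustrative note: the unbuffered path above is a read-modify-write
 * per flash page: read the page into a bounce buffer, merge the
 * caller's bytes at page_off, issue WREN plus a page erase, issue WREN
 * again, then stream the page back.  The FIRST/LAST flags bracket the
 * page, as in this sketch of the per-word command selection:
 */
#if 0
static u32 page_word_cmd(u32 j, u32 pagesize)
{
	u32 cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

	if (j == 0)
		cmd |= NVRAM_CMD_FIRST;	/* first word of the page */
	else if (j == pagesize - 4)
		cmd |= NVRAM_CMD_LAST;	/* last word of the page */
	return cmd;
}
#endif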
12627
12628 /* offset and length are dword aligned */
12629 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12630 u8 *buf)
12631 {
12632 int i, ret = 0;
12633
12634 for (i = 0; i < len; i += 4, offset += 4) {
12635 u32 page_off, phy_addr, nvram_cmd;
12636 __be32 data;
12637
12638 memcpy(&data, buf + i, 4);
12639 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12640
12641 page_off = offset % tp->nvram_pagesize;
12642
12643 phy_addr = tg3_nvram_phys_addr(tp, offset);
12644
12645 tw32(NVRAM_ADDR, phy_addr);
12646
12647 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12648
12649 if (page_off == 0 || i == 0)
12650 nvram_cmd |= NVRAM_CMD_FIRST;
12651 if (page_off == (tp->nvram_pagesize - 4))
12652 nvram_cmd |= NVRAM_CMD_LAST;
12653
12654 if (i == (len - 4))
12655 nvram_cmd |= NVRAM_CMD_LAST;
12656
12657 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12658 !tg3_flag(tp, 5755_PLUS) &&
12659 (tp->nvram_jedecnum == JEDEC_ST) &&
12660 (nvram_cmd & NVRAM_CMD_FIRST)) {
12661
12662 if ((ret = tg3_nvram_exec_cmd(tp,
12663 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12664 NVRAM_CMD_DONE)))
12665
12666 break;
12667 }
12668 if (!tg3_flag(tp, FLASH)) {
12669 /* We always do complete word writes to eeprom. */
12670 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12671 }
12672
12673 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12674 break;
12675 }
12676 return ret;
12677 }
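/*
 * Illustrative note: unlike the unbuffered path, the buffered path above
 * never erases a page itself.  It marks page boundaries with
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST (page_off == offset % pagesize), issues
 * an explicit WREN before the first word for ST parts on older chips,
 * and forces FIRST|LAST on every word when the device is an EEPROM
 * rather than flash.
 */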
12678
12679 /* offset and length are dword aligned */
12680 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12681 {
12682 int ret;
12683
12684 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12685 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12686 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12687 udelay(40);
12688 }
12689
12690 if (!tg3_flag(tp, NVRAM)) {
12691 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12692 } else {
12693 u32 grc_mode;
12694
12695 ret = tg3_nvram_lock(tp);
12696 if (ret)
12697 return ret;
12698
12699 tg3_enable_nvram_access(tp);
12700 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12701 tw32(NVRAM_WRITE1, 0x406);
12702
12703 grc_mode = tr32(GRC_MODE);
12704 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12705
12706 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12707 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12708 buf);
12709 } else {
12710 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12711 buf);
12712 }
12713
12714 grc_mode = tr32(GRC_MODE);
12715 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12716
12717 tg3_disable_nvram_access(tp);
12718 tg3_nvram_unlock(tp);
12719 }
12720
12721 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12722 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12723 udelay(40);
12724 }
12725
12726 return ret;
12727 }
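/*
 * Illustrative note: the NVRAM branch above brackets the write as
 * lock -> enable access -> raise GRC_MODE_NVRAM_WR_ENABLE -> write ->
 * drop WR_ENABLE -> disable access -> unlock, while the EEPROM
 * write-protect GPIO output is deasserted around the whole operation
 * when EEPROM_WRITE_PROT is set.
 */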
12728
12729 struct subsys_tbl_ent {
12730 u16 subsys_vendor, subsys_devid;
12731 u32 phy_id;
12732 };
12733
12734 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12735 /* Broadcom boards. */
12736 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12737 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12738 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12739 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12740 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12741 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12742 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12743 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12744 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12745 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12746 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12747 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12748 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12749 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12750 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12751 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12752 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12753 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12754 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12755 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12756 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12757 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12758
12759 /* 3com boards. */
12760 { TG3PCI_SUBVENDOR_ID_3COM,
12761 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12762 { TG3PCI_SUBVENDOR_ID_3COM,
12763 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12764 { TG3PCI_SUBVENDOR_ID_3COM,
12765 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12766 { TG3PCI_SUBVENDOR_ID_3COM,
12767 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12768 { TG3PCI_SUBVENDOR_ID_3COM,
12769 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12770
12771 /* DELL boards. */
12772 { TG3PCI_SUBVENDOR_ID_DELL,
12773 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12774 { TG3PCI_SUBVENDOR_ID_DELL,
12775 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12776 { TG3PCI_SUBVENDOR_ID_DELL,
12777 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12778 { TG3PCI_SUBVENDOR_ID_DELL,
12779 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12780
12781 /* Compaq boards. */
12782 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12783 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12784 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12785 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12786 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12787 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12788 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12789 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12790 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12791 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12792
12793 /* IBM boards. */
12794 { TG3PCI_SUBVENDOR_ID_IBM,
12795 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12796 };
12797
12798 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12799 {
12800 int i;
12801
12802 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12803 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12804 tp->pdev->subsystem_vendor) &&
12805 (subsys_id_to_phy_id[i].subsys_devid ==
12806 tp->pdev->subsystem_device))
12807 return &subsys_id_to_phy_id[i];
12808 }
12809 return NULL;
12810 }
12811
12812 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12813 {
12814 u32 val;
12815
12816 tp->phy_id = TG3_PHY_ID_INVALID;
12817 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12818
12819 /* Assume an onboard device and WOL capable by default. */
12820 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12821 tg3_flag_set(tp, WOL_CAP);
12822
12823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12824 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12825 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12826 tg3_flag_set(tp, IS_NIC);
12827 }
12828 val = tr32(VCPU_CFGSHDW);
12829 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12830 tg3_flag_set(tp, ASPM_WORKAROUND);
12831 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12832 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12833 tg3_flag_set(tp, WOL_ENABLE);
12834 device_set_wakeup_enable(&tp->pdev->dev, true);
12835 }
12836 goto done;
12837 }
12838
12839 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12840 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12841 u32 nic_cfg, led_cfg;
12842 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12843 int eeprom_phy_serdes = 0;
12844
12845 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12846 tp->nic_sram_data_cfg = nic_cfg;
12847
12848 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12849 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12850 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12851 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12852 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12853 (ver > 0) && (ver < 0x100))
12854 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12855
12856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12857 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12858
12859 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12860 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12861 eeprom_phy_serdes = 1;
12862
12863 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12864 if (nic_phy_id != 0) {
12865 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12866 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12867
12868 eeprom_phy_id = (id1 >> 16) << 10;
12869 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12870 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12871 } else
12872 eeprom_phy_id = 0;
12873
12874 tp->phy_id = eeprom_phy_id;
12875 if (eeprom_phy_serdes) {
12876 if (!tg3_flag(tp, 5705_PLUS))
12877 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12878 else
12879 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12880 }
12881
12882 if (tg3_flag(tp, 5750_PLUS))
12883 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12884 SHASTA_EXT_LED_MODE_MASK);
12885 else
12886 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12887
12888 switch (led_cfg) {
12889 default:
12890 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12891 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12892 break;
12893
12894 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12895 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12896 break;
12897
12898 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12899 tp->led_ctrl = LED_CTRL_MODE_MAC;
12900
12901 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12902 * read from some older 5700/5701 bootcode.
12903 */
12904 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12905 ASIC_REV_5700 ||
12906 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12907 ASIC_REV_5701)
12908 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12909
12910 break;
12911
12912 case SHASTA_EXT_LED_SHARED:
12913 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12914 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12915 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12916 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12917 LED_CTRL_MODE_PHY_2);
12918 break;
12919
12920 case SHASTA_EXT_LED_MAC:
12921 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12922 break;
12923
12924 case SHASTA_EXT_LED_COMBO:
12925 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12926 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12927 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12928 LED_CTRL_MODE_PHY_2);
12929 break;
12930
12931 }
12932
12933 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12935 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12936 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12937
12938 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12939 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12940
12941 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12942 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12943 if ((tp->pdev->subsystem_vendor ==
12944 PCI_VENDOR_ID_ARIMA) &&
12945 (tp->pdev->subsystem_device == 0x205a ||
12946 tp->pdev->subsystem_device == 0x2063))
12947 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12948 } else {
12949 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12950 tg3_flag_set(tp, IS_NIC);
12951 }
12952
12953 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12954 tg3_flag_set(tp, ENABLE_ASF);
12955 if (tg3_flag(tp, 5750_PLUS))
12956 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12957 }
12958
12959 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12960 tg3_flag(tp, 5750_PLUS))
12961 tg3_flag_set(tp, ENABLE_APE);
12962
12963 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12964 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12965 tg3_flag_clear(tp, WOL_CAP);
12966
12967 if (tg3_flag(tp, WOL_CAP) &&
12968 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12969 tg3_flag_set(tp, WOL_ENABLE);
12970 device_set_wakeup_enable(&tp->pdev->dev, true);
12971 }
12972
12973 if (cfg2 & (1 << 17))
12974 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12975
12976 /* serdes signal pre-emphasis in register 0x590 is set by
12977 * bootcode if bit 18 is set */
12978 if (cfg2 & (1 << 18))
12979 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12980
12981 if ((tg3_flag(tp, 57765_PLUS) ||
12982 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12983 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12984 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12985 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12986
12987 if (tg3_flag(tp, PCI_EXPRESS) &&
12988 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12989 !tg3_flag(tp, 57765_PLUS)) {
12990 u32 cfg3;
12991
12992 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12993 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12994 tg3_flag_set(tp, ASPM_WORKAROUND);
12995 }
12996
12997 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12998 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12999 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13000 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13001 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13002 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13003 }
13004 done:
13005 if (tg3_flag(tp, WOL_CAP))
13006 device_set_wakeup_enable(&tp->pdev->dev,
13007 tg3_flag(tp, WOL_ENABLE));
13008 else
13009 device_set_wakeup_capable(&tp->pdev->dev, false);
13010 }
13011
13012 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13013 {
13014 int i;
13015 u32 val;
13016
13017 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13018 tw32(OTP_CTRL, cmd);
13019
13020 /* Wait for up to 1 ms for command to execute. */
13021 for (i = 0; i < 100; i++) {
13022 val = tr32(OTP_STATUS);
13023 if (val & OTP_STATUS_CMD_DONE)
13024 break;
13025 udelay(10);
13026 }
13027
13028 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13029 }
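/* Arithmetic check (illustrative): 100 iterations of udelay(10) bound
 * the wait at 100 * 10 us = 1000 us, matching the "up to 1 ms" comment.
 */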
13030
13031 /* Read the gphy configuration from the OTP region of the chip. The gphy
13032 * configuration is a 32-bit value that straddles the alignment boundary.
13033 * We do two 32-bit reads and then shift and merge the results.
13034 */
13035 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13036 {
13037 u32 bhalf_otp, thalf_otp;
13038
13039 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13040
13041 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13042 return 0;
13043
13044 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13045
13046 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13047 return 0;
13048
13049 thalf_otp = tr32(OTP_READ_DATA);
13050
13051 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13052
13053 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13054 return 0;
13055
13056 bhalf_otp = tr32(OTP_READ_DATA);
13057
13058 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13059 }
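/*
 * Worked example (illustrative): with thalf_otp == 0xaaaabbbb and
 * bhalf_otp == 0xccccdddd, the merge above computes
 * ((0xaaaabbbb & 0x0000ffff) << 16) | (0xccccdddd >> 16) == 0xbbbbcccc,
 * i.e. the low half of the first word joined to the high half of the
 * second: exactly the 32 bits straddling the alignment boundary.
 */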
13060
13061 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13062 {
13063 u32 adv = ADVERTISED_Autoneg |
13064 ADVERTISED_Pause;
13065
13066 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13067 adv |= ADVERTISED_1000baseT_Half |
13068 ADVERTISED_1000baseT_Full;
13069
13070 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13071 adv |= ADVERTISED_100baseT_Half |
13072 ADVERTISED_100baseT_Full |
13073 ADVERTISED_10baseT_Half |
13074 ADVERTISED_10baseT_Full |
13075 ADVERTISED_TP;
13076 else
13077 adv |= ADVERTISED_FIBRE;
13078
13079 tp->link_config.advertising = adv;
13080 tp->link_config.speed = SPEED_INVALID;
13081 tp->link_config.duplex = DUPLEX_INVALID;
13082 tp->link_config.autoneg = AUTONEG_ENABLE;
13083 tp->link_config.active_speed = SPEED_INVALID;
13084 tp->link_config.active_duplex = DUPLEX_INVALID;
13085 tp->link_config.orig_speed = SPEED_INVALID;
13086 tp->link_config.orig_duplex = DUPLEX_INVALID;
13087 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13088 }
13089
13090 static int __devinit tg3_phy_probe(struct tg3 *tp)
13091 {
13092 u32 hw_phy_id_1, hw_phy_id_2;
13093 u32 hw_phy_id, hw_phy_id_masked;
13094 int err;
13095
13096 /* flow control autonegotiation is default behavior */
13097 tg3_flag_set(tp, PAUSE_AUTONEG);
13098 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13099
13100 if (tg3_flag(tp, USE_PHYLIB))
13101 return tg3_phy_init(tp);
13102
13103 /* Reading the PHY ID register can conflict with ASF
13104 * firmware access to the PHY hardware.
13105 */
13106 err = 0;
13107 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13108 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13109 } else {
13110 /* Now read the physical PHY_ID from the chip and verify
13111 * that it is sane. If it doesn't look good, we fall back
13112 * first to the PHY_ID found in the eeprom area and, failing
13113 * that, to the hard-coded subsystem ID table.
13114 */
13115 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13116 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13117
13118 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13119 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13120 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13121
13122 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13123 }
13124
13125 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13126 tp->phy_id = hw_phy_id;
13127 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13128 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13129 else
13130 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13131 } else {
13132 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13133 /* Do nothing, phy ID already set up in
13134 * tg3_get_eeprom_hw_cfg().
13135 */
13136 } else {
13137 struct subsys_tbl_ent *p;
13138
13139 /* No eeprom signature? Try the hardcoded
13140 * subsys device table.
13141 */
13142 p = tg3_lookup_by_subsys(tp);
13143 if (!p)
13144 return -ENODEV;
13145
13146 tp->phy_id = p->phy_id;
13147 if (!tp->phy_id ||
13148 tp->phy_id == TG3_PHY_ID_BCM8002)
13149 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13150 }
13151 }
13152
13153 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13154 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13156 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13157 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13158 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13159 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13160 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13161
13162 tg3_phy_init_link_config(tp);
13163
13164 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13165 !tg3_flag(tp, ENABLE_APE) &&
13166 !tg3_flag(tp, ENABLE_ASF)) {
13167 u32 bmsr, mask;
13168
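/* BMSR latches link-down events (an assumption based on standard MII
 * behavior), hence the double read below so the second value reflects
 * the current link state.
 */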
13169 tg3_readphy(tp, MII_BMSR, &bmsr);
13170 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13171 (bmsr & BMSR_LSTATUS))
13172 goto skip_phy_reset;
13173
13174 err = tg3_phy_reset(tp);
13175 if (err)
13176 return err;
13177
13178 tg3_phy_set_wirespeed(tp);
13179
13180 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13181 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13182 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13183 if (!tg3_copper_is_advertising_all(tp, mask)) {
13184 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13185 tp->link_config.flowctrl);
13186
13187 tg3_writephy(tp, MII_BMCR,
13188 BMCR_ANENABLE | BMCR_ANRESTART);
13189 }
13190 }
13191
13192 skip_phy_reset:
13193 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13194 err = tg3_init_5401phy_dsp(tp);
13195 if (err)
13196 return err;
13197
13198 err = tg3_init_5401phy_dsp(tp);
13199 }
13200
13201 return err;
13202 }
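/*
 * Worked example (illustrative) of the ID packing in tg3_phy_probe():
 * with hypothetical reads hw_phy_id_1 == 0x1234 and hw_phy_id_2 == 0xabcd,
 *   (0x1234 & 0xffff) << 10  == 0x0048d000
 *   (0xabcd & 0xfc00) << 16  == 0xa8000000
 *   (0xabcd & 0x03ff)        == 0x000003cd
 * so hw_phy_id == 0xa848d3cd before masking with TG3_PHY_ID_MASK.
 */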
13203
13204 static void __devinit tg3_read_vpd(struct tg3 *tp)
13205 {
13206 u8 *vpd_data;
13207 unsigned int block_end, rosize, len;
13208 u32 vpdlen;
13209 int j, i = 0;
13210
13211 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13212 if (!vpd_data)
13213 goto out_no_vpd;
13214
13215 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13216 if (i < 0)
13217 goto out_not_found;
13218
13219 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13220 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13221 i += PCI_VPD_LRDT_TAG_SIZE;
13222
13223 if (block_end > vpdlen)
13224 goto out_not_found;
13225
13226 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13227 PCI_VPD_RO_KEYWORD_MFR_ID);
13228 if (j > 0) {
13229 len = pci_vpd_info_field_size(&vpd_data[j]);
13230
13231 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13232 if (j + len > block_end || len != 4 ||
13233 memcmp(&vpd_data[j], "1028", 4))
13234 goto partno;
13235
13236 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13237 PCI_VPD_RO_KEYWORD_VENDOR0);
13238 if (j < 0)
13239 goto partno;
13240
13241 len = pci_vpd_info_field_size(&vpd_data[j]);
13242
13243 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13244 if (j + len > block_end || len >= TG3_VER_SIZE)
13245 goto partno;
13246
13247 memcpy(tp->fw_ver, &vpd_data[j], len);
13248 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
13249 }
13250
13251 partno:
13252 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13253 PCI_VPD_RO_KEYWORD_PARTNO);
13254 if (i < 0)
13255 goto out_not_found;
13256
13257 len = pci_vpd_info_field_size(&vpd_data[i]);
13258
13259 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13260 if (len > TG3_BPN_SIZE ||
13261 (len + i) > vpdlen)
13262 goto out_not_found;
13263
13264 memcpy(tp->board_part_number, &vpd_data[i], len);
13265
13266 out_not_found:
13267 kfree(vpd_data);
13268 if (tp->board_part_number[0])
13269 return;
13270
13271 out_no_vpd:
13272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13273 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13274 strcpy(tp->board_part_number, "BCM5717");
13275 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13276 strcpy(tp->board_part_number, "BCM5718");
13277 else
13278 goto nomatch;
13279 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13280 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13281 strcpy(tp->board_part_number, "BCM57780");
13282 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13283 strcpy(tp->board_part_number, "BCM57760");
13284 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13285 strcpy(tp->board_part_number, "BCM57790");
13286 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13287 strcpy(tp->board_part_number, "BCM57788");
13288 else
13289 goto nomatch;
13290 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13291 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13292 strcpy(tp->board_part_number, "BCM57761");
13293 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13294 strcpy(tp->board_part_number, "BCM57765");
13295 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13296 strcpy(tp->board_part_number, "BCM57781");
13297 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13298 strcpy(tp->board_part_number, "BCM57785");
13299 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13300 strcpy(tp->board_part_number, "BCM57791");
13301 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13302 strcpy(tp->board_part_number, "BCM57795");
13303 else
13304 goto nomatch;
13305 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13306 strcpy(tp->board_part_number, "BCM95906");
13307 } else {
13308 nomatch:
13309 strcpy(tp->board_part_number, "none");
13310 }
13311 }
13312
13313 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13314 {
13315 u32 val;
13316
13317 if (tg3_nvram_read(tp, offset, &val) ||
13318 (val & 0xfc000000) != 0x0c000000 ||
13319 tg3_nvram_read(tp, offset + 4, &val) ||
13320 val != 0)
13321 return 0;
13322
13323 return 1;
13324 }
13325
13326 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13327 {
13328 u32 val, offset, start, ver_offset;
13329 int i, dst_off;
13330 bool newver = false;
13331
13332 if (tg3_nvram_read(tp, 0xc, &offset) ||
13333 tg3_nvram_read(tp, 0x4, &start))
13334 return;
13335
13336 offset = tg3_nvram_logical_addr(tp, offset);
13337
13338 if (tg3_nvram_read(tp, offset, &val))
13339 return;
13340
13341 if ((val & 0xfc000000) == 0x0c000000) {
13342 if (tg3_nvram_read(tp, offset + 4, &val))
13343 return;
13344
13345 if (val == 0)
13346 newver = true;
13347 }
13348
13349 dst_off = strlen(tp->fw_ver);
13350
13351 if (newver) {
13352 if (TG3_VER_SIZE - dst_off < 16 ||
13353 tg3_nvram_read(tp, offset + 8, &ver_offset))
13354 return;
13355
13356 offset = offset + ver_offset - start;
13357 for (i = 0; i < 16; i += 4) {
13358 __be32 v;
13359 if (tg3_nvram_read_be32(tp, offset + i, &v))
13360 return;
13361
13362 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13363 }
13364 } else {
13365 u32 major, minor;
13366
13367 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13368 return;
13369
13370 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13371 TG3_NVM_BCVER_MAJSFT;
13372 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13373 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13374 "v%d.%02d", major, minor);
13375 }
13376 }
13377
13378 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13379 {
13380 u32 val, major, minor;
13381
13382 /* Use native endian representation */
13383 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13384 return;
13385
13386 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13387 TG3_NVM_HWSB_CFG1_MAJSFT;
13388 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13389 TG3_NVM_HWSB_CFG1_MINSFT;
13390
13391 snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13392 }
13393
13394 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13395 {
13396 u32 offset, major, minor, build;
13397
13398 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13399
13400 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13401 return;
13402
13403 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13404 case TG3_EEPROM_SB_REVISION_0:
13405 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13406 break;
13407 case TG3_EEPROM_SB_REVISION_2:
13408 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13409 break;
13410 case TG3_EEPROM_SB_REVISION_3:
13411 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13412 break;
13413 case TG3_EEPROM_SB_REVISION_4:
13414 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13415 break;
13416 case TG3_EEPROM_SB_REVISION_5:
13417 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13418 break;
13419 case TG3_EEPROM_SB_REVISION_6:
13420 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13421 break;
13422 default:
13423 return;
13424 }
13425
13426 if (tg3_nvram_read(tp, offset, &val))
13427 return;
13428
13429 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13430 TG3_EEPROM_SB_EDH_BLD_SHFT;
13431 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13432 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13433 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13434
13435 if (minor > 99 || build > 26)
13436 return;
13437
13438 offset = strlen(tp->fw_ver);
13439 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13440 " v%d.%02d", major, minor);
13441
13442 if (build > 0) {
13443 offset = strlen(tp->fw_ver);
13444 if (offset < TG3_VER_SIZE - 1)
13445 tp->fw_ver[offset] = 'a' + build - 1;
13446 }
13447 }
13448
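/* Locate the ASF management firmware image via the NVRAM directory
 * and append its version string to tp->fw_ver.
 */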
13449 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13450 {
13451 u32 val, offset, start;
13452 int i, vlen;
13453
13454 for (offset = TG3_NVM_DIR_START;
13455 offset < TG3_NVM_DIR_END;
13456 offset += TG3_NVM_DIRENT_SIZE) {
13457 if (tg3_nvram_read(tp, offset, &val))
13458 return;
13459
13460 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13461 break;
13462 }
13463
13464 if (offset == TG3_NVM_DIR_END)
13465 return;
13466
13467 if (!tg3_flag(tp, 5705_PLUS))
13468 start = 0x08000000;
13469 else if (tg3_nvram_read(tp, offset - 4, &start))
13470 return;
13471
13472 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13473 !tg3_fw_img_is_valid(tp, offset) ||
13474 tg3_nvram_read(tp, offset + 8, &val))
13475 return;
13476
13477 offset += val - start;
13478
13479 vlen = strlen(tp->fw_ver);
13480
13481 tp->fw_ver[vlen++] = ',';
13482 tp->fw_ver[vlen++] = ' ';
13483
13484 for (i = 0; i < 4; i++) {
13485 __be32 v;
13486 if (tg3_nvram_read_be32(tp, offset, &v))
13487 return;
13488
13489 offset += sizeof(v);
13490
13491 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13492 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13493 break;
13494 }
13495
13496 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13497 vlen += sizeof(v);
13498 }
13499 }
13500
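/* Query the APE for its firmware version (NCSI or DASH) and append
 * it to tp->fw_ver.
 */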
13501 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13502 {
13503 int vlen;
13504 u32 apedata;
13505 char *fwtype;
13506
13507 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13508 return;
13509
13510 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13511 if (apedata != APE_SEG_SIG_MAGIC)
13512 return;
13513
13514 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13515 if (!(apedata & APE_FW_STATUS_READY))
13516 return;
13517
13518 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13519
13520 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13521 tg3_flag_set(tp, APE_HAS_NCSI);
13522 fwtype = "NCSI";
13523 } else {
13524 fwtype = "DASH";
13525 }
13526
13527 vlen = strlen(tp->fw_ver);
13528
13529 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13530 fwtype,
13531 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13532 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13533 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13534 (apedata & APE_FW_VERSION_BLDMSK));
13535 }
13536
13537 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13538 {
13539 u32 val;
13540 bool vpd_vers = false;
13541
13542 if (tp->fw_ver[0] != 0)
13543 vpd_vers = true;
13544
13545 if (tg3_flag(tp, NO_NVRAM)) {
13546 strcat(tp->fw_ver, "sb");
13547 return;
13548 }
13549
13550 if (tg3_nvram_read(tp, 0, &val))
13551 return;
13552
13553 if (val == TG3_EEPROM_MAGIC)
13554 tg3_read_bc_ver(tp);
13555 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13556 tg3_read_sb_ver(tp, val);
13557 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13558 tg3_read_hwsb_ver(tp);
13559 else
13560 return;
13561
13562 if (vpd_vers)
13563 goto done;
13564
13565 if (tg3_flag(tp, ENABLE_APE)) {
13566 if (tg3_flag(tp, ENABLE_ASF))
13567 tg3_read_dash_ver(tp);
13568 } else if (tg3_flag(tp, ENABLE_ASF)) {
13569 tg3_read_mgmtfw_ver(tp);
13570 }
13571
13572 done:
13573 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13574 }
13575
13576 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13577
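/* Return the RX return ring size for this chip family. */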
13578 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13579 {
13580 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13581 return TG3_RX_RET_MAX_SIZE_5717;
13582 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13583 return TG3_RX_RET_MAX_SIZE_5700;
13584 else
13585 return TG3_RX_RET_MAX_SIZE_5705;
13586 }
13587
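/* Host bridges known to reorder posted writes to the mailbox
 * registers; devices behind these need the MBOX_WRITE_REORDER
 * workaround applied below.
 */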
13588 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13589 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13590 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13591 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13592 { },
13593 };
13594
13595 static int __devinit tg3_get_invariants(struct tg3 *tp)
13596 {
13597 u32 misc_ctrl_reg;
13598 u32 pci_state_reg, grc_misc_cfg;
13599 u32 val;
13600 u16 pci_cmd;
13601 int err;
13602
13603 /* Force memory write invalidate off. If we leave it on,
13604 * then on 5700_BX chips we have to enable a workaround.
13605 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13606 * to match the cacheline size. The Broadcom driver has this
13607 * workaround but turns MWI off all the time and so never uses
13608 * it. This seems to suggest that the workaround is insufficient.
13609 */
13610 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13611 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13612 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13613
13614 /* Important! -- Make sure register accesses are byteswapped
13615 * correctly. Also, for those chips that require it, make
13616 * sure that indirect register accesses are enabled before
13617 * the first operation.
13618 */
13619 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13620 &misc_ctrl_reg);
13621 tp->misc_host_ctrl |= (misc_ctrl_reg &
13622 MISC_HOST_CTRL_CHIPREV);
13623 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13624 tp->misc_host_ctrl);
13625
13626 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13627 MISC_HOST_CTRL_CHIPREV_SHIFT);
13628 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13629 u32 prod_id_asic_rev;
13630
13631 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13632 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13633 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13634 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13635 pci_read_config_dword(tp->pdev,
13636 TG3PCI_GEN2_PRODID_ASICREV,
13637 &prod_id_asic_rev);
13638 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13639 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13640 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13641 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13642 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13644 pci_read_config_dword(tp->pdev,
13645 TG3PCI_GEN15_PRODID_ASICREV,
13646 &prod_id_asic_rev);
13647 else
13648 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13649 &prod_id_asic_rev);
13650
13651 tp->pci_chip_rev_id = prod_id_asic_rev;
13652 }
13653
13654 /* Wrong chip ID in 5752 A0. This code can be removed later
13655 * as A0 is not in production.
13656 */
13657 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13658 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13659
13660 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13661 * we need to disable memory and use config. cycles
13662 * only to access all registers. The 5702/03 chips
13663 * can mistakenly decode the special cycles from the
13664 * ICH chipsets as memory write cycles, causing corruption
13665 * of register and memory space. Only certain ICH bridges
13666 * will drive special cycles with non-zero data during the
13667 * address phase which can fall within the 5703's address
13668 * range. This is not an ICH bug as the PCI spec allows
13669 * non-zero address during special cycles. However, only
13670 * these ICH bridges are known to drive non-zero addresses
13671 * during special cycles.
13672 *
13673 * Since special cycles do not cross PCI bridges, we only
13674 * enable this workaround if the 5703 is on the secondary
13675 * bus of these ICH bridges.
13676 */
13677 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13678 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13679 static struct tg3_dev_id {
13680 u32 vendor;
13681 u32 device;
13682 u32 rev;
13683 } ich_chipsets[] = {
13684 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13685 PCI_ANY_ID },
13686 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13687 PCI_ANY_ID },
13688 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13689 0xa },
13690 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13691 PCI_ANY_ID },
13692 { },
13693 };
13694 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13695 struct pci_dev *bridge = NULL;
13696
13697 while (pci_id->vendor != 0) {
13698 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13699 bridge);
13700 if (!bridge) {
13701 pci_id++;
13702 continue;
13703 }
13704 if (pci_id->rev != PCI_ANY_ID) {
13705 if (bridge->revision > pci_id->rev)
13706 continue;
13707 }
13708 if (bridge->subordinate &&
13709 (bridge->subordinate->number ==
13710 tp->pdev->bus->number)) {
13711 tg3_flag_set(tp, ICH_WORKAROUND);
13712 pci_dev_put(bridge);
13713 break;
13714 }
13715 }
13716 }
13717
13718 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13719 static struct tg3_dev_id {
13720 u32 vendor;
13721 u32 device;
13722 } bridge_chipsets[] = {
13723 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13724 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13725 { },
13726 };
13727 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13728 struct pci_dev *bridge = NULL;
13729
13730 while (pci_id->vendor != 0) {
13731 bridge = pci_get_device(pci_id->vendor,
13732 pci_id->device,
13733 bridge);
13734 if (!bridge) {
13735 pci_id++;
13736 continue;
13737 }
13738 if (bridge->subordinate &&
13739 (bridge->subordinate->number <=
13740 tp->pdev->bus->number) &&
13741 (bridge->subordinate->subordinate >=
13742 tp->pdev->bus->number)) {
13743 tg3_flag_set(tp, 5701_DMA_BUG);
13744 pci_dev_put(bridge);
13745 break;
13746 }
13747 }
13748 }
13749
13750 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13751 * DMA addresses > 40-bit. This bridge may have other additional
13752 * 57xx devices behind it in some 4-port NIC designs for example.
13753 * Any tg3 device found behind the bridge will also need the 40-bit
13754 * DMA workaround.
13755 */
13756 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13757 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13758 tg3_flag_set(tp, 5780_CLASS);
13759 tg3_flag_set(tp, 40BIT_DMA_BUG);
13760 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13761 } else {
13762 struct pci_dev *bridge = NULL;
13763
13764 do {
13765 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13766 PCI_DEVICE_ID_SERVERWORKS_EPB,
13767 bridge);
13768 if (bridge && bridge->subordinate &&
13769 (bridge->subordinate->number <=
13770 tp->pdev->bus->number) &&
13771 (bridge->subordinate->subordinate >=
13772 tp->pdev->bus->number)) {
13773 tg3_flag_set(tp, 40BIT_DMA_BUG);
13774 pci_dev_put(bridge);
13775 break;
13776 }
13777 } while (bridge);
13778 }
13779
13780 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13781 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13782 tp->pdev_peer = tg3_find_peer(tp);
13783
13784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13787 tg3_flag_set(tp, 5717_PLUS);
13788
13789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13790 tg3_flag(tp, 5717_PLUS))
13791 tg3_flag_set(tp, 57765_PLUS);
13792
13793 /* Intentionally exclude ASIC_REV_5906 */
13794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13796 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13797 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13798 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13799 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13800 tg3_flag(tp, 57765_PLUS))
13801 tg3_flag_set(tp, 5755_PLUS);
13802
13803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13804 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13805 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13806 tg3_flag(tp, 5755_PLUS) ||
13807 tg3_flag(tp, 5780_CLASS))
13808 tg3_flag_set(tp, 5750_PLUS);
13809
13810 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13811 tg3_flag(tp, 5750_PLUS))
13812 tg3_flag_set(tp, 5705_PLUS);
13813
13814 /* Determine TSO capabilities */
13815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13816 ; /* Do nothing. HW bug. */
13817 else if (tg3_flag(tp, 57765_PLUS))
13818 tg3_flag_set(tp, HW_TSO_3);
13819 else if (tg3_flag(tp, 5755_PLUS) ||
13820 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13821 tg3_flag_set(tp, HW_TSO_2);
13822 else if (tg3_flag(tp, 5750_PLUS)) {
13823 tg3_flag_set(tp, HW_TSO_1);
13824 tg3_flag_set(tp, TSO_BUG);
13825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13826 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13827 tg3_flag_clear(tp, TSO_BUG);
13828 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13829 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13830 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13831 tg3_flag_set(tp, TSO_BUG);
13832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13833 tp->fw_needed = FIRMWARE_TG3TSO5;
13834 else
13835 tp->fw_needed = FIRMWARE_TG3TSO;
13836 }
13837
13838 /* Selectively allow TSO based on operating conditions */
13839 if (tg3_flag(tp, HW_TSO_1) ||
13840 tg3_flag(tp, HW_TSO_2) ||
13841 tg3_flag(tp, HW_TSO_3) ||
13842 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13843 tg3_flag_set(tp, TSO_CAPABLE);
13844 else {
13845 tg3_flag_clear(tp, TSO_CAPABLE);
13846 tg3_flag_clear(tp, TSO_BUG);
13847 tp->fw_needed = NULL;
13848 }
13849
13850 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13851 tp->fw_needed = FIRMWARE_TG3;
13852
13853 tp->irq_max = 1;
13854
13855 if (tg3_flag(tp, 5750_PLUS)) {
13856 tg3_flag_set(tp, SUPPORT_MSI);
13857 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13858 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13859 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13860 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13861 tp->pdev_peer == tp->pdev))
13862 tg3_flag_clear(tp, SUPPORT_MSI);
13863
13864 if (tg3_flag(tp, 5755_PLUS) ||
13865 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13866 tg3_flag_set(tp, 1SHOT_MSI);
13867 }
13868
13869 if (tg3_flag(tp, 57765_PLUS)) {
13870 tg3_flag_set(tp, SUPPORT_MSIX);
13871 tp->irq_max = TG3_IRQ_MAX_VECS;
13872 }
13873 }
13874
13875 if (tg3_flag(tp, 5755_PLUS))
13876 tg3_flag_set(tp, SHORT_DMA_BUG);
13877
13878 if (tg3_flag(tp, 5717_PLUS))
13879 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13880
13881 if (tg3_flag(tp, 57765_PLUS) &&
13882 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13883 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13884
13885 if (!tg3_flag(tp, 5705_PLUS) ||
13886 tg3_flag(tp, 5780_CLASS) ||
13887 tg3_flag(tp, USE_JUMBO_BDFLAG))
13888 tg3_flag_set(tp, JUMBO_CAPABLE);
13889
13890 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13891 &pci_state_reg);
13892
13893 if (pci_is_pcie(tp->pdev)) {
13894 u16 lnkctl;
13895
13896 tg3_flag_set(tp, PCI_EXPRESS);
13897
13898 tp->pcie_readrq = 4096;
13899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13901 tp->pcie_readrq = 2048;
13902
13903 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13904
13905 pci_read_config_word(tp->pdev,
13906 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13907 &lnkctl);
13908 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13909 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13910 ASIC_REV_5906) {
13911 tg3_flag_clear(tp, HW_TSO_2);
13912 tg3_flag_clear(tp, TSO_CAPABLE);
13913 }
13914 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13915 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13916 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13917 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13918 tg3_flag_set(tp, CLKREQ_BUG);
13919 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13920 tg3_flag_set(tp, L1PLLPD_EN);
13921 }
13922 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13923 /* BCM5785 devices are effectively PCIe devices, and should
13924 * follow PCIe codepaths, but do not have a PCIe capabilities
13925 * section.
13926 */
13927 tg3_flag_set(tp, PCI_EXPRESS);
13928 } else if (!tg3_flag(tp, 5705_PLUS) ||
13929 tg3_flag(tp, 5780_CLASS)) {
13930 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13931 if (!tp->pcix_cap) {
13932 dev_err(&tp->pdev->dev,
13933 "Cannot find PCI-X capability, aborting\n");
13934 return -EIO;
13935 }
13936
13937 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13938 tg3_flag_set(tp, PCIX_MODE);
13939 }
13940
13941 /* If we have an AMD 762 or VIA K8T800 chipset, write
13942 * reordering to the mailbox registers done by the host
13943 * controller can cause major trouble. We read back from
13944 * every mailbox register write to force the writes to be
13945 * posted to the chip in order.
13946 */
13947 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13948 !tg3_flag(tp, PCI_EXPRESS))
13949 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13950
13951 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13952 &tp->pci_cacheline_sz);
13953 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13954 &tp->pci_lat_timer);
13955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13956 tp->pci_lat_timer < 64) {
13957 tp->pci_lat_timer = 64;
13958 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13959 tp->pci_lat_timer);
13960 }
13961
13962 /* Important! -- It is critical that the PCI-X hw workaround
13963 * situation is decided before the first MMIO register access.
13964 */
13965 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13966 /* 5700 BX chips need to have their TX producer index
13967 * mailboxes written twice to work around a bug.
13968 */
13969 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13970
13971 /* If we are in PCI-X mode, enable register write workaround.
13972 *
13973 * The workaround is to use indirect register accesses
13974 * for all chip writes not to mailbox registers.
13975 */
13976 if (tg3_flag(tp, PCIX_MODE)) {
13977 u32 pm_reg;
13978
13979 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13980
13981 /* The chip can have its power management PCI config
13982 * space registers clobbered due to this bug.
13983 * So explicitly force the chip into D0 here.
13984 */
13985 pci_read_config_dword(tp->pdev,
13986 tp->pm_cap + PCI_PM_CTRL,
13987 &pm_reg);
13988 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13989 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13990 pci_write_config_dword(tp->pdev,
13991 tp->pm_cap + PCI_PM_CTRL,
13992 pm_reg);
13993
13994 /* Also, force SERR#/PERR# in PCI command. */
13995 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13996 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13997 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13998 }
13999 }
14000
14001 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14002 tg3_flag_set(tp, PCI_HIGH_SPEED);
14003 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14004 tg3_flag_set(tp, PCI_32BIT);
14005
14006 /* Chip-specific fixup from Broadcom driver */
14007 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14008 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14009 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14010 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14011 }
14012
14013 /* Default fast path register access methods */
14014 tp->read32 = tg3_read32;
14015 tp->write32 = tg3_write32;
14016 tp->read32_mbox = tg3_read32;
14017 tp->write32_mbox = tg3_write32;
14018 tp->write32_tx_mbox = tg3_write32;
14019 tp->write32_rx_mbox = tg3_write32;
14020
14021 /* Various workaround register access methods */
14022 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14023 tp->write32 = tg3_write_indirect_reg32;
14024 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14025 (tg3_flag(tp, PCI_EXPRESS) &&
14026 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14027 /*
14028 * Back-to-back register writes can cause problems on these
14029 * chips; the workaround is to read back all register writes
14030 * except those to mailbox registers.
14031 *
14032 * See tg3_write_indirect_reg32().
14033 */
14034 tp->write32 = tg3_write_flush_reg32;
14035 }
14036
14037 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14038 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14039 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14040 tp->write32_rx_mbox = tg3_write_flush_reg32;
14041 }
14042
14043 if (tg3_flag(tp, ICH_WORKAROUND)) {
14044 tp->read32 = tg3_read_indirect_reg32;
14045 tp->write32 = tg3_write_indirect_reg32;
14046 tp->read32_mbox = tg3_read_indirect_mbox;
14047 tp->write32_mbox = tg3_write_indirect_mbox;
14048 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14049 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14050
14051 iounmap(tp->regs);
14052 tp->regs = NULL;
14053
14054 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14055 pci_cmd &= ~PCI_COMMAND_MEMORY;
14056 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14057 }
14058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14059 tp->read32_mbox = tg3_read32_mbox_5906;
14060 tp->write32_mbox = tg3_write32_mbox_5906;
14061 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14062 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14063 }
14064
14065 if (tp->write32 == tg3_write_indirect_reg32 ||
14066 (tg3_flag(tp, PCIX_MODE) &&
14067 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14068 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14069 tg3_flag_set(tp, SRAM_USE_CONFIG);
14070
14071 /* The memory arbiter has to be enabled in order for SRAM accesses
14072 * to succeed. Normally on powerup the tg3 chip firmware will make
14073 * sure it is enabled, but other entities such as system netboot
14074 * code might disable it.
14075 */
14076 val = tr32(MEMARB_MODE);
14077 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14078
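/* Determine which PCI function this device is: PCI-X devices report
 * it in the PCI-X status register, everything else uses devfn.
 */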
14079 if (tg3_flag(tp, PCIX_MODE)) {
14080 pci_read_config_dword(tp->pdev,
14081 tp->pcix_cap + PCI_X_STATUS, &val);
14082 tp->pci_fn = val & 0x7;
14083 } else {
14084 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14085 }
14086
14087 /* Get eeprom hw config before calling tg3_set_power_state().
14088 * In particular, the TG3_FLAG_IS_NIC flag must be
14089 * determined before calling tg3_set_power_state() so that
14090 * we know whether or not to switch out of Vaux power.
14091 * When the flag is set, it means that GPIO1 is used for eeprom
14092 * write protect and also implies that it is a LOM where GPIOs
14093 * are not used to switch power.
14094 */
14095 tg3_get_eeprom_hw_cfg(tp);
14096
14097 if (tg3_flag(tp, ENABLE_APE)) {
14098 /* Allow reads and writes to the
14099 * APE register and memory space.
14100 */
14101 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14102 PCISTATE_ALLOW_APE_SHMEM_WR |
14103 PCISTATE_ALLOW_APE_PSPACE_WR;
14104 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14105 pci_state_reg);
14106
14107 tg3_ape_lock_init(tp);
14108 }
14109
14110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14114 tg3_flag(tp, 57765_PLUS))
14115 tg3_flag_set(tp, CPMU_PRESENT);
14116
14117 /* Set up tp->grc_local_ctrl before calling
14118 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14119 * will bring 5700's external PHY out of reset.
14120 * It is also used as eeprom write protect on LOMs.
14121 */
14122 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14124 tg3_flag(tp, EEPROM_WRITE_PROT))
14125 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14126 GRC_LCLCTRL_GPIO_OUTPUT1);
14127 /* Unused GPIO3 must be driven as output on 5752 because there
14128 * are no pull-up resistors on unused GPIO pins.
14129 */
14130 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14131 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14132
14133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14136 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14137
14138 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14139 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14140 /* Turn off the debug UART. */
14141 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14142 if (tg3_flag(tp, IS_NIC))
14143 /* Keep VMain power. */
14144 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14145 GRC_LCLCTRL_GPIO_OUTPUT0;
14146 }
14147
14148 /* Switch out of Vaux if it is a NIC */
14149 tg3_pwrsrc_switch_to_vmain(tp);
14150
14151 /* Derive initial jumbo mode from MTU assigned in
14152 * ether_setup() via the alloc_etherdev() call
14153 */
14154 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14155 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14156
14157 /* Determine WakeOnLan speed to use. */
14158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14159 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14160 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14161 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14162 tg3_flag_clear(tp, WOL_SPEED_100MB);
14163 } else {
14164 tg3_flag_set(tp, WOL_SPEED_100MB);
14165 }
14166
14167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14168 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14169
14170 /* A few boards don't want Ethernet@WireSpeed phy feature */
14171 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14172 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14173 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14174 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14175 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14176 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14177 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14178
14179 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14180 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14181 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14182 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14183 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14184
14185 if (tg3_flag(tp, 5705_PLUS) &&
14186 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14187 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14188 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14189 !tg3_flag(tp, 57765_PLUS)) {
14190 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14194 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14195 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14196 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14197 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14198 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14199 } else
14200 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14201 }
14202
14203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14204 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14205 tp->phy_otp = tg3_read_otp_phycfg(tp);
14206 if (tp->phy_otp == 0)
14207 tp->phy_otp = TG3_OTP_DEFAULT;
14208 }
14209
14210 if (tg3_flag(tp, CPMU_PRESENT))
14211 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14212 else
14213 tp->mi_mode = MAC_MI_MODE_BASE;
14214
14215 tp->coalesce_mode = 0;
14216 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14217 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14218 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14219
14220 /* Set these bits to enable statistics workaround. */
14221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14222 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14223 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14224 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14225 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14226 }
14227
14228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14230 tg3_flag_set(tp, USE_PHYLIB);
14231
14232 err = tg3_mdio_init(tp);
14233 if (err)
14234 return err;
14235
14236 /* Initialize data/descriptor byte/word swapping. */
14237 val = tr32(GRC_MODE);
14238 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14239 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14240 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14241 GRC_MODE_B2HRX_ENABLE |
14242 GRC_MODE_HTX2B_ENABLE |
14243 GRC_MODE_HOST_STACKUP);
14244 else
14245 val &= GRC_MODE_HOST_STACKUP;
14246
14247 tw32(GRC_MODE, val | tp->grc_mode);
14248
14249 tg3_switch_clocks(tp);
14250
14251 /* Clear this out for sanity. */
14252 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14253
14254 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14255 &pci_state_reg);
14256 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14257 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14258 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14259
14260 if (chiprevid == CHIPREV_ID_5701_A0 ||
14261 chiprevid == CHIPREV_ID_5701_B0 ||
14262 chiprevid == CHIPREV_ID_5701_B2 ||
14263 chiprevid == CHIPREV_ID_5701_B5) {
14264 void __iomem *sram_base;
14265
14266 /* Write some dummy words into the SRAM status block
14267 * area and see if they read back correctly. If the value
14268 * read back is bad, force-enable the PCI-X workaround.
14269 */
14270 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14271
14272 writel(0x00000000, sram_base);
14273 writel(0x00000000, sram_base + 4);
14274 writel(0xffffffff, sram_base + 4);
14275 if (readl(sram_base) != 0x00000000)
14276 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14277 }
14278 }
14279
14280 udelay(50);
14281 tg3_nvram_init(tp);
14282
14283 grc_misc_cfg = tr32(GRC_MISC_CFG);
14284 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14285
14286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14287 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14288 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14289 tg3_flag_set(tp, IS_5788);
14290
14291 if (!tg3_flag(tp, IS_5788) &&
14292 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14293 tg3_flag_set(tp, TAGGED_STATUS);
14294 if (tg3_flag(tp, TAGGED_STATUS)) {
14295 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14296 HOSTCC_MODE_CLRTICK_TXBD);
14297
14298 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14299 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14300 tp->misc_host_ctrl);
14301 }
14302
14303 /* Preserve the APE MAC_MODE bits */
14304 if (tg3_flag(tp, ENABLE_APE))
14305 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14306 else
14307 tp->mac_mode = TG3_DEF_MAC_MODE;
14308
14309 /* these are limited to 10/100 only */
14310 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14311 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14312 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14313 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14314 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14315 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14316 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14317 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14318 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14319 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14320 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14321 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14322 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14323 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14324 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14325 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14326
14327 err = tg3_phy_probe(tp);
14328 if (err) {
14329 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14330 /* ... but do not return immediately ... */
14331 tg3_mdio_fini(tp);
14332 }
14333
14334 tg3_read_vpd(tp);
14335 tg3_read_fw_ver(tp);
14336
14337 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14338 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14339 } else {
14340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14341 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14342 else
14343 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14344 }
14345
14346 /* 5700 {AX,BX} chips have a broken status block link
14347 * change bit implementation, so we must use the
14348 * status register in those cases.
14349 */
14350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14351 tg3_flag_set(tp, USE_LINKCHG_REG);
14352 else
14353 tg3_flag_clear(tp, USE_LINKCHG_REG);
14354
14355 /* The led_ctrl is set during tg3_phy_probe; here we might
14356 * have to force the link status polling mechanism based
14357 * upon subsystem IDs.
14358 */
14359 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14360 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14361 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14362 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14363 tg3_flag_set(tp, USE_LINKCHG_REG);
14364 }
14365
14366 /* For all SERDES we poll the MAC status register. */
14367 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14368 tg3_flag_set(tp, POLL_SERDES);
14369 else
14370 tg3_flag_clear(tp, POLL_SERDES);
14371
14372 tp->rx_offset = NET_IP_ALIGN;
14373 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14375 tg3_flag(tp, PCIX_MODE)) {
14376 tp->rx_offset = 0;
14377 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14378 tp->rx_copy_thresh = ~(u16)0;
14379 #endif
14380 }
14381
14382 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14383 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14384 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14385
14386 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14387
14388 /* Increment the rx prod index on the rx std ring by at most
14389 * 8 for these chips to work around hw errata.
14390 */
14391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14392 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14394 tp->rx_std_max_post = 8;
14395
14396 if (tg3_flag(tp, ASPM_WORKAROUND))
14397 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14398 PCIE_PWR_MGMT_L1_THRESH_MSK;
14399
14400 return err;
14401 }
14402
14403 #ifdef CONFIG_SPARC
14404 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14405 {
14406 struct net_device *dev = tp->dev;
14407 struct pci_dev *pdev = tp->pdev;
14408 struct device_node *dp = pci_device_to_OF_node(pdev);
14409 const unsigned char *addr;
14410 int len;
14411
14412 addr = of_get_property(dp, "local-mac-address", &len);
14413 if (addr && len == 6) {
14414 memcpy(dev->dev_addr, addr, 6);
14415 memcpy(dev->perm_addr, dev->dev_addr, 6);
14416 return 0;
14417 }
14418 return -ENODEV;
14419 }
14420
14421 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14422 {
14423 struct net_device *dev = tp->dev;
14424
14425 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14426 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14427 return 0;
14428 }
14429 #endif
14430
14431 static int __devinit tg3_get_device_address(struct tg3 *tp)
14432 {
14433 struct net_device *dev = tp->dev;
14434 u32 hi, lo, mac_offset;
14435 int addr_ok = 0;
14436
14437 #ifdef CONFIG_SPARC
14438 if (!tg3_get_macaddr_sparc(tp))
14439 return 0;
14440 #endif
14441
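/* Pick the NVRAM offset of this function's MAC address; dual-MAC,
 * multi-function, and 5906 chips store it at different locations.
 */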
14442 mac_offset = 0x7c;
14443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14444 tg3_flag(tp, 5780_CLASS)) {
14445 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14446 mac_offset = 0xcc;
14447 if (tg3_nvram_lock(tp))
14448 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14449 else
14450 tg3_nvram_unlock(tp);
14451 } else if (tg3_flag(tp, 5717_PLUS)) {
14452 if (tp->pci_fn & 1)
14453 mac_offset = 0xcc;
14454 if (tp->pci_fn > 1)
14455 mac_offset += 0x18c;
14456 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14457 mac_offset = 0x10;
14458
14459 /* First try to get it from MAC address mailbox. */
14460 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14461 if ((hi >> 16) == 0x484b) {
14462 dev->dev_addr[0] = (hi >> 8) & 0xff;
14463 dev->dev_addr[1] = (hi >> 0) & 0xff;
14464
14465 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14466 dev->dev_addr[2] = (lo >> 24) & 0xff;
14467 dev->dev_addr[3] = (lo >> 16) & 0xff;
14468 dev->dev_addr[4] = (lo >> 8) & 0xff;
14469 dev->dev_addr[5] = (lo >> 0) & 0xff;
14470
14471 /* Some old bootcode may report a 0 MAC address in SRAM */
14472 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14473 }
14474 if (!addr_ok) {
14475 /* Next, try NVRAM. */
14476 if (!tg3_flag(tp, NO_NVRAM) &&
14477 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14478 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14479 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14480 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14481 }
14482 /* Finally just fetch it out of the MAC control regs. */
14483 else {
14484 hi = tr32(MAC_ADDR_0_HIGH);
14485 lo = tr32(MAC_ADDR_0_LOW);
14486
14487 dev->dev_addr[5] = lo & 0xff;
14488 dev->dev_addr[4] = (lo >> 8) & 0xff;
14489 dev->dev_addr[3] = (lo >> 16) & 0xff;
14490 dev->dev_addr[2] = (lo >> 24) & 0xff;
14491 dev->dev_addr[1] = hi & 0xff;
14492 dev->dev_addr[0] = (hi >> 8) & 0xff;
14493 }
14494 }
14495
14496 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14497 #ifdef CONFIG_SPARC
14498 if (!tg3_get_default_macaddr_sparc(tp))
14499 return 0;
14500 #endif
14501 return -EINVAL;
14502 }
14503 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14504 return 0;
14505 }
14506
14507 #define BOUNDARY_SINGLE_CACHELINE 1
14508 #define BOUNDARY_MULTI_CACHELINE 2
14509
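/* Compute the DMA read/write boundary bits for DMA_RWCTRL based on
 * the host cache line size and the bus type (PCI, PCI-X, PCIe).
 */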
14510 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14511 {
14512 int cacheline_size;
14513 u8 byte;
14514 int goal;
14515
14516 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14517 if (byte == 0)
14518 cacheline_size = 1024;
14519 else
14520 cacheline_size = (int) byte * 4;
14521
14522 /* On 5703 and later chips, the boundary bits have no
14523 * effect.
14524 */
14525 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14526 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14527 !tg3_flag(tp, PCI_EXPRESS))
14528 goto out;
14529
14530 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14531 goal = BOUNDARY_MULTI_CACHELINE;
14532 #else
14533 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14534 goal = BOUNDARY_SINGLE_CACHELINE;
14535 #else
14536 goal = 0;
14537 #endif
14538 #endif
14539
14540 if (tg3_flag(tp, 57765_PLUS)) {
14541 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14542 goto out;
14543 }
14544
14545 if (!goal)
14546 goto out;
14547
14548 /* PCI controllers on most RISC systems tend to disconnect
14549 * when a device tries to burst across a cache-line boundary.
14550 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14551 *
14552 * Unfortunately, for PCI-E there are only limited
14553 * write-side controls for this, and thus for reads
14554 * we will still get the disconnects. We'll also waste
14555 * these PCI cycles for both read and write for chips
14556 * other than 5700 and 5701 which do not implement the
14557 * boundary bits.
14558 */
14559 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14560 switch (cacheline_size) {
14561 case 16:
14562 case 32:
14563 case 64:
14564 case 128:
14565 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14566 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14567 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14568 } else {
14569 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14570 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14571 }
14572 break;
14573
14574 case 256:
14575 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14576 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14577 break;
14578
14579 default:
14580 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14581 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14582 break;
14583 }
14584 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14585 switch (cacheline_size) {
14586 case 16:
14587 case 32:
14588 case 64:
14589 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14590 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14591 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14592 break;
14593 }
14594 /* fallthrough */
14595 case 128:
14596 default:
14597 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14598 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14599 break;
14600 }
14601 } else {
14602 switch (cacheline_size) {
14603 case 16:
14604 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14605 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14606 DMA_RWCTRL_WRITE_BNDRY_16);
14607 break;
14608 }
14609 /* fallthrough */
14610 case 32:
14611 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14612 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14613 DMA_RWCTRL_WRITE_BNDRY_32);
14614 break;
14615 }
14616 /* fallthrough */
14617 case 64:
14618 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14619 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14620 DMA_RWCTRL_WRITE_BNDRY_64);
14621 break;
14622 }
14623 /* fallthrough */
14624 case 128:
14625 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14626 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14627 DMA_RWCTRL_WRITE_BNDRY_128);
14628 break;
14629 }
14630 /* fallthrough */
14631 case 256:
14632 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14633 DMA_RWCTRL_WRITE_BNDRY_256);
14634 break;
14635 case 512:
14636 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14637 DMA_RWCTRL_WRITE_BNDRY_512);
14638 break;
14639 case 1024:
14640 default:
14641 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14642 DMA_RWCTRL_WRITE_BNDRY_1024);
14643 break;
14644 }
14645 }
14646
14647 out:
14648 return val;
14649 }
14650
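/* Perform one DMA transaction of @size bytes between host memory at
 * @buf_dma and NIC SRAM using an internal DMA descriptor, then poll
 * the completion FIFO. Returns 0 on success, -ENODEV on timeout.
 */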
14651 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14652 {
14653 struct tg3_internal_buffer_desc test_desc;
14654 u32 sram_dma_descs;
14655 int i, ret;
14656
14657 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14658
14659 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14660 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14661 tw32(RDMAC_STATUS, 0);
14662 tw32(WDMAC_STATUS, 0);
14663
14664 tw32(BUFMGR_MODE, 0);
14665 tw32(FTQ_RESET, 0);
14666
14667 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14668 test_desc.addr_lo = buf_dma & 0xffffffff;
14669 test_desc.nic_mbuf = 0x00002100;
14670 test_desc.len = size;
14671
14672 /*
14673 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14674 * the *second* time the tg3 driver was loaded after an
14675 * initial scan.
14676 *
14677 * Broadcom tells me:
14678 * ...the DMA engine is connected to the GRC block and a DMA
14679 * reset may affect the GRC block in some unpredictable way...
14680 * The behavior of resets to individual blocks has not been tested.
14681 *
14682 * Broadcom noted the GRC reset will also reset all sub-components.
14683 */
14684 if (to_device) {
14685 test_desc.cqid_sqid = (13 << 8) | 2;
14686
14687 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14688 udelay(40);
14689 } else {
14690 test_desc.cqid_sqid = (16 << 8) | 7;
14691
14692 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14693 udelay(40);
14694 }
14695 test_desc.flags = 0x00000005;
14696
14697 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14698 u32 val;
14699
14700 val = *(((u32 *)&test_desc) + i);
14701 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14702 sram_dma_descs + (i * sizeof(u32)));
14703 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14704 }
14705 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14706
14707 if (to_device)
14708 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14709 else
14710 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14711
14712 ret = -ENODEV;
14713 for (i = 0; i < 40; i++) {
14714 u32 val;
14715
14716 if (to_device)
14717 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14718 else
14719 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14720 if ((val & 0xffff) == sram_dma_descs) {
14721 ret = 0;
14722 break;
14723 }
14724
14725 udelay(100);
14726 }
14727
14728 return ret;
14729 }
14730
14731 #define TEST_BUFFER_SIZE 0x2000
14732
14733 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14734 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14735 { },
14736 };
14737
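/* Choose DMA read/write control settings for the bus type and, on
 * 5700/5701, verify them with a write/read test of an 8KB buffer,
 * falling back to a 16-byte write boundary if corruption is seen.
 */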
14738 static int __devinit tg3_test_dma(struct tg3 *tp)
14739 {
14740 dma_addr_t buf_dma;
14741 u32 *buf, saved_dma_rwctrl;
14742 int ret = 0;
14743
14744 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14745 &buf_dma, GFP_KERNEL);
14746 if (!buf) {
14747 ret = -ENOMEM;
14748 goto out_nofree;
14749 }
14750
14751 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14752 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14753
14754 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14755
14756 if (tg3_flag(tp, 57765_PLUS))
14757 goto out;
14758
14759 if (tg3_flag(tp, PCI_EXPRESS)) {
14760 /* DMA read watermark not used on PCIE */
14761 tp->dma_rwctrl |= 0x00180000;
14762 } else if (!tg3_flag(tp, PCIX_MODE)) {
14763 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14764 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14765 tp->dma_rwctrl |= 0x003f0000;
14766 else
14767 tp->dma_rwctrl |= 0x003f000f;
14768 } else {
14769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14770 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14771 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14772 u32 read_water = 0x7;
14773
14774 /* If the 5704 is behind the EPB bridge, we can
14775 * do the less restrictive ONE_DMA workaround for
14776 * better performance.
14777 */
14778 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14780 tp->dma_rwctrl |= 0x8000;
14781 else if (ccval == 0x6 || ccval == 0x7)
14782 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14783
14784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14785 read_water = 4;
14786 /* Set bit 23 to enable PCIX hw bug fix */
14787 tp->dma_rwctrl |=
14788 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14789 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14790 (1 << 23);
14791 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14792 /* 5780 always in PCIX mode */
14793 tp->dma_rwctrl |= 0x00144000;
14794 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14795 /* 5714 always in PCIX mode */
14796 tp->dma_rwctrl |= 0x00148000;
14797 } else {
14798 tp->dma_rwctrl |= 0x001b000f;
14799 }
14800 }
14801
14802 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14803 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14804 tp->dma_rwctrl &= 0xfffffff0;
14805
14806 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14807 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14808 /* Remove this if it causes problems for some boards. */
14809 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14810
14811 /* On 5700/5701 chips, we need to set this bit.
14812 * Otherwise the chip will issue cacheline transactions
14813 * to streamable DMA memory without all the byte
14814 * enables turned on. This is an error on several
14815 * RISC PCI controllers, in particular sparc64.
14816 *
14817 * On 5703/5704 chips, this bit has been reassigned
14818 * a different meaning. In particular, it is used
14819 * on those chips to enable a PCI-X workaround.
14820 */
14821 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14822 }
14823
14824 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14825
14826 #if 0
14827 /* Unneeded, already done by tg3_get_invariants. */
14828 tg3_switch_clocks(tp);
14829 #endif
14830
14831 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14832 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14833 goto out;
14834
14835 /* It is best to perform the DMA test with the maximum write burst size
14836 * to expose the 5700/5701 write DMA bug.
14837 */
14838 saved_dma_rwctrl = tp->dma_rwctrl;
14839 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14840 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14841
14842 while (1) {
14843 u32 *p = buf, i;
14844
14845 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14846 p[i] = i;
14847
14848 /* Send the buffer to the chip. */
14849 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14850 if (ret) {
14851 dev_err(&tp->pdev->dev,
14852 "%s: Buffer write failed. err = %d\n",
14853 __func__, ret);
14854 break;
14855 }
14856
14857 #if 0
14858 /* validate data reached card RAM correctly. */
14859 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14860 u32 val;
14861 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14862 if (le32_to_cpu(val) != p[i]) {
14863 dev_err(&tp->pdev->dev,
14864 "%s: Buffer corrupted on device! "
14865 "(%d != %d)\n", __func__, val, i);
14866 /* ret = -ENODEV here? */
14867 }
14868 p[i] = 0;
14869 }
14870 #endif
14871 /* Now read it back. */
14872 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14873 if (ret) {
14874 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14875 "err = %d\n", __func__, ret);
14876 break;
14877 }
14878
14879 /* Verify it. */
14880 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14881 if (p[i] == i)
14882 continue;
14883
14884 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14885 DMA_RWCTRL_WRITE_BNDRY_16) {
14886 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14887 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14888 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14889 break;
14890 } else {
14891 dev_err(&tp->pdev->dev,
14892 "%s: Buffer corrupted on read back! "
14893 "(%d != %d)\n", __func__, p[i], i);
14894 ret = -ENODEV;
14895 goto out;
14896 }
14897 }
14898
14899 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14900 /* Success. */
14901 ret = 0;
14902 break;
14903 }
14904 }
14905 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14906 DMA_RWCTRL_WRITE_BNDRY_16) {
14907 /* DMA test passed without adjusting the DMA boundary;
14908 * now look for chipsets that are known to expose the
14909 * DMA bug without failing the test.
14910 */
14911 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14912 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14913 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14914 } else {
14915 /* Safe to use the calculated DMA boundary. */
14916 tp->dma_rwctrl = saved_dma_rwctrl;
14917 }
14918
14919 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14920 }
14921
14922 out:
14923 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14924 out_nofree:
14925 return ret;
14926 }
14927
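/* Set default buffer manager (mbuf/DMA) watermarks for standard and
 * jumbo frames; the values differ per chip generation.
 */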
14928 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14929 {
14930 if (tg3_flag(tp, 57765_PLUS)) {
14931 tp->bufmgr_config.mbuf_read_dma_low_water =
14932 DEFAULT_MB_RDMA_LOW_WATER_5705;
14933 tp->bufmgr_config.mbuf_mac_rx_low_water =
14934 DEFAULT_MB_MACRX_LOW_WATER_57765;
14935 tp->bufmgr_config.mbuf_high_water =
14936 DEFAULT_MB_HIGH_WATER_57765;
14937
14938 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14939 DEFAULT_MB_RDMA_LOW_WATER_5705;
14940 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14941 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14942 tp->bufmgr_config.mbuf_high_water_jumbo =
14943 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14944 } else if (tg3_flag(tp, 5705_PLUS)) {
14945 tp->bufmgr_config.mbuf_read_dma_low_water =
14946 DEFAULT_MB_RDMA_LOW_WATER_5705;
14947 tp->bufmgr_config.mbuf_mac_rx_low_water =
14948 DEFAULT_MB_MACRX_LOW_WATER_5705;
14949 tp->bufmgr_config.mbuf_high_water =
14950 DEFAULT_MB_HIGH_WATER_5705;
14951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14952 tp->bufmgr_config.mbuf_mac_rx_low_water =
14953 DEFAULT_MB_MACRX_LOW_WATER_5906;
14954 tp->bufmgr_config.mbuf_high_water =
14955 DEFAULT_MB_HIGH_WATER_5906;
14956 }
14957
14958 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14959 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14960 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14961 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14962 tp->bufmgr_config.mbuf_high_water_jumbo =
14963 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14964 } else {
14965 tp->bufmgr_config.mbuf_read_dma_low_water =
14966 DEFAULT_MB_RDMA_LOW_WATER;
14967 tp->bufmgr_config.mbuf_mac_rx_low_water =
14968 DEFAULT_MB_MACRX_LOW_WATER;
14969 tp->bufmgr_config.mbuf_high_water =
14970 DEFAULT_MB_HIGH_WATER;
14971
14972 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14973 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14974 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14975 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14976 tp->bufmgr_config.mbuf_high_water_jumbo =
14977 DEFAULT_MB_HIGH_WATER_JUMBO;
14978 }
14979
14980 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14981 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14982 }
14983
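/* Map the probed PHY ID to a human-readable name. */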
14984 static char * __devinit tg3_phy_string(struct tg3 *tp)
14985 {
14986 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14987 case TG3_PHY_ID_BCM5400: return "5400";
14988 case TG3_PHY_ID_BCM5401: return "5401";
14989 case TG3_PHY_ID_BCM5411: return "5411";
14990 case TG3_PHY_ID_BCM5701: return "5701";
14991 case TG3_PHY_ID_BCM5703: return "5703";
14992 case TG3_PHY_ID_BCM5704: return "5704";
14993 case TG3_PHY_ID_BCM5705: return "5705";
14994 case TG3_PHY_ID_BCM5750: return "5750";
14995 case TG3_PHY_ID_BCM5752: return "5752";
14996 case TG3_PHY_ID_BCM5714: return "5714";
14997 case TG3_PHY_ID_BCM5780: return "5780";
14998 case TG3_PHY_ID_BCM5755: return "5755";
14999 case TG3_PHY_ID_BCM5787: return "5787";
15000 case TG3_PHY_ID_BCM5784: return "5784";
15001 case TG3_PHY_ID_BCM5756: return "5722/5756";
15002 case TG3_PHY_ID_BCM5906: return "5906";
15003 case TG3_PHY_ID_BCM5761: return "5761";
15004 case TG3_PHY_ID_BCM5718C: return "5718C";
15005 case TG3_PHY_ID_BCM5718S: return "5718S";
15006 case TG3_PHY_ID_BCM57765: return "57765";
15007 case TG3_PHY_ID_BCM5719C: return "5719C";
15008 case TG3_PHY_ID_BCM5720C: return "5720C";
15009 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15010 case 0: return "serdes";
15011 default: return "unknown";
15012 }
15013 }
15014
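/* Format a human-readable description of the bus type, speed, and
 * width into @str, e.g. "PCIX:133MHz:64-bit".
 */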
15015 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15016 {
15017 if (tg3_flag(tp, PCI_EXPRESS)) {
15018 strcpy(str, "PCI Express");
15019 return str;
15020 } else if (tg3_flag(tp, PCIX_MODE)) {
15021 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15022
15023 strcpy(str, "PCIX:");
15024
15025 if ((clock_ctrl == 7) ||
15026 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15027 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15028 strcat(str, "133MHz");
15029 else if (clock_ctrl == 0)
15030 strcat(str, "33MHz");
15031 else if (clock_ctrl == 2)
15032 strcat(str, "50MHz");
15033 else if (clock_ctrl == 4)
15034 strcat(str, "66MHz");
15035 else if (clock_ctrl == 6)
15036 strcat(str, "100MHz");
15037 } else {
15038 strcpy(str, "PCI:");
15039 if (tg3_flag(tp, PCI_HIGH_SPEED))
15040 strcat(str, "66MHz");
15041 else
15042 strcat(str, "33MHz");
15043 }
15044 if (tg3_flag(tp, PCI_32BIT))
15045 strcat(str, ":32-bit");
15046 else
15047 strcat(str, ":64-bit");
15048 return str;
15049 }
15050
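/* Find the other PCI function of a dual-port device in the same slot. */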
15051 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15052 {
15053 struct pci_dev *peer;
15054 unsigned int func, devnr = tp->pdev->devfn & ~7;
15055
15056 for (func = 0; func < 8; func++) {
15057 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15058 if (peer && peer != tp->pdev)
15059 break;
15060 pci_dev_put(peer);
15061 }
15062 /* 5704 can be configured in single-port mode; set peer to
15063 * tp->pdev in that case.
15064 */
15065 if (!peer) {
15066 peer = tp->pdev;
15067 return peer;
15068 }
15069
15070 /*
15071 * We don't need to keep the refcount elevated; there's no way
15072 * to remove one half of this device without removing the other.
15073 */
15074 pci_dev_put(peer);
15075
15076 return peer;
15077 }
15078
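/* Install default ethtool coalescing parameters, adjusted for chips
 * using CLRTICK mode and for 5705+ limitations.
 */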
15079 static void __devinit tg3_init_coal(struct tg3 *tp)
15080 {
15081 struct ethtool_coalesce *ec = &tp->coal;
15082
15083 memset(ec, 0, sizeof(*ec));
15084 ec->cmd = ETHTOOL_GCOALESCE;
15085 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15086 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15087 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15088 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15089 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15090 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15091 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15092 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15093 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15094
15095 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15096 HOSTCC_MODE_CLRTICK_TXBD)) {
15097 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15098 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15099 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15100 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15101 }
15102
15103 if (tg3_flag(tp, 5705_PLUS)) {
15104 ec->rx_coalesce_usecs_irq = 0;
15105 ec->tx_coalesce_usecs_irq = 0;
15106 ec->stats_block_coalesce_usecs = 0;
15107 }
15108 }
15109
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

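/* Probe routine: enable and map the PCI device, read the chip's
 * invariants, pick DMA masks, advertise offload features, set up the
 * per-vector interrupt mailboxes, and register the net_device.
 */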
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

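	/* Set up the chip's default buffer-manager watermarks. */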
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down
	 * cleanly.  The DMA self test will enable WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

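	/* Wire each NAPI context to its interrupt, receive-return, and
	 * send-host mailbox registers.  The comment inside the loop
	 * explains how the values advance for MSI-X.
	 */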
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume from a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		if (!tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
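/* Quiesce the device on system suspend and prepare it for power-down.
 * If that fails, restart the hardware so the interface stays usable.
 */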
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

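/* Reprogram the hardware and restart the interface on system resume. */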
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	/* Restore the config space saved at probe time, then re-save it
	 * so a later recovery starts from a fresh copy.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);