2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2012 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #if IS_ENABLED(CONFIG_HWMON)
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
52 #include <net/checksum.h>
56 #include <asm/byteorder.h>
57 #include <linux/uaccess.h>
60 #include <asm/idprom.h>
69 /* Functions & macros to verify TG3_FLAGS types */
71 static inline int _tg3_flag(enum TG3_FLAGS flag
, unsigned long *bits
)
73 return test_bit(flag
, bits
);
76 static inline void _tg3_flag_set(enum TG3_FLAGS flag
, unsigned long *bits
)
81 static inline void _tg3_flag_clear(enum TG3_FLAGS flag
, unsigned long *bits
)
83 clear_bit(flag
, bits
);
86 #define tg3_flag(tp, flag) \
87 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
88 #define tg3_flag_set(tp, flag) \
89 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_clear(tp, flag) \
91 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define DRV_MODULE_NAME "tg3"
95 #define TG3_MIN_NUM 124
96 #define DRV_MODULE_VERSION \
97 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
98 #define DRV_MODULE_RELDATE "March 21, 2012"
100 #define RESET_KIND_SHUTDOWN 0
101 #define RESET_KIND_INIT 1
102 #define RESET_KIND_SUSPEND 2
104 #define TG3_DEF_RX_MODE 0
105 #define TG3_DEF_TX_MODE 0
106 #define TG3_DEF_MSG_ENABLE \
116 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
118 /* length of time before we decide the hardware is borked,
119 * and dev->tx_timeout() should be called to fix the problem
122 #define TG3_TX_TIMEOUT (5 * HZ)
124 /* hardware minimum and maximum for a single frame's data payload */
125 #define TG3_MIN_MTU 60
126 #define TG3_MAX_MTU(tp) \
127 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
129 /* These numbers seem to be hard coded in the NIC firmware somehow.
130 * You can't change the ring sizes, but you can change where you place
131 * them in the NIC onboard memory.
133 #define TG3_RX_STD_RING_SIZE(tp) \
134 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
136 #define TG3_DEF_RX_RING_PENDING 200
137 #define TG3_RX_JMB_RING_SIZE(tp) \
138 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
139 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
140 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
142 /* Do not place this n-ring entries value into the tp struct itself,
143 * we really want to expose these constants to GCC so that modulo et
144 * al. operations are done with shifts and masks instead of with
145 * hw multiply/modulo instructions. Another solution would be to
146 * replace things like '% foo' with '& (foo - 1)'.
149 #define TG3_TX_RING_SIZE 512
150 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
152 #define TG3_RX_STD_RING_BYTES(tp) \
153 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
154 #define TG3_RX_JMB_RING_BYTES(tp) \
155 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
156 #define TG3_RX_RCB_RING_BYTES(tp) \
157 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
158 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
160 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
162 #define TG3_DMA_BYTE_ENAB 64
164 #define TG3_RX_STD_DMA_SZ 1536
165 #define TG3_RX_JMB_DMA_SZ 9046
167 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
169 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
170 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
172 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
173 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
175 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
176 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
178 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
179 * that are at least dword aligned when used in PCIX mode. The driver
180 * works around this bug by double copying the packet. This workaround
181 * is built into the normal double copy length check for efficiency.
183 * However, the double copy is only necessary on those architectures
184 * where unaligned memory accesses are inefficient. For those architectures
185 * where unaligned memory accesses incur little penalty, we can reintegrate
186 * the 5701 in the normal rx path. Doing so saves a device structure
187 * dereference by hardcoding the double copy threshold in place.
189 #define TG3_RX_COPY_THRESHOLD 256
190 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
191 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
193 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
196 #if (NET_IP_ALIGN != 0)
197 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
199 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
202 /* minimum number of free TX descriptors required to wake up TX process */
203 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
204 #define TG3_TX_BD_DMA_MAX_2K 2048
205 #define TG3_TX_BD_DMA_MAX_4K 4096
207 #define TG3_RAW_IP_ALIGN 2
209 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
210 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
212 #define FIRMWARE_TG3 "tigon/tg3.bin"
213 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
214 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
216 static char version
[] __devinitdata
=
217 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")";
219 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
220 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
221 MODULE_LICENSE("GPL");
222 MODULE_VERSION(DRV_MODULE_VERSION
);
223 MODULE_FIRMWARE(FIRMWARE_TG3
);
224 MODULE_FIRMWARE(FIRMWARE_TG3TSO
);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO5
);
227 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
228 module_param(tg3_debug
, int, 0);
229 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
231 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl
) = {
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5722
)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754
)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754M
)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755
)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755M
)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5756
)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5786
)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787
)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787M
)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787F
)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906
)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906M
)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5784
)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5764
)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5723
)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761
)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761E
)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761S
)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761SE
)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_G
)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_F
)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57780
)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57760
)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57790
)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57788
)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5717
)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5718
)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57781
)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57785
)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57761
)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57765
)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57791
)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57795
)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5719
)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5720
)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57762
)},
306 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
)},
307 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
)},
308 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
)},
309 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
)},
310 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
)},
311 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
)},
312 {PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
)},
313 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
317 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
319 static const struct {
320 const char string
[ETH_GSTRING_LEN
];
321 } ethtool_stats_keys
[] = {
324 { "rx_ucast_packets" },
325 { "rx_mcast_packets" },
326 { "rx_bcast_packets" },
328 { "rx_align_errors" },
329 { "rx_xon_pause_rcvd" },
330 { "rx_xoff_pause_rcvd" },
331 { "rx_mac_ctrl_rcvd" },
332 { "rx_xoff_entered" },
333 { "rx_frame_too_long_errors" },
335 { "rx_undersize_packets" },
336 { "rx_in_length_errors" },
337 { "rx_out_length_errors" },
338 { "rx_64_or_less_octet_packets" },
339 { "rx_65_to_127_octet_packets" },
340 { "rx_128_to_255_octet_packets" },
341 { "rx_256_to_511_octet_packets" },
342 { "rx_512_to_1023_octet_packets" },
343 { "rx_1024_to_1522_octet_packets" },
344 { "rx_1523_to_2047_octet_packets" },
345 { "rx_2048_to_4095_octet_packets" },
346 { "rx_4096_to_8191_octet_packets" },
347 { "rx_8192_to_9022_octet_packets" },
354 { "tx_flow_control" },
356 { "tx_single_collisions" },
357 { "tx_mult_collisions" },
359 { "tx_excessive_collisions" },
360 { "tx_late_collisions" },
361 { "tx_collide_2times" },
362 { "tx_collide_3times" },
363 { "tx_collide_4times" },
364 { "tx_collide_5times" },
365 { "tx_collide_6times" },
366 { "tx_collide_7times" },
367 { "tx_collide_8times" },
368 { "tx_collide_9times" },
369 { "tx_collide_10times" },
370 { "tx_collide_11times" },
371 { "tx_collide_12times" },
372 { "tx_collide_13times" },
373 { "tx_collide_14times" },
374 { "tx_collide_15times" },
375 { "tx_ucast_packets" },
376 { "tx_mcast_packets" },
377 { "tx_bcast_packets" },
378 { "tx_carrier_sense_errors" },
382 { "dma_writeq_full" },
383 { "dma_write_prioq_full" },
387 { "rx_threshold_hit" },
389 { "dma_readq_full" },
390 { "dma_read_prioq_full" },
391 { "tx_comp_queue_full" },
393 { "ring_set_send_prod_index" },
394 { "ring_status_update" },
396 { "nic_avoided_irqs" },
397 { "nic_tx_threshold_hit" },
399 { "mbuf_lwm_thresh_hit" },
402 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
405 static const struct {
406 const char string
[ETH_GSTRING_LEN
];
407 } ethtool_test_keys
[] = {
408 { "nvram test (online) " },
409 { "link test (online) " },
410 { "register test (offline)" },
411 { "memory test (offline)" },
412 { "mac loopback test (offline)" },
413 { "phy loopback test (offline)" },
414 { "ext loopback test (offline)" },
415 { "interrupt test (offline)" },
418 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
421 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
423 writel(val
, tp
->regs
+ off
);
426 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
428 return readl(tp
->regs
+ off
);
431 static void tg3_ape_write32(struct tg3
*tp
, u32 off
, u32 val
)
433 writel(val
, tp
->aperegs
+ off
);
436 static u32
tg3_ape_read32(struct tg3
*tp
, u32 off
)
438 return readl(tp
->aperegs
+ off
);
441 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
445 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
446 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
447 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
448 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
451 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
453 writel(val
, tp
->regs
+ off
);
454 readl(tp
->regs
+ off
);
457 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
462 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
463 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
464 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
465 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
469 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
473 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
474 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
475 TG3_64BIT_REG_LOW
, val
);
478 if (off
== TG3_RX_STD_PROD_IDX_REG
) {
479 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
480 TG3_64BIT_REG_LOW
, val
);
484 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
485 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
486 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
487 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
489 /* In indirect mode when disabling interrupts, we also need
490 * to clear the interrupt bit in the GRC local ctrl register.
492 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
494 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
495 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
499 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
504 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
505 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
506 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
507 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
511 /* usec_wait specifies the wait time in usec when writing to certain registers
512 * where it is unsafe to read back the register without some delay.
513 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
514 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
516 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
518 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
) || tg3_flag(tp
, ICH_WORKAROUND
))
519 /* Non-posted methods */
520 tp
->write32(tp
, off
, val
);
523 tg3_write32(tp
, off
, val
);
528 /* Wait again after the read for the posted method to guarantee that
529 * the wait time is met.
535 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
537 tp
->write32_mbox(tp
, off
, val
);
538 if (!tg3_flag(tp
, MBOX_WRITE_REORDER
) && !tg3_flag(tp
, ICH_WORKAROUND
))
539 tp
->read32_mbox(tp
, off
);
542 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
544 void __iomem
*mbox
= tp
->regs
+ off
;
546 if (tg3_flag(tp
, TXD_MBOX_HWBUG
))
548 if (tg3_flag(tp
, MBOX_WRITE_REORDER
))
552 static u32
tg3_read32_mbox_5906(struct tg3
*tp
, u32 off
)
554 return readl(tp
->regs
+ off
+ GRCMBOX_BASE
);
557 static void tg3_write32_mbox_5906(struct tg3
*tp
, u32 off
, u32 val
)
559 writel(val
, tp
->regs
+ off
+ GRCMBOX_BASE
);
562 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
563 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
564 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
565 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
566 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
568 #define tw32(reg, val) tp->write32(tp, reg, val)
569 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
570 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
571 #define tr32(reg) tp->read32(tp, reg)
573 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
577 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
578 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
))
581 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
582 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
583 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
584 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
586 /* Always leave this as zero. */
587 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
589 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
590 tw32_f(TG3PCI_MEM_WIN_DATA
, val
);
592 /* Always leave this as zero. */
593 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
595 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
598 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
602 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
603 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
)) {
608 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
609 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
610 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
611 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
613 /* Always leave this as zero. */
614 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
616 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
617 *val
= tr32(TG3PCI_MEM_WIN_DATA
);
619 /* Always leave this as zero. */
620 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
622 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
625 static void tg3_ape_lock_init(struct tg3
*tp
)
630 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
631 regbase
= TG3_APE_LOCK_GRANT
;
633 regbase
= TG3_APE_PER_LOCK_GRANT
;
635 /* Make sure the driver hasn't any stale locks. */
636 for (i
= TG3_APE_LOCK_PHY0
; i
<= TG3_APE_LOCK_GPIO
; i
++) {
638 case TG3_APE_LOCK_PHY0
:
639 case TG3_APE_LOCK_PHY1
:
640 case TG3_APE_LOCK_PHY2
:
641 case TG3_APE_LOCK_PHY3
:
642 bit
= APE_LOCK_GRANT_DRIVER
;
646 bit
= APE_LOCK_GRANT_DRIVER
;
648 bit
= 1 << tp
->pci_fn
;
650 tg3_ape_write32(tp
, regbase
+ 4 * i
, bit
);
655 static int tg3_ape_lock(struct tg3
*tp
, int locknum
)
659 u32 status
, req
, gnt
, bit
;
661 if (!tg3_flag(tp
, ENABLE_APE
))
665 case TG3_APE_LOCK_GPIO
:
666 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
668 case TG3_APE_LOCK_GRC
:
669 case TG3_APE_LOCK_MEM
:
671 bit
= APE_LOCK_REQ_DRIVER
;
673 bit
= 1 << tp
->pci_fn
;
675 case TG3_APE_LOCK_PHY0
:
676 case TG3_APE_LOCK_PHY1
:
677 case TG3_APE_LOCK_PHY2
:
678 case TG3_APE_LOCK_PHY3
:
679 bit
= APE_LOCK_REQ_DRIVER
;
685 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
686 req
= TG3_APE_LOCK_REQ
;
687 gnt
= TG3_APE_LOCK_GRANT
;
689 req
= TG3_APE_PER_LOCK_REQ
;
690 gnt
= TG3_APE_PER_LOCK_GRANT
;
695 tg3_ape_write32(tp
, req
+ off
, bit
);
697 /* Wait for up to 1 millisecond to acquire lock. */
698 for (i
= 0; i
< 100; i
++) {
699 status
= tg3_ape_read32(tp
, gnt
+ off
);
706 /* Revoke the lock request. */
707 tg3_ape_write32(tp
, gnt
+ off
, bit
);
714 static void tg3_ape_unlock(struct tg3
*tp
, int locknum
)
718 if (!tg3_flag(tp
, ENABLE_APE
))
722 case TG3_APE_LOCK_GPIO
:
723 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
725 case TG3_APE_LOCK_GRC
:
726 case TG3_APE_LOCK_MEM
:
728 bit
= APE_LOCK_GRANT_DRIVER
;
730 bit
= 1 << tp
->pci_fn
;
732 case TG3_APE_LOCK_PHY0
:
733 case TG3_APE_LOCK_PHY1
:
734 case TG3_APE_LOCK_PHY2
:
735 case TG3_APE_LOCK_PHY3
:
736 bit
= APE_LOCK_GRANT_DRIVER
;
742 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
743 gnt
= TG3_APE_LOCK_GRANT
;
745 gnt
= TG3_APE_PER_LOCK_GRANT
;
747 tg3_ape_write32(tp
, gnt
+ 4 * locknum
, bit
);
750 static int tg3_ape_event_lock(struct tg3
*tp
, u32 timeout_us
)
755 if (tg3_ape_lock(tp
, TG3_APE_LOCK_MEM
))
758 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
759 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
762 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
765 timeout_us
-= (timeout_us
> 10) ? 10 : timeout_us
;
768 return timeout_us
? 0 : -EBUSY
;
771 static int tg3_ape_wait_for_event(struct tg3
*tp
, u32 timeout_us
)
775 for (i
= 0; i
< timeout_us
/ 10; i
++) {
776 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
778 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
784 return i
== timeout_us
/ 10;
787 int tg3_ape_scratchpad_read(struct tg3
*tp
, u32
*data
, u32 base_off
, u32 len
)
790 u32 i
, bufoff
, msgoff
, maxlen
, apedata
;
792 if (!tg3_flag(tp
, APE_HAS_NCSI
))
795 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
796 if (apedata
!= APE_SEG_SIG_MAGIC
)
799 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
800 if (!(apedata
& APE_FW_STATUS_READY
))
803 bufoff
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_OFF
) +
805 msgoff
= bufoff
+ 2 * sizeof(u32
);
806 maxlen
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_LEN
);
811 /* Cap xfer sizes to scratchpad limits. */
812 length
= (len
> maxlen
) ? maxlen
: len
;
815 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
816 if (!(apedata
& APE_FW_STATUS_READY
))
819 /* Wait for up to 1 msec for APE to service previous event. */
820 err
= tg3_ape_event_lock(tp
, 1000);
824 apedata
= APE_EVENT_STATUS_DRIVER_EVNT
|
825 APE_EVENT_STATUS_SCRTCHPD_READ
|
826 APE_EVENT_STATUS_EVENT_PENDING
;
827 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
, apedata
);
829 tg3_ape_write32(tp
, bufoff
, base_off
);
830 tg3_ape_write32(tp
, bufoff
+ sizeof(u32
), length
);
832 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
833 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
837 if (tg3_ape_wait_for_event(tp
, 30000))
840 for (i
= 0; length
; i
+= 4, length
-= 4) {
841 u32 val
= tg3_ape_read32(tp
, msgoff
+ i
);
842 memcpy(data
, &val
, sizeof(u32
));
850 static int tg3_ape_send_event(struct tg3
*tp
, u32 event
)
855 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
856 if (apedata
!= APE_SEG_SIG_MAGIC
)
859 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
860 if (!(apedata
& APE_FW_STATUS_READY
))
863 /* Wait for up to 1 millisecond for APE to service previous event. */
864 err
= tg3_ape_event_lock(tp
, 1000);
868 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
,
869 event
| APE_EVENT_STATUS_EVENT_PENDING
);
871 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
872 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
877 static void tg3_ape_driver_state_change(struct tg3
*tp
, int kind
)
882 if (!tg3_flag(tp
, ENABLE_APE
))
886 case RESET_KIND_INIT
:
887 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
,
888 APE_HOST_SEG_SIG_MAGIC
);
889 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_LEN
,
890 APE_HOST_SEG_LEN_MAGIC
);
891 apedata
= tg3_ape_read32(tp
, TG3_APE_HOST_INIT_COUNT
);
892 tg3_ape_write32(tp
, TG3_APE_HOST_INIT_COUNT
, ++apedata
);
893 tg3_ape_write32(tp
, TG3_APE_HOST_DRIVER_ID
,
894 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM
, TG3_MIN_NUM
));
895 tg3_ape_write32(tp
, TG3_APE_HOST_BEHAVIOR
,
896 APE_HOST_BEHAV_NO_PHYLOCK
);
897 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
,
898 TG3_APE_HOST_DRVR_STATE_START
);
900 event
= APE_EVENT_STATUS_STATE_START
;
902 case RESET_KIND_SHUTDOWN
:
903 /* With the interface we are currently using,
904 * APE does not track driver state. Wiping
905 * out the HOST SEGMENT SIGNATURE forces
906 * the APE to assume OS absent status.
908 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
, 0x0);
910 if (device_may_wakeup(&tp
->pdev
->dev
) &&
911 tg3_flag(tp
, WOL_ENABLE
)) {
912 tg3_ape_write32(tp
, TG3_APE_HOST_WOL_SPEED
,
913 TG3_APE_HOST_WOL_SPEED_AUTO
);
914 apedata
= TG3_APE_HOST_DRVR_STATE_WOL
;
916 apedata
= TG3_APE_HOST_DRVR_STATE_UNLOAD
;
918 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
, apedata
);
920 event
= APE_EVENT_STATUS_STATE_UNLOAD
;
922 case RESET_KIND_SUSPEND
:
923 event
= APE_EVENT_STATUS_STATE_SUSPEND
;
929 event
|= APE_EVENT_STATUS_DRIVER_EVNT
| APE_EVENT_STATUS_STATE_CHNGE
;
931 tg3_ape_send_event(tp
, event
);
934 static void tg3_disable_ints(struct tg3
*tp
)
938 tw32(TG3PCI_MISC_HOST_CTRL
,
939 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
940 for (i
= 0; i
< tp
->irq_max
; i
++)
941 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 0x00000001);
944 static void tg3_enable_ints(struct tg3
*tp
)
951 tw32(TG3PCI_MISC_HOST_CTRL
,
952 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
954 tp
->coal_now
= tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
;
955 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
956 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
958 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
959 if (tg3_flag(tp
, 1SHOT_MSI
))
960 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
962 tp
->coal_now
|= tnapi
->coal_now
;
965 /* Force an initial interrupt */
966 if (!tg3_flag(tp
, TAGGED_STATUS
) &&
967 (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
))
968 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
970 tw32(HOSTCC_MODE
, tp
->coal_now
);
972 tp
->coal_now
&= ~(tp
->napi
[0].coal_now
| tp
->napi
[1].coal_now
);
975 static inline unsigned int tg3_has_work(struct tg3_napi
*tnapi
)
977 struct tg3
*tp
= tnapi
->tp
;
978 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
979 unsigned int work_exists
= 0;
981 /* check for phy events */
982 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
983 if (sblk
->status
& SD_STATUS_LINK_CHG
)
987 /* check for TX work to do */
988 if (sblk
->idx
[0].tx_consumer
!= tnapi
->tx_cons
)
991 /* check for RX work to do */
992 if (tnapi
->rx_rcb_prod_idx
&&
993 *(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
1000 * similar to tg3_enable_ints, but it accurately determines whether there
1001 * is new work pending and can return without flushing the PIO write
1002 * which reenables interrupts
1004 static void tg3_int_reenable(struct tg3_napi
*tnapi
)
1006 struct tg3
*tp
= tnapi
->tp
;
1008 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1011 /* When doing tagged status, this work check is unnecessary.
1012 * The last_tag we write above tells the chip which piece of
1013 * work we've completed.
1015 if (!tg3_flag(tp
, TAGGED_STATUS
) && tg3_has_work(tnapi
))
1016 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
1017 HOSTCC_MODE_ENABLE
| tnapi
->coal_now
);
1020 static void tg3_switch_clocks(struct tg3
*tp
)
1023 u32 orig_clock_ctrl
;
1025 if (tg3_flag(tp
, CPMU_PRESENT
) || tg3_flag(tp
, 5780_CLASS
))
1028 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
1030 orig_clock_ctrl
= clock_ctrl
;
1031 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
1032 CLOCK_CTRL_CLKRUN_OENABLE
|
1034 tp
->pci_clock_ctrl
= clock_ctrl
;
1036 if (tg3_flag(tp
, 5705_PLUS
)) {
1037 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
1038 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1039 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
1041 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
1042 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1044 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
1046 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1047 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
1050 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
1053 #define PHY_BUSY_LOOPS 5000
1055 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
1061 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1063 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1067 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1071 frame_val
= ((tp
->phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1072 MI_COM_PHY_ADDR_MASK
);
1073 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1074 MI_COM_REG_ADDR_MASK
);
1075 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
1077 tw32_f(MAC_MI_COM
, frame_val
);
1079 loops
= PHY_BUSY_LOOPS
;
1080 while (loops
!= 0) {
1082 frame_val
= tr32(MAC_MI_COM
);
1084 if ((frame_val
& MI_COM_BUSY
) == 0) {
1086 frame_val
= tr32(MAC_MI_COM
);
1094 *val
= frame_val
& MI_COM_DATA_MASK
;
1098 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1099 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1103 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1108 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
1114 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
1115 (reg
== MII_CTRL1000
|| reg
== MII_TG3_AUX_CTRL
))
1118 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1120 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1124 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1126 frame_val
= ((tp
->phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1127 MI_COM_PHY_ADDR_MASK
);
1128 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1129 MI_COM_REG_ADDR_MASK
);
1130 frame_val
|= (val
& MI_COM_DATA_MASK
);
1131 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
1133 tw32_f(MAC_MI_COM
, frame_val
);
1135 loops
= PHY_BUSY_LOOPS
;
1136 while (loops
!= 0) {
1138 frame_val
= tr32(MAC_MI_COM
);
1139 if ((frame_val
& MI_COM_BUSY
) == 0) {
1141 frame_val
= tr32(MAC_MI_COM
);
1151 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1152 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1156 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1161 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
1165 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1169 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1173 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1174 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1178 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1184 static int tg3_phy_cl45_read(struct tg3
*tp
, u32 devad
, u32 addr
, u32
*val
)
1188 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1192 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1196 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1197 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1201 err
= tg3_readphy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1207 static int tg3_phydsp_read(struct tg3
*tp
, u32 reg
, u32
*val
)
1211 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1213 err
= tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1218 static int tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
1222 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1224 err
= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1229 static int tg3_phy_auxctl_read(struct tg3
*tp
, int reg
, u32
*val
)
1233 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
1234 (reg
<< MII_TG3_AUXCTL_MISC_RDSEL_SHIFT
) |
1235 MII_TG3_AUXCTL_SHDWSEL_MISC
);
1237 err
= tg3_readphy(tp
, MII_TG3_AUX_CTRL
, val
);
1242 static int tg3_phy_auxctl_write(struct tg3
*tp
, int reg
, u32 set
)
1244 if (reg
== MII_TG3_AUXCTL_SHDWSEL_MISC
)
1245 set
|= MII_TG3_AUXCTL_MISC_WREN
;
1247 return tg3_writephy(tp
, MII_TG3_AUX_CTRL
, set
| reg
);
1250 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1251 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1252 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1253 MII_TG3_AUXCTL_ACTL_TX_6DB)
1255 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1256 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1257 MII_TG3_AUXCTL_ACTL_TX_6DB);
1259 static int tg3_bmcr_reset(struct tg3
*tp
)
1264 /* OK, reset it, and poll the BMCR_RESET bit until it
1265 * clears or we time out.
1267 phy_control
= BMCR_RESET
;
1268 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1274 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1278 if ((phy_control
& BMCR_RESET
) == 0) {
1290 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1292 struct tg3
*tp
= bp
->priv
;
1295 spin_lock_bh(&tp
->lock
);
1297 if (tg3_readphy(tp
, reg
, &val
))
1300 spin_unlock_bh(&tp
->lock
);
1305 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1307 struct tg3
*tp
= bp
->priv
;
1310 spin_lock_bh(&tp
->lock
);
1312 if (tg3_writephy(tp
, reg
, val
))
1315 spin_unlock_bh(&tp
->lock
);
1320 static int tg3_mdio_reset(struct mii_bus
*bp
)
1325 static void tg3_mdio_config_5785(struct tg3
*tp
)
1328 struct phy_device
*phydev
;
1330 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1331 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1332 case PHY_ID_BCM50610
:
1333 case PHY_ID_BCM50610M
:
1334 val
= MAC_PHYCFG2_50610_LED_MODES
;
1336 case PHY_ID_BCMAC131
:
1337 val
= MAC_PHYCFG2_AC131_LED_MODES
;
1339 case PHY_ID_RTL8211C
:
1340 val
= MAC_PHYCFG2_RTL8211C_LED_MODES
;
1342 case PHY_ID_RTL8201E
:
1343 val
= MAC_PHYCFG2_RTL8201E_LED_MODES
;
1349 if (phydev
->interface
!= PHY_INTERFACE_MODE_RGMII
) {
1350 tw32(MAC_PHYCFG2
, val
);
1352 val
= tr32(MAC_PHYCFG1
);
1353 val
&= ~(MAC_PHYCFG1_RGMII_INT
|
1354 MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
);
1355 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
;
1356 tw32(MAC_PHYCFG1
, val
);
1361 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1362 val
|= MAC_PHYCFG2_EMODE_MASK_MASK
|
1363 MAC_PHYCFG2_FMODE_MASK_MASK
|
1364 MAC_PHYCFG2_GMODE_MASK_MASK
|
1365 MAC_PHYCFG2_ACT_MASK_MASK
|
1366 MAC_PHYCFG2_QUAL_MASK_MASK
|
1367 MAC_PHYCFG2_INBAND_ENABLE
;
1369 tw32(MAC_PHYCFG2
, val
);
1371 val
= tr32(MAC_PHYCFG1
);
1372 val
&= ~(MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
|
1373 MAC_PHYCFG1_RGMII_EXT_RX_DEC
| MAC_PHYCFG1_RGMII_SND_STAT_EN
);
1374 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1375 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1376 val
|= MAC_PHYCFG1_RGMII_EXT_RX_DEC
;
1377 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1378 val
|= MAC_PHYCFG1_RGMII_SND_STAT_EN
;
1380 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
|
1381 MAC_PHYCFG1_RGMII_INT
| MAC_PHYCFG1_TXC_DRV
;
1382 tw32(MAC_PHYCFG1
, val
);
1384 val
= tr32(MAC_EXT_RGMII_MODE
);
1385 val
&= ~(MAC_RGMII_MODE_RX_INT_B
|
1386 MAC_RGMII_MODE_RX_QUALITY
|
1387 MAC_RGMII_MODE_RX_ACTIVITY
|
1388 MAC_RGMII_MODE_RX_ENG_DET
|
1389 MAC_RGMII_MODE_TX_ENABLE
|
1390 MAC_RGMII_MODE_TX_LOWPWR
|
1391 MAC_RGMII_MODE_TX_RESET
);
1392 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1393 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1394 val
|= MAC_RGMII_MODE_RX_INT_B
|
1395 MAC_RGMII_MODE_RX_QUALITY
|
1396 MAC_RGMII_MODE_RX_ACTIVITY
|
1397 MAC_RGMII_MODE_RX_ENG_DET
;
1398 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1399 val
|= MAC_RGMII_MODE_TX_ENABLE
|
1400 MAC_RGMII_MODE_TX_LOWPWR
|
1401 MAC_RGMII_MODE_TX_RESET
;
1403 tw32(MAC_EXT_RGMII_MODE
, val
);
1406 static void tg3_mdio_start(struct tg3
*tp
)
1408 tp
->mi_mode
&= ~MAC_MI_MODE_AUTO_POLL
;
1409 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1412 if (tg3_flag(tp
, MDIOBUS_INITED
) &&
1413 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
1414 tg3_mdio_config_5785(tp
);
1417 static int tg3_mdio_init(struct tg3
*tp
)
1421 struct phy_device
*phydev
;
1423 if (tg3_flag(tp
, 5717_PLUS
)) {
1426 tp
->phy_addr
= tp
->pci_fn
+ 1;
1428 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5717_A0
)
1429 is_serdes
= tr32(SG_DIG_STATUS
) & SG_DIG_IS_SERDES
;
1431 is_serdes
= tr32(TG3_CPMU_PHY_STRAP
) &
1432 TG3_CPMU_PHY_STRAP_IS_SERDES
;
1436 tp
->phy_addr
= TG3_PHY_MII_ADDR
;
1440 if (!tg3_flag(tp
, USE_PHYLIB
) || tg3_flag(tp
, MDIOBUS_INITED
))
1443 tp
->mdio_bus
= mdiobus_alloc();
1444 if (tp
->mdio_bus
== NULL
)
1447 tp
->mdio_bus
->name
= "tg3 mdio bus";
1448 snprintf(tp
->mdio_bus
->id
, MII_BUS_ID_SIZE
, "%x",
1449 (tp
->pdev
->bus
->number
<< 8) | tp
->pdev
->devfn
);
1450 tp
->mdio_bus
->priv
= tp
;
1451 tp
->mdio_bus
->parent
= &tp
->pdev
->dev
;
1452 tp
->mdio_bus
->read
= &tg3_mdio_read
;
1453 tp
->mdio_bus
->write
= &tg3_mdio_write
;
1454 tp
->mdio_bus
->reset
= &tg3_mdio_reset
;
1455 tp
->mdio_bus
->phy_mask
= ~(1 << TG3_PHY_MII_ADDR
);
1456 tp
->mdio_bus
->irq
= &tp
->mdio_irq
[0];
1458 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1459 tp
->mdio_bus
->irq
[i
] = PHY_POLL
;
1461 /* The bus registration will look for all the PHYs on the mdio bus.
1462 * Unfortunately, it does not ensure the PHY is powered up before
1463 * accessing the PHY ID registers. A chip reset is the
1464 * quickest way to bring the device back to an operational state..
1466 if (tg3_readphy(tp
, MII_BMCR
, ®
) || (reg
& BMCR_PDOWN
))
1469 i
= mdiobus_register(tp
->mdio_bus
);
1471 dev_warn(&tp
->pdev
->dev
, "mdiobus_reg failed (0x%x)\n", i
);
1472 mdiobus_free(tp
->mdio_bus
);
1476 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1478 if (!phydev
|| !phydev
->drv
) {
1479 dev_warn(&tp
->pdev
->dev
, "No PHY devices\n");
1480 mdiobus_unregister(tp
->mdio_bus
);
1481 mdiobus_free(tp
->mdio_bus
);
1485 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1486 case PHY_ID_BCM57780
:
1487 phydev
->interface
= PHY_INTERFACE_MODE_GMII
;
1488 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1490 case PHY_ID_BCM50610
:
1491 case PHY_ID_BCM50610M
:
1492 phydev
->dev_flags
|= PHY_BRCM_CLEAR_RGMII_MODE
|
1493 PHY_BRCM_RX_REFCLK_UNUSED
|
1494 PHY_BRCM_DIS_TXCRXC_NOENRGY
|
1495 PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1496 if (tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1497 phydev
->dev_flags
|= PHY_BRCM_STD_IBND_DISABLE
;
1498 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1499 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_RX_ENABLE
;
1500 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1501 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_TX_ENABLE
;
1503 case PHY_ID_RTL8211C
:
1504 phydev
->interface
= PHY_INTERFACE_MODE_RGMII
;
1506 case PHY_ID_RTL8201E
:
1507 case PHY_ID_BCMAC131
:
1508 phydev
->interface
= PHY_INTERFACE_MODE_MII
;
1509 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1510 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
1514 tg3_flag_set(tp
, MDIOBUS_INITED
);
1516 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
1517 tg3_mdio_config_5785(tp
);
1522 static void tg3_mdio_fini(struct tg3
*tp
)
1524 if (tg3_flag(tp
, MDIOBUS_INITED
)) {
1525 tg3_flag_clear(tp
, MDIOBUS_INITED
);
1526 mdiobus_unregister(tp
->mdio_bus
);
1527 mdiobus_free(tp
->mdio_bus
);
1531 /* tp->lock is held. */
1532 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1536 val
= tr32(GRC_RX_CPU_EVENT
);
1537 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1538 tw32_f(GRC_RX_CPU_EVENT
, val
);
1540 tp
->last_event_jiffies
= jiffies
;
1543 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1545 /* tp->lock is held. */
1546 static void tg3_wait_for_event_ack(struct tg3
*tp
)
1549 unsigned int delay_cnt
;
1552 /* If enough time has passed, no wait is necessary. */
1553 time_remain
= (long)(tp
->last_event_jiffies
+ 1 +
1554 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC
)) -
1556 if (time_remain
< 0)
1559 /* Check if we can shorten the wait time. */
1560 delay_cnt
= jiffies_to_usecs(time_remain
);
1561 if (delay_cnt
> TG3_FW_EVENT_TIMEOUT_USEC
)
1562 delay_cnt
= TG3_FW_EVENT_TIMEOUT_USEC
;
1563 delay_cnt
= (delay_cnt
>> 3) + 1;
1565 for (i
= 0; i
< delay_cnt
; i
++) {
1566 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
1572 /* tp->lock is held. */
1573 static void tg3_phy_gather_ump_data(struct tg3
*tp
, u32
*data
)
1578 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1580 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1581 val
|= (reg
& 0xffff);
1585 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1587 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1588 val
|= (reg
& 0xffff);
1592 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1593 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1595 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1596 val
|= (reg
& 0xffff);
1600 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1607 /* tp->lock is held. */
1608 static void tg3_ump_link_report(struct tg3
*tp
)
1612 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1615 tg3_phy_gather_ump_data(tp
, data
);
1617 tg3_wait_for_event_ack(tp
);
1619 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1620 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1621 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1622 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1623 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1624 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1626 tg3_generate_fw_event(tp
);
1629 /* tp->lock is held. */
1630 static void tg3_stop_fw(struct tg3
*tp
)
1632 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
1633 /* Wait for RX cpu to ACK the previous event. */
1634 tg3_wait_for_event_ack(tp
);
1636 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
1638 tg3_generate_fw_event(tp
);
1640 /* Wait for RX cpu to ACK this event. */
1641 tg3_wait_for_event_ack(tp
);
1645 /* tp->lock is held. */
1646 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1648 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1649 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1651 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1653 case RESET_KIND_INIT
:
1654 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1658 case RESET_KIND_SHUTDOWN
:
1659 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1663 case RESET_KIND_SUSPEND
:
1664 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1673 if (kind
== RESET_KIND_INIT
||
1674 kind
== RESET_KIND_SUSPEND
)
1675 tg3_ape_driver_state_change(tp
, kind
);
1678 /* tp->lock is held. */
1679 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1681 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1683 case RESET_KIND_INIT
:
1684 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1685 DRV_STATE_START_DONE
);
1688 case RESET_KIND_SHUTDOWN
:
1689 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1690 DRV_STATE_UNLOAD_DONE
);
1698 if (kind
== RESET_KIND_SHUTDOWN
)
1699 tg3_ape_driver_state_change(tp
, kind
);
1702 /* tp->lock is held. */
1703 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1705 if (tg3_flag(tp
, ENABLE_ASF
)) {
1707 case RESET_KIND_INIT
:
1708 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1712 case RESET_KIND_SHUTDOWN
:
1713 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1717 case RESET_KIND_SUSPEND
:
1718 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1728 static int tg3_poll_fw(struct tg3
*tp
)
1733 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1734 /* Wait up to 20ms for init done. */
1735 for (i
= 0; i
< 200; i
++) {
1736 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
1743 /* Wait for firmware initialization to complete. */
1744 for (i
= 0; i
< 100000; i
++) {
1745 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
1746 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1751 /* Chip might not be fitted with firmware. Some Sun onboard
1752 * parts are configured like that. So don't signal the timeout
1753 * of the above loop as an error, but do report the lack of
1754 * running firmware once.
1756 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1757 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1759 netdev_info(tp
->dev
, "No firmware running\n");
1762 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
) {
1763 /* The 57765 A0 needs a little more
1764 * time to do some important work.
1772 static void tg3_link_report(struct tg3
*tp
)
1774 if (!netif_carrier_ok(tp
->dev
)) {
1775 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1776 tg3_ump_link_report(tp
);
1777 } else if (netif_msg_link(tp
)) {
1778 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1779 (tp
->link_config
.active_speed
== SPEED_1000
?
1781 (tp
->link_config
.active_speed
== SPEED_100
?
1783 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1786 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1787 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1789 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1792 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
1793 netdev_info(tp
->dev
, "EEE is %s\n",
1794 tp
->setlpicnt
? "enabled" : "disabled");
1796 tg3_ump_link_report(tp
);
1800 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1804 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1805 miireg
= ADVERTISE_1000XPAUSE
;
1806 else if (flow_ctrl
& FLOW_CTRL_TX
)
1807 miireg
= ADVERTISE_1000XPSE_ASYM
;
1808 else if (flow_ctrl
& FLOW_CTRL_RX
)
1809 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1816 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1820 if (lcladv
& rmtadv
& ADVERTISE_1000XPAUSE
) {
1821 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1822 } else if (lcladv
& rmtadv
& ADVERTISE_1000XPSE_ASYM
) {
1823 if (lcladv
& ADVERTISE_1000XPAUSE
)
1825 if (rmtadv
& ADVERTISE_1000XPAUSE
)
1832 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1836 u32 old_rx_mode
= tp
->rx_mode
;
1837 u32 old_tx_mode
= tp
->tx_mode
;
1839 if (tg3_flag(tp
, USE_PHYLIB
))
1840 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1842 autoneg
= tp
->link_config
.autoneg
;
1844 if (autoneg
== AUTONEG_ENABLE
&& tg3_flag(tp
, PAUSE_AUTONEG
)) {
1845 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1846 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1848 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1850 flowctrl
= tp
->link_config
.flowctrl
;
1852 tp
->link_config
.active_flowctrl
= flowctrl
;
1854 if (flowctrl
& FLOW_CTRL_RX
)
1855 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1857 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1859 if (old_rx_mode
!= tp
->rx_mode
)
1860 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1862 if (flowctrl
& FLOW_CTRL_TX
)
1863 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1865 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1867 if (old_tx_mode
!= tp
->tx_mode
)
1868 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1871 static void tg3_adjust_link(struct net_device
*dev
)
1873 u8 oldflowctrl
, linkmesg
= 0;
1874 u32 mac_mode
, lcl_adv
, rmt_adv
;
1875 struct tg3
*tp
= netdev_priv(dev
);
1876 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1878 spin_lock_bh(&tp
->lock
);
1880 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
1881 MAC_MODE_HALF_DUPLEX
);
1883 oldflowctrl
= tp
->link_config
.active_flowctrl
;
1889 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
1890 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1891 else if (phydev
->speed
== SPEED_1000
||
1892 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
)
1893 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1895 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1897 if (phydev
->duplex
== DUPLEX_HALF
)
1898 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1900 lcl_adv
= mii_advertise_flowctrl(
1901 tp
->link_config
.flowctrl
);
1904 rmt_adv
= LPA_PAUSE_CAP
;
1905 if (phydev
->asym_pause
)
1906 rmt_adv
|= LPA_PAUSE_ASYM
;
1909 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
1911 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1913 if (mac_mode
!= tp
->mac_mode
) {
1914 tp
->mac_mode
= mac_mode
;
1915 tw32_f(MAC_MODE
, tp
->mac_mode
);
1919 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
) {
1920 if (phydev
->speed
== SPEED_10
)
1922 MAC_MI_STAT_10MBPS_MODE
|
1923 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1925 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1928 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
1929 tw32(MAC_TX_LENGTHS
,
1930 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1931 (6 << TX_LENGTHS_IPG_SHIFT
) |
1932 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1934 tw32(MAC_TX_LENGTHS
,
1935 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1936 (6 << TX_LENGTHS_IPG_SHIFT
) |
1937 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1939 if (phydev
->link
!= tp
->old_link
||
1940 phydev
->speed
!= tp
->link_config
.active_speed
||
1941 phydev
->duplex
!= tp
->link_config
.active_duplex
||
1942 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
1945 tp
->old_link
= phydev
->link
;
1946 tp
->link_config
.active_speed
= phydev
->speed
;
1947 tp
->link_config
.active_duplex
= phydev
->duplex
;
1949 spin_unlock_bh(&tp
->lock
);
1952 tg3_link_report(tp
);
1955 static int tg3_phy_init(struct tg3
*tp
)
1957 struct phy_device
*phydev
;
1959 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
1962 /* Bring the PHY back to a known state. */
1965 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1967 /* Attach the MAC to the PHY. */
1968 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
), tg3_adjust_link
,
1969 phydev
->dev_flags
, phydev
->interface
);
1970 if (IS_ERR(phydev
)) {
1971 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
1972 return PTR_ERR(phydev
);
1975 /* Mask with MAC supported features. */
1976 switch (phydev
->interface
) {
1977 case PHY_INTERFACE_MODE_GMII
:
1978 case PHY_INTERFACE_MODE_RGMII
:
1979 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
1980 phydev
->supported
&= (PHY_GBIT_FEATURES
|
1982 SUPPORTED_Asym_Pause
);
1986 case PHY_INTERFACE_MODE_MII
:
1987 phydev
->supported
&= (PHY_BASIC_FEATURES
|
1989 SUPPORTED_Asym_Pause
);
1992 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1996 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
1998 phydev
->advertising
= phydev
->supported
;
2003 static void tg3_phy_start(struct tg3
*tp
)
2005 struct phy_device
*phydev
;
2007 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2010 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2012 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
2013 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
2014 phydev
->speed
= tp
->link_config
.speed
;
2015 phydev
->duplex
= tp
->link_config
.duplex
;
2016 phydev
->autoneg
= tp
->link_config
.autoneg
;
2017 phydev
->advertising
= tp
->link_config
.advertising
;
2022 phy_start_aneg(phydev
);
2025 static void tg3_phy_stop(struct tg3
*tp
)
2027 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2030 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2033 static void tg3_phy_fini(struct tg3
*tp
)
2035 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
2036 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2037 tp
->phy_flags
&= ~TG3_PHYFLG_IS_CONNECTED
;
2041 static int tg3_phy_set_extloopbk(struct tg3
*tp
)
2046 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
2049 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2050 /* Cannot do read-modify-write on 5401 */
2051 err
= tg3_phy_auxctl_write(tp
,
2052 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2053 MII_TG3_AUXCTL_ACTL_EXTLOOPBK
|
2058 err
= tg3_phy_auxctl_read(tp
,
2059 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2063 val
|= MII_TG3_AUXCTL_ACTL_EXTLOOPBK
;
2064 err
= tg3_phy_auxctl_write(tp
,
2065 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, val
);
2071 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
2075 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2078 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2079 phytest
| MII_TG3_FET_SHADOW_EN
);
2080 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2082 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2084 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2085 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2087 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
2091 static void tg3_phy_toggle_apd(struct tg3
*tp
, bool enable
)
2095 if (!tg3_flag(tp
, 5705_PLUS
) ||
2096 (tg3_flag(tp
, 5717_PLUS
) &&
2097 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)))
2100 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2101 tg3_phy_fet_toggle_apd(tp
, enable
);
2105 reg
= MII_TG3_MISC_SHDW_WREN
|
2106 MII_TG3_MISC_SHDW_SCR5_SEL
|
2107 MII_TG3_MISC_SHDW_SCR5_LPED
|
2108 MII_TG3_MISC_SHDW_SCR5_DLPTLM
|
2109 MII_TG3_MISC_SHDW_SCR5_SDTL
|
2110 MII_TG3_MISC_SHDW_SCR5_C125OE
;
2111 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
|| !enable
)
2112 reg
|= MII_TG3_MISC_SHDW_SCR5_DLLAPD
;
2114 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
2117 reg
= MII_TG3_MISC_SHDW_WREN
|
2118 MII_TG3_MISC_SHDW_APD_SEL
|
2119 MII_TG3_MISC_SHDW_APD_WKTM_84MS
;
2121 reg
|= MII_TG3_MISC_SHDW_APD_ENABLE
;
2123 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
2126 static void tg3_phy_toggle_automdix(struct tg3
*tp
, int enable
)
2130 if (!tg3_flag(tp
, 5705_PLUS
) ||
2131 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
2134 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2137 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &ephy
)) {
2138 u32 reg
= MII_TG3_FET_SHDW_MISCCTRL
;
2140 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2141 ephy
| MII_TG3_FET_SHADOW_EN
);
2142 if (!tg3_readphy(tp
, reg
, &phy
)) {
2144 phy
|= MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
2146 phy
&= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
2147 tg3_writephy(tp
, reg
, phy
);
2149 tg3_writephy(tp
, MII_TG3_FET_TEST
, ephy
);
2154 ret
= tg3_phy_auxctl_read(tp
,
2155 MII_TG3_AUXCTL_SHDWSEL_MISC
, &phy
);
2158 phy
|= MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
2160 phy
&= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
2161 tg3_phy_auxctl_write(tp
,
2162 MII_TG3_AUXCTL_SHDWSEL_MISC
, phy
);
2167 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
2172 if (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
)
2175 ret
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
, &val
);
2177 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
,
2178 val
| MII_TG3_AUXCTL_MISC_WIRESPD_EN
);
2181 static void tg3_phy_apply_otp(struct tg3
*tp
)
2190 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
))
2193 phy
= ((otp
& TG3_OTP_AGCTGT_MASK
) >> TG3_OTP_AGCTGT_SHIFT
);
2194 phy
|= MII_TG3_DSP_TAP1_AGCTGT_DFLT
;
2195 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP1
, phy
);
2197 phy
= ((otp
& TG3_OTP_HPFFLTR_MASK
) >> TG3_OTP_HPFFLTR_SHIFT
) |
2198 ((otp
& TG3_OTP_HPFOVER_MASK
) >> TG3_OTP_HPFOVER_SHIFT
);
2199 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH0
, phy
);
2201 phy
= ((otp
& TG3_OTP_LPFDIS_MASK
) >> TG3_OTP_LPFDIS_SHIFT
);
2202 phy
|= MII_TG3_DSP_AADJ1CH3_ADCCKADJ
;
2203 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH3
, phy
);
2205 phy
= ((otp
& TG3_OTP_VDAC_MASK
) >> TG3_OTP_VDAC_SHIFT
);
2206 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP75
, phy
);
2208 phy
= ((otp
& TG3_OTP_10BTAMP_MASK
) >> TG3_OTP_10BTAMP_SHIFT
);
2209 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP96
, phy
);
2211 phy
= ((otp
& TG3_OTP_ROFF_MASK
) >> TG3_OTP_ROFF_SHIFT
) |
2212 ((otp
& TG3_OTP_RCOFF_MASK
) >> TG3_OTP_RCOFF_SHIFT
);
2213 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP97
, phy
);
2215 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
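
/* Each PCI function owns a 4-bit field in the GPIO message word
 * (shifted by 4 * tp->pci_fn), advertising driver presence and
 * whether that function still needs auxiliary power.
 */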
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
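
/* Aux power handling for 5717-class devices: the per-function GPIO
 * message word above decides whether the board may drop back to Vmain
 * or must stay on Vaux (WoL, ASF or APE active on any port).
 */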
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
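
/* Atmel AT45DB0x1B parts are page addressed; the two helpers below
 * translate between the linear offsets used by callers and the
 * page:offset form the flash expects, and back again.
 */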
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_setup_phy(struct tg3 *, int);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val) {
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
				tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			}
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
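
/* Software 1000BASE-X autoneg state machine.  The caller ticks it
 * periodically; it returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or
 * ANEG_FAILED depending on how negotiation is progressing.
 */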
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4860 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
4863 u32 sg_dig_ctrl
, sg_dig_status
;
4864 u32 serdes_cfg
, expected_sg_dig_ctrl
;
4865 int workaround
, port_a
;
4866 int current_link_up
;
4869 expected_sg_dig_ctrl
= 0;
4872 current_link_up
= 0;
4874 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
4875 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
4877 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
4880 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4881 /* preserve bits 20-23 for voltage regulator */
4882 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
4885 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
4887 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
4888 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
4890 u32 val
= serdes_cfg
;
4896 tw32_f(MAC_SERDES_CFG
, val
);
4899 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
4901 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
4902 tg3_setup_flow_control(tp
, 0, 0);
4903 current_link_up
= 1;
4908 /* Want auto-negotiation. */
4909 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
4911 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
4912 if (flowctrl
& ADVERTISE_1000XPAUSE
)
4913 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
4914 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
4915 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
4917 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
4918 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
4919 tp
->serdes_counter
&&
4920 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
4921 MAC_STATUS_RCVD_CFG
)) ==
4922 MAC_STATUS_PCS_SYNCED
)) {
4923 tp
->serdes_counter
--;
4924 current_link_up
= 1;
4929 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
4930 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
4932 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
4934 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
4935 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4936 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
4937 MAC_STATUS_SIGNAL_DET
)) {
4938 sg_dig_status
= tr32(SG_DIG_STATUS
);
4939 mac_status
= tr32(MAC_STATUS
);
4941 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
4942 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
4943 u32 local_adv
= 0, remote_adv
= 0;
4945 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
4946 local_adv
|= ADVERTISE_1000XPAUSE
;
4947 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
4948 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
4950 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
4951 remote_adv
|= LPA_1000XPAUSE
;
4952 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
4953 remote_adv
|= LPA_1000XPAUSE_ASYM
;
4955 tp
->link_config
.rmt_adv
=
4956 mii_adv_to_ethtool_adv_x(remote_adv
);
4958 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
4959 current_link_up
= 1;
4960 tp
->serdes_counter
= 0;
4961 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4962 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
4963 if (tp
->serdes_counter
)
4964 tp
->serdes_counter
--;
4967 u32 val
= serdes_cfg
;
4974 tw32_f(MAC_SERDES_CFG
, val
);
4977 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
4980 /* Link parallel detection - link is up */
4981 /* only if we have PCS_SYNC and not */
4982 /* receiving config code words */
4983 mac_status
= tr32(MAC_STATUS
);
4984 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
4985 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
4986 tg3_setup_flow_control(tp
, 0, 0);
4987 current_link_up
= 1;
4989 TG3_PHYFLG_PARALLEL_DETECT
;
4990 tp
->serdes_counter
=
4991 SERDES_PARALLEL_DET_TIMEOUT
;
4993 goto restart_autoneg
;
4997 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
4998 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5002 return current_link_up
;
5005 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
5007 int current_link_up
= 0;
5009 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
))
5012 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5013 u32 txflags
, rxflags
;
5016 if (fiber_autoneg(tp
, &txflags
, &rxflags
)) {
5017 u32 local_adv
= 0, remote_adv
= 0;
5019 if (txflags
& ANEG_CFG_PS1
)
5020 local_adv
|= ADVERTISE_1000XPAUSE
;
5021 if (txflags
& ANEG_CFG_PS2
)
5022 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5024 if (rxflags
& MR_LP_ADV_SYM_PAUSE
)
5025 remote_adv
|= LPA_1000XPAUSE
;
5026 if (rxflags
& MR_LP_ADV_ASYM_PAUSE
)
5027 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5029 tp
->link_config
.rmt_adv
=
5030 mii_adv_to_ethtool_adv_x(remote_adv
);
5032 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5034 current_link_up
= 1;
5036 for (i
= 0; i
< 30; i
++) {
5039 (MAC_STATUS_SYNC_CHANGED
|
5040 MAC_STATUS_CFG_CHANGED
));
5042 if ((tr32(MAC_STATUS
) &
5043 (MAC_STATUS_SYNC_CHANGED
|
5044 MAC_STATUS_CFG_CHANGED
)) == 0)
5048 mac_status
= tr32(MAC_STATUS
);
5049 if (current_link_up
== 0 &&
5050 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5051 !(mac_status
& MAC_STATUS_RCVD_CFG
))
5052 current_link_up
= 1;
5054 tg3_setup_flow_control(tp
, 0, 0);
5056 /* Forcing 1000FD link up. */
5057 current_link_up
= 1;
5059 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
5062 tw32_f(MAC_MODE
, tp
->mac_mode
);
5067 return current_link_up
;
5070 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
5073 u16 orig_active_speed
;
5074 u8 orig_active_duplex
;
5076 int current_link_up
;
5079 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
5080 orig_active_speed
= tp
->link_config
.active_speed
;
5081 orig_active_duplex
= tp
->link_config
.active_duplex
;
5083 if (!tg3_flag(tp
, HW_AUTONEG
) &&
5084 netif_carrier_ok(tp
->dev
) &&
5085 tg3_flag(tp
, INIT_COMPLETE
)) {
5086 mac_status
= tr32(MAC_STATUS
);
5087 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
5088 MAC_STATUS_SIGNAL_DET
|
5089 MAC_STATUS_CFG_CHANGED
|
5090 MAC_STATUS_RCVD_CFG
);
5091 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
5092 MAC_STATUS_SIGNAL_DET
)) {
5093 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5094 MAC_STATUS_CFG_CHANGED
));
5099 tw32_f(MAC_TX_AUTO_NEG
, 0);
5101 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
5102 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
5103 tw32_f(MAC_MODE
, tp
->mac_mode
);
5106 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
5107 tg3_init_bcm8002(tp
);
5109 /* Enable link change event even when serdes polling. */
5110 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5113 current_link_up
= 0;
5114 tp
->link_config
.rmt_adv
= 0;
5115 mac_status
= tr32(MAC_STATUS
);
5117 if (tg3_flag(tp
, HW_AUTONEG
))
5118 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
5120 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
5122 tp
->napi
[0].hw_status
->status
=
5123 (SD_STATUS_UPDATED
|
5124 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
5126 for (i
= 0; i
< 100; i
++) {
5127 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5128 MAC_STATUS_CFG_CHANGED
));
5130 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
5131 MAC_STATUS_CFG_CHANGED
|
5132 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
5136 mac_status
= tr32(MAC_STATUS
);
5137 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
5138 current_link_up
= 0;
5139 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
5140 tp
->serdes_counter
== 0) {
5141 tw32_f(MAC_MODE
, (tp
->mac_mode
|
5142 MAC_MODE_SEND_CONFIGS
));
5144 tw32_f(MAC_MODE
, tp
->mac_mode
);
5148 if (current_link_up
== 1) {
5149 tp
->link_config
.active_speed
= SPEED_1000
;
5150 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
5151 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5152 LED_CTRL_LNKLED_OVERRIDE
|
5153 LED_CTRL_1000MBPS_ON
));
5155 tp
->link_config
.active_speed
= SPEED_UNKNOWN
;
5156 tp
->link_config
.active_duplex
= DUPLEX_UNKNOWN
;
5157 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5158 LED_CTRL_LNKLED_OVERRIDE
|
5159 LED_CTRL_TRAFFIC_OVERRIDE
));
5162 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
5163 if (current_link_up
)
5164 netif_carrier_on(tp
->dev
);
5166 netif_carrier_off(tp
->dev
);
5167 tg3_link_report(tp
);
5169 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
5170 if (orig_pause_cfg
!= now_pause_cfg
||
5171 orig_active_speed
!= tp
->link_config
.active_speed
||
5172 orig_active_duplex
!= tp
->link_config
.active_duplex
)
5173 tg3_link_report(tp
);
5179 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
5181 int current_link_up
, err
= 0;
5185 u32 local_adv
, remote_adv
;
5187 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5188 tw32_f(MAC_MODE
, tp
->mac_mode
);
5194 (MAC_STATUS_SYNC_CHANGED
|
5195 MAC_STATUS_CFG_CHANGED
|
5196 MAC_STATUS_MI_COMPLETION
|
5197 MAC_STATUS_LNKSTATE_CHANGED
));
5203 current_link_up
= 0;
5204 current_speed
= SPEED_UNKNOWN
;
5205 current_duplex
= DUPLEX_UNKNOWN
;
5206 tp
->link_config
.rmt_adv
= 0;
5208 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5209 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5210 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
5211 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5212 bmsr
|= BMSR_LSTATUS
;
5214 bmsr
&= ~BMSR_LSTATUS
;
5217 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5219 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
5220 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5221 /* do nothing, just check for link up at the end */
5222 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5225 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5226 newadv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
5227 ADVERTISE_1000XPAUSE
|
5228 ADVERTISE_1000XPSE_ASYM
|
5231 newadv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5232 newadv
|= ethtool_adv_to_mii_adv_x(tp
->link_config
.advertising
);
5234 if ((newadv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
5235 tg3_writephy(tp
, MII_ADVERTISE
, newadv
);
5236 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
5237 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5239 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5240 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
5241 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5248 bmcr
&= ~BMCR_SPEED1000
;
5249 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
5251 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
5252 new_bmcr
|= BMCR_FULLDPLX
;
5254 if (new_bmcr
!= bmcr
) {
5255 /* BMCR_SPEED1000 is a reserved bit that needs
5256 * to be set on write.
5258 new_bmcr
|= BMCR_SPEED1000
;
5260 /* Force a linkdown */
5261 if (netif_carrier_ok(tp
->dev
)) {
5264 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5265 adv
&= ~(ADVERTISE_1000XFULL
|
5266 ADVERTISE_1000XHALF
|
5268 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
5269 tg3_writephy(tp
, MII_BMCR
, bmcr
|
5273 netif_carrier_off(tp
->dev
);
5275 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
5277 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5278 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5279 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
5281 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5282 bmsr
|= BMSR_LSTATUS
;
5284 bmsr
&= ~BMSR_LSTATUS
;
5286 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5290 if (bmsr
& BMSR_LSTATUS
) {
5291 current_speed
= SPEED_1000
;
5292 current_link_up
= 1;
5293 if (bmcr
& BMCR_FULLDPLX
)
5294 current_duplex
= DUPLEX_FULL
;
5296 current_duplex
= DUPLEX_HALF
;
5301 if (bmcr
& BMCR_ANENABLE
) {
5304 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
5305 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
5306 common
= local_adv
& remote_adv
;
5307 if (common
& (ADVERTISE_1000XHALF
|
5308 ADVERTISE_1000XFULL
)) {
5309 if (common
& ADVERTISE_1000XFULL
)
5310 current_duplex
= DUPLEX_FULL
;
5312 current_duplex
= DUPLEX_HALF
;
5314 tp
->link_config
.rmt_adv
=
5315 mii_adv_to_ethtool_adv_x(remote_adv
);
5316 } else if (!tg3_flag(tp
, 5780_CLASS
)) {
5317 /* Link is up via parallel detect */
5319 current_link_up
= 0;
5324 if (current_link_up
== 1 && current_duplex
== DUPLEX_FULL
)
5325 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5327 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
5328 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5329 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
5331 tw32_f(MAC_MODE
, tp
->mac_mode
);
5334 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5336 tp
->link_config
.active_speed
= current_speed
;
5337 tp
->link_config
.active_duplex
= current_duplex
;
5339 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
5340 if (current_link_up
)
5341 netif_carrier_on(tp
->dev
);
5343 netif_carrier_off(tp
->dev
);
5344 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5346 tg3_link_report(tp
);
5351 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
5353 if (tp
->serdes_counter
) {
5354 /* Give autoneg time to complete. */
5355 tp
->serdes_counter
--;
5359 if (!netif_carrier_ok(tp
->dev
) &&
5360 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
5363 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5364 if (bmcr
& BMCR_ANENABLE
) {
5367 /* Select shadow register 0x1f */
5368 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x7c00);
5369 tg3_readphy(tp
, MII_TG3_MISC_SHDW
, &phy1
);
5371 /* Select expansion interrupt status register */
5372 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5373 MII_TG3_DSP_EXP1_INT_STAT
);
5374 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5375 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5377 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
5378 /* We have signal detect and not receiving
5379 * config code words, link is up by parallel
5383 bmcr
&= ~BMCR_ANENABLE
;
5384 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
5385 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5386 tp
->phy_flags
|= TG3_PHYFLG_PARALLEL_DETECT
;
5389 } else if (netif_carrier_ok(tp
->dev
) &&
5390 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
5391 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5394 /* Select expansion interrupt status register */
5395 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5396 MII_TG3_DSP_EXP1_INT_STAT
);
5397 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5401 /* Config code words received, turn on autoneg. */
5402 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5403 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
5405 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5411 static int tg3_setup_phy(struct tg3
*tp
, int force_reset
)
5416 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
5417 err
= tg3_setup_fiber_phy(tp
, force_reset
);
5418 else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
5419 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
5421 err
= tg3_setup_copper_phy(tp
, force_reset
);
5423 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
) {
5426 val
= tr32(TG3_CPMU_CLCK_STAT
) & CPMU_CLCK_STAT_MAC_CLCK_MASK
;
5427 if (val
== CPMU_CLCK_STAT_MAC_CLCK_62_5
)
5429 else if (val
== CPMU_CLCK_STAT_MAC_CLCK_6_25
)
5434 val
= tr32(GRC_MISC_CFG
) & ~GRC_MISC_CFG_PRESCALAR_MASK
;
5435 val
|= (scale
<< GRC_MISC_CFG_PRESCALAR_SHIFT
);
5436 tw32(GRC_MISC_CFG
, val
);
5439 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
5440 (6 << TX_LENGTHS_IPG_SHIFT
);
5441 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
5442 val
|= tr32(MAC_TX_LENGTHS
) &
5443 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
5444 TX_LENGTHS_CNT_DWN_VAL_MSK
);
5446 if (tp
->link_config
.active_speed
== SPEED_1000
&&
5447 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5448 tw32(MAC_TX_LENGTHS
, val
|
5449 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
));
5451 tw32(MAC_TX_LENGTHS
, val
|
5452 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
5454 if (!tg3_flag(tp
, 5705_PLUS
)) {
5455 if (netif_carrier_ok(tp
->dev
)) {
5456 tw32(HOSTCC_STAT_COAL_TICKS
,
5457 tp
->coal
.stats_block_coalesce_usecs
);
5459 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
5463 if (tg3_flag(tp
, ASPM_WORKAROUND
)) {
5464 val
= tr32(PCIE_PWR_MGMT_THRESH
);
5465 if (!netif_carrier_ok(tp
->dev
))
5466 val
= (val
& ~PCIE_PWR_MGMT_L1_THRESH_MSK
) |
5469 val
|= PCIE_PWR_MGMT_L1_THRESH_MSK
;
5470 tw32(PCIE_PWR_MGMT_THRESH
, val
);
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5540 static void tg3_dump_state(struct tg3
*tp
)
5545 regs
= kzalloc(TG3_REG_BLK_SIZE
, GFP_ATOMIC
);
5547 netdev_err(tp
->dev
, "Failed allocating register dump buffer\n");
5551 if (tg3_flag(tp
, PCI_EXPRESS
)) {
5552 /* Read up to but not including private PCI registers */
5553 for (i
= 0; i
< TG3_PCIE_TLDLPL_PORT
; i
+= sizeof(u32
))
5554 regs
[i
/ sizeof(u32
)] = tr32(i
);
5556 tg3_dump_legacy_regs(tp
, regs
);
5558 for (i
= 0; i
< TG3_REG_BLK_SIZE
/ sizeof(u32
); i
+= 4) {
5559 if (!regs
[i
+ 0] && !regs
[i
+ 1] &&
5560 !regs
[i
+ 2] && !regs
[i
+ 3])
5563 netdev_err(tp
->dev
, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5565 regs
[i
+ 0], regs
[i
+ 1], regs
[i
+ 2], regs
[i
+ 3]);
5570 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
5571 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
5573 /* SW status block */
5575 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5577 tnapi
->hw_status
->status
,
5578 tnapi
->hw_status
->status_tag
,
5579 tnapi
->hw_status
->rx_jumbo_consumer
,
5580 tnapi
->hw_status
->rx_consumer
,
5581 tnapi
->hw_status
->rx_mini_consumer
,
5582 tnapi
->hw_status
->idx
[0].rx_producer
,
5583 tnapi
->hw_status
->idx
[0].tx_consumer
);
5586 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5588 tnapi
->last_tag
, tnapi
->last_irq_tag
,
5589 tnapi
->tx_prod
, tnapi
->tx_cons
, tnapi
->tx_pending
,
5591 tnapi
->prodring
.rx_std_prod_idx
,
5592 tnapi
->prodring
.rx_std_cons_idx
,
5593 tnapi
->prodring
.rx_jmb_prod_idx
,
5594 tnapi
->prodring
.rx_jmb_cons_idx
);
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
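/* Worked example (illustrative, not from the original source): with a
 * TG3_TX_RING_SIZE of 512 and tx_pending of 511, tx_prod == 5 and
 * tx_cons == 508 give (5 - 508) & 511 == 9 descriptors in flight, so
 * 511 - 9 == 502 descriptors are still free; the mask keeps the
 * subtraction correct across the producer/consumer wrap-around.
 */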
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
5634 struct tg3
*tp
= tnapi
->tp
;
5635 u32 hw_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
5636 u32 sw_idx
= tnapi
->tx_cons
;
5637 struct netdev_queue
*txq
;
5638 int index
= tnapi
- tp
->napi
;
5639 unsigned int pkts_compl
= 0, bytes_compl
= 0;
5641 if (tg3_flag(tp
, ENABLE_TSS
))
5644 txq
= netdev_get_tx_queue(tp
->dev
, index
);
5646 while (sw_idx
!= hw_idx
) {
5647 struct tg3_tx_ring_info
*ri
= &tnapi
->tx_buffers
[sw_idx
];
5648 struct sk_buff
*skb
= ri
->skb
;
5651 if (unlikely(skb
== NULL
)) {
5656 pci_unmap_single(tp
->pdev
,
5657 dma_unmap_addr(ri
, mapping
),
5663 while (ri
->fragmented
) {
5664 ri
->fragmented
= false;
5665 sw_idx
= NEXT_TX(sw_idx
);
5666 ri
= &tnapi
->tx_buffers
[sw_idx
];
5669 sw_idx
= NEXT_TX(sw_idx
);
5671 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
5672 ri
= &tnapi
->tx_buffers
[sw_idx
];
5673 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
5676 pci_unmap_page(tp
->pdev
,
5677 dma_unmap_addr(ri
, mapping
),
5678 skb_frag_size(&skb_shinfo(skb
)->frags
[i
]),
5681 while (ri
->fragmented
) {
5682 ri
->fragmented
= false;
5683 sw_idx
= NEXT_TX(sw_idx
);
5684 ri
= &tnapi
->tx_buffers
[sw_idx
];
5687 sw_idx
= NEXT_TX(sw_idx
);
5691 bytes_compl
+= skb
->len
;
5695 if (unlikely(tx_bug
)) {
5701 netdev_tx_completed_queue(txq
, pkts_compl
, bytes_compl
);
5703 tnapi
->tx_cons
= sw_idx
;
5705 /* Need to make the tx_cons update visible to tg3_start_xmit()
5706 * before checking for netif_queue_stopped(). Without the
5707 * memory barrier, there is a small possibility that tg3_start_xmit()
5708 * will miss it and cause the queue to be stopped forever.
5712 if (unlikely(netif_tx_queue_stopped(txq
) &&
5713 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))) {
5714 __netif_tx_lock(txq
, smp_processor_id());
5715 if (netif_tx_queue_stopped(txq
) &&
5716 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))
5717 netif_tx_wake_queue(txq
);
5718 __netif_tx_unlock(txq
);
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
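/* Illustrative note (not part of the original source): buffers whose padded
 * size fits within a page are carved out with netdev_alloc_frag() in
 * tg3_alloc_rx_data() below and are released here by dropping the page
 * reference; larger buffers fall back to kmalloc()/kfree(), which is why
 * tg3_frag_free() needs the is_frag hint.
 */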
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;
5766 switch (opaque_key
) {
5767 case RXD_OPAQUE_RING_STD
:
5768 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
5769 desc
= &tpr
->rx_std
[dest_idx
];
5770 map
= &tpr
->rx_std_buffers
[dest_idx
];
5771 data_size
= tp
->rx_pkt_map_sz
;
5774 case RXD_OPAQUE_RING_JUMBO
:
5775 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
5776 desc
= &tpr
->rx_jmb
[dest_idx
].std
;
5777 map
= &tpr
->rx_jmb_buffers
[dest_idx
];
5778 data_size
= TG3_RX_JMB_MAP_SZ
;
5785 /* Do not overwrite any of the map or rp information
5786 * until we are sure we can commit to a new buffer.
5788 * Callers depend upon this behavior and assume that
5789 * we leave everything unchanged if we fail.
5791 skb_size
= SKB_DATA_ALIGN(data_size
+ TG3_RX_OFFSET(tp
)) +
5792 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
5793 if (skb_size
<= PAGE_SIZE
) {
5794 data
= netdev_alloc_frag(skb_size
);
5795 *frag_size
= skb_size
;
5797 data
= kmalloc(skb_size
, GFP_ATOMIC
);
5803 mapping
= pci_map_single(tp
->pdev
,
5804 data
+ TG3_RX_OFFSET(tp
),
5806 PCI_DMA_FROMDEVICE
);
5807 if (unlikely(pci_dma_mapping_error(tp
->pdev
, mapping
))) {
5808 tg3_frag_free(skb_size
<= PAGE_SIZE
, data
);
5813 dma_unmap_addr_set(map
, mapping
, mapping
);
5815 desc
->addr_hi
= ((u64
)mapping
>> 32);
5816 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
5830 struct tg3
*tp
= tnapi
->tp
;
5831 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
5832 struct ring_info
*src_map
, *dest_map
;
5833 struct tg3_rx_prodring_set
*spr
= &tp
->napi
[0].prodring
;
5836 switch (opaque_key
) {
5837 case RXD_OPAQUE_RING_STD
:
5838 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
5839 dest_desc
= &dpr
->rx_std
[dest_idx
];
5840 dest_map
= &dpr
->rx_std_buffers
[dest_idx
];
5841 src_desc
= &spr
->rx_std
[src_idx
];
5842 src_map
= &spr
->rx_std_buffers
[src_idx
];
5845 case RXD_OPAQUE_RING_JUMBO
:
5846 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
5847 dest_desc
= &dpr
->rx_jmb
[dest_idx
].std
;
5848 dest_map
= &dpr
->rx_jmb_buffers
[dest_idx
];
5849 src_desc
= &spr
->rx_jmb
[src_idx
].std
;
5850 src_map
= &spr
->rx_jmb_buffers
[src_idx
];
5857 dest_map
->data
= src_map
->data
;
5858 dma_unmap_addr_set(dest_map
, mapping
,
5859 dma_unmap_addr(src_map
, mapping
));
5860 dest_desc
->addr_hi
= src_desc
->addr_hi
;
5861 dest_desc
->addr_lo
= src_desc
->addr_lo
;
5863 /* Ensure that the update to the skb happens after the physical
5864 * addresses have been transferred to the new BD location.
5868 src_map
->data
= NULL
;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
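/* Illustrative note (not part of the original source): tg3_rx() below walks
 * the status ring from tnapi->rx_rcb_ptr up to the hardware producer index;
 * each status entry carries an opaque cookie naming the producer ring and
 * slot the buffer came from, which is how the driver locates the ring_info
 * to unmap and the slot where a replacement buffer should be posted.
 */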
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
5897 struct tg3
*tp
= tnapi
->tp
;
5898 u32 work_mask
, rx_std_posted
= 0;
5899 u32 std_prod_idx
, jmb_prod_idx
;
5900 u32 sw_idx
= tnapi
->rx_rcb_ptr
;
5903 struct tg3_rx_prodring_set
*tpr
= &tnapi
->prodring
;
5905 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
5907 * We need to order the read of hw_idx and the read of
5908 * the opaque cookie.
5913 std_prod_idx
= tpr
->rx_std_prod_idx
;
5914 jmb_prod_idx
= tpr
->rx_jmb_prod_idx
;
5915 while (sw_idx
!= hw_idx
&& budget
> 0) {
5916 struct ring_info
*ri
;
5917 struct tg3_rx_buffer_desc
*desc
= &tnapi
->rx_rcb
[sw_idx
];
5919 struct sk_buff
*skb
;
5920 dma_addr_t dma_addr
;
5921 u32 opaque_key
, desc_idx
, *post_ptr
;
5924 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
5925 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
5926 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
5927 ri
= &tp
->napi
[0].prodring
.rx_std_buffers
[desc_idx
];
5928 dma_addr
= dma_unmap_addr(ri
, mapping
);
5930 post_ptr
= &std_prod_idx
;
5932 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
5933 ri
= &tp
->napi
[0].prodring
.rx_jmb_buffers
[desc_idx
];
5934 dma_addr
= dma_unmap_addr(ri
, mapping
);
5936 post_ptr
= &jmb_prod_idx
;
5938 goto next_pkt_nopost
;
5940 work_mask
|= opaque_key
;
5942 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
5943 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
5945 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
5946 desc_idx
, *post_ptr
);
5948 /* Other statistics kept track of by card. */
5953 prefetch(data
+ TG3_RX_OFFSET(tp
));
5954 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) -
5957 if (len
> TG3_RX_COPY_THRESH(tp
)) {
5959 unsigned int frag_size
;
5961 skb_size
= tg3_alloc_rx_data(tp
, tpr
, opaque_key
,
5962 *post_ptr
, &frag_size
);
5966 pci_unmap_single(tp
->pdev
, dma_addr
, skb_size
,
5967 PCI_DMA_FROMDEVICE
);
5969 skb
= build_skb(data
, frag_size
);
5971 tg3_frag_free(frag_size
!= 0, data
);
5972 goto drop_it_no_recycle
;
5974 skb_reserve(skb
, TG3_RX_OFFSET(tp
));
5975 /* Ensure that the update to the data happens
5976 * after the usage of the old DMA mapping.
5983 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
5984 desc_idx
, *post_ptr
);
5986 skb
= netdev_alloc_skb(tp
->dev
,
5987 len
+ TG3_RAW_IP_ALIGN
);
5989 goto drop_it_no_recycle
;
5991 skb_reserve(skb
, TG3_RAW_IP_ALIGN
);
5992 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
5994 data
+ TG3_RX_OFFSET(tp
),
5996 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6000 if ((tp
->dev
->features
& NETIF_F_RXCSUM
) &&
6001 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
6002 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
6003 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
6004 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
6006 skb_checksum_none_assert(skb
);
6008 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
6010 if (len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
6011 skb
->protocol
!= htons(ETH_P_8021Q
)) {
6013 goto drop_it_no_recycle
;
6016 if (desc
->type_flags
& RXD_FLAG_VLAN
&&
6017 !(tp
->rx_mode
& RX_MODE_KEEP_VLAN_TAG
))
6018 __vlan_hwaccel_put_tag(skb
,
6019 desc
->err_vlan
& RXD_VLAN_MASK
);
6021 napi_gro_receive(&tnapi
->napi
, skb
);
6029 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
6030 tpr
->rx_std_prod_idx
= std_prod_idx
&
6031 tp
->rx_std_ring_mask
;
6032 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6033 tpr
->rx_std_prod_idx
);
6034 work_mask
&= ~RXD_OPAQUE_RING_STD
;
6039 sw_idx
&= tp
->rx_ret_ring_mask
;
6041 /* Refresh hw_idx to see if there is new work */
6042 if (sw_idx
== hw_idx
) {
6043 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6048 /* ACK the status ring. */
6049 tnapi
->rx_rcb_ptr
= sw_idx
;
6050 tw32_rx_mbox(tnapi
->consmbox
, sw_idx
);
6052 /* Refill RX ring(s). */
6053 if (!tg3_flag(tp
, ENABLE_RSS
)) {
6054 /* Sync BD data before updating mailbox */
6057 if (work_mask
& RXD_OPAQUE_RING_STD
) {
6058 tpr
->rx_std_prod_idx
= std_prod_idx
&
6059 tp
->rx_std_ring_mask
;
6060 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6061 tpr
->rx_std_prod_idx
);
6063 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
6064 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
&
6065 tp
->rx_jmb_ring_mask
;
6066 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6067 tpr
->rx_jmb_prod_idx
);
6070 } else if (work_mask
) {
6071 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6072 * updated before the producer indices can be updated.
6076 tpr
->rx_std_prod_idx
= std_prod_idx
& tp
->rx_std_ring_mask
;
6077 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
& tp
->rx_jmb_ring_mask
;
6079 if (tnapi
!= &tp
->napi
[1]) {
6080 tp
->rx_refill
= true;
6081 napi_schedule(&tp
->napi
[1].napi
);
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
*tp
,
6113 struct tg3_rx_prodring_set
*dpr
,
6114 struct tg3_rx_prodring_set
*spr
)
6116 u32 si
, di
, cpycnt
, src_prod_idx
;
6120 src_prod_idx
= spr
->rx_std_prod_idx
;
6122 /* Make sure updates to the rx_std_buffers[] entries and the
6123 * standard producer index are seen in the correct order.
6127 if (spr
->rx_std_cons_idx
== src_prod_idx
)
6130 if (spr
->rx_std_cons_idx
< src_prod_idx
)
6131 cpycnt
= src_prod_idx
- spr
->rx_std_cons_idx
;
6133 cpycnt
= tp
->rx_std_ring_mask
+ 1 -
6134 spr
->rx_std_cons_idx
;
6136 cpycnt
= min(cpycnt
,
6137 tp
->rx_std_ring_mask
+ 1 - dpr
->rx_std_prod_idx
);
6139 si
= spr
->rx_std_cons_idx
;
6140 di
= dpr
->rx_std_prod_idx
;
6142 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6143 if (dpr
->rx_std_buffers
[i
].data
) {
6153 /* Ensure that updates to the rx_std_buffers ring and the
6154 * shadowed hardware producer ring from tg3_recycle_skb() are
6155 * ordered correctly WRT the skb check above.
6159 memcpy(&dpr
->rx_std_buffers
[di
],
6160 &spr
->rx_std_buffers
[si
],
6161 cpycnt
* sizeof(struct ring_info
));
6163 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6164 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6165 sbd
= &spr
->rx_std
[si
];
6166 dbd
= &dpr
->rx_std
[di
];
6167 dbd
->addr_hi
= sbd
->addr_hi
;
6168 dbd
->addr_lo
= sbd
->addr_lo
;
6171 spr
->rx_std_cons_idx
= (spr
->rx_std_cons_idx
+ cpycnt
) &
6172 tp
->rx_std_ring_mask
;
6173 dpr
->rx_std_prod_idx
= (dpr
->rx_std_prod_idx
+ cpycnt
) &
6174 tp
->rx_std_ring_mask
;
6178 src_prod_idx
= spr
->rx_jmb_prod_idx
;
6180 /* Make sure updates to the rx_jmb_buffers[] entries and
6181 * the jumbo producer index are seen in the correct order.
6185 if (spr
->rx_jmb_cons_idx
== src_prod_idx
)
6188 if (spr
->rx_jmb_cons_idx
< src_prod_idx
)
6189 cpycnt
= src_prod_idx
- spr
->rx_jmb_cons_idx
;
6191 cpycnt
= tp
->rx_jmb_ring_mask
+ 1 -
6192 spr
->rx_jmb_cons_idx
;
6194 cpycnt
= min(cpycnt
,
6195 tp
->rx_jmb_ring_mask
+ 1 - dpr
->rx_jmb_prod_idx
);
6197 si
= spr
->rx_jmb_cons_idx
;
6198 di
= dpr
->rx_jmb_prod_idx
;
6200 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6201 if (dpr
->rx_jmb_buffers
[i
].data
) {
6211 /* Ensure that updates to the rx_jmb_buffers ring and the
6212 * shadowed hardware producer ring from tg3_recycle_skb() are
6213 * ordered correctly WRT the skb check above.
6217 memcpy(&dpr
->rx_jmb_buffers
[di
],
6218 &spr
->rx_jmb_buffers
[si
],
6219 cpycnt
* sizeof(struct ring_info
));
6221 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6222 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6223 sbd
= &spr
->rx_jmb
[si
].std
;
6224 dbd
= &dpr
->rx_jmb
[di
].std
;
6225 dbd
->addr_hi
= sbd
->addr_hi
;
6226 dbd
->addr_lo
= sbd
->addr_lo
;
6229 spr
->rx_jmb_cons_idx
= (spr
->rx_jmb_cons_idx
+ cpycnt
) &
6230 tp
->rx_jmb_ring_mask
;
6231 dpr
->rx_jmb_prod_idx
= (dpr
->rx_jmb_prod_idx
+ cpycnt
) &
6232 tp
->rx_jmb_ring_mask
;
6238 static int tg3_poll_work(struct tg3_napi
*tnapi
, int work_done
, int budget
)
6240 struct tg3
*tp
= tnapi
->tp
;
6242 /* run TX completion thread */
6243 if (tnapi
->hw_status
->idx
[0].tx_consumer
!= tnapi
->tx_cons
) {
6245 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
6249 if (!tnapi
->rx_rcb_prod_idx
)
6252 /* run RX thread, within the bounds set by NAPI.
6253 * All RX "locking" is done by ensuring outside
6254 * code synchronizes with tg3->napi.poll()
6256 if (*(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
6257 work_done
+= tg3_rx(tnapi
, budget
- work_done
);
6259 if (tg3_flag(tp
, ENABLE_RSS
) && tnapi
== &tp
->napi
[1]) {
6260 struct tg3_rx_prodring_set
*dpr
= &tp
->napi
[0].prodring
;
6262 u32 std_prod_idx
= dpr
->rx_std_prod_idx
;
6263 u32 jmb_prod_idx
= dpr
->rx_jmb_prod_idx
;
6265 tp
->rx_refill
= false;
6266 for (i
= 1; i
< tp
->irq_cnt
; i
++)
6267 err
|= tg3_rx_prodring_xfer(tp
, dpr
,
6268 &tp
->napi
[i
].prodring
);
6272 if (std_prod_idx
!= dpr
->rx_std_prod_idx
)
6273 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6274 dpr
->rx_std_prod_idx
);
6276 if (jmb_prod_idx
!= dpr
->rx_jmb_prod_idx
)
6277 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6278 dpr
->rx_jmb_prod_idx
);
6283 tw32_f(HOSTCC_MODE
, tp
->coal_now
);
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6302 static int tg3_poll_msix(struct napi_struct
*napi
, int budget
)
6304 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
6305 struct tg3
*tp
= tnapi
->tp
;
6307 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6310 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
6312 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
6315 if (unlikely(work_done
>= budget
))
6318 /* tp->last_tag is used in tg3_int_reenable() below
6319 * to tell the hw how much work has been processed,
6320 * so we must read it before checking for more work.
6322 tnapi
->last_tag
= sblk
->status_tag
;
6323 tnapi
->last_irq_tag
= tnapi
->last_tag
;
6326 /* check for RX/TX work to do */
6327 if (likely(sblk
->idx
[0].tx_consumer
== tnapi
->tx_cons
&&
6328 *(tnapi
->rx_rcb_prod_idx
) == tnapi
->rx_rcb_ptr
)) {
6330 /* This test here is not race free, but will reduce
6331 * the number of interrupts by looping again.
6333 if (tnapi
== &tp
->napi
[1] && tp
->rx_refill
)
6336 napi_complete(napi
);
6337 /* Reenable interrupts. */
6338 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
6340 /* This test here is synchronized by napi_schedule()
6341 * and napi_complete() to close the race condition.
6343 if (unlikely(tnapi
== &tp
->napi
[1] && tp
->rx_refill
)) {
6344 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
6345 HOSTCC_MODE_ENABLE
|
6356 /* work_done is guaranteed to be less than budget. */
6357 napi_complete(napi
);
6358 tg3_reset_task_schedule(tp
);
6362 static void tg3_process_error(struct tg3
*tp
)
6365 bool real_error
= false;
6367 if (tg3_flag(tp
, ERROR_PROCESSED
))
6370 /* Check Flow Attention register */
6371 val
= tr32(HOSTCC_FLOW_ATTN
);
6372 if (val
& ~HOSTCC_FLOW_ATTN_MBUF_LWM
) {
6373 netdev_err(tp
->dev
, "FLOW Attention error. Resetting chip.\n");
6377 if (tr32(MSGINT_STATUS
) & ~MSGINT_STATUS_MSI_REQ
) {
6378 netdev_err(tp
->dev
, "MSI Status error. Resetting chip.\n");
6382 if (tr32(RDMAC_STATUS
) || tr32(WDMAC_STATUS
)) {
6383 netdev_err(tp
->dev
, "DMA Status error. Resetting chip.\n");
6392 tg3_flag_set(tp
, ERROR_PROCESSED
);
6393 tg3_reset_task_schedule(tp
);
6396 static int tg3_poll(struct napi_struct
*napi
, int budget
)
6398 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
6399 struct tg3
*tp
= tnapi
->tp
;
6401 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6404 if (sblk
->status
& SD_STATUS_ERROR
)
6405 tg3_process_error(tp
);
6409 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
6411 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
6414 if (unlikely(work_done
>= budget
))
6417 if (tg3_flag(tp
, TAGGED_STATUS
)) {
6418 /* tp->last_tag is used in tg3_int_reenable() below
6419 * to tell the hw how much work has been processed,
6420 * so we must read it before checking for more work.
6422 tnapi
->last_tag
= sblk
->status_tag
;
6423 tnapi
->last_irq_tag
= tnapi
->last_tag
;
6426 sblk
->status
&= ~SD_STATUS_UPDATED
;
6428 if (likely(!tg3_has_work(tnapi
))) {
6429 napi_complete(napi
);
6430 tg3_int_reenable(tnapi
);
6438 /* work_done is guaranteed to be less than budget. */
6439 napi_complete(napi
);
6440 tg3_reset_task_schedule(tp
);
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
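/* Hypothetical usage sketch (not taken from the original file): paths that
 * reconfigure the chip typically bracket the work like this, passing a
 * non-zero irq_sync argument when the IRQ handlers must also be quiesced:
 *
 *	tg3_full_lock(tp, 1);
 *	...reprogram rings and registers...
 *	tg3_full_unlock(tp);
 */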
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
6571 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
)
6573 struct tg3_napi
*tnapi
= dev_id
;
6574 struct tg3
*tp
= tnapi
->tp
;
6575 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6576 unsigned int handled
= 1;
6578 /* In INTx mode, it is possible for the interrupt to arrive at
6579 * the CPU before the status block posted prior to the interrupt.
6580 * Reading the PCI State register will confirm whether the
6581 * interrupt is ours and will flush the status block.
6583 if (unlikely(!(sblk
->status
& SD_STATUS_UPDATED
))) {
6584 if (tg3_flag(tp
, CHIP_RESETTING
) ||
6585 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
6592 * Writing any value to intr-mbox-0 clears PCI INTA# and
6593 * chip-internal interrupt pending events.
6594 * Writing non-zero to intr-mbox-0 additional tells the
6595 * NIC to stop sending us irqs, engaging "in-intr-handler"
6598 * Flush the mailbox to de-assert the IRQ immediately to prevent
6599 * spurious interrupts. The flush impacts performance but
6600 * excessive spurious interrupts can be worse in some cases.
6602 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
6603 if (tg3_irq_sync(tp
))
6605 sblk
->status
&= ~SD_STATUS_UPDATED
;
6606 if (likely(tg3_has_work(tnapi
))) {
6607 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
6608 napi_schedule(&tnapi
->napi
);
6610 /* No work, shared interrupt perhaps? re-enable
6611 * interrupts, and flush that PCI write
6613 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
6617 return IRQ_RETVAL(handled
);
6620 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
)
6622 struct tg3_napi
*tnapi
= dev_id
;
6623 struct tg3
*tp
= tnapi
->tp
;
6624 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6625 unsigned int handled
= 1;
6627 /* In INTx mode, it is possible for the interrupt to arrive at
6628 * the CPU before the status block posted prior to the interrupt.
6629 * Reading the PCI State register will confirm whether the
6630 * interrupt is ours and will flush the status block.
6632 if (unlikely(sblk
->status_tag
== tnapi
->last_irq_tag
)) {
6633 if (tg3_flag(tp
, CHIP_RESETTING
) ||
6634 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
6641 * writing any value to intr-mbox-0 clears PCI INTA# and
6642 * chip-internal interrupt pending events.
6643 * writing non-zero to intr-mbox-0 additional tells the
6644 * NIC to stop sending us irqs, engaging "in-intr-handler"
6647 * Flush the mailbox to de-assert the IRQ immediately to prevent
6648 * spurious interrupts. The flush impacts performance but
6649 * excessive spurious interrupts can be worse in some cases.
6651 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
6654 * In a shared interrupt configuration, sometimes other devices'
6655 * interrupts will scream. We record the current status tag here
6656 * so that the above check can report that the screaming interrupts
6657 * are unhandled. Eventually they will be silenced.
6659 tnapi
->last_irq_tag
= sblk
->status_tag
;
6661 if (tg3_irq_sync(tp
))
6664 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
6666 napi_schedule(&tnapi
->napi
);
6669 return IRQ_RETVAL(handled
);
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
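/* Worked example (illustrative, not from the original source): a mapping at
 * 0xffffe000 with len 0x3000 has base > 0xffffdcc0, and base + len + 8 wraps
 * to 0x00001008, which is below base, so the test flags a buffer that
 * straddles the 4GB boundary.  Truncating the mapping to u32 first is what
 * makes the same check work at 8G, 12G, and every later boundary.
 */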
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
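/* Illustrative note (not part of the original source): tg3_tx_frag_set()
 * below calls this helper once per DMA chunk; for a plain 1514-byte frame
 * mapped in a single piece, with no VLAN or TSO, the descriptor would end
 * up with len_flags == (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END and
 * vlan_tag == 0.
 */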
6741 static bool tg3_tx_frag_set(struct tg3_napi
*tnapi
, u32
*entry
, u32
*budget
,
6742 dma_addr_t map
, u32 len
, u32 flags
,
6745 struct tg3
*tp
= tnapi
->tp
;
6748 if (tg3_flag(tp
, SHORT_DMA_BUG
) && len
<= 8)
6751 if (tg3_4g_overflow_test(map
, len
))
6754 if (tg3_40bit_overflow_test(tp
, map
, len
))
6757 if (tp
->dma_limit
) {
6758 u32 prvidx
= *entry
;
6759 u32 tmp_flag
= flags
& ~TXD_FLAG_END
;
6760 while (len
> tp
->dma_limit
&& *budget
) {
6761 u32 frag_len
= tp
->dma_limit
;
6762 len
-= tp
->dma_limit
;
6764 /* Avoid the 8byte DMA problem */
6766 len
+= tp
->dma_limit
/ 2;
6767 frag_len
= tp
->dma_limit
/ 2;
6770 tnapi
->tx_buffers
[*entry
].fragmented
= true;
6772 tg3_tx_set_bd(&tnapi
->tx_ring
[*entry
], map
,
6773 frag_len
, tmp_flag
, mss
, vlan
);
6776 *entry
= NEXT_TX(*entry
);
6783 tg3_tx_set_bd(&tnapi
->tx_ring
[*entry
], map
,
6784 len
, flags
, mss
, vlan
);
6786 *entry
= NEXT_TX(*entry
);
6789 tnapi
->tx_buffers
[prvidx
].fragmented
= false;
6793 tg3_tx_set_bd(&tnapi
->tx_ring
[*entry
], map
,
6794 len
, flags
, mss
, vlan
);
6795 *entry
= NEXT_TX(*entry
);
6801 static void tg3_tx_skb_unmap(struct tg3_napi
*tnapi
, u32 entry
, int last
)
6804 struct sk_buff
*skb
;
6805 struct tg3_tx_ring_info
*txb
= &tnapi
->tx_buffers
[entry
];
6810 pci_unmap_single(tnapi
->tp
->pdev
,
6811 dma_unmap_addr(txb
, mapping
),
6815 while (txb
->fragmented
) {
6816 txb
->fragmented
= false;
6817 entry
= NEXT_TX(entry
);
6818 txb
= &tnapi
->tx_buffers
[entry
];
6821 for (i
= 0; i
<= last
; i
++) {
6822 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6824 entry
= NEXT_TX(entry
);
6825 txb
= &tnapi
->tx_buffers
[entry
];
6827 pci_unmap_page(tnapi
->tp
->pdev
,
6828 dma_unmap_addr(txb
, mapping
),
6829 skb_frag_size(frag
), PCI_DMA_TODEVICE
);
6831 while (txb
->fragmented
) {
6832 txb
->fragmented
= false;
6833 entry
= NEXT_TX(entry
);
6834 txb
= &tnapi
->tx_buffers
[entry
];
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
6845 struct tg3
*tp
= tnapi
->tp
;
6846 struct sk_buff
*new_skb
, *skb
= *pskb
;
6847 dma_addr_t new_addr
= 0;
6850 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
6851 new_skb
= skb_copy(skb
, GFP_ATOMIC
);
6853 int more_headroom
= 4 - ((unsigned long)skb
->data
& 3);
6855 new_skb
= skb_copy_expand(skb
,
6856 skb_headroom(skb
) + more_headroom
,
6857 skb_tailroom(skb
), GFP_ATOMIC
);
6863 /* New SKB is guaranteed to be linear. */
6864 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
6866 /* Make sure the mapping succeeded */
6867 if (pci_dma_mapping_error(tp
->pdev
, new_addr
)) {
6868 dev_kfree_skb(new_skb
);
6871 u32 save_entry
= *entry
;
6873 base_flags
|= TXD_FLAG_END
;
6875 tnapi
->tx_buffers
[*entry
].skb
= new_skb
;
6876 dma_unmap_addr_set(&tnapi
->tx_buffers
[*entry
],
6879 if (tg3_tx_frag_set(tnapi
, entry
, budget
, new_addr
,
6880 new_skb
->len
, base_flags
,
6882 tg3_tx_skb_unmap(tnapi
, save_entry
, -1);
6883 dev_kfree_skb(new_skb
);
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
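/* Editorial note: gso_segs * 3 is a deliberately pessimistic estimate of how
 * many descriptors the software-segmented packets may need; if that many
 * entries are not free, the queue is stopped and the packet is pushed back
 * with NETDEV_TX_BUSY rather than risking a ring overflow mid-burst.
 */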
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}
	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP, 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;
	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
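/* Editorial note: the transmit path above queues descriptors optimistically
 * and only falls back if a mapping trips one of the hardware DMA bugs.  In
 * that case everything queued so far is unmapped, the producer index and
 * budget are rewound, and tigon3_dma_hwbug_workaround() re-sends the packet
 * from a freshly copied, linear skb, relying on tg3_tx_frag_set() to split
 * it into descriptors the hardware can handle.
 */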
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}
static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
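/* Editorial note: the descriptor "invariants" written above never change at
 * runtime; in particular the opaque field encodes which producer ring
 * (standard or jumbo) and which index a descriptor belongs to, so the
 * completion path can recover the originating buffer slot directly from the
 * descriptor the hardware returns.
 */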
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					sizeof(struct tg3_tx_ring_info) *
					TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							    &tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			if (tg3_flag(tp, ENABLE_RSS)) {
				tnapi->rx_rcb_prod_idx = NULL;
				break;
			}
			/* Fall through */
		case 1:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
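/* Editorial note: with RSS, only vectors 1-4 own an rx return ring, and the
 * switch above points each vector's rx_rcb_prod_idx at the status block
 * field the hardware repurposes for that ring's producer index.  Vector 0
 * gets NULL so the polling code knows it has no rx return work of its own.
 */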
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}
	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3)
		tw32(0x5000, 0x400);

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	if (err)
		return err;

	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
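/* Editorial note: each BDINFO control block in NIC SRAM holds the host DMA
 * address of a ring (split into high/low 32-bit words), a maxlen/flags word,
 * and, on pre-5705 chips, the NIC-local address of the descriptors; the
 * callers below use this helper to point the hardware at the tx and rx
 * return rings.
 */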
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
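/* Editorial note: the per-vector host coalescing registers are laid out as a
 * fixed-size block per MSI-X vector, which is why each VEC1 register name is
 * offset by i * 0x18 (24 bytes) above; unused vector slots are explicitly
 * cleared so stale settings cannot survive a change in the IRQ count.
 */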
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
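/* Editorial note: this is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320), here computed over a 6-byte multicast address; only a few of
 * the resulting bits are used below to pick a position in the 128-bit
 * multicast hash filter.
 */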
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
	}
}
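/* Editorial note (illustrative numbers, not from the original sources): the
 * hash above inverts the CRC and keeps the low 7 bits to get a filter bit
 * index 0-127; bits 6:5 of that index select one of the four 32-bit
 * MAC_HASH registers and bits 4:0 select the bit within it.  A CRC of
 * 0xffffff80, for example, gives bit index 0x7f, i.e. bit 31 of
 * MAC_HASH_REG_3.
 */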
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] =
			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->irq_cnt <= 2) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
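/* Editorial note: the RSS indirection table is written eight entries at a
 * time, each entry occupying a 4-bit nibble of a 32-bit register, with
 * consecutive registers 4 bytes apart starting at MAC_RSS_INDIR_TBL_0.
 */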
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}
	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS))
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);
8990 /* Initialize MAC address and backoff seed. */
8991 __tg3_set_mac_addr(tp
, 0);
8993 /* MTU + ethernet header + FCS + optional VLAN tag */
8994 tw32(MAC_RX_MTU_SIZE
,
8995 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);
9024 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
)
9025 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
9027 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
9028 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
9029 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
9030 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
9031 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
9032 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
9034 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
9035 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
9036 if (tg3_flag(tp
, TSO_CAPABLE
) &&
9037 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
9038 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
9039 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
9040 !tg3_flag(tp
, IS_5788
)) {
9041 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9045 if (tg3_flag(tp
, PCI_EXPRESS
))
9046 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9048 if (tg3_flag(tp
, HW_TSO_1
) ||
9049 tg3_flag(tp
, HW_TSO_2
) ||
9050 tg3_flag(tp
, HW_TSO_3
))
9051 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
9053 if (tg3_flag(tp
, 57765_PLUS
) ||
9054 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
9055 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
9056 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
9058 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
9059 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
9061 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
9062 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
9063 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
9064 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
9065 tg3_flag(tp
, 57765_PLUS
)) {
9066 val
= tr32(TG3_RDMA_RSRVCTRL_REG
);
9067 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
) {
9068 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
9069 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
9070 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
9071 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
9072 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
9073 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
9075 tw32(TG3_RDMA_RSRVCTRL_REG
,
9076 val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
9079 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
9080 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
9081 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
9082 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
|
9083 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
9084 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);
	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}
9138 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
9140 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
9141 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
9142 if (!tg3_flag(tp
, 5705_PLUS
))
9143 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
9145 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
9146 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
9147 /* reset to prevent losing 1st rx packet intermittently */
9148 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
9152 tp
->mac_mode
|= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
9153 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
|
9154 MAC_MODE_FHDE_ENABLE
;
9155 if (tg3_flag(tp
, ENABLE_APE
))
9156 tp
->mac_mode
|= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
9157 if (!tg3_flag(tp
, 5705_PLUS
) &&
9158 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9159 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
9160 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
9161 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
9164 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9165 * If TG3_FLAG_IS_NIC is zero, we should read the
9166 * register to preserve the GPIO settings for LOMs. The GPIOs,
9167 * whether used as inputs or outputs, are set by boot code after
9170 if (!tg3_flag(tp
, IS_NIC
)) {
9173 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
9174 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
9175 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
9177 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
9178 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
9179 GRC_LCLCTRL_GPIO_OUTPUT3
;
9181 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
9182 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
9184 tp
->grc_local_ctrl
&= ~gpio_mask
;
9185 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
9187 /* GPIO1 must be driven high for eeprom write protect */
9188 if (tg3_flag(tp
, EEPROM_WRITE_PROT
))
9189 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
9190 GRC_LCLCTRL_GPIO_OUTPUT1
);
9192 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9195 if (tg3_flag(tp
, USING_MSIX
)) {
9196 val
= tr32(MSGINT_MODE
);
9197 val
|= MSGINT_MODE_ENABLE
;
9198 if (tp
->irq_cnt
> 1)
9199 val
|= MSGINT_MODE_MULTIVEC_EN
;
9200 if (!tg3_flag(tp
, 1SHOT_MSI
))
9201 val
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
9202 tw32(MSGINT_MODE
, val
);
9205 if (!tg3_flag(tp
, 5705_PLUS
)) {
9206 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
9210 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
9211 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
9212 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
9213 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
9214 WDMAC_MODE_LNGREAD_ENAB
);
9216 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
9217 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
9218 if (tg3_flag(tp
, TSO_CAPABLE
) &&
9219 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
9220 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
9222 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
9223 !tg3_flag(tp
, IS_5788
)) {
9224 val
|= WDMAC_MODE_RX_ACCEL
;
9228 /* Enable host coalescing bug fix */
9229 if (tg3_flag(tp
, 5755_PLUS
))
9230 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
9232 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
9233 val
|= WDMAC_MODE_BURST_ALL_DATA
;
9235 tw32_f(WDMAC_MODE
, val
);
9238 if (tg3_flag(tp
, PCIX_MODE
)) {
9241 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
9243 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
9244 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
9245 pcix_cmd
|= PCI_X_CMD_READ_2K
;
9246 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
9247 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
9248 pcix_cmd
|= PCI_X_CMD_READ_2K
;
9250 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
9254 tw32_f(RDMAC_MODE
, rdmac_mode
);
9257 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
) {
9258 for (i
= 0; i
< TG3_NUM_RDMA_CHANNELS
; i
++) {
9259 if (tr32(TG3_RDMA_LENGTH
+ (i
<< 2)) > TG3_MAX_MTU(tp
))
9262 if (i
< TG3_NUM_RDMA_CHANNELS
) {
9263 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
9264 val
|= TG3_LSO_RD_DMA_TX_LENGTH_WA
;
9265 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
9266 tg3_flag_set(tp
, 5719_RDMA_BUG
);
9270 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
9271 if (!tg3_flag(tp
, 5705_PLUS
))
9272 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
9274 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
9276 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
9278 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
9280 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
9281 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
9282 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
9283 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
9284 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
9285 tw32(RCVDBDI_MODE
, val
);
9286 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
9287 if (tg3_flag(tp
, HW_TSO_1
) ||
9288 tg3_flag(tp
, HW_TSO_2
) ||
9289 tg3_flag(tp
, HW_TSO_3
))
9290 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
9291 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
9292 if (tg3_flag(tp
, ENABLE_TSS
))
9293 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
9294 tw32(SNDBDI_MODE
, val
);
9295 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
9297 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
9298 err
= tg3_load_5701_a0_firmware_fix(tp
);
9303 if (tg3_flag(tp
, TSO_CAPABLE
)) {
9304 err
= tg3_load_tso_firmware(tp
);
9309 tp
->tx_mode
= TX_MODE_ENABLE
;
9311 if (tg3_flag(tp
, 5755_PLUS
) ||
9312 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
9313 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
9315 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
9316 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
9317 tp
->tx_mode
&= ~val
;
9318 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
9321 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
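		/* Presumably the usual Toeplitz-style RSS key: the ten 32-bit
		 * writes below load a fixed 40-byte hash key, four bytes per
		 * MAC_RSS_HASH_KEY_* register.
		 */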
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
9340 tp
->rx_mode
= RX_MODE_ENABLE
;
9341 if (tg3_flag(tp
, 5755_PLUS
))
9342 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
9344 if (tg3_flag(tp
, ENABLE_RSS
))
9345 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
9346 RX_MODE_RSS_ITBL_HASH_BITS_7
|
9347 RX_MODE_RSS_IPV6_HASH_EN
|
9348 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
9349 RX_MODE_RSS_IPV4_HASH_EN
|
9350 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
9352 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9355 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
9357 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
9358 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9359 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
9362 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9365 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9366 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
9367 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
9368 /* Set drive transmission level to 1.2V */
9369 /* only if the signal pre-emphasis bit is not set */
9370 val
= tr32(MAC_SERDES_CFG
);
9373 tw32(MAC_SERDES_CFG
, val
);
9375 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
9376 tw32(MAC_SERDES_CFG
, 0x616000);
9379 /* Prevent chip from dropping frames when flow control
9382 if (tg3_flag(tp
, 57765_CLASS
))
9386 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
9388 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
9389 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
9390 /* Use hardware link auto-negotiation */
9391 tg3_flag_set(tp
, HW_AUTONEG
);
9394 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
9395 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
9398 tmp
= tr32(SERDES_RX_CTRL
);
9399 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
9400 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
9401 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
9402 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9405 if (!tg3_flag(tp
, USE_PHYLIB
)) {
9406 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9407 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
9409 err
= tg3_setup_phy(tp
, 0);
9413 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9414 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
9417 /* Clear CRC stats. */
9418 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
9419 tg3_writephy(tp
, MII_TG3_TEST1
,
9420 tmp
| MII_TG3_TEST1_CRC_EN
);
9421 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
9426 __tg3_set_rx_mode(tp
->dev
);
9428 /* Initialize receive rules. */
9429 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
9430 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9431 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
9432 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9434 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
9438 if (tg3_flag(tp
, ENABLE_ASF
))
9442 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
9444 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
9446 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
9448 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
9450 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
9452 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
9454 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
9456 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
9458 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
9460 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
9462 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
9464 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
9466 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9468 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9476 if (tg3_flag(tp
, ENABLE_APE
))
9477 /* Write our heartbeat update interval to APE. */
9478 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
9479 APE_HOST_HEARTBEAT_INT_DISABLE
);
9481 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
9498 #if IS_ENABLED(CONFIG_HWMON)
9499 static void tg3_sd_scan_scratchpad(struct tg3
*tp
, struct tg3_ocir
*ocir
)
9503 for (i
= 0; i
< TG3_SD_NUM_RECS
; i
++, ocir
++) {
9504 u32 off
= i
* TG3_OCIR_LEN
, len
= TG3_OCIR_LEN
;
9506 tg3_ape_scratchpad_read(tp
, (u32
*) ocir
, off
, len
);
9509 if (ocir
->signature
!= TG3_OCIR_SIG_MAGIC
||
9510 !(ocir
->version_flags
& TG3_OCIR_FLAG_ACTIVE
))
9511 memset(ocir
, 0, TG3_OCIR_LEN
);
9515 /* sysfs attributes for hwmon */
9516 static ssize_t
tg3_show_temp(struct device
*dev
,
9517 struct device_attribute
*devattr
, char *buf
)
9519 struct pci_dev
*pdev
= to_pci_dev(dev
);
9520 struct net_device
*netdev
= pci_get_drvdata(pdev
);
9521 struct tg3
*tp
= netdev_priv(netdev
);
9522 struct sensor_device_attribute
*attr
= to_sensor_dev_attr(devattr
);
9525 spin_lock_bh(&tp
->lock
);
9526 tg3_ape_scratchpad_read(tp
, &temperature
, attr
->index
,
9527 sizeof(temperature
));
9528 spin_unlock_bh(&tp
->lock
);
9529 return sprintf(buf
, "%u\n", temperature
);
9533 static SENSOR_DEVICE_ATTR(temp1_input
, S_IRUGO
, tg3_show_temp
, NULL
,
9534 TG3_TEMP_SENSOR_OFFSET
);
9535 static SENSOR_DEVICE_ATTR(temp1_crit
, S_IRUGO
, tg3_show_temp
, NULL
,
9536 TG3_TEMP_CAUTION_OFFSET
);
9537 static SENSOR_DEVICE_ATTR(temp1_max
, S_IRUGO
, tg3_show_temp
, NULL
,
9538 TG3_TEMP_MAX_OFFSET
);
9540 static struct attribute
*tg3_attributes
[] = {
9541 &sensor_dev_attr_temp1_input
.dev_attr
.attr
,
9542 &sensor_dev_attr_temp1_crit
.dev_attr
.attr
,
9543 &sensor_dev_attr_temp1_max
.dev_attr
.attr
,
9547 static const struct attribute_group tg3_group
= {
9548 .attrs
= tg3_attributes
,
9553 static void tg3_hwmon_close(struct tg3
*tp
)
9555 #if IS_ENABLED(CONFIG_HWMON)
9556 if (tp
->hwmon_dev
) {
9557 hwmon_device_unregister(tp
->hwmon_dev
);
9558 tp
->hwmon_dev
= NULL
;
9559 sysfs_remove_group(&tp
->pdev
->dev
.kobj
, &tg3_group
);
9564 static void tg3_hwmon_open(struct tg3
*tp
)
9566 #if IS_ENABLED(CONFIG_HWMON)
9569 struct pci_dev
*pdev
= tp
->pdev
;
9570 struct tg3_ocir ocirs
[TG3_SD_NUM_RECS
];
9572 tg3_sd_scan_scratchpad(tp
, ocirs
);
9574 for (i
= 0; i
< TG3_SD_NUM_RECS
; i
++) {
9575 if (!ocirs
[i
].src_data_length
)
9578 size
+= ocirs
[i
].src_hdr_length
;
9579 size
+= ocirs
[i
].src_data_length
;
9585 /* Register hwmon sysfs hooks */
9586 err
= sysfs_create_group(&pdev
->dev
.kobj
, &tg3_group
);
9588 dev_err(&pdev
->dev
, "Cannot create sysfs group, aborting\n");
9592 tp
->hwmon_dev
= hwmon_device_register(&pdev
->dev
);
9593 if (IS_ERR(tp
->hwmon_dev
)) {
9594 tp
->hwmon_dev
= NULL
;
9595 dev_err(&pdev
->dev
, "Cannot register hwmon device, aborting\n");
9596 sysfs_remove_group(&pdev
->dev
.kobj
, &tg3_group
);
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
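/* TG3_STAT_ADD32 folds one 32-bit hardware reading into a 64-bit software
 * accumulator.  After the unsigned add, ->low ending up smaller than the
 * value just added means the addition wrapped, so the carry is pushed into
 * ->high.  E.g. low = 0xfffffff0 plus a reading of 0x20 leaves low = 0x10,
 * which is less than 0x20, so high is incremented.
 */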
9609 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
9611 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
9613 if (!netif_carrier_ok(tp
->dev
))
9616 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
9617 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
9618 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
9619 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
9620 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
9621 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
9622 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
9623 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
9624 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
9625 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
9626 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
9627 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
9628 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
9629 if (unlikely(tg3_flag(tp
, 5719_RDMA_BUG
) &&
9630 (sp
->tx_ucast_packets
.low
+ sp
->tx_mcast_packets
.low
+
9631 sp
->tx_bcast_packets
.low
) > TG3_NUM_RDMA_CHANNELS
)) {
9634 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
9635 val
&= ~TG3_LSO_RD_DMA_TX_LENGTH_WA
;
9636 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
9637 tg3_flag_clear(tp
, 5719_RDMA_BUG
);
9640 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
9641 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
9642 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
9643 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
9644 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
9645 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
9646 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
9647 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
9648 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
9649 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
9650 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
9651 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
9652 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
9653 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
9655 TG3_STAT_ADD32(&sp
->rxbds_empty
, RCVLPC_NO_RCV_BD_CNT
);
9656 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
&&
9657 tp
->pci_chip_rev_id
!= CHIPREV_ID_5719_A0
&&
9658 tp
->pci_chip_rev_id
!= CHIPREV_ID_5720_A0
) {
9659 TG3_STAT_ADD32(&sp
->rx_discards
, RCVLPC_IN_DISCARDS_CNT
);
9661 u32 val
= tr32(HOSTCC_FLOW_ATTN
);
9662 val
= (val
& HOSTCC_FLOW_ATTN_MBUF_LWM
) ? 1 : 0;
9664 tw32(HOSTCC_FLOW_ATTN
, HOSTCC_FLOW_ATTN_MBUF_LWM
);
9665 sp
->rx_discards
.low
+= val
;
9666 if (sp
->rx_discards
.low
< val
)
9667 sp
->rx_discards
.high
+= 1;
9669 sp
->mbuf_lwm_thresh_hit
= sp
->rx_discards
;
9671 TG3_STAT_ADD32(&sp
->rx_errors
, RCVLPC_IN_ERRORS_CNT
);
9674 static void tg3_chk_missed_msi(struct tg3
*tp
)
9678 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
9679 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
9681 if (tg3_has_work(tnapi
)) {
9682 if (tnapi
->last_rx_cons
== tnapi
->rx_rcb_ptr
&&
9683 tnapi
->last_tx_cons
== tnapi
->tx_cons
) {
9684 if (tnapi
->chk_msi_cnt
< 1) {
9685 tnapi
->chk_msi_cnt
++;
9691 tnapi
->chk_msi_cnt
= 0;
9692 tnapi
->last_rx_cons
= tnapi
->rx_rcb_ptr
;
9693 tnapi
->last_tx_cons
= tnapi
->tx_cons
;
9697 static void tg3_timer(unsigned long __opaque
)
9699 struct tg3
*tp
= (struct tg3
*) __opaque
;
9701 if (tp
->irq_sync
|| tg3_flag(tp
, RESET_TASK_PENDING
))
9704 spin_lock(&tp
->lock
);
9706 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
9707 tg3_flag(tp
, 57765_CLASS
))
9708 tg3_chk_missed_msi(tp
);
9710 if (!tg3_flag(tp
, TAGGED_STATUS
)) {
9711 /* All of this garbage is because when using non-tagged
9712 * IRQ status the mailbox/status_block protocol the chip
9713 * uses with the cpu is race prone.
9715 if (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
) {
9716 tw32(GRC_LOCAL_CTRL
,
9717 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
9719 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
9720 HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
);
9723 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
9724 spin_unlock(&tp
->lock
);
9725 tg3_reset_task_schedule(tp
);
9730 /* This part only runs once per second. */
9731 if (!--tp
->timer_counter
) {
9732 if (tg3_flag(tp
, 5705_PLUS
))
9733 tg3_periodic_fetch_stats(tp
);
9735 if (tp
->setlpicnt
&& !--tp
->setlpicnt
)
9736 tg3_phy_eee_enable(tp
);
9738 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
9742 mac_stat
= tr32(MAC_STATUS
);
9745 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) {
9746 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
9748 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
9752 tg3_setup_phy(tp
, 0);
9753 } else if (tg3_flag(tp
, POLL_SERDES
)) {
9754 u32 mac_stat
= tr32(MAC_STATUS
);
9757 if (netif_carrier_ok(tp
->dev
) &&
9758 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
9761 if (!netif_carrier_ok(tp
->dev
) &&
9762 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
9763 MAC_STATUS_SIGNAL_DET
))) {
9767 if (!tp
->serdes_counter
) {
9770 ~MAC_MODE_PORT_MODE_MASK
));
9772 tw32_f(MAC_MODE
, tp
->mac_mode
);
9775 tg3_setup_phy(tp
, 0);
9777 } else if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
9778 tg3_flag(tp
, 5780_CLASS
)) {
9779 tg3_serdes_parallel_detect(tp
);
9782 tp
->timer_counter
= tp
->timer_multiplier
;
9785 /* Heartbeat is only sent once every 2 seconds.
9787 * The heartbeat is to tell the ASF firmware that the host
9788 * driver is still alive. In the event that the OS crashes,
9789 * ASF needs to reset the hardware to free up the FIFO space
9790 * that may be filled with rx packets destined for the host.
9791 * If the FIFO is full, ASF will no longer function properly.
9793 * Unintended resets have been reported on real time kernels
9794 * where the timer doesn't run on time. Netpoll will also have
9797 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9798 * to check the ring condition when the heartbeat is expiring
9799 * before doing the reset. This will prevent most unintended
9802 if (!--tp
->asf_counter
) {
9803 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
9804 tg3_wait_for_event_ack(tp
);
9806 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
,
9807 FWCMD_NICDRV_ALIVE3
);
9808 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
9809 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
,
9810 TG3_FW_UPDATE_TIMEOUT_SEC
);
9812 tg3_generate_fw_event(tp
);
9814 tp
->asf_counter
= tp
->asf_multiplier
;
9817 spin_unlock(&tp
->lock
);
9820 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
9821 add_timer(&tp
->timer
);
}

static void __devinit tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;
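	/* Worked example: with tagged status the timer fires every HZ jiffies
	 * (once a second), so timer_multiplier is 1 and the once-per-second
	 * work in tg3_timer() runs on every tick; with the HZ / 10 offset the
	 * timer fires ten times a second and timer_multiplier is 10.
	 */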
	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
9858 /* Restart hardware after configuration changes, self-test, etc.
9859 * Invoked with tp->lock held.
9861 static int tg3_restart_hw(struct tg3
*tp
, int reset_phy
)
9862 __releases(tp
->lock
)
9863 __acquires(tp
->lock
)
9867 err
= tg3_init_hw(tp
, reset_phy
);
9870 "Failed to re-initialize device, aborting\n");
9871 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
9872 tg3_full_unlock(tp
);
9875 tg3_napi_enable(tp
);
9877 tg3_full_lock(tp
, 0);
9882 static void tg3_reset_task(struct work_struct
*work
)
9884 struct tg3
*tp
= container_of(work
, struct tg3
, reset_task
);
9887 tg3_full_lock(tp
, 0);
9889 if (!netif_running(tp
->dev
)) {
9890 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
9891 tg3_full_unlock(tp
);
9895 tg3_full_unlock(tp
);
9901 tg3_full_lock(tp
, 1);
9903 if (tg3_flag(tp
, TX_RECOVERY_PENDING
)) {
9904 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
9905 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
9906 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
9907 tg3_flag_clear(tp
, TX_RECOVERY_PENDING
);
9910 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
9911 err
= tg3_init_hw(tp
, 1);
9915 tg3_netif_start(tp
);
9918 tg3_full_unlock(tp
);
9923 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
9926 static int tg3_request_irq(struct tg3
*tp
, int irq_num
)
9929 unsigned long flags
;
9931 struct tg3_napi
*tnapi
= &tp
->napi
[irq_num
];
9933 if (tp
->irq_cnt
== 1)
9934 name
= tp
->dev
->name
;
9936 name
= &tnapi
->irq_lbl
[0];
9937 snprintf(name
, IFNAMSIZ
, "%s-%d", tp
->dev
->name
, irq_num
);
9938 name
[IFNAMSIZ
-1] = 0;
9941 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
9943 if (tg3_flag(tp
, 1SHOT_MSI
))
9948 if (tg3_flag(tp
, TAGGED_STATUS
))
9949 fn
= tg3_interrupt_tagged
;
9950 flags
= IRQF_SHARED
;
9953 return request_irq(tnapi
->irq_vec
, fn
, flags
, name
, tnapi
);
9956 static int tg3_test_interrupt(struct tg3
*tp
)
9958 struct tg3_napi
*tnapi
= &tp
->napi
[0];
9959 struct net_device
*dev
= tp
->dev
;
9960 int err
, i
, intr_ok
= 0;
9963 if (!netif_running(dev
))
9966 tg3_disable_ints(tp
);
9968 free_irq(tnapi
->irq_vec
, tnapi
);
9971 * Turn off MSI one shot mode. Otherwise this test has no
9972 * observable way to know whether the interrupt was delivered.
9974 if (tg3_flag(tp
, 57765_PLUS
)) {
9975 val
= tr32(MSGINT_MODE
) | MSGINT_MODE_ONE_SHOT_DISABLE
;
9976 tw32(MSGINT_MODE
, val
);
9979 err
= request_irq(tnapi
->irq_vec
, tg3_test_isr
,
9980 IRQF_SHARED
, dev
->name
, tnapi
);
9984 tnapi
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
9985 tg3_enable_ints(tp
);
9987 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
9990 for (i
= 0; i
< 5; i
++) {
9991 u32 int_mbox
, misc_host_ctrl
;
9993 int_mbox
= tr32_mailbox(tnapi
->int_mbox
);
9994 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
9996 if ((int_mbox
!= 0) ||
9997 (misc_host_ctrl
& MISC_HOST_CTRL_MASK_PCI_INT
)) {
10002 if (tg3_flag(tp
, 57765_PLUS
) &&
10003 tnapi
->hw_status
->status_tag
!= tnapi
->last_tag
)
10004 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
10009 tg3_disable_ints(tp
);
10011 free_irq(tnapi
->irq_vec
, tnapi
);
10013 err
= tg3_request_irq(tp
, 0);
10019 /* Reenable MSI one shot mode. */
10020 if (tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, 1SHOT_MSI
)) {
10021 val
= tr32(MSGINT_MODE
) & ~MSGINT_MODE_ONE_SHOT_DISABLE
;
10022 tw32(MSGINT_MODE
, val
);
10030 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10031 * successfully restored
10033 static int tg3_test_msi(struct tg3
*tp
)
10038 if (!tg3_flag(tp
, USING_MSI
))
10041 /* Turn off SERR reporting in case MSI terminates with Master
10044 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
10045 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
10046 pci_cmd
& ~PCI_COMMAND_SERR
);
10048 err
= tg3_test_interrupt(tp
);
10050 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
10055 /* other failures */
10059 /* MSI test failed, go back to INTx mode */
10060 netdev_warn(tp
->dev
, "No interrupt was generated using MSI. Switching "
10061 "to INTx mode. Please report this failure to the PCI "
10062 "maintainer and include system chipset information\n");
10064 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
10066 pci_disable_msi(tp
->pdev
);
10068 tg3_flag_clear(tp
, USING_MSI
);
10069 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
10071 err
= tg3_request_irq(tp
, 0);
10075 /* Need to reset the chip because the MSI cycle may have terminated
10076 * with Master Abort.
10078 tg3_full_lock(tp
, 1);
10080 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10081 err
= tg3_init_hw(tp
, 1);
10083 tg3_full_unlock(tp
);
10086 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
	return err;
}

static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
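	/* Illustrative sketch only; the struct and field names below are not
	 * part of this driver.  The header words consumed here can be read as
	 *
	 *	struct tg3_fw_hdr_example {
	 *		__be32 version;		[fw_data[0]]
	 *		__be32 start_addr;	[fw_data[1], load address]
	 *		__be32 len;		[fw_data[2], full length incl. BSS]
	 *		__be32 data[];		[firmware image follows]
	 *	};
	 *
	 * which is why fw_data[2] is taken as tp->fw_len below and why the
	 * blob is expected to carry a 12-byte header ahead of the image data.
	 */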
	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[tp->irq_max];

	tp->irq_cnt = netif_get_num_default_rss_queues();
	if (tp->irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
	}
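	/* e.g. with four online CPUs (and irq_max large enough) this typically
	 * requests five vectors: vector 0 for link/error events plus one per
	 * rx queue.
	 */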
10137 for (i
= 0; i
< tp
->irq_max
; i
++) {
10138 msix_ent
[i
].entry
= i
;
10139 msix_ent
[i
].vector
= 0;
10142 rc
= pci_enable_msix(tp
->pdev
, msix_ent
, tp
->irq_cnt
);
10145 } else if (rc
!= 0) {
10146 if (pci_enable_msix(tp
->pdev
, msix_ent
, rc
))
10148 netdev_notice(tp
->dev
, "Requested %d MSI-X vectors, received %d\n",
10153 for (i
= 0; i
< tp
->irq_max
; i
++)
10154 tp
->napi
[i
].irq_vec
= msix_ent
[i
].vector
;
10156 netif_set_real_num_tx_queues(tp
->dev
, 1);
10157 rc
= tp
->irq_cnt
> 1 ? tp
->irq_cnt
- 1 : 1;
10158 if (netif_set_real_num_rx_queues(tp
->dev
, rc
)) {
10159 pci_disable_msix(tp
->pdev
);
10163 if (tp
->irq_cnt
> 1) {
10164 tg3_flag_set(tp
, ENABLE_RSS
);
10166 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
10167 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
10168 tg3_flag_set(tp
, ENABLE_TSS
);
10169 netif_set_real_num_tx_queues(tp
->dev
, tp
->irq_cnt
- 1);
10176 static void tg3_ints_init(struct tg3
*tp
)
10178 if ((tg3_flag(tp
, SUPPORT_MSI
) || tg3_flag(tp
, SUPPORT_MSIX
)) &&
10179 !tg3_flag(tp
, TAGGED_STATUS
)) {
10180 /* All MSI supporting chips should support tagged
10181 * status. Assert that this is the case.
10183 netdev_warn(tp
->dev
,
10184 "MSI without TAGGED_STATUS? Not using MSI\n");
10188 if (tg3_flag(tp
, SUPPORT_MSIX
) && tg3_enable_msix(tp
))
10189 tg3_flag_set(tp
, USING_MSIX
);
10190 else if (tg3_flag(tp
, SUPPORT_MSI
) && pci_enable_msi(tp
->pdev
) == 0)
10191 tg3_flag_set(tp
, USING_MSI
);
10193 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
10194 u32 msi_mode
= tr32(MSGINT_MODE
);
10195 if (tg3_flag(tp
, USING_MSIX
) && tp
->irq_cnt
> 1)
10196 msi_mode
|= MSGINT_MODE_MULTIVEC_EN
;
10197 if (!tg3_flag(tp
, 1SHOT_MSI
))
10198 msi_mode
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
10199 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
10202 if (!tg3_flag(tp
, USING_MSIX
)) {
10204 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
10205 netif_set_real_num_tx_queues(tp
->dev
, 1);
10206 netif_set_real_num_rx_queues(tp
->dev
, 1);
10210 static void tg3_ints_fini(struct tg3
*tp
)
10212 if (tg3_flag(tp
, USING_MSIX
))
10213 pci_disable_msix(tp
->pdev
);
10214 else if (tg3_flag(tp
, USING_MSI
))
10215 pci_disable_msi(tp
->pdev
);
10216 tg3_flag_clear(tp
, USING_MSI
);
10217 tg3_flag_clear(tp
, USING_MSIX
);
10218 tg3_flag_clear(tp
, ENABLE_RSS
);
10219 tg3_flag_clear(tp
, ENABLE_TSS
);
10222 static int tg3_open(struct net_device
*dev
)
10224 struct tg3
*tp
= netdev_priv(dev
);
10227 if (tp
->fw_needed
) {
10228 err
= tg3_request_firmware(tp
);
10229 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
10233 netdev_warn(tp
->dev
, "TSO capability disabled\n");
10234 tg3_flag_clear(tp
, TSO_CAPABLE
);
10235 } else if (!tg3_flag(tp
, TSO_CAPABLE
)) {
10236 netdev_notice(tp
->dev
, "TSO capability restored\n");
10237 tg3_flag_set(tp
, TSO_CAPABLE
);
10241 netif_carrier_off(tp
->dev
);
10243 err
= tg3_power_up(tp
);
10247 tg3_full_lock(tp
, 0);
10249 tg3_disable_ints(tp
);
10250 tg3_flag_clear(tp
, INIT_COMPLETE
);
10252 tg3_full_unlock(tp
);
10255 * Setup interrupts first so we know how
10256 * many NAPI resources to allocate
10260 tg3_rss_check_indir_tbl(tp
);
10262 /* The placement of this call is tied
10263 * to the setup and use of Host TX descriptors.
10265 err
= tg3_alloc_consistent(tp
);
10271 tg3_napi_enable(tp
);
10273 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
10274 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10275 err
= tg3_request_irq(tp
, i
);
10277 for (i
--; i
>= 0; i
--) {
10278 tnapi
= &tp
->napi
[i
];
10279 free_irq(tnapi
->irq_vec
, tnapi
);
10285 tg3_full_lock(tp
, 0);
10287 err
= tg3_init_hw(tp
, 1);
10289 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10290 tg3_free_rings(tp
);
10293 tg3_full_unlock(tp
);
10298 if (tg3_flag(tp
, USING_MSI
)) {
10299 err
= tg3_test_msi(tp
);
10302 tg3_full_lock(tp
, 0);
10303 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10304 tg3_free_rings(tp
);
10305 tg3_full_unlock(tp
);
10310 if (!tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, USING_MSI
)) {
10311 u32 val
= tr32(PCIE_TRANSACTION_CFG
);
10313 tw32(PCIE_TRANSACTION_CFG
,
10314 val
| PCIE_TRANS_CFG_1SHOT_MSI
);
10320 tg3_hwmon_open(tp
);
10322 tg3_full_lock(tp
, 0);
10324 tg3_timer_start(tp
);
10325 tg3_flag_set(tp
, INIT_COMPLETE
);
10326 tg3_enable_ints(tp
);
10328 tg3_full_unlock(tp
);
10330 netif_tx_start_all_queues(dev
);
10333 * Reset loopback feature if it was turned on while the device was down
10334 * make sure that it's installed properly now.
10336 if (dev
->features
& NETIF_F_LOOPBACK
)
10337 tg3_set_loopback(dev
, dev
->features
);
10342 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
10343 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10344 free_irq(tnapi
->irq_vec
, tnapi
);
10348 tg3_napi_disable(tp
);
10350 tg3_free_consistent(tp
);
10354 tg3_frob_aux_power(tp
, false);
10355 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
10359 static int tg3_close(struct net_device
*dev
)
10362 struct tg3
*tp
= netdev_priv(dev
);
10364 tg3_napi_disable(tp
);
10365 tg3_reset_task_cancel(tp
);
10367 netif_tx_stop_all_queues(dev
);
10369 tg3_timer_stop(tp
);
10371 tg3_hwmon_close(tp
);
10375 tg3_full_lock(tp
, 1);
10377 tg3_disable_ints(tp
);
10379 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10380 tg3_free_rings(tp
);
10381 tg3_flag_clear(tp
, INIT_COMPLETE
);
10383 tg3_full_unlock(tp
);
10385 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
10386 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10387 free_irq(tnapi
->irq_vec
, tnapi
);
10392 /* Clear stats across close / open calls */
10393 memset(&tp
->net_stats_prev
, 0, sizeof(tp
->net_stats_prev
));
10394 memset(&tp
->estats_prev
, 0, sizeof(tp
->estats_prev
));
10398 tg3_free_consistent(tp
);
10400 tg3_power_down(tp
);
10402 netif_carrier_off(tp
->dev
);
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
10412 static u64
tg3_calc_crc_errors(struct tg3
*tp
)
10414 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
10416 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
10417 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10418 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
10421 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &val
)) {
10422 tg3_writephy(tp
, MII_TG3_TEST1
,
10423 val
| MII_TG3_TEST1_CRC_EN
);
10424 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &val
);
10428 tp
->phy_crc_errors
+= val
;
10430 return tp
->phy_crc_errors
;
10433 return get_stat64(&hw_stats
->rx_fcs_errors
);
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
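/* For example, ESTAT_ADD(rx_octets) expands to
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 * i.e. the totals saved at the last close (tp->estats_prev) are added to the
 * live hardware counters so ethtool statistics stay monotonic across
 * close/open cycles.
 */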
10440 static void tg3_get_estats(struct tg3
*tp
, struct tg3_ethtool_stats
*estats
)
10442 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
10443 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
10445 ESTAT_ADD(rx_octets
);
10446 ESTAT_ADD(rx_fragments
);
10447 ESTAT_ADD(rx_ucast_packets
);
10448 ESTAT_ADD(rx_mcast_packets
);
10449 ESTAT_ADD(rx_bcast_packets
);
10450 ESTAT_ADD(rx_fcs_errors
);
10451 ESTAT_ADD(rx_align_errors
);
10452 ESTAT_ADD(rx_xon_pause_rcvd
);
10453 ESTAT_ADD(rx_xoff_pause_rcvd
);
10454 ESTAT_ADD(rx_mac_ctrl_rcvd
);
10455 ESTAT_ADD(rx_xoff_entered
);
10456 ESTAT_ADD(rx_frame_too_long_errors
);
10457 ESTAT_ADD(rx_jabbers
);
10458 ESTAT_ADD(rx_undersize_packets
);
10459 ESTAT_ADD(rx_in_length_errors
);
10460 ESTAT_ADD(rx_out_length_errors
);
10461 ESTAT_ADD(rx_64_or_less_octet_packets
);
10462 ESTAT_ADD(rx_65_to_127_octet_packets
);
10463 ESTAT_ADD(rx_128_to_255_octet_packets
);
10464 ESTAT_ADD(rx_256_to_511_octet_packets
);
10465 ESTAT_ADD(rx_512_to_1023_octet_packets
);
10466 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
10467 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
10468 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
10469 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
10470 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
10472 ESTAT_ADD(tx_octets
);
10473 ESTAT_ADD(tx_collisions
);
10474 ESTAT_ADD(tx_xon_sent
);
10475 ESTAT_ADD(tx_xoff_sent
);
10476 ESTAT_ADD(tx_flow_control
);
10477 ESTAT_ADD(tx_mac_errors
);
10478 ESTAT_ADD(tx_single_collisions
);
10479 ESTAT_ADD(tx_mult_collisions
);
10480 ESTAT_ADD(tx_deferred
);
10481 ESTAT_ADD(tx_excessive_collisions
);
10482 ESTAT_ADD(tx_late_collisions
);
10483 ESTAT_ADD(tx_collide_2times
);
10484 ESTAT_ADD(tx_collide_3times
);
10485 ESTAT_ADD(tx_collide_4times
);
10486 ESTAT_ADD(tx_collide_5times
);
10487 ESTAT_ADD(tx_collide_6times
);
10488 ESTAT_ADD(tx_collide_7times
);
10489 ESTAT_ADD(tx_collide_8times
);
10490 ESTAT_ADD(tx_collide_9times
);
10491 ESTAT_ADD(tx_collide_10times
);
10492 ESTAT_ADD(tx_collide_11times
);
10493 ESTAT_ADD(tx_collide_12times
);
10494 ESTAT_ADD(tx_collide_13times
);
10495 ESTAT_ADD(tx_collide_14times
);
10496 ESTAT_ADD(tx_collide_15times
);
10497 ESTAT_ADD(tx_ucast_packets
);
10498 ESTAT_ADD(tx_mcast_packets
);
10499 ESTAT_ADD(tx_bcast_packets
);
10500 ESTAT_ADD(tx_carrier_sense_errors
);
10501 ESTAT_ADD(tx_discards
);
10502 ESTAT_ADD(tx_errors
);
10504 ESTAT_ADD(dma_writeq_full
);
10505 ESTAT_ADD(dma_write_prioq_full
);
10506 ESTAT_ADD(rxbds_empty
);
10507 ESTAT_ADD(rx_discards
);
10508 ESTAT_ADD(rx_errors
);
10509 ESTAT_ADD(rx_threshold_hit
);
10511 ESTAT_ADD(dma_readq_full
);
10512 ESTAT_ADD(dma_read_prioq_full
);
10513 ESTAT_ADD(tx_comp_queue_full
);
10515 ESTAT_ADD(ring_set_send_prod_index
);
10516 ESTAT_ADD(ring_status_update
);
10517 ESTAT_ADD(nic_irqs
);
10518 ESTAT_ADD(nic_avoided_irqs
);
10519 ESTAT_ADD(nic_tx_threshold_hit
);
10521 ESTAT_ADD(mbuf_lwm_thresh_hit
);
10524 static void tg3_get_nstats(struct tg3
*tp
, struct rtnl_link_stats64
*stats
)
10526 struct rtnl_link_stats64
*old_stats
= &tp
->net_stats_prev
;
10527 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
10529 stats
->rx_packets
= old_stats
->rx_packets
+
10530 get_stat64(&hw_stats
->rx_ucast_packets
) +
10531 get_stat64(&hw_stats
->rx_mcast_packets
) +
10532 get_stat64(&hw_stats
->rx_bcast_packets
);
10534 stats
->tx_packets
= old_stats
->tx_packets
+
10535 get_stat64(&hw_stats
->tx_ucast_packets
) +
10536 get_stat64(&hw_stats
->tx_mcast_packets
) +
10537 get_stat64(&hw_stats
->tx_bcast_packets
);
10539 stats
->rx_bytes
= old_stats
->rx_bytes
+
10540 get_stat64(&hw_stats
->rx_octets
);
10541 stats
->tx_bytes
= old_stats
->tx_bytes
+
10542 get_stat64(&hw_stats
->tx_octets
);
10544 stats
->rx_errors
= old_stats
->rx_errors
+
10545 get_stat64(&hw_stats
->rx_errors
);
10546 stats
->tx_errors
= old_stats
->tx_errors
+
10547 get_stat64(&hw_stats
->tx_errors
) +
10548 get_stat64(&hw_stats
->tx_mac_errors
) +
10549 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
10550 get_stat64(&hw_stats
->tx_discards
);
10552 stats
->multicast
= old_stats
->multicast
+
10553 get_stat64(&hw_stats
->rx_mcast_packets
);
10554 stats
->collisions
= old_stats
->collisions
+
10555 get_stat64(&hw_stats
->tx_collisions
);
10557 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
10558 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
10559 get_stat64(&hw_stats
->rx_undersize_packets
);
10561 stats
->rx_over_errors
= old_stats
->rx_over_errors
+
10562 get_stat64(&hw_stats
->rxbds_empty
);
10563 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
10564 get_stat64(&hw_stats
->rx_align_errors
);
10565 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
10566 get_stat64(&hw_stats
->tx_discards
);
10567 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
10568 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
10570 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
10571 tg3_calc_crc_errors(tp
);
10573 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
10574 get_stat64(&hw_stats
->rx_discards
);
10576 stats
->rx_dropped
= tp
->rx_dropped
;
10577 stats
->tx_dropped
= tp
->tx_dropped
;
10580 static int tg3_get_regs_len(struct net_device
*dev
)
10582 return TG3_REG_BLK_SIZE
;
10585 static void tg3_get_regs(struct net_device
*dev
,
10586 struct ethtool_regs
*regs
, void *_p
)
10588 struct tg3
*tp
= netdev_priv(dev
);
10592 memset(_p
, 0, TG3_REG_BLK_SIZE
);
10594 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
10597 tg3_full_lock(tp
, 0);
10599 tg3_dump_legacy_regs(tp
, (u32
*)_p
);
10601 tg3_full_unlock(tp
);
10604 static int tg3_get_eeprom_len(struct net_device
*dev
)
10606 struct tg3
*tp
= netdev_priv(dev
);
10608 return tp
->nvram_size
;
10611 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
10613 struct tg3
*tp
= netdev_priv(dev
);
10616 u32 i
, offset
, len
, b_offset
, b_count
;
10619 if (tg3_flag(tp
, NO_NVRAM
))
10622 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
10625 offset
= eeprom
->offset
;
10629 eeprom
->magic
= TG3_EEPROM_MAGIC
;
10632 /* adjustments to start on required 4 byte boundary */
10633 b_offset
= offset
& 3;
10634 b_count
= 4 - b_offset
;
10635 if (b_count
> len
) {
10636 /* i.e. offset=1 len=2 */
10639 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &val
);
10642 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
10645 eeprom
->len
+= b_count
;
10648 /* read bytes up to the last 4 byte boundary */
10649 pd
= &data
[eeprom
->len
];
10650 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
10651 ret
= tg3_nvram_read_be32(tp
, offset
+ i
, &val
);
10656 memcpy(pd
+ i
, &val
, 4);
10661 /* read last bytes not ending on 4 byte boundary */
10662 pd
= &data
[eeprom
->len
];
10664 b_offset
= offset
+ len
- b_count
;
10665 ret
= tg3_nvram_read_be32(tp
, b_offset
, &val
);
10668 memcpy(pd
, &val
, b_count
);
10669 eeprom
->len
+= b_count
;
10674 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
10676 struct tg3
*tp
= netdev_priv(dev
);
10678 u32 offset
, len
, b_offset
, odd_len
;
10682 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
10685 if (tg3_flag(tp
, NO_NVRAM
) ||
10686 eeprom
->magic
!= TG3_EEPROM_MAGIC
)
10689 offset
= eeprom
->offset
;
10692 if ((b_offset
= (offset
& 3))) {
10693 /* adjustments to start on required 4 byte boundary */
10694 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &start
);
10705 /* adjustments to end on required 4 byte boundary */
10707 len
= (len
+ 3) & ~3;
10708 ret
= tg3_nvram_read_be32(tp
, offset
+len
-4, &end
);
10714 if (b_offset
|| odd_len
) {
10715 buf
= kmalloc(len
, GFP_KERNEL
);
10719 memcpy(buf
, &start
, 4);
10721 memcpy(buf
+len
-4, &end
, 4);
10722 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
10725 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
10733 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
10735 struct tg3
*tp
= netdev_priv(dev
);
10737 if (tg3_flag(tp
, USE_PHYLIB
)) {
10738 struct phy_device
*phydev
;
10739 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
10741 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
10742 return phy_ethtool_gset(phydev
, cmd
);
10745 cmd
->supported
= (SUPPORTED_Autoneg
);
10747 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
10748 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
10749 SUPPORTED_1000baseT_Full
);
10751 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
10752 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
10753 SUPPORTED_100baseT_Full
|
10754 SUPPORTED_10baseT_Half
|
10755 SUPPORTED_10baseT_Full
|
10757 cmd
->port
= PORT_TP
;
10759 cmd
->supported
|= SUPPORTED_FIBRE
;
10760 cmd
->port
= PORT_FIBRE
;
10763 cmd
->advertising
= tp
->link_config
.advertising
;
10764 if (tg3_flag(tp
, PAUSE_AUTONEG
)) {
10765 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
) {
10766 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
10767 cmd
->advertising
|= ADVERTISED_Pause
;
10769 cmd
->advertising
|= ADVERTISED_Pause
|
10770 ADVERTISED_Asym_Pause
;
10772 } else if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
10773 cmd
->advertising
|= ADVERTISED_Asym_Pause
;
10776 if (netif_running(dev
) && netif_carrier_ok(dev
)) {
10777 ethtool_cmd_speed_set(cmd
, tp
->link_config
.active_speed
);
10778 cmd
->duplex
= tp
->link_config
.active_duplex
;
10779 cmd
->lp_advertising
= tp
->link_config
.rmt_adv
;
10780 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
10781 if (tp
->phy_flags
& TG3_PHYFLG_MDIX_STATE
)
10782 cmd
->eth_tp_mdix
= ETH_TP_MDI_X
;
10784 cmd
->eth_tp_mdix
= ETH_TP_MDI
;
10787 ethtool_cmd_speed_set(cmd
, SPEED_UNKNOWN
);
10788 cmd
->duplex
= DUPLEX_UNKNOWN
;
10789 cmd
->eth_tp_mdix
= ETH_TP_MDI_INVALID
;
10791 cmd
->phy_address
= tp
->phy_addr
;
10792 cmd
->transceiver
= XCVR_INTERNAL
;
10793 cmd
->autoneg
= tp
->link_config
.autoneg
;
10799 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
10801 struct tg3
*tp
= netdev_priv(dev
);
10802 u32 speed
= ethtool_cmd_speed(cmd
);
10804 if (tg3_flag(tp
, USE_PHYLIB
)) {
10805 struct phy_device
*phydev
;
10806 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
10808 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
10809 return phy_ethtool_sset(phydev
, cmd
);
10812 if (cmd
->autoneg
!= AUTONEG_ENABLE
&&
10813 cmd
->autoneg
!= AUTONEG_DISABLE
)
10816 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
10817 cmd
->duplex
!= DUPLEX_FULL
&&
10818 cmd
->duplex
!= DUPLEX_HALF
)
10821 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
10822 u32 mask
= ADVERTISED_Autoneg
|
10824 ADVERTISED_Asym_Pause
;
10826 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
10827 mask
|= ADVERTISED_1000baseT_Half
|
10828 ADVERTISED_1000baseT_Full
;
10830 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
10831 mask
|= ADVERTISED_100baseT_Half
|
10832 ADVERTISED_100baseT_Full
|
10833 ADVERTISED_10baseT_Half
|
10834 ADVERTISED_10baseT_Full
|
10837 mask
|= ADVERTISED_FIBRE
;
10839 if (cmd
->advertising
& ~mask
)
10842 mask
&= (ADVERTISED_1000baseT_Half
|
10843 ADVERTISED_1000baseT_Full
|
10844 ADVERTISED_100baseT_Half
|
10845 ADVERTISED_100baseT_Full
|
10846 ADVERTISED_10baseT_Half
|
10847 ADVERTISED_10baseT_Full
);
10849 cmd
->advertising
&= mask
;
10851 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
10852 if (speed
!= SPEED_1000
)
10855 if (cmd
->duplex
!= DUPLEX_FULL
)
10858 if (speed
!= SPEED_100
&&
10864 tg3_full_lock(tp
, 0);
10866 tp
->link_config
.autoneg
= cmd
->autoneg
;
10867 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
10868 tp
->link_config
.advertising
= (cmd
->advertising
|
10869 ADVERTISED_Autoneg
);
10870 tp
->link_config
.speed
= SPEED_UNKNOWN
;
10871 tp
->link_config
.duplex
= DUPLEX_UNKNOWN
;
10873 tp
->link_config
.advertising
= 0;
10874 tp
->link_config
.speed
= speed
;
10875 tp
->link_config
.duplex
= cmd
->duplex
;
10878 if (netif_running(dev
))
10879 tg3_setup_phy(tp
, 1);
10881 tg3_full_unlock(tp
);
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
10896 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
10898 struct tg3
*tp
= netdev_priv(dev
);
10900 if (tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(&tp
->pdev
->dev
))
10901 wol
->supported
= WAKE_MAGIC
;
10903 wol
->supported
= 0;
10905 if (tg3_flag(tp
, WOL_ENABLE
) && device_can_wakeup(&tp
->pdev
->dev
))
10906 wol
->wolopts
= WAKE_MAGIC
;
10907 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
10910 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
10912 struct tg3
*tp
= netdev_priv(dev
);
10913 struct device
*dp
= &tp
->pdev
->dev
;
10915 if (wol
->wolopts
& ~WAKE_MAGIC
)
10917 if ((wol
->wolopts
& WAKE_MAGIC
) &&
10918 !(tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(dp
)))
10921 device_set_wakeup_enable(dp
, wol
->wolopts
& WAKE_MAGIC
);
10923 spin_lock_bh(&tp
->lock
);
10924 if (device_may_wakeup(dp
))
10925 tg3_flag_set(tp
, WOL_ENABLE
);
10927 tg3_flag_clear(tp
, WOL_ENABLE
);
10928 spin_unlock_bh(&tp
->lock
);
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
10945 static int tg3_nway_reset(struct net_device
*dev
)
10947 struct tg3
*tp
= netdev_priv(dev
);
10950 if (!netif_running(dev
))
10953 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
10956 if (tg3_flag(tp
, USE_PHYLIB
)) {
10957 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
10959 r
= phy_start_aneg(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
10963 spin_lock_bh(&tp
->lock
);
10965 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
10966 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
10967 ((bmcr
& BMCR_ANENABLE
) ||
10968 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
10969 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
10973 spin_unlock_bh(&tp
->lock
);
10979 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
10981 struct tg3
*tp
= netdev_priv(dev
);
10983 ering
->rx_max_pending
= tp
->rx_std_ring_mask
;
10984 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
10985 ering
->rx_jumbo_max_pending
= tp
->rx_jmb_ring_mask
;
10987 ering
->rx_jumbo_max_pending
= 0;
10989 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
10991 ering
->rx_pending
= tp
->rx_pending
;
10992 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
10993 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
10995 ering
->rx_jumbo_pending
= 0;
10997 ering
->tx_pending
= tp
->napi
[0].tx_pending
;
11000 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
11002 struct tg3
*tp
= netdev_priv(dev
);
11003 int i
, irq_sync
= 0, err
= 0;
11005 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
11006 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
11007 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
11008 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
11009 (tg3_flag(tp
, TSO_BUG
) &&
11010 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
11013 if (netif_running(dev
)) {
11015 tg3_netif_stop(tp
);
11019 tg3_full_lock(tp
, irq_sync
);
11021 tp
->rx_pending
= ering
->rx_pending
;
11023 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
11024 tp
->rx_pending
> 63)
11025 tp
->rx_pending
= 63;
11026 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
11028 for (i
= 0; i
< tp
->irq_max
; i
++)
11029 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
11031 if (netif_running(dev
)) {
11032 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11033 err
= tg3_restart_hw(tp
, 1);
11035 tg3_netif_start(tp
);
11038 tg3_full_unlock(tp
);
11040 if (irq_sync
&& !err
)
11046 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
11048 struct tg3
*tp
= netdev_priv(dev
);
11050 epause
->autoneg
= !!tg3_flag(tp
, PAUSE_AUTONEG
);
11052 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
)
11053 epause
->rx_pause
= 1;
11055 epause
->rx_pause
= 0;
11057 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
)
11058 epause
->tx_pause
= 1;
11060 epause
->tx_pause
= 0;
11063 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
11065 struct tg3
*tp
= netdev_priv(dev
);
11068 if (tg3_flag(tp
, USE_PHYLIB
)) {
11070 struct phy_device
*phydev
;
11072 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11074 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
11075 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
11076 (epause
->rx_pause
!= epause
->tx_pause
)))
11079 tp
->link_config
.flowctrl
= 0;
11080 if (epause
->rx_pause
) {
11081 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
11083 if (epause
->tx_pause
) {
11084 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11085 newadv
= ADVERTISED_Pause
;
11087 newadv
= ADVERTISED_Pause
|
11088 ADVERTISED_Asym_Pause
;
11089 } else if (epause
->tx_pause
) {
11090 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11091 newadv
= ADVERTISED_Asym_Pause
;
11095 if (epause
->autoneg
)
11096 tg3_flag_set(tp
, PAUSE_AUTONEG
);
11098 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
11100 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
11101 u32 oldadv
= phydev
->advertising
&
11102 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
11103 if (oldadv
!= newadv
) {
11104 phydev
->advertising
&=
11105 ~(ADVERTISED_Pause
|
11106 ADVERTISED_Asym_Pause
);
11107 phydev
->advertising
|= newadv
;
11108 if (phydev
->autoneg
) {
11110 * Always renegotiate the link to
11111 * inform our link partner of our
11112 * flow control settings, even if the
11113 * flow control is forced. Let
11114 * tg3_adjust_link() do the final
11115 * flow control setup.
11117 return phy_start_aneg(phydev
);
11121 if (!epause
->autoneg
)
11122 tg3_setup_flow_control(tp
, 0, 0);
11124 tp
->link_config
.advertising
&=
11125 ~(ADVERTISED_Pause
|
11126 ADVERTISED_Asym_Pause
);
11127 tp
->link_config
.advertising
|= newadv
;
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
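
/* Editorial note: the newadv values chosen in the phylib branch follow the
 * usual 802.3 pause advertisement encoding (a sketch, not tg3-specific):
 *
 *	rx_pause  tx_pause   advertised bits
 *	   1         1       ADVERTISED_Pause
 *	   1         0       ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	   0         1       ADVERTISED_Asym_Pause
 *	   0         0       (nothing)
 *
 * tg3_adjust_link() / tg3_setup_flow_control() then resolve the final RX/TX
 * pause state once the link partner's abilities are known.
 */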
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->irq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_IRQ_MAX_VECS_RSS)
				info->data = TG3_IRQ_MAX_VECS_RSS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
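
/* Editorial note (user-space sketch, names outside this driver are
 * illustrative): ETHTOOL_GRXRINGS is how tools learn the usable RX queue
 * count, e.g.
 *
 *	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXRINGS };
 *	ifr.ifr_data = (void *)&nfc;
 *	ioctl(sock_fd, SIOCETHTOOL, &ifr);
 *	// nfc.data now holds the number of RX queues available for RSS
 *
 * When the interface is down the answer is an estimate from
 * num_online_cpus(), capped at TG3_IRQ_MAX_VECS_RSS.
 */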
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 size = 0;

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
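
/* Editorial sketch (assumption about how the RSS hardware consumes this
 * table, not taken from the original source): the NIC hashes each flow and
 * uses the low bits of the hash to index the indirection table, roughly
 *
 *	queue = tp->rss_ind_tbl[rss_hash % TG3_RSS_INDIR_TBL_SIZE];
 *
 * which is why the table can be rewritten while traffic is flowing -- new
 * packets simply start landing on the newly selected queues.
 */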
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
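
/* Editorial note: with set_phys_id(), "ethtool -p ethX" works as a small
 * state machine driven by the ethtool core: ETHTOOL_ID_ACTIVE returning 1
 * asks the core to call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per
 * second to blink the LEDs, and ETHTOOL_ID_INACTIVE restores tp->led_ctrl
 * when the user stops the identification.
 */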
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
11310 static __be32
*tg3_vpd_readblock(struct tg3
*tp
, u32
*vpdlen
)
11314 u32 offset
= 0, len
= 0;
11317 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
11320 if (magic
== TG3_EEPROM_MAGIC
) {
11321 for (offset
= TG3_NVM_DIR_START
;
11322 offset
< TG3_NVM_DIR_END
;
11323 offset
+= TG3_NVM_DIRENT_SIZE
) {
11324 if (tg3_nvram_read(tp
, offset
, &val
))
11327 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
11328 TG3_NVM_DIRTYPE_EXTVPD
)
11332 if (offset
!= TG3_NVM_DIR_END
) {
11333 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
11334 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
11337 offset
= tg3_nvram_logical_addr(tp
, offset
);
11341 if (!offset
|| !len
) {
11342 offset
= TG3_NVM_VPD_OFF
;
11343 len
= TG3_NVM_VPD_LEN
;
11346 buf
= kmalloc(len
, GFP_KERNEL
);
11350 if (magic
== TG3_EEPROM_MAGIC
) {
11351 for (i
= 0; i
< len
; i
+= 4) {
11352 /* The data is in little-endian format in NVRAM.
11353 * Use the big-endian read routines to preserve
11354 * the byte order as it exists in NVRAM.
11356 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
11362 unsigned int pos
= 0;
11364 ptr
= (u8
*)&buf
[0];
11365 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
11366 cnt
= pci_read_vpd(tp
->pdev
, pos
,
11368 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
11386 #define NVRAM_TEST_SIZE 0x100
11387 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11388 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11389 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11390 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11391 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11392 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11393 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11394 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11396 static int tg3_test_nvram(struct tg3
*tp
)
11398 u32 csum
, magic
, len
;
11400 int i
, j
, k
, err
= 0, size
;
11402 if (tg3_flag(tp
, NO_NVRAM
))
11405 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
11408 if (magic
== TG3_EEPROM_MAGIC
)
11409 size
= NVRAM_TEST_SIZE
;
11410 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
11411 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
11412 TG3_EEPROM_SB_FORMAT_1
) {
11413 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
11414 case TG3_EEPROM_SB_REVISION_0
:
11415 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
11417 case TG3_EEPROM_SB_REVISION_2
:
11418 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
11420 case TG3_EEPROM_SB_REVISION_3
:
11421 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
11423 case TG3_EEPROM_SB_REVISION_4
:
11424 size
= NVRAM_SELFBOOT_FORMAT1_4_SIZE
;
11426 case TG3_EEPROM_SB_REVISION_5
:
11427 size
= NVRAM_SELFBOOT_FORMAT1_5_SIZE
;
11429 case TG3_EEPROM_SB_REVISION_6
:
11430 size
= NVRAM_SELFBOOT_FORMAT1_6_SIZE
;
11437 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
11438 size
= NVRAM_SELFBOOT_HW_SIZE
;
11442 buf
= kmalloc(size
, GFP_KERNEL
);
11447 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
11448 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
11455 /* Selfboot format */
11456 magic
= be32_to_cpu(buf
[0]);
11457 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
11458 TG3_EEPROM_MAGIC_FW
) {
11459 u8
*buf8
= (u8
*) buf
, csum8
= 0;
11461 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
11462 TG3_EEPROM_SB_REVISION_2
) {
11463 /* For rev 2, the csum doesn't include the MBA. */
11464 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
11466 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
11469 for (i
= 0; i
< size
; i
++)
11482 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
11483 TG3_EEPROM_MAGIC_HW
) {
11484 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
11485 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
11486 u8
*buf8
= (u8
*) buf
;
11488 /* Separate the parity bits and the data bytes. */
11489 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
11490 if ((i
== 0) || (i
== 8)) {
11494 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
11495 parity
[k
++] = buf8
[i
] & msk
;
11497 } else if (i
== 16) {
11501 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
11502 parity
[k
++] = buf8
[i
] & msk
;
11505 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
11506 parity
[k
++] = buf8
[i
] & msk
;
11509 data
[j
++] = buf8
[i
];
11513 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
11514 u8 hw8
= hweight8(data
[i
]);
11516 if ((hw8
& 0x1) && parity
[i
])
11518 else if (!(hw8
& 0x1) && !parity
[i
])
11527 /* Bootstrap checksum at offset 0x10 */
11528 csum
= calc_crc((unsigned char *) buf
, 0x10);
11529 if (csum
!= le32_to_cpu(buf
[0x10/4]))
11532 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11533 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
11534 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
11539 buf
= tg3_vpd_readblock(tp
, &len
);
11543 i
= pci_vpd_find_tag((u8
*)buf
, 0, len
, PCI_VPD_LRDT_RO_DATA
);
11545 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
11549 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> len
)
11552 i
+= PCI_VPD_LRDT_TAG_SIZE
;
11553 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
11554 PCI_VPD_RO_KEYWORD_CHKSUM
);
11558 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
11560 for (i
= 0; i
<= j
; i
++)
11561 csum8
+= ((u8
*)buf
)[i
];
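
/* Editorial sketch: the loop above applies the standard PCI VPD "RV"
 * checksum rule -- the bytes from the start of the read-only VPD section up
 * to and including the checksum byte must sum to zero modulo 256.  In
 * isolation (names illustrative only):
 *
 *	u8 sum = 0;
 *	for (n = 0; n <= chksum_off; n++)
 *		sum += vpd[n];
 *	// sum == 0  =>  the VPD block is intact
 */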
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}
11601 /* Only test the commonly used registers */
11602 static int tg3_test_registers(struct tg3
*tp
)
11604 int i
, is_5705
, is_5750
;
11605 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
11609 #define TG3_FL_5705 0x1
11610 #define TG3_FL_NOT_5705 0x2
11611 #define TG3_FL_NOT_5788 0x4
11612 #define TG3_FL_NOT_5750 0x8
11616 /* MAC Control Registers */
11617 { MAC_MODE
, TG3_FL_NOT_5705
,
11618 0x00000000, 0x00ef6f8c },
11619 { MAC_MODE
, TG3_FL_5705
,
11620 0x00000000, 0x01ef6b8c },
11621 { MAC_STATUS
, TG3_FL_NOT_5705
,
11622 0x03800107, 0x00000000 },
11623 { MAC_STATUS
, TG3_FL_5705
,
11624 0x03800100, 0x00000000 },
11625 { MAC_ADDR_0_HIGH
, 0x0000,
11626 0x00000000, 0x0000ffff },
11627 { MAC_ADDR_0_LOW
, 0x0000,
11628 0x00000000, 0xffffffff },
11629 { MAC_RX_MTU_SIZE
, 0x0000,
11630 0x00000000, 0x0000ffff },
11631 { MAC_TX_MODE
, 0x0000,
11632 0x00000000, 0x00000070 },
11633 { MAC_TX_LENGTHS
, 0x0000,
11634 0x00000000, 0x00003fff },
11635 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
11636 0x00000000, 0x000007fc },
11637 { MAC_RX_MODE
, TG3_FL_5705
,
11638 0x00000000, 0x000007dc },
11639 { MAC_HASH_REG_0
, 0x0000,
11640 0x00000000, 0xffffffff },
11641 { MAC_HASH_REG_1
, 0x0000,
11642 0x00000000, 0xffffffff },
11643 { MAC_HASH_REG_2
, 0x0000,
11644 0x00000000, 0xffffffff },
11645 { MAC_HASH_REG_3
, 0x0000,
11646 0x00000000, 0xffffffff },
11648 /* Receive Data and Receive BD Initiator Control Registers. */
11649 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
11650 0x00000000, 0xffffffff },
11651 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
11652 0x00000000, 0xffffffff },
11653 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
11654 0x00000000, 0x00000003 },
11655 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
11656 0x00000000, 0xffffffff },
11657 { RCVDBDI_STD_BD
+0, 0x0000,
11658 0x00000000, 0xffffffff },
11659 { RCVDBDI_STD_BD
+4, 0x0000,
11660 0x00000000, 0xffffffff },
11661 { RCVDBDI_STD_BD
+8, 0x0000,
11662 0x00000000, 0xffff0002 },
11663 { RCVDBDI_STD_BD
+0xc, 0x0000,
11664 0x00000000, 0xffffffff },
11666 /* Receive BD Initiator Control Registers. */
11667 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
11668 0x00000000, 0xffffffff },
11669 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
11670 0x00000000, 0x000003ff },
11671 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
11672 0x00000000, 0xffffffff },
11674 /* Host Coalescing Control Registers. */
11675 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
11676 0x00000000, 0x00000004 },
11677 { HOSTCC_MODE
, TG3_FL_5705
,
11678 0x00000000, 0x000000f6 },
11679 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
11680 0x00000000, 0xffffffff },
11681 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
11682 0x00000000, 0x000003ff },
11683 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
11684 0x00000000, 0xffffffff },
11685 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
11686 0x00000000, 0x000003ff },
11687 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
11688 0x00000000, 0xffffffff },
11689 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
11690 0x00000000, 0x000000ff },
11691 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
11692 0x00000000, 0xffffffff },
11693 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
11694 0x00000000, 0x000000ff },
11695 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
11696 0x00000000, 0xffffffff },
11697 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
11698 0x00000000, 0xffffffff },
11699 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
11700 0x00000000, 0xffffffff },
11701 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
11702 0x00000000, 0x000000ff },
11703 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
11704 0x00000000, 0xffffffff },
11705 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
11706 0x00000000, 0x000000ff },
11707 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
11708 0x00000000, 0xffffffff },
11709 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
11710 0x00000000, 0xffffffff },
11711 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
11712 0x00000000, 0xffffffff },
11713 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
11714 0x00000000, 0xffffffff },
11715 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
11716 0x00000000, 0xffffffff },
11717 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
11718 0xffffffff, 0x00000000 },
11719 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
11720 0xffffffff, 0x00000000 },
11722 /* Buffer Manager Control Registers. */
11723 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
11724 0x00000000, 0x007fff80 },
11725 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
11726 0x00000000, 0x007fffff },
11727 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
11728 0x00000000, 0x0000003f },
11729 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
11730 0x00000000, 0x000001ff },
11731 { BUFMGR_MB_HIGH_WATER
, 0x0000,
11732 0x00000000, 0x000001ff },
11733 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
11734 0xffffffff, 0x00000000 },
11735 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
11736 0xffffffff, 0x00000000 },
11738 /* Mailbox Registers */
11739 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
11740 0x00000000, 0x000001ff },
11741 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
11742 0x00000000, 0x000001ff },
11743 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
11744 0x00000000, 0x000007ff },
11745 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
11746 0x00000000, 0x000001ff },
11748 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11751 is_5705
= is_5750
= 0;
11752 if (tg3_flag(tp
, 5705_PLUS
)) {
11754 if (tg3_flag(tp
, 5750_PLUS
))
11758 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
11759 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
11762 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
11765 if (tg3_flag(tp
, IS_5788
) &&
11766 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
11769 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
11772 offset
= (u32
) reg_tbl
[i
].offset
;
11773 read_mask
= reg_tbl
[i
].read_mask
;
11774 write_mask
= reg_tbl
[i
].write_mask
;
11776 /* Save the original register content */
11777 save_val
= tr32(offset
);
11779 /* Determine the read-only value. */
11780 read_val
= save_val
& read_mask
;
11782 /* Write zero to the register, then make sure the read-only bits
11783 * are not changed and the read/write bits are all zeros.
11787 val
= tr32(offset
);
11789 /* Test the read-only and read/write bits. */
11790 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
11793 /* Write ones to all the bits defined by RdMask and WrMask, then
11794 * make sure the read-only bits are not changed and the
11795 * read/write bits are all ones.
11797 tw32(offset
, read_mask
| write_mask
);
11799 val
= tr32(offset
);
11801 /* Test the read-only bits. */
11802 if ((val
& read_mask
) != read_val
)
11805 /* Test the read/write bits. */
11806 if ((val
& write_mask
) != write_mask
)
11809 tw32(offset
, save_val
);
11815 if (netif_msg_hw(tp
))
11816 netdev_err(tp
->dev
,
11817 "Register test failed at offset %x\n", offset
);
11818 tw32(offset
, save_val
);
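
/* Editorial note: each reg_tbl[] entry above is exercised with a two-pass
 * pattern -- write all zeros, then write (read_mask | write_mask) -- and
 * after each write the register must satisfy
 *
 *	(val & read_mask)  == read_val	// read-only bits unchanged
 *	(val & write_mask) == written	// read/write bits took the value
 *
 * before the saved contents are restored, so a single stuck bit in either
 * direction fails the register test.
 */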
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j, val;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
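
/* Editorial note: the memory self-test walks each on-chip window with three
 * patterns (all zeros, all ones, 0xaa55a55a), writing and immediately
 * reading back every 32-bit word through tg3_write_mem()/tg3_read_mem().
 * The alternating pattern is there to catch shorted or coupled data lines
 * that the solid patterns would miss.
 */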
11841 static int tg3_test_memory(struct tg3
*tp
)
11843 static struct mem_entry
{
11846 } mem_tbl_570x
[] = {
11847 { 0x00000000, 0x00b50},
11848 { 0x00002000, 0x1c000},
11849 { 0xffffffff, 0x00000}
11850 }, mem_tbl_5705
[] = {
11851 { 0x00000100, 0x0000c},
11852 { 0x00000200, 0x00008},
11853 { 0x00004000, 0x00800},
11854 { 0x00006000, 0x01000},
11855 { 0x00008000, 0x02000},
11856 { 0x00010000, 0x0e000},
11857 { 0xffffffff, 0x00000}
11858 }, mem_tbl_5755
[] = {
11859 { 0x00000200, 0x00008},
11860 { 0x00004000, 0x00800},
11861 { 0x00006000, 0x00800},
11862 { 0x00008000, 0x02000},
11863 { 0x00010000, 0x0c000},
11864 { 0xffffffff, 0x00000}
11865 }, mem_tbl_5906
[] = {
11866 { 0x00000200, 0x00008},
11867 { 0x00004000, 0x00400},
11868 { 0x00006000, 0x00400},
11869 { 0x00008000, 0x01000},
11870 { 0x00010000, 0x01000},
11871 { 0xffffffff, 0x00000}
11872 }, mem_tbl_5717
[] = {
11873 { 0x00000200, 0x00008},
11874 { 0x00010000, 0x0a000},
11875 { 0x00020000, 0x13c00},
11876 { 0xffffffff, 0x00000}
11877 }, mem_tbl_57765
[] = {
11878 { 0x00000200, 0x00008},
11879 { 0x00004000, 0x00800},
11880 { 0x00006000, 0x09800},
11881 { 0x00010000, 0x0a000},
11882 { 0xffffffff, 0x00000}
11884 struct mem_entry
*mem_tbl
;
11888 if (tg3_flag(tp
, 5717_PLUS
))
11889 mem_tbl
= mem_tbl_5717
;
11890 else if (tg3_flag(tp
, 57765_CLASS
))
11891 mem_tbl
= mem_tbl_57765
;
11892 else if (tg3_flag(tp
, 5755_PLUS
))
11893 mem_tbl
= mem_tbl_5755
;
11894 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
11895 mem_tbl
= mem_tbl_5906
;
11896 else if (tg3_flag(tp
, 5705_PLUS
))
11897 mem_tbl
= mem_tbl_5705
;
11899 mem_tbl
= mem_tbl_570x
;
11901 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
11902 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
11910 #define TG3_TSO_MSS 500
11912 #define TG3_TSO_IP_HDR_LEN 20
11913 #define TG3_TSO_TCP_HDR_LEN 20
11914 #define TG3_TSO_TCP_OPT_LEN 12
11916 static const u8 tg3_tso_header
[] = {
11918 0x45, 0x00, 0x00, 0x00,
11919 0x00, 0x00, 0x40, 0x00,
11920 0x40, 0x06, 0x00, 0x00,
11921 0x0a, 0x00, 0x00, 0x01,
11922 0x0a, 0x00, 0x00, 0x02,
11923 0x0d, 0x00, 0xe0, 0x00,
11924 0x00, 0x00, 0x01, 0x00,
11925 0x00, 0x00, 0x02, 0x00,
11926 0x80, 0x10, 0x10, 0x00,
11927 0x14, 0x09, 0x00, 0x00,
11928 0x01, 0x01, 0x08, 0x0a,
11929 0x11, 0x11, 0x11, 0x11,
11930 0x11, 0x11, 0x11, 0x11,
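
/* Editorial note: tg3_tso_header[] is a canned IPv4 header (0x45 = version
 * 4, 5-word header, i.e. TG3_TSO_IP_HDR_LEN) followed by a TCP header with
 * a 12-byte option block (TG3_TSO_TCP_OPT_LEN).  It forms the front of the
 * oversized loopback frame; tg3_run_loopback() patches iph->tot_len and the
 * checksum-related fields before transmit so the hardware TSO engine has a
 * realistic segment to split at TG3_TSO_MSS-byte boundaries.
 */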
11933 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, bool tso_loopback
)
11935 u32 rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
11936 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
11938 struct sk_buff
*skb
;
11939 u8
*tx_data
, *rx_data
;
11941 int num_pkts
, tx_len
, rx_len
, i
, err
;
11942 struct tg3_rx_buffer_desc
*desc
;
11943 struct tg3_napi
*tnapi
, *rnapi
;
11944 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
11946 tnapi
= &tp
->napi
[0];
11947 rnapi
= &tp
->napi
[0];
11948 if (tp
->irq_cnt
> 1) {
11949 if (tg3_flag(tp
, ENABLE_RSS
))
11950 rnapi
= &tp
->napi
[1];
11951 if (tg3_flag(tp
, ENABLE_TSS
))
11952 tnapi
= &tp
->napi
[1];
11954 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
11959 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
11963 tx_data
= skb_put(skb
, tx_len
);
11964 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
11965 memset(tx_data
+ 6, 0x0, 8);
11967 tw32(MAC_RX_MTU_SIZE
, tx_len
+ ETH_FCS_LEN
);
11969 if (tso_loopback
) {
11970 struct iphdr
*iph
= (struct iphdr
*)&tx_data
[ETH_HLEN
];
11972 u32 hdr_len
= TG3_TSO_IP_HDR_LEN
+ TG3_TSO_TCP_HDR_LEN
+
11973 TG3_TSO_TCP_OPT_LEN
;
11975 memcpy(tx_data
+ ETH_ALEN
* 2, tg3_tso_header
,
11976 sizeof(tg3_tso_header
));
11979 val
= tx_len
- ETH_ALEN
* 2 - sizeof(tg3_tso_header
);
11980 num_pkts
= DIV_ROUND_UP(val
, TG3_TSO_MSS
);
11982 /* Set the total length field in the IP header */
11983 iph
->tot_len
= htons((u16
)(mss
+ hdr_len
));
11985 base_flags
= (TXD_FLAG_CPU_PRE_DMA
|
11986 TXD_FLAG_CPU_POST_DMA
);
11988 if (tg3_flag(tp
, HW_TSO_1
) ||
11989 tg3_flag(tp
, HW_TSO_2
) ||
11990 tg3_flag(tp
, HW_TSO_3
)) {
11992 val
= ETH_HLEN
+ TG3_TSO_IP_HDR_LEN
;
11993 th
= (struct tcphdr
*)&tx_data
[val
];
11996 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
11998 if (tg3_flag(tp
, HW_TSO_3
)) {
11999 mss
|= (hdr_len
& 0xc) << 12;
12000 if (hdr_len
& 0x10)
12001 base_flags
|= 0x00000010;
12002 base_flags
|= (hdr_len
& 0x3e0) << 5;
12003 } else if (tg3_flag(tp
, HW_TSO_2
))
12004 mss
|= hdr_len
<< 9;
12005 else if (tg3_flag(tp
, HW_TSO_1
) ||
12006 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
12007 mss
|= (TG3_TSO_TCP_OPT_LEN
<< 9);
12009 base_flags
|= (TG3_TSO_TCP_OPT_LEN
<< 10);
12012 data_off
= ETH_ALEN
* 2 + sizeof(tg3_tso_header
);
12015 data_off
= ETH_HLEN
;
12017 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
12018 tx_len
> VLAN_ETH_FRAME_LEN
)
12019 base_flags
|= TXD_FLAG_JMB_PKT
;
12022 for (i
= data_off
; i
< tx_len
; i
++)
12023 tx_data
[i
] = (u8
) (i
& 0xff);
12025 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
12026 if (pci_dma_mapping_error(tp
->pdev
, map
)) {
12027 dev_kfree_skb(skb
);
12031 val
= tnapi
->tx_prod
;
12032 tnapi
->tx_buffers
[val
].skb
= skb
;
12033 dma_unmap_addr_set(&tnapi
->tx_buffers
[val
], mapping
, map
);
12035 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
12040 rx_start_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
12042 budget
= tg3_tx_avail(tnapi
);
12043 if (tg3_tx_frag_set(tnapi
, &val
, &budget
, map
, tx_len
,
12044 base_flags
| TXD_FLAG_END
, mss
, 0)) {
12045 tnapi
->tx_buffers
[val
].skb
= NULL
;
12046 dev_kfree_skb(skb
);
12052 /* Sync BD data before updating mailbox */
12055 tw32_tx_mbox(tnapi
->prodmbox
, tnapi
->tx_prod
);
12056 tr32_mailbox(tnapi
->prodmbox
);
12060 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12061 for (i
= 0; i
< 35; i
++) {
12062 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
12067 tx_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
12068 rx_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
12069 if ((tx_idx
== tnapi
->tx_prod
) &&
12070 (rx_idx
== (rx_start_idx
+ num_pkts
)))
12074 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
- 1, -1);
12075 dev_kfree_skb(skb
);
12077 if (tx_idx
!= tnapi
->tx_prod
)
12080 if (rx_idx
!= rx_start_idx
+ num_pkts
)
12084 while (rx_idx
!= rx_start_idx
) {
12085 desc
= &rnapi
->rx_rcb
[rx_start_idx
++];
12086 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
12087 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
12089 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
12090 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
12093 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
)
12096 if (!tso_loopback
) {
12097 if (rx_len
!= tx_len
)
12100 if (pktsz
<= TG3_RX_STD_DMA_SZ
- ETH_FCS_LEN
) {
12101 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
12104 if (opaque_key
!= RXD_OPAQUE_RING_JUMBO
)
12107 } else if ((desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
12108 (desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
12109 >> RXD_TCPCSUM_SHIFT
!= 0xffff) {
12113 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
12114 rx_data
= tpr
->rx_std_buffers
[desc_idx
].data
;
12115 map
= dma_unmap_addr(&tpr
->rx_std_buffers
[desc_idx
],
12117 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
12118 rx_data
= tpr
->rx_jmb_buffers
[desc_idx
].data
;
12119 map
= dma_unmap_addr(&tpr
->rx_jmb_buffers
[desc_idx
],
12124 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
,
12125 PCI_DMA_FROMDEVICE
);
12127 rx_data
+= TG3_RX_OFFSET(tp
);
12128 for (i
= data_off
; i
< rx_len
; i
++, val
++) {
12129 if (*(rx_data
+ i
) != (u8
) (val
& 0xff))
12136 /* tg3_free_rings will unmap and free the rx_data */
12141 #define TG3_STD_LOOPBACK_FAILED 1
12142 #define TG3_JMB_LOOPBACK_FAILED 2
12143 #define TG3_TSO_LOOPBACK_FAILED 4
12144 #define TG3_LOOPBACK_FAILED \
12145 (TG3_STD_LOOPBACK_FAILED | \
12146 TG3_JMB_LOOPBACK_FAILED | \
12147 TG3_TSO_LOOPBACK_FAILED)
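
/* Editorial note: tg3_test_loopback() fills three u64 slots of the ethtool
 * self-test result array -- data[0] for MAC loopback, data[1] for internal
 * PHY loopback and data[2] for external PHY loopback -- and each slot is a
 * bitmask of the STD/JMB/TSO failure flags defined above, so a value of 0
 * means every packet size tried on that path came back intact.
 */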
12149 static int tg3_test_loopback(struct tg3
*tp
, u64
*data
, bool do_extlpbk
)
12153 u32 jmb_pkt_sz
= 9000;
12156 jmb_pkt_sz
= tp
->dma_limit
- ETH_HLEN
;
12158 eee_cap
= tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
;
12159 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
12161 if (!netif_running(tp
->dev
)) {
12162 data
[0] = TG3_LOOPBACK_FAILED
;
12163 data
[1] = TG3_LOOPBACK_FAILED
;
12165 data
[2] = TG3_LOOPBACK_FAILED
;
12169 err
= tg3_reset_hw(tp
, 1);
12171 data
[0] = TG3_LOOPBACK_FAILED
;
12172 data
[1] = TG3_LOOPBACK_FAILED
;
12174 data
[2] = TG3_LOOPBACK_FAILED
;
12178 if (tg3_flag(tp
, ENABLE_RSS
)) {
12181 /* Reroute all rx packets to the 1st queue */
12182 for (i
= MAC_RSS_INDIR_TBL_0
;
12183 i
< MAC_RSS_INDIR_TBL_0
+ TG3_RSS_INDIR_TBL_SIZE
; i
+= 4)
12187 /* HW errata - mac loopback fails in some cases on 5780.
12188 * Normal traffic and PHY loopback are not affected by
12189 * errata. Also, the MAC loopback test is deprecated for
12190 * all newer ASIC revisions.
12192 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5780
&&
12193 !tg3_flag(tp
, CPMU_PRESENT
)) {
12194 tg3_mac_loopback(tp
, true);
12196 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12197 data
[0] |= TG3_STD_LOOPBACK_FAILED
;
12199 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12200 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12201 data
[0] |= TG3_JMB_LOOPBACK_FAILED
;
12203 tg3_mac_loopback(tp
, false);
12206 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
12207 !tg3_flag(tp
, USE_PHYLIB
)) {
12210 tg3_phy_lpbk_set(tp
, 0, false);
12212 /* Wait for link */
12213 for (i
= 0; i
< 100; i
++) {
12214 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
12219 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12220 data
[1] |= TG3_STD_LOOPBACK_FAILED
;
12221 if (tg3_flag(tp
, TSO_CAPABLE
) &&
12222 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
12223 data
[1] |= TG3_TSO_LOOPBACK_FAILED
;
12224 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12225 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12226 data
[1] |= TG3_JMB_LOOPBACK_FAILED
;
12229 tg3_phy_lpbk_set(tp
, 0, true);
12231 /* All link indications report up, but the hardware
12232 * isn't really ready for about 20 msec. Double it
12237 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12238 data
[2] |= TG3_STD_LOOPBACK_FAILED
;
12239 if (tg3_flag(tp
, TSO_CAPABLE
) &&
12240 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
12241 data
[2] |= TG3_TSO_LOOPBACK_FAILED
;
12242 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12243 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12244 data
[2] |= TG3_JMB_LOOPBACK_FAILED
;
12247 /* Re-enable gphy autopowerdown. */
12248 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
12249 tg3_phy_toggle_apd(tp
, true);
12252 err
= (data
[0] | data
[1] | data
[2]) ? -EIO
: 0;
12255 tp
->phy_flags
|= eee_cap
;
12260 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
12263 struct tg3
*tp
= netdev_priv(dev
);
12264 bool doextlpbk
= etest
->flags
& ETH_TEST_FL_EXTERNAL_LB
;
12266 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
12267 tg3_power_up(tp
)) {
12268 etest
->flags
|= ETH_TEST_FL_FAILED
;
12269 memset(data
, 1, sizeof(u64
) * TG3_NUM_TEST
);
12273 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
12275 if (tg3_test_nvram(tp
) != 0) {
12276 etest
->flags
|= ETH_TEST_FL_FAILED
;
12279 if (!doextlpbk
&& tg3_test_link(tp
)) {
12280 etest
->flags
|= ETH_TEST_FL_FAILED
;
12283 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
12284 int err
, err2
= 0, irq_sync
= 0;
12286 if (netif_running(dev
)) {
12288 tg3_netif_stop(tp
);
12292 tg3_full_lock(tp
, irq_sync
);
12294 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
12295 err
= tg3_nvram_lock(tp
);
12296 tg3_halt_cpu(tp
, RX_CPU_BASE
);
12297 if (!tg3_flag(tp
, 5705_PLUS
))
12298 tg3_halt_cpu(tp
, TX_CPU_BASE
);
12300 tg3_nvram_unlock(tp
);
12302 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
12305 if (tg3_test_registers(tp
) != 0) {
12306 etest
->flags
|= ETH_TEST_FL_FAILED
;
12310 if (tg3_test_memory(tp
) != 0) {
12311 etest
->flags
|= ETH_TEST_FL_FAILED
;
12316 etest
->flags
|= ETH_TEST_FL_EXTERNAL_LB_DONE
;
12318 if (tg3_test_loopback(tp
, &data
[4], doextlpbk
))
12319 etest
->flags
|= ETH_TEST_FL_FAILED
;
12321 tg3_full_unlock(tp
);
12323 if (tg3_test_interrupt(tp
) != 0) {
12324 etest
->flags
|= ETH_TEST_FL_FAILED
;
12328 tg3_full_lock(tp
, 0);
12330 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12331 if (netif_running(dev
)) {
12332 tg3_flag_set(tp
, INIT_COMPLETE
);
12333 err2
= tg3_restart_hw(tp
, 1);
12335 tg3_netif_start(tp
);
12338 tg3_full_unlock(tp
);
12340 if (irq_sync
&& !err2
)
12343 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
12344 tg3_power_down(tp
);
12348 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
12350 struct mii_ioctl_data
*data
= if_mii(ifr
);
12351 struct tg3
*tp
= netdev_priv(dev
);
12354 if (tg3_flag(tp
, USE_PHYLIB
)) {
12355 struct phy_device
*phydev
;
12356 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
12358 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
12359 return phy_mii_ioctl(phydev
, ifr
, cmd
);
12364 data
->phy_id
= tp
->phy_addr
;
12367 case SIOCGMIIREG
: {
12370 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
12371 break; /* We have no PHY */
12373 if (!netif_running(dev
))
12376 spin_lock_bh(&tp
->lock
);
12377 err
= tg3_readphy(tp
, data
->reg_num
& 0x1f, &mii_regval
);
12378 spin_unlock_bh(&tp
->lock
);
12380 data
->val_out
= mii_regval
;
12386 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
12387 break; /* We have no PHY */
12389 if (!netif_running(dev
))
12392 spin_lock_bh(&tp
->lock
);
12393 err
= tg3_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
12394 spin_unlock_bh(&tp
->lock
);
12402 return -EOPNOTSUPP
;
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}

	return 0;
}
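
/* Editorial note (user-space sketch, not from the original source): these
 * fields are the ones "ethtool -C" can influence, e.g.
 *
 *	struct ethtool_coalesce ec = { .cmd = ETHTOOL_SCOALESCE };
 *	ec.rx_coalesce_usecs = 20;
 *	ec.rx_max_coalesced_frames = 5;
 *	ifr.ifr_data = (void *)&ec;
 *	ioctl(sock_fd, SIOCETHTOOL, &ifr);
 *
 * Anything outside the MAX_*_TICKS / MAX_*_FRAMES bounds, or a setting that
 * would disable RX or TX interrupts entirely, is rejected with -EINVAL
 * before tp->coal is touched.
 */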
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_ts_info		= ethtool_op_get_ts_info,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
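
/* Editorial note: an MTU change on a running interface is therefore a full
 * stop/halt/restart cycle -- tg3_set_mtu() only records the new size and
 * flips TSO_CAPABLE/JUMBO_RING_ENABLE, while tg3_restart_hw() re-sizes the
 * rings and, on 57766 parts, resets the PHY so the read DMA engine does not
 * stay in its 256-byte-request mode.
 */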
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
12613 static void __devinit
tg3_get_eeprom_size(struct tg3
*tp
)
12615 u32 cursize
, val
, magic
;
12617 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
12619 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
12622 if ((magic
!= TG3_EEPROM_MAGIC
) &&
12623 ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
) &&
12624 ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) != TG3_EEPROM_MAGIC_HW
))
12628 * Size the chip by reading offsets at increasing powers of two.
12629 * When we encounter our validation signature, we know the addressing
12630 * has wrapped around, and thus have our chip size.
12634 while (cursize
< tp
->nvram_size
) {
12635 if (tg3_nvram_read(tp
, cursize
, &val
) != 0)
12644 tp
->nvram_size
= cursize
;
12647 static void __devinit
tg3_get_nvram_size(struct tg3
*tp
)
12651 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &val
) != 0)
12654 /* Selfboot format */
12655 if (val
!= TG3_EEPROM_MAGIC
) {
12656 tg3_get_eeprom_size(tp
);
12660 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
12662 /* This is confusing. We want to operate on the
12663 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12664 * call will read from NVRAM and byteswap the data
12665 * according to the byteswapping settings for all
12666 * other register accesses. This ensures the data we
12667 * want will always reside in the lower 16-bits.
12668 * However, the data in NVRAM is in LE format, which
12669 * means the data from the NVRAM read will always be
12670 * opposite the endianness of the CPU. The 16-bit
12671 * byteswap then brings the data to CPU endianness.
12673 tp
->nvram_size
= swab16((u16
)(val
& 0x0000ffff)) * 1024;
12677 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
12680 static void __devinit
tg3_get_nvram_info(struct tg3
*tp
)
12684 nvcfg1
= tr32(NVRAM_CFG1
);
12685 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
12686 tg3_flag_set(tp
, FLASH
);
12688 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
12689 tw32(NVRAM_CFG1
, nvcfg1
);
12692 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
12693 tg3_flag(tp
, 5780_CLASS
)) {
12694 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
12695 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
12696 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12697 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
12698 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12700 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
12701 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12702 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
12704 case FLASH_VENDOR_ATMEL_EEPROM
:
12705 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12706 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
12707 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12709 case FLASH_VENDOR_ST
:
12710 tp
->nvram_jedecnum
= JEDEC_ST
;
12711 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
12712 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12714 case FLASH_VENDOR_SAIFUN
:
12715 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
12716 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
12718 case FLASH_VENDOR_SST_SMALL
:
12719 case FLASH_VENDOR_SST_LARGE
:
12720 tp
->nvram_jedecnum
= JEDEC_SST
;
12721 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
12725 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12726 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
12727 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12731 static void __devinit
tg3_nvram_get_pagesize(struct tg3
*tp
, u32 nvmcfg1
)
12733 switch (nvmcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
12734 case FLASH_5752PAGE_SIZE_256
:
12735 tp
->nvram_pagesize
= 256;
12737 case FLASH_5752PAGE_SIZE_512
:
12738 tp
->nvram_pagesize
= 512;
12740 case FLASH_5752PAGE_SIZE_1K
:
12741 tp
->nvram_pagesize
= 1024;
12743 case FLASH_5752PAGE_SIZE_2K
:
12744 tp
->nvram_pagesize
= 2048;
12746 case FLASH_5752PAGE_SIZE_4K
:
12747 tp
->nvram_pagesize
= 4096;
12749 case FLASH_5752PAGE_SIZE_264
:
12750 tp
->nvram_pagesize
= 264;
12752 case FLASH_5752PAGE_SIZE_528
:
12753 tp
->nvram_pagesize
= 528;
12758 static void __devinit
tg3_get_5752_nvram_info(struct tg3
*tp
)
12762 nvcfg1
= tr32(NVRAM_CFG1
);
12764 /* NVRAM protection for TPM */
12765 if (nvcfg1
& (1 << 27))
12766 tg3_flag_set(tp
, PROTECTED_NVRAM
);
12768 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12769 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
12770 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
12771 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12772 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12774 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
12775 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12776 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12777 tg3_flag_set(tp
, FLASH
);
12779 case FLASH_5752VENDOR_ST_M45PE10
:
12780 case FLASH_5752VENDOR_ST_M45PE20
:
12781 case FLASH_5752VENDOR_ST_M45PE40
:
12782 tp
->nvram_jedecnum
= JEDEC_ST
;
12783 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12784 tg3_flag_set(tp
, FLASH
);
12788 if (tg3_flag(tp
, FLASH
)) {
12789 tg3_nvram_get_pagesize(tp
, nvcfg1
);
12791 /* For eeprom, set pagesize to maximum eeprom size */
12792 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
12794 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
12795 tw32(NVRAM_CFG1
, nvcfg1
);
12799 static void __devinit
tg3_get_5755_nvram_info(struct tg3
*tp
)
12801 u32 nvcfg1
, protect
= 0;
12803 nvcfg1
= tr32(NVRAM_CFG1
);
12805 /* NVRAM protection for TPM */
12806 if (nvcfg1
& (1 << 27)) {
12807 tg3_flag_set(tp
, PROTECTED_NVRAM
);
12811 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
12813 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
12814 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
12815 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
12816 case FLASH_5755VENDOR_ATMEL_FLASH_5
:
12817 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12818 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12819 tg3_flag_set(tp
, FLASH
);
12820 tp
->nvram_pagesize
= 264;
12821 if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_1
||
12822 nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_5
)
12823 tp
->nvram_size
= (protect
? 0x3e200 :
12824 TG3_NVRAM_SIZE_512KB
);
12825 else if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_2
)
12826 tp
->nvram_size
= (protect
? 0x1f200 :
12827 TG3_NVRAM_SIZE_256KB
);
12829 tp
->nvram_size
= (protect
? 0x1f200 :
12830 TG3_NVRAM_SIZE_128KB
);
12832 case FLASH_5752VENDOR_ST_M45PE10
:
12833 case FLASH_5752VENDOR_ST_M45PE20
:
12834 case FLASH_5752VENDOR_ST_M45PE40
:
12835 tp
->nvram_jedecnum
= JEDEC_ST
;
12836 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12837 tg3_flag_set(tp
, FLASH
);
12838 tp
->nvram_pagesize
= 256;
12839 if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE10
)
12840 tp
->nvram_size
= (protect
?
12841 TG3_NVRAM_SIZE_64KB
:
12842 TG3_NVRAM_SIZE_128KB
);
12843 else if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE20
)
12844 tp
->nvram_size
= (protect
?
12845 TG3_NVRAM_SIZE_64KB
:
12846 TG3_NVRAM_SIZE_256KB
);
12848 tp
->nvram_size
= (protect
?
12849 TG3_NVRAM_SIZE_128KB
:
12850 TG3_NVRAM_SIZE_512KB
);
12855 static void __devinit
tg3_get_5787_nvram_info(struct tg3
*tp
)
12859 nvcfg1
= tr32(NVRAM_CFG1
);
12861 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12862 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
12863 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
12864 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
12865 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
12866 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12867 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12868 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
12870 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
12871 tw32(NVRAM_CFG1
, nvcfg1
);
12873 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
12874 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
12875 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
12876 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
12877 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12878 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12879 tg3_flag_set(tp
, FLASH
);
12880 tp
->nvram_pagesize
= 264;
12882 case FLASH_5752VENDOR_ST_M45PE10
:
12883 case FLASH_5752VENDOR_ST_M45PE20
:
12884 case FLASH_5752VENDOR_ST_M45PE40
:
12885 tp
->nvram_jedecnum
= JEDEC_ST
;
12886 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12887 tg3_flag_set(tp
, FLASH
);
12888 tp
->nvram_pagesize
= 256;
12893 static void __devinit
tg3_get_5761_nvram_info(struct tg3
*tp
)
12895 u32 nvcfg1
, protect
= 0;
12897 nvcfg1
= tr32(NVRAM_CFG1
);
12899 /* NVRAM protection for TPM */
12900 if (nvcfg1
& (1 << 27)) {
12901 tg3_flag_set(tp
, PROTECTED_NVRAM
);
12905 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
12907 case FLASH_5761VENDOR_ATMEL_ADB021D
:
12908 case FLASH_5761VENDOR_ATMEL_ADB041D
:
12909 case FLASH_5761VENDOR_ATMEL_ADB081D
:
12910 case FLASH_5761VENDOR_ATMEL_ADB161D
:
12911 case FLASH_5761VENDOR_ATMEL_MDB021D
:
12912 case FLASH_5761VENDOR_ATMEL_MDB041D
:
12913 case FLASH_5761VENDOR_ATMEL_MDB081D
:
12914 case FLASH_5761VENDOR_ATMEL_MDB161D
:
12915 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12916 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12917 tg3_flag_set(tp
, FLASH
);
12918 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
12919 tp
->nvram_pagesize
= 256;
12921 case FLASH_5761VENDOR_ST_A_M45PE20
:
12922 case FLASH_5761VENDOR_ST_A_M45PE40
:
12923 case FLASH_5761VENDOR_ST_A_M45PE80
:
12924 case FLASH_5761VENDOR_ST_A_M45PE16
:
12925 case FLASH_5761VENDOR_ST_M_M45PE20
:
12926 case FLASH_5761VENDOR_ST_M_M45PE40
:
12927 case FLASH_5761VENDOR_ST_M_M45PE80
:
12928 case FLASH_5761VENDOR_ST_M_M45PE16
:
12929 tp
->nvram_jedecnum
= JEDEC_ST
;
12930 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12931 tg3_flag_set(tp
, FLASH
);
12932 tp
->nvram_pagesize
= 256;
12937 tp
->nvram_size
= tr32(NVRAM_ADDR_LOCKOUT
);
12940 case FLASH_5761VENDOR_ATMEL_ADB161D
:
12941 case FLASH_5761VENDOR_ATMEL_MDB161D
:
12942 case FLASH_5761VENDOR_ST_A_M45PE16
:
12943 case FLASH_5761VENDOR_ST_M_M45PE16
:
12944 tp
->nvram_size
= TG3_NVRAM_SIZE_2MB
;
12946 case FLASH_5761VENDOR_ATMEL_ADB081D
:
12947 case FLASH_5761VENDOR_ATMEL_MDB081D
:
12948 case FLASH_5761VENDOR_ST_A_M45PE80
:
12949 case FLASH_5761VENDOR_ST_M_M45PE80
:
12950 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
12952 case FLASH_5761VENDOR_ATMEL_ADB041D
:
12953 case FLASH_5761VENDOR_ATMEL_MDB041D
:
12954 case FLASH_5761VENDOR_ST_A_M45PE40
:
12955 case FLASH_5761VENDOR_ST_M_M45PE40
:
12956 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
12958 case FLASH_5761VENDOR_ATMEL_ADB021D
:
12959 case FLASH_5761VENDOR_ATMEL_MDB021D
:
12960 case FLASH_5761VENDOR_ST_A_M45PE20
:
12961 case FLASH_5761VENDOR_ST_M_M45PE20
:
12962 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12968 static void __devinit
tg3_get_5906_nvram_info(struct tg3
*tp
)
12970 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12971 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12972 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
12975 static void __devinit
tg3_get_57780_nvram_info(struct tg3
*tp
)
12979 nvcfg1
= tr32(NVRAM_CFG1
);
12981 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12982 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
12983 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
12984 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12985 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12986 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
12988 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
12989 tw32(NVRAM_CFG1
, nvcfg1
);
12991 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
12992 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
12993 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
12994 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
12995 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
12996 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
12997 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
12998 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12999 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13000 tg3_flag_set(tp
, FLASH
);
13002 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13003 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
13004 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
13005 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
13006 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13008 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
13009 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
13010 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13012 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
13013 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
13014 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13018 case FLASH_5752VENDOR_ST_M45PE10
:
13019 case FLASH_5752VENDOR_ST_M45PE20
:
13020 case FLASH_5752VENDOR_ST_M45PE40
:
13021 tp
->nvram_jedecnum
= JEDEC_ST
;
13022 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13023 tg3_flag_set(tp
, FLASH
);
13025 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13026 case FLASH_5752VENDOR_ST_M45PE10
:
13027 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13029 case FLASH_5752VENDOR_ST_M45PE20
:
13030 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13032 case FLASH_5752VENDOR_ST_M45PE40
:
13033 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13038 tg3_flag_set(tp
, NO_NVRAM
);
13042 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13043 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
13044 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13048 static void __devinit
tg3_get_5717_nvram_info(struct tg3
*tp
)
13052 nvcfg1
= tr32(NVRAM_CFG1
);
13054 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13055 case FLASH_5717VENDOR_ATMEL_EEPROM
:
13056 case FLASH_5717VENDOR_MICRO_EEPROM
:
13057 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13058 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13059 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13061 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13062 tw32(NVRAM_CFG1
, nvcfg1
);
13064 case FLASH_5717VENDOR_ATMEL_MDB011D
:
13065 case FLASH_5717VENDOR_ATMEL_ADB011B
:
13066 case FLASH_5717VENDOR_ATMEL_ADB011D
:
13067 case FLASH_5717VENDOR_ATMEL_MDB021D
:
13068 case FLASH_5717VENDOR_ATMEL_ADB021B
:
13069 case FLASH_5717VENDOR_ATMEL_ADB021D
:
13070 case FLASH_5717VENDOR_ATMEL_45USPT
:
13071 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13072 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13073 tg3_flag_set(tp
, FLASH
);
13075 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13076 case FLASH_5717VENDOR_ATMEL_MDB021D
:
13077 /* Detect size with tg3_nvram_get_size() */
13079 case FLASH_5717VENDOR_ATMEL_ADB021B
:
13080 case FLASH_5717VENDOR_ATMEL_ADB021D
:
13081 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13084 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13088 case FLASH_5717VENDOR_ST_M_M25PE10
:
13089 case FLASH_5717VENDOR_ST_A_M25PE10
:
13090 case FLASH_5717VENDOR_ST_M_M45PE10
:
13091 case FLASH_5717VENDOR_ST_A_M45PE10
:
13092 case FLASH_5717VENDOR_ST_M_M25PE20
:
13093 case FLASH_5717VENDOR_ST_A_M25PE20
:
13094 case FLASH_5717VENDOR_ST_M_M45PE20
:
13095 case FLASH_5717VENDOR_ST_A_M45PE20
:
13096 case FLASH_5717VENDOR_ST_25USPT
:
13097 case FLASH_5717VENDOR_ST_45USPT
:
13098 tp
->nvram_jedecnum
= JEDEC_ST
;
13099 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13100 tg3_flag_set(tp
, FLASH
);
13102 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13103 case FLASH_5717VENDOR_ST_M_M25PE20
:
13104 case FLASH_5717VENDOR_ST_M_M45PE20
:
13105 /* Detect size with tg3_nvram_get_size() */
13107 case FLASH_5717VENDOR_ST_A_M25PE20
:
13108 case FLASH_5717VENDOR_ST_A_M45PE20
:
13109 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13112 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13117 tg3_flag_set(tp
, NO_NVRAM
);
13121 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13122 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
13123 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13126 static void __devinit
tg3_get_5720_nvram_info(struct tg3
*tp
)
13128 u32 nvcfg1
, nvmpinstrp
;
13130 nvcfg1
= tr32(NVRAM_CFG1
);
13131 nvmpinstrp
= nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
;
13133 switch (nvmpinstrp
) {
13134 case FLASH_5720_EEPROM_HD
:
13135 case FLASH_5720_EEPROM_LD
:
13136 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13137 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13139 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13140 tw32(NVRAM_CFG1
, nvcfg1
);
13141 if (nvmpinstrp
== FLASH_5720_EEPROM_HD
)
13142 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13144 tp
->nvram_pagesize
= ATMEL_AT24C02_CHIP_SIZE
;
13146 case FLASH_5720VENDOR_M_ATMEL_DB011D
:
13147 case FLASH_5720VENDOR_A_ATMEL_DB011B
:
13148 case FLASH_5720VENDOR_A_ATMEL_DB011D
:
13149 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
13150 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
13151 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
13152 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
13153 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
13154 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
13155 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
13156 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
13157 case FLASH_5720VENDOR_ATMEL_45USPT
:
13158 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13159 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13160 tg3_flag_set(tp
, FLASH
);
13162 switch (nvmpinstrp
) {
13163 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
13164 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
13165 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
13166 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13168 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
13169 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
13170 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
13171 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13173 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
13174 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
13175 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
13178 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13182 case FLASH_5720VENDOR_M_ST_M25PE10
:
13183 case FLASH_5720VENDOR_M_ST_M45PE10
:
13184 case FLASH_5720VENDOR_A_ST_M25PE10
:
13185 case FLASH_5720VENDOR_A_ST_M45PE10
:
13186 case FLASH_5720VENDOR_M_ST_M25PE20
:
13187 case FLASH_5720VENDOR_M_ST_M45PE20
:
13188 case FLASH_5720VENDOR_A_ST_M25PE20
:
13189 case FLASH_5720VENDOR_A_ST_M45PE20
:
13190 case FLASH_5720VENDOR_M_ST_M25PE40
:
13191 case FLASH_5720VENDOR_M_ST_M45PE40
:
13192 case FLASH_5720VENDOR_A_ST_M25PE40
:
13193 case FLASH_5720VENDOR_A_ST_M45PE40
:
13194 case FLASH_5720VENDOR_M_ST_M25PE80
:
13195 case FLASH_5720VENDOR_M_ST_M45PE80
:
13196 case FLASH_5720VENDOR_A_ST_M25PE80
:
13197 case FLASH_5720VENDOR_A_ST_M45PE80
:
13198 case FLASH_5720VENDOR_ST_25USPT
:
13199 case FLASH_5720VENDOR_ST_45USPT
:
13200 tp
->nvram_jedecnum
= JEDEC_ST
;
13201 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13202 tg3_flag_set(tp
, FLASH
);
13204 switch (nvmpinstrp
) {
13205 case FLASH_5720VENDOR_M_ST_M25PE20
:
13206 case FLASH_5720VENDOR_M_ST_M45PE20
:
13207 case FLASH_5720VENDOR_A_ST_M25PE20
:
13208 case FLASH_5720VENDOR_A_ST_M45PE20
:
13209 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13211 case FLASH_5720VENDOR_M_ST_M25PE40
:
13212 case FLASH_5720VENDOR_M_ST_M45PE40
:
13213 case FLASH_5720VENDOR_A_ST_M25PE40
:
13214 case FLASH_5720VENDOR_A_ST_M45PE40
:
13215 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13217 case FLASH_5720VENDOR_M_ST_M25PE80
:
13218 case FLASH_5720VENDOR_M_ST_M45PE80
:
13219 case FLASH_5720VENDOR_A_ST_M25PE80
:
13220 case FLASH_5720VENDOR_A_ST_M45PE80
:
13221 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
13224 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13229 tg3_flag_set(tp
, NO_NVRAM
);
13233 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13234 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
13235 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13238 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13239 static void __devinit
tg3_nvram_init(struct tg3
*tp
)
13241 tw32_f(GRC_EEPROM_ADDR
,
13242 (EEPROM_ADDR_FSM_RESET
|
13243 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
13244 EEPROM_ADDR_CLKPERD_SHIFT
)));
13248 /* Enable seeprom accesses. */
13249 tw32_f(GRC_LOCAL_CTRL
,
13250 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
13253 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
13254 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
13255 tg3_flag_set(tp
, NVRAM
);
13257 if (tg3_nvram_lock(tp
)) {
13258 netdev_warn(tp
->dev
,
13259 "Cannot get nvram lock, %s failed\n",
13263 tg3_enable_nvram_access(tp
);
13265 tp
->nvram_size
= 0;
13267 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
13268 tg3_get_5752_nvram_info(tp
);
13269 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
13270 tg3_get_5755_nvram_info(tp
);
13271 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
13272 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
13273 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
13274 tg3_get_5787_nvram_info(tp
);
13275 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
13276 tg3_get_5761_nvram_info(tp
);
13277 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
13278 tg3_get_5906_nvram_info(tp
);
13279 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
13280 tg3_flag(tp
, 57765_CLASS
))
13281 tg3_get_57780_nvram_info(tp
);
13282 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
13283 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
13284 tg3_get_5717_nvram_info(tp
);
13285 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
13286 tg3_get_5720_nvram_info(tp
);
13288 tg3_get_nvram_info(tp
);
13290 if (tp
->nvram_size
== 0)
13291 tg3_get_nvram_size(tp
);
13293 tg3_disable_nvram_access(tp
);
13294 tg3_nvram_unlock(tp
);
13297 tg3_flag_clear(tp
, NVRAM
);
13298 tg3_flag_clear(tp
, NVRAM_BUFFERED
);
13300 tg3_get_eeprom_size(tp
);
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
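
/* Added summary comment: tg3_get_eeprom_hw_cfg() below pulls the boot-time
 * configuration (PHY id, LED mode, WOL/ASF/APE enables, onboard vs. NIC)
 * out of the NIC SRAM config words left there by the bootcode, falling
 * back to conservative defaults when the SRAM signature is not present.
 */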
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
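
/* Added summary comment: tg3_issue_otp_command() below kicks a single
 * command into the OTP controller and polls OTP_STATUS until the
 * controller reports completion or the wait times out.
 */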
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
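
/* Added summary comment: tg3_phy_init_link_config() below seeds
 * link_config with the full set of speeds this PHY can advertise and
 * defaults to autonegotiation; the active link state starts unknown.
 */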
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
}
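
/* Added summary comment: tg3_phy_probe() below identifies the attached
 * PHY, preferring the ID read from the device itself, then the value
 * cached from the EEPROM area, and finally the hard-coded subsystem-ID
 * table.  Copper devices without ASF/APE also get an initial reset and
 * autonegotiation restart here.
 */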
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
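
/* Added summary comment: tg3_read_vpd() below extracts the board part
 * number (and, when present, a vendor-specific bootcode version string)
 * from the PCI VPD area; if no usable VPD is found, it falls back to a
 * part number derived from the PCI device ID.
 */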
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}
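
/* Added summary comment: tg3_read_bc_ver() below decodes the bootcode
 * version from NVRAM into tp->fw_ver, handling both the newer string
 * format and the older major/minor encoding.
 */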
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
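
/* Added summary comment: tg3_read_sb_ver() below decodes the selfboot
 * (NVRAM-less image) version from the EEPROM format word and appends it
 * to tp->fw_ver.
 */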
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
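
/* Added summary comment: tg3_read_mgmtfw_ver() below locates the ASF
 * management firmware image through the NVRAM directory and appends its
 * version string to tp->fw_ver.
 */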
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
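
/* Added summary comment: tg3_read_dash_ver() below appends the APE
 * firmware (NCSI or DASH) version to tp->fw_ver.
 */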
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
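
/* Added summary comment: tg3_read_fw_ver() below builds the composite
 * firmware version string reported via ethtool: the VPD/bootcode version
 * first, then any selfboot or management firmware versions that apply.
 */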
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
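
/* Added summary comment: tg3_find_peer() below locates, on dual-port
 * devices (5704/5714), the PCI function that hosts the other port of
 * the same chip.
 */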
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
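
/* Added summary comment: tg3_detect_asic_rev() below works out
 * pci_chip_rev_id, reading the product ID register on newer devices,
 * and derives the coarse generation flags (5705_PLUS, 5750_PLUS,
 * 5755_PLUS, 57765_PLUS, ...) that the rest of the driver keys off of.
 */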
static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
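
/* Added summary comment: tg3_get_invariants() below is the one-time
 * probe-path discovery of everything that never changes at runtime:
 * bus type and workarounds, register access methods, TSO and MSI
 * capabilities, EEPROM configuration, PHY type and DMA limits.
 */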
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);
	/* Determine TSO capabilities */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tp->fw_needed) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
			tg3_rss_init_dflt_indir_tbl(tp);
		}
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
			tp->pci_fn = tp->pci_fn ? 1 : 0;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
		    NIC_SRAM_CPMUSTAT_SIG) {
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
		}
	}
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
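
/* Added summary comment: tg3_get_device_address() below determines the
 * permanent MAC address, trying the SRAM mailbox, then NVRAM, then the
 * MAC address registers (and the OpenFirmware properties on sparc).
 */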
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
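
/* Added summary comment: tg3_calc_dma_bndry() below chooses the DMA
 * read/write boundary bits for DMA_RWCTRL based on the PCI cache line
 * size and bus type; the comments inside explain why this mostly
 * matters on PCI/PCI-X RISC systems.
 */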
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}

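/*
 * Illustrative note (not part of the original source): on a sparc64 host
 * the goal is BOUNDARY_SINGLE_CACHELINE.  Assuming a conventional-PCI 5701
 * whose PCI_CACHE_LINE_SIZE register reads 0x10 (16 dwords, i.e. a 64-byte
 * line), cacheline_size works out to 64, the "case 64" arm above is taken,
 * and the returned value has DMA_RWCTRL_READ_BNDRY_64 |
 * DMA_RWCTRL_WRITE_BNDRY_64 or-ed in, so DMA bursts stop at 64-byte
 * boundaries instead of crossing the host cache line.
 */
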
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

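/*
 * Illustrative note (not part of the original source): the test descriptor
 * built above splits the 64-bit bus address of the host buffer across
 * addr_hi/addr_lo.  Assuming a buffer mapped at DMA address
 * 0x0000000123456000, addr_hi becomes 0x00000001 and addr_lo 0x23456000;
 * the to_device argument selects whether the read-DMA engine (host memory
 * to NIC SRAM) or the write-DMA engine (NIC SRAM back to host memory)
 * consumes the descriptor.
 */
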
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
		}
#endif

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

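/*
 * Illustrative note (not part of the original source): the self test above
 * fills the coherent buffer with the pattern 0, 1, 2, ..., DMAs it into
 * NIC SRAM, DMAs it back and compares.  If the read-back data is corrupted
 * while a wide write boundary is in effect, the write boundary is clamped
 * to 16 bytes and the test is retried; only corruption seen with the
 * 16-byte boundary already in force is treated as a fatal -ENODEV.
 */
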
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

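/*
 * Illustrative note (not part of the original source): callers pass a
 * scratch buffer and get back a human-readable bus description, e.g.
 * "PCIX:133MHz:64-bit" for a 5704 on a 133 MHz PCI-X segment or
 * "PCI Express" for PCIe parts; tg3_init_one() prints it in its probe
 * banner.
 */
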
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

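/*
 * Illustrative note (not part of the original source): these defaults are
 * what "ethtool -c ethX" reports before any tuning; a command such as
 * "ethtool -C ethX rx-usecs 50 rx-frames 32" later overrides the
 * rx_coalesce_usecs / rx_max_coalesced_frames values seeded here via the
 * driver's ethtool_ops.
 */
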
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

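	/*
	 * Illustrative note (not part of the original source): for a
	 * 5780-class part behind the EPB bridge, the logic above passes a
	 * 40-bit streaming mask to pci_set_dma_mask() (64-bit only when
	 * CONFIG_HIGHMEM is set) while pci_set_consistent_dma_mask() keeps
	 * coherent allocations within 40 bits; a 5788 instead takes the
	 * plain DMA_BIT_MASK(32) fallback path.
	 */
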
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

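	/*
	 * Illustrative note (not part of the original source): in practice a
	 * hardware-TSO part comes up with "ethtool -k ethX" showing
	 * tcp-segmentation-offload on, while an older firmware-TSO part
	 * starts with it off and the administrator opts in with
	 * "ethtool -K ethX tso on".
	 */
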
	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly; otherwise the DMA self test will enable WDMAC and
	 * we'll see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

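	/*
	 * Illustrative note (not part of the original source): with MSI-X
	 * enabled, tp->napi[0] keeps only the status-block/link mailbox,
	 * while tp->napi[1..n] each get their own interrupt, consumer and
	 * producer mailboxes for one rx/tx ring pair; in INTx/MSI mode only
	 * the values programmed on the first loop iteration are used.
	 */
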
	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tg3_flag(tp, ENABLE_APE)) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

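/*
 * Illustrative note (not part of the original source): SIMPLE_DEV_PM_OPS()
 * expands to a struct dev_pm_ops whose system-sleep callbacks (suspend,
 * resume, freeze, thaw, poweroff, restore) point at tg3_suspend and
 * tg3_resume, and the PM core reaches them through tg3_driver.driver.pm
 * below; without CONFIG_PM_SLEEP, TG3_PM_OPS collapses to NULL and the
 * callbacks compile out.
 */
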
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

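/*
 * Illustrative note (not part of the original source): during PCI AER
 * recovery the core walks these callbacks in order -- tg3_io_error_detected()
 * quiesces the device and asks for a slot reset, tg3_io_slot_reset()
 * re-enables the device and restores PCI config space after the bus reset,
 * and tg3_io_resume() restarts the hardware and traffic once recovery is
 * declared successful.
 */
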
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);