/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include <asm/idprom.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
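/* Usage illustration (not in the original source): the macros above
 * token-paste the short flag name onto the TG3_FLAG_ prefix, so e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), which in
 * turn is an atomic test_bit() on the device's flag bitmap.
 */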
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
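/* Illustration (not in the original source): because TG3_TX_RING_SIZE
 * is a compile-time power of two, the AND mask above is equivalent to
 * a modulo, e.g. NEXT_TX(5) == 6 and NEXT_TX(511) wraps back to 0,
 * with no hardware multiply/divide involved.
 */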
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
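/* Note (not in the original source): NET_IP_ALIGN is 0 on x86, where
 * unaligned loads are cheap, so both conditionals above collapse to
 * compile-time constants there; architectures with a non-zero
 * NET_IP_ALIGN pay the extra tp dereference instead.
 */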
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
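/* Background note (not in the original source): the two indirect
 * helpers above reach chip registers through a window in PCI config
 * space; the target offset goes into TG3PCI_REG_BASE_ADDR and the data
 * moves through TG3PCI_REG_DATA, with tp->indirect_lock serializing
 * the two-step sequence against other users of the window.
 */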
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
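/* Usage note (not in the original source): callers reach this through
 * the tw32_wait_f() macro defined below; for instance, GPIO power
 * switching writes GRC_LOCAL_CTRL with a TG3_GRC_LCLCTL_PWRSW_DELAY
 * microsecond wait, and the delay is honored on both the posted and
 * non-posted paths.
 */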
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
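/* Note (not in the original source): these accessor macros deliberately
 * reference a local variable named "tp", so they can only be used inside
 * functions that declare one; the indirection through the tp function
 * pointers lets chip quirks swap the access method at runtime without
 * touching every call site.
 */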
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
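/* Note (not in the original source): the loop above holds the shared
 * TG3_APE_LOCK_MEM lock only long enough to sample the event status;
 * if firmware still has an event pending, the lock is dropped and the
 * poll retried in 10 usec steps until the caller's budget runs out.
 */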
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
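/* Note (not in the original source): each MDIO transaction is a single
 * MI_COM frame; the PHY and register addresses are shifted into their
 * fields, a read or write command is OR'd in, and MI_COM_BUSY is then
 * polled (up to PHY_BUSY_LOOPS iterations) until the serial bus cycle
 * completes.
 */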
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
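/* Note (not in the original source): this pair implements Clause 45
 * indirect access over Clause 22 registers: select the MMD device,
 * latch the register address, switch the control register to
 * no-increment data mode, then move the data through the same address
 * register.
 */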
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
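/* Worked example (not in the original source): if both link partners
 * advertise ADVERTISE_1000XPAUSE, the resolver above grants symmetric
 * flow control (FLOW_CTRL_TX | FLOW_CTRL_RX).  If only the asymmetric
 * bit matches on both sides, the direction is decided by which side
 * also set the symmetric pause bit, mirroring the 802.3x resolution
 * table.
 */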
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
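
/* The factory programs per-chip analog calibration values into OTP;
 * tg3_phy_apply_otp() unpacks the fields of tp->phy_otp and writes
 * them out to the corresponding PHY DSP taps via tg3_phydsp_write().
 */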
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
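
/* tg3_wait_macro_done() polls MII_TG3_DSP_CONTROL until the busy bit
 * (0x1000) clears, bounding the wait with a fixed iteration count
 * since no interrupt signals completion of the DSP macro.
 */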
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
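
/* tg3_phy_reset_5703_4_5() drives the two helpers above: it writes
 * the DSP test patterns, verifies the readback, retries with a fresh
 * BMCR reset when verification asks for one, and finally clears the
 * channel patterns and restores the PHY registers it touched.
 */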
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
static void tg3_carrier_on(struct tg3 *tp)
{
	netif_carrier_on(tp->dev);
	tp->link_up = true;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		tg3_carrier_off(tp);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
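
/* Each PCI function owns a 4-bit nibble of the shared status word
 * (function 0 in bits 0-3, function 1 in bits 4-7, and so on, relative
 * to TG3_APE_GPIO_MSG_SHIFT), carrying the message bits defined above.
 * As an illustration: function 2 reporting NEED_VAUX contributes
 * TG3_GPIO_MSG_NEED_VAUX << 8 to the value returned by
 * tg3_set_function_status() below.
 */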
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
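
/* Worked example for the Atmel translation above (page size assumed
 * for illustration): with 264-byte pages, a linear offset of 1000
 * falls in page 3 at byte 208 (1000 = 3 * 264 + 208), so the physical
 * address sent to the flash is (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */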
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments. Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
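
/* Example of the length math above (sizes assumed for illustration):
 * a non-fragmented image of tp->fw->size = TG3_FW_HDR_LEN + N bytes
 * yields N / sizeof(u32) data words, while a fragment whose header
 * length field reads 0x130 holds (0x130 - TG3_FW_HDR_LEN) / sizeof(u32)
 * data words.
 */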
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
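
/* The MAC has four exact-match station address slots;
 * __tg3_set_mac_addr() programs the same address into all of them
 * (optionally skipping slot 1, which management firmware such as ASF
 * may own), mirrors it into the twelve extended slots on 5703/5704,
 * and reseeds the transmit backoff generator from the byte sum of
 * the address.
 */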
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_setup_phy(struct tg3 *, int);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			tg3_carrier_on(tp);
		} else {
			tg3_carrier_off(tp);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}
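
/* Note on the repeated tg3_readphy(tp, MII_BMSR, ...) pairs in
 * tg3_setup_copper_phy() below: the BMSR link-status bit is latched
 * low on link loss per IEEE 802.3, so the first read flushes the
 * stale latched value and the second read reports current state.
 */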
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = 1;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000

static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}

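/* Illustrative walk-through (editorial sketch, not part of the original
 * driver): on a healthy link the state machine above advances roughly as
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK,
 * with ANEG_TIMER_ENAB returned from the timed states so the caller keeps
 * ticking cur_time until each ANEG_STATE_SETTLE_TIME window expires.
 */
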
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}

static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}

static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}

static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}

static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}

static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}

/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}

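/* Editorial sketch (derived from the two helpers above, not original text):
 * the hardware exposes the 64-bit reference clock as two 32-bit registers.
 * The write path brackets the LSB/MSB updates with STOP and RESUME so the
 * counter cannot tick between the two halves; e.g. writing 0x100000000 ns
 * stores 0x00000000 to the LSB register and 0x00000001 to the MSB register.
 */
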
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}

static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}

static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};

static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}

/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}

static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}

static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}

/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}

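/* Worked example (illustrative; assumes the usual power-of-two ring size):
 * with TG3_TX_RING_SIZE = 512, tx_pending = 511, tx_prod = 5 and
 * tx_cons = 510, the in-flight count is (5 - 510) & 511 = 7, so
 * tg3_tx_avail() returns 511 - 7 = 504 free descriptors even though the
 * producer index has numerically wrapped below the consumer index.
 */
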
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}

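/* Sizing sketch (hypothetical numbers, for illustration only): with a
 * data_size of 1536, an RX offset of 64 and a 320-byte skb_shared_info,
 * skb_size = SKB_DATA_ALIGN(1536 + 64) + SKB_DATA_ALIGN(320) = 1920, which
 * fits in one page, so the cheaper netdev_alloc_frag() path is taken and
 * *frag_size is reported for the later build_skb(); only larger (e.g.
 * jumbo) buffers fall back to kmalloc() with *frag_size = 0.
 */
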
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u64 tstamp = 0;
		u8 *data;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}

static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}

static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}

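/* Wraparound sketch (illustrative; assumes a 512-entry standard ring):
 * if the source consumer index is 500 and the producer index is 10, the
 * producer has wrapped, so cpycnt starts as 512 - 500 = 12 entries up to
 * the end of the ring; the remaining 10 entries are picked up by the next
 * iteration of the surrounding while (1) loop after the indices advance.
 */
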
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}

static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}

static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}

static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

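/* Worked example for the two tests above (illustrative numbers only):
 * with mapping = 0xfffff000 and len = 0x2000, base = 0xfffff000 and the
 * 32-bit sum base + len + 8 wraps to 0x1008, so base + len + 8 < base
 * and tg3_4g_overflow_test() reports a 4GB-boundary crossing that must
 * be bounced.  The extra "+ 8" appears to leave headroom so a later BD
 * split cannot itself recreate the crossing.  tg3_40bit_overflow_test()
 * is the analogous check against the 40-bit DMA-address erratum.
 */
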
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}

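/* Layout note (descriptive, from the field names above): a Tigon3 send
 * BD is four 32-bit words -- the 64-bit DMA address split across
 * addr_hi/addr_lo, a len_flags word carrying the length in its upper
 * half, and a vlan_tag word whose upper half doubles as the MSS field
 * for TSO.  A minimal usage sketch, assuming "mapping" covers "len"
 * bytes of a fully-contained single-BD packet:
 *
 *	tg3_tx_set_bd(&tnapi->tx_ring[entry], mapping, len,
 *		      TXD_FLAG_END, 0, 0);
 *	entry = NEXT_TX(entry);
 */
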
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}

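/* Illustrative example of the dma_limit path above (numbers are
 * hypothetical; dma_limit is set per chip elsewhere in the driver):
 * with tp->dma_limit = 4096 and a 9000-byte fragment, the loop emits
 * two 4096-byte BDs and leaves a 808-byte tail BD.  Had a split left a
 * tail of 8 bytes or less, the preceding chunk would be shortened to
 * dma_limit / 2 so the tail grows past the 8-byte SHORT_DMA_BUG
 * window -- e.g. len = 4100 becomes a 2048-byte BD plus a 2052-byte
 * tail instead of 4096 + 4.
 */
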
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

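/* The fallback above leans on the stack's software GSO: each segment
 * produced by skb_gso_segment() re-enters tg3_start_xmit() with a TSO
 * header short enough to avoid the 80-byte erratum.  The "* 3"
 * fragment estimate looks deliberately pessimistic (roughly header
 * plus a couple of payload BDs per segment) so the queue is stopped
 * before segmentation can overrun the ring.
 */
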
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

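/* Note on the queue stop/wake protocol used above and in tg3_tx():
 * the producer (this function) stops the queue and then re-checks the
 * ring space, while the consumer (tg3_tx) advances the consumer index
 * and then checks netif_tx_queue_stopped().  The smp_mb() on each side
 * pairs those two orderings so a wakeup cannot be lost in the window
 * between "ring looks full" and "queue marked stopped".
 */
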
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

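/* Example of the RSS status-block remapping above: in a 4-rx-queue
 * configuration, vector 1 polls sblk->idx[0].rx_producer as usual,
 * while vectors 2-4 poll the repurposed rx_jumbo_consumer, reserved,
 * and rx_mini_consumer words respectively.  Each tnapi therefore only
 * needs the single rx_rcb_prod_idx pointer cached here, and the fast
 * path never has to know which hardware word it is reading.
 */
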
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}

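/* Shutdown-ordering note (descriptive): tg3_abort_hw() quiesces the
 * receive path first (MAC RX, then the RX buffer/list/data engines),
 * then the transmit and DMA engines, and only afterwards host
 * coalescing, the buffer manager and the memory arbiter -- i.e. data
 * producers are stopped before the blocks they feed, so nothing is
 * still writing into a block as it is being disabled.
 */
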
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this work reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}

static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}

/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

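/* A BDINFO block in NIC SRAM is a small fixed layout (as the offsets
 * above suggest): a 64-bit host ring address, a maxlen/flags word, and
 * a NIC-local ring address, the last of which is only meaningful on
 * pre-5705 parts -- hence the flag check.  Example use: tg3_rings_reset()
 * below points NIC_SRAM_SEND_RCB at tnapi->tx_desc_mapping with the
 * ring size encoded into maxlen_flags.
 */
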
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	for (; i < tp->txq_cnt; i++) {
		u32 reg;

		reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->tx_coalesce_usecs);
		reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames);
		reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}

/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}
	}
}

static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

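/* This is the standard reflected CRC-32 (polynomial 0xedb88320, the
 * same CRC Ethernet uses for its FCS) computed bit-serially.  Example
 * use: __tg3_set_rx_mode() below runs calc_crc() over each 6-byte
 * multicast address and derives the hash-filter bit index from the low
 * 7 bits of the complemented result.
 */
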
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

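/* Worked example of the multicast hash above (illustrative numbers):
 * if calc_crc() of an address ends in 0xb5, then bit = ~crc & 0x7f =
 * 0x4a, regidx = (0x4a & 0x60) >> 5 = 2, bit &= 0x1f gives 0x0a, and
 * bit 10 of MAC_HASH_REG_2 is set.  The hardware accepts any frame
 * whose destination hashes to a set bit, so the 128-bit filter is
 * approximate and the stack still performs exact matching.
 */
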
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

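/* Packing note on the loop above: eight 4-bit indirection-table
 * entries are folded into each 32-bit MAC_RSS_INDIR_TBL register,
 * first entry in the most significant nibble -- e.g. entries
 * {1,0,3,2,1,0,3,2} are written as 0x10321032.
 */
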
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);
	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		      TG3_CPMU_EEE_LNKIDL_UART_IDL;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (tg3_asic_rev(tp) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}
	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}
	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}
	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);
	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);
	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32))
			tg3_write_mem(tp, i, 0);
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}
	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_RDMA_BUG);
		}
	}
	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);
	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);
	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0);  tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0);  tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0);  tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0);  tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0);  tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0);  tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:
	default:
		break;
	}
	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
	}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
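/* Illustrative note (not in the original source): the MAC statistics
 * registers are only 32 bits wide, so this macro widens them to 64
 * bits by detecting unsigned wrap-around -- if adding the new reading
 * leaves ->low smaller than the value just added, the 32-bit sum
 * carried and ->high is incremented.  E.g. low = 0xffffffff plus
 * __val = 2 gives low = 1, which is < 2, so high gains the carry.
 */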
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
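/* Illustrative note (not in the original source): with the 100 ms tick
 * (timer_offset = HZ / 10), timer_multiplier is 10, so the "once per
 * second" body in tg3_timer() runs every 10th tick; asf_multiplier
 * scales the same way to give the TG3_FW_UPDATE_FREQ_SEC heartbeat
 * period regardless of tick rate.
 */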
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
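/* Illustrative sketch (not in the original source): the blob layout
 * implied by the checks above -- a big-endian header followed by the
 * image payload.  Field names here are assumptions for illustration.
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;
 *		__be32 base_addr;	// load address in NIC memory
 *		__be32 len;		// data + BSS, >= payload size
 *	};
 */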
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
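/* Illustrative note (not in the original source): with rxq_cnt = 4 and
 * txq_cnt = 1, tg3_irq_count() requests min(4 + 1, irq_max) vectors --
 * four queue vectors plus the dedicated vector 0 for link and other
 * non-queue events.
 */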
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
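/* Usage note (not in the original source): in this kernel era,
 * pci_enable_msix() returned 0 on success, a negative errno on hard
 * failure, or a positive count of vectors that could actually be
 * allocated.  The retry above accepts that smaller count and shrinks
 * the rx/tx queue plan to fit it.
 */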
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
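/* Illustrative note (not in the original source): hardware counters are
 * kept as {high, low} 32-bit halves (see TG3_STAT_ADD32 above);
 * get_stat64() splices them back together, e.g. high = 0x1 and
 * low = 0x00000002 yields 0x100000002.
 */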
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
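/* Illustrative note (not in the original source): the hardware stats
 * block is cleared whenever the chip is reset, so each ethtool counter
 * is reported as the pre-reset snapshot (old_estats) plus whatever the
 * live hardware block has accumulated since the last reset.
 */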
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11273 static int tg3_get_regs_len(struct net_device
*dev
)
11275 return TG3_REG_BLK_SIZE
;
11278 static void tg3_get_regs(struct net_device
*dev
,
11279 struct ethtool_regs
*regs
, void *_p
)
11281 struct tg3
*tp
= netdev_priv(dev
);
11285 memset(_p
, 0, TG3_REG_BLK_SIZE
);
11287 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11290 tg3_full_lock(tp
, 0);
11292 tg3_dump_legacy_regs(tp
, (u32
*)_p
);
11294 tg3_full_unlock(tp
);
11297 static int tg3_get_eeprom_len(struct net_device
*dev
)
11299 struct tg3
*tp
= netdev_priv(dev
);
11301 return tp
->nvram_size
;
11304 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11306 struct tg3
*tp
= netdev_priv(dev
);
11309 u32 i
, offset
, len
, b_offset
, b_count
;
11312 if (tg3_flag(tp
, NO_NVRAM
))
11315 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11318 offset
= eeprom
->offset
;
11322 eeprom
->magic
= TG3_EEPROM_MAGIC
;
11325 /* adjustments to start on required 4 byte boundary */
11326 b_offset
= offset
& 3;
11327 b_count
= 4 - b_offset
;
11328 if (b_count
> len
) {
11329 /* i.e. offset=1 len=2 */
11332 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &val
);
11335 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
11338 eeprom
->len
+= b_count
;
11341 /* read bytes up to the last 4 byte boundary */
11342 pd
= &data
[eeprom
->len
];
11343 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
11344 ret
= tg3_nvram_read_be32(tp
, offset
+ i
, &val
);
11349 memcpy(pd
+ i
, &val
, 4);
11354 /* read last bytes not ending on 4 byte boundary */
11355 pd
= &data
[eeprom
->len
];
11357 b_offset
= offset
+ len
- b_count
;
11358 ret
= tg3_nvram_read_be32(tp
, b_offset
, &val
);
11361 memcpy(pd
, &val
, b_count
);
11362 eeprom
->len
+= b_count
;
11367 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11369 struct tg3
*tp
= netdev_priv(dev
);
11371 u32 offset
, len
, b_offset
, odd_len
;
11375 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11378 if (tg3_flag(tp
, NO_NVRAM
) ||
11379 eeprom
->magic
!= TG3_EEPROM_MAGIC
)
11382 offset
= eeprom
->offset
;
11385 if ((b_offset
= (offset
& 3))) {
11386 /* adjustments to start on required 4 byte boundary */
11387 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &start
);
11398 /* adjustments to end on required 4 byte boundary */
11400 len
= (len
+ 3) & ~3;
11401 ret
= tg3_nvram_read_be32(tp
, offset
+len
-4, &end
);
11407 if (b_offset
|| odd_len
) {
11408 buf
= kmalloc(len
, GFP_KERNEL
);
11412 memcpy(buf
, &start
, 4);
11414 memcpy(buf
+len
-4, &end
, 4);
11415 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
11418 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
11426 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
11428 struct tg3
*tp
= netdev_priv(dev
);
11430 if (tg3_flag(tp
, USE_PHYLIB
)) {
11431 struct phy_device
*phydev
;
11432 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11434 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11435 return phy_ethtool_gset(phydev
, cmd
);
11438 cmd
->supported
= (SUPPORTED_Autoneg
);
11440 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
11441 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
11442 SUPPORTED_1000baseT_Full
);
11444 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
11445 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
11446 SUPPORTED_100baseT_Full
|
11447 SUPPORTED_10baseT_Half
|
11448 SUPPORTED_10baseT_Full
|
11450 cmd
->port
= PORT_TP
;
11452 cmd
->supported
|= SUPPORTED_FIBRE
;
11453 cmd
->port
= PORT_FIBRE
;
11456 cmd
->advertising
= tp
->link_config
.advertising
;
11457 if (tg3_flag(tp
, PAUSE_AUTONEG
)) {
11458 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
) {
11459 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
11460 cmd
->advertising
|= ADVERTISED_Pause
;
11462 cmd
->advertising
|= ADVERTISED_Pause
|
11463 ADVERTISED_Asym_Pause
;
11465 } else if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
11466 cmd
->advertising
|= ADVERTISED_Asym_Pause
;
11469 if (netif_running(dev
) && tp
->link_up
) {
11470 ethtool_cmd_speed_set(cmd
, tp
->link_config
.active_speed
);
11471 cmd
->duplex
= tp
->link_config
.active_duplex
;
11472 cmd
->lp_advertising
= tp
->link_config
.rmt_adv
;
11473 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
11474 if (tp
->phy_flags
& TG3_PHYFLG_MDIX_STATE
)
11475 cmd
->eth_tp_mdix
= ETH_TP_MDI_X
;
11477 cmd
->eth_tp_mdix
= ETH_TP_MDI
;
11480 ethtool_cmd_speed_set(cmd
, SPEED_UNKNOWN
);
11481 cmd
->duplex
= DUPLEX_UNKNOWN
;
11482 cmd
->eth_tp_mdix
= ETH_TP_MDI_INVALID
;
11484 cmd
->phy_address
= tp
->phy_addr
;
11485 cmd
->transceiver
= XCVR_INTERNAL
;
11486 cmd
->autoneg
= tp
->link_config
.autoneg
;
11492 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
11494 struct tg3
*tp
= netdev_priv(dev
);
11495 u32 speed
= ethtool_cmd_speed(cmd
);
11497 if (tg3_flag(tp
, USE_PHYLIB
)) {
11498 struct phy_device
*phydev
;
11499 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11501 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11502 return phy_ethtool_sset(phydev
, cmd
);
11505 if (cmd
->autoneg
!= AUTONEG_ENABLE
&&
11506 cmd
->autoneg
!= AUTONEG_DISABLE
)
11509 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
11510 cmd
->duplex
!= DUPLEX_FULL
&&
11511 cmd
->duplex
!= DUPLEX_HALF
)
11514 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11515 u32 mask
= ADVERTISED_Autoneg
|
11517 ADVERTISED_Asym_Pause
;
11519 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
11520 mask
|= ADVERTISED_1000baseT_Half
|
11521 ADVERTISED_1000baseT_Full
;
11523 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
11524 mask
|= ADVERTISED_100baseT_Half
|
11525 ADVERTISED_100baseT_Full
|
11526 ADVERTISED_10baseT_Half
|
11527 ADVERTISED_10baseT_Full
|
11530 mask
|= ADVERTISED_FIBRE
;
11532 if (cmd
->advertising
& ~mask
)
11535 mask
&= (ADVERTISED_1000baseT_Half
|
11536 ADVERTISED_1000baseT_Full
|
11537 ADVERTISED_100baseT_Half
|
11538 ADVERTISED_100baseT_Full
|
11539 ADVERTISED_10baseT_Half
|
11540 ADVERTISED_10baseT_Full
);
11542 cmd
->advertising
&= mask
;
11544 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
11545 if (speed
!= SPEED_1000
)
11548 if (cmd
->duplex
!= DUPLEX_FULL
)
11551 if (speed
!= SPEED_100
&&
11557 tg3_full_lock(tp
, 0);
11559 tp
->link_config
.autoneg
= cmd
->autoneg
;
11560 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11561 tp
->link_config
.advertising
= (cmd
->advertising
|
11562 ADVERTISED_Autoneg
);
11563 tp
->link_config
.speed
= SPEED_UNKNOWN
;
11564 tp
->link_config
.duplex
= DUPLEX_UNKNOWN
;
11566 tp
->link_config
.advertising
= 0;
11567 tp
->link_config
.speed
= speed
;
11568 tp
->link_config
.duplex
= cmd
->duplex
;
11571 if (netif_running(dev
))
11572 tg3_setup_phy(tp
, 1);
11574 tg3_full_unlock(tp
);
11579 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
11581 struct tg3
*tp
= netdev_priv(dev
);
11583 strlcpy(info
->driver
, DRV_MODULE_NAME
, sizeof(info
->driver
));
11584 strlcpy(info
->version
, DRV_MODULE_VERSION
, sizeof(info
->version
));
11585 strlcpy(info
->fw_version
, tp
->fw_ver
, sizeof(info
->fw_version
));
11586 strlcpy(info
->bus_info
, pci_name(tp
->pdev
), sizeof(info
->bus_info
));
11589 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
11591 struct tg3
*tp
= netdev_priv(dev
);
11593 if (tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(&tp
->pdev
->dev
))
11594 wol
->supported
= WAKE_MAGIC
;
11596 wol
->supported
= 0;
11598 if (tg3_flag(tp
, WOL_ENABLE
) && device_can_wakeup(&tp
->pdev
->dev
))
11599 wol
->wolopts
= WAKE_MAGIC
;
11600 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
11603 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
11605 struct tg3
*tp
= netdev_priv(dev
);
11606 struct device
*dp
= &tp
->pdev
->dev
;
11608 if (wol
->wolopts
& ~WAKE_MAGIC
)
11610 if ((wol
->wolopts
& WAKE_MAGIC
) &&
11611 !(tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(dp
)))
11614 device_set_wakeup_enable(dp
, wol
->wolopts
& WAKE_MAGIC
);
11616 spin_lock_bh(&tp
->lock
);
11617 if (device_may_wakeup(dp
))
11618 tg3_flag_set(tp
, WOL_ENABLE
);
11620 tg3_flag_clear(tp
, WOL_ENABLE
);
11621 spin_unlock_bh(&tp
->lock
);
11626 static u32
tg3_get_msglevel(struct net_device
*dev
)
11628 struct tg3
*tp
= netdev_priv(dev
);
11629 return tp
->msg_enable
;
11632 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
11634 struct tg3
*tp
= netdev_priv(dev
);
11635 tp
->msg_enable
= value
;
11638 static int tg3_nway_reset(struct net_device
*dev
)
11640 struct tg3
*tp
= netdev_priv(dev
);
11643 if (!netif_running(dev
))
11646 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
11649 if (tg3_flag(tp
, USE_PHYLIB
)) {
11650 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11652 r
= phy_start_aneg(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
11656 spin_lock_bh(&tp
->lock
);
11658 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
11659 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
11660 ((bmcr
& BMCR_ANENABLE
) ||
11661 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
11662 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
11666 spin_unlock_bh(&tp
->lock
);
11672 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
11674 struct tg3
*tp
= netdev_priv(dev
);
11676 ering
->rx_max_pending
= tp
->rx_std_ring_mask
;
11677 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
11678 ering
->rx_jumbo_max_pending
= tp
->rx_jmb_ring_mask
;
11680 ering
->rx_jumbo_max_pending
= 0;
11682 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
11684 ering
->rx_pending
= tp
->rx_pending
;
11685 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
11686 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
11688 ering
->rx_jumbo_pending
= 0;
11690 ering
->tx_pending
= tp
->napi
[0].tx_pending
;
11693 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
11695 struct tg3
*tp
= netdev_priv(dev
);
11696 int i
, irq_sync
= 0, err
= 0;
11698 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
11699 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
11700 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
11701 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
11702 (tg3_flag(tp
, TSO_BUG
) &&
11703 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
11706 if (netif_running(dev
)) {
11708 tg3_netif_stop(tp
);
11712 tg3_full_lock(tp
, irq_sync
);
11714 tp
->rx_pending
= ering
->rx_pending
;
11716 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
11717 tp
->rx_pending
> 63)
11718 tp
->rx_pending
= 63;
11719 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
11721 for (i
= 0; i
< tp
->irq_max
; i
++)
11722 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
11724 if (netif_running(dev
)) {
11725 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11726 err
= tg3_restart_hw(tp
, 1);
11728 tg3_netif_start(tp
);
11731 tg3_full_unlock(tp
);
11733 if (irq_sync
&& !err
)
11739 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
11741 struct tg3
*tp
= netdev_priv(dev
);
11743 epause
->autoneg
= !!tg3_flag(tp
, PAUSE_AUTONEG
);
11745 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
)
11746 epause
->rx_pause
= 1;
11748 epause
->rx_pause
= 0;
11750 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
)
11751 epause
->tx_pause
= 1;
11753 epause
->tx_pause
= 0;
11756 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
11758 struct tg3
*tp
= netdev_priv(dev
);
11761 if (tg3_flag(tp
, USE_PHYLIB
)) {
11763 struct phy_device
*phydev
;
11765 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11767 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
11768 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
11769 (epause
->rx_pause
!= epause
->tx_pause
)))
11772 tp
->link_config
.flowctrl
= 0;
11773 if (epause
->rx_pause
) {
11774 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
11776 if (epause
->tx_pause
) {
11777 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11778 newadv
= ADVERTISED_Pause
;
11780 newadv
= ADVERTISED_Pause
|
11781 ADVERTISED_Asym_Pause
;
11782 } else if (epause
->tx_pause
) {
11783 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11784 newadv
= ADVERTISED_Asym_Pause
;
11788 if (epause
->autoneg
)
11789 tg3_flag_set(tp
, PAUSE_AUTONEG
);
11791 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
11793 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
11794 u32 oldadv
= phydev
->advertising
&
11795 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
11796 if (oldadv
!= newadv
) {
11797 phydev
->advertising
&=
11798 ~(ADVERTISED_Pause
|
11799 ADVERTISED_Asym_Pause
);
11800 phydev
->advertising
|= newadv
;
11801 if (phydev
->autoneg
) {
11803 * Always renegotiate the link to
11804 * inform our link partner of our
11805 * flow control settings, even if the
11806 * flow control is forced. Let
11807 * tg3_adjust_link() do the final
11808 * flow control setup.
11810 return phy_start_aneg(phydev
);
11814 if (!epause
->autoneg
)
11815 tg3_setup_flow_control(tp
, 0, 0);
11817 tp
->link_config
.advertising
&=
11818 ~(ADVERTISED_Pause
|
11819 ADVERTISED_Asym_Pause
);
11820 tp
->link_config
.advertising
|= newadv
;
11825 if (netif_running(dev
)) {
11826 tg3_netif_stop(tp
);
11830 tg3_full_lock(tp
, irq_sync
);
11832 if (epause
->autoneg
)
11833 tg3_flag_set(tp
, PAUSE_AUTONEG
);
11835 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
11836 if (epause
->rx_pause
)
11837 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
11839 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
11840 if (epause
->tx_pause
)
11841 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11843 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
11845 if (netif_running(dev
)) {
11846 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11847 err
= tg3_restart_hw(tp
, 1);
11849 tg3_netif_start(tp
);
11852 tg3_full_unlock(tp
);
11858 static int tg3_get_sset_count(struct net_device
*dev
, int sset
)
11862 return TG3_NUM_TEST
;
11864 return TG3_NUM_STATS
;
11866 return -EOPNOTSUPP
;
11870 static int tg3_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
,
11871 u32
*rules __always_unused
)
11873 struct tg3
*tp
= netdev_priv(dev
);
11875 if (!tg3_flag(tp
, SUPPORT_MSIX
))
11876 return -EOPNOTSUPP
;
11878 switch (info
->cmd
) {
11879 case ETHTOOL_GRXRINGS
:
11880 if (netif_running(tp
->dev
))
11881 info
->data
= tp
->rxq_cnt
;
11883 info
->data
= num_online_cpus();
11884 if (info
->data
> TG3_RSS_MAX_NUM_QS
)
11885 info
->data
= TG3_RSS_MAX_NUM_QS
;
11888 /* The first interrupt vector only
11889 * handles link interrupts.
11895 return -EOPNOTSUPP
;
11899 static u32
tg3_get_rxfh_indir_size(struct net_device
*dev
)
11902 struct tg3
*tp
= netdev_priv(dev
);
11904 if (tg3_flag(tp
, SUPPORT_MSIX
))
11905 size
= TG3_RSS_INDIR_TBL_SIZE
;
11910 static int tg3_get_rxfh_indir(struct net_device
*dev
, u32
*indir
)
11912 struct tg3
*tp
= netdev_priv(dev
);
11915 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
11916 indir
[i
] = tp
->rss_ind_tbl
[i
];
11921 static int tg3_set_rxfh_indir(struct net_device
*dev
, const u32
*indir
)
11923 struct tg3
*tp
= netdev_priv(dev
);
11926 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
11927 tp
->rss_ind_tbl
[i
] = indir
[i
];
11929 if (!netif_running(dev
) || !tg3_flag(tp
, ENABLE_RSS
))
11932 /* It is legal to write the indirection
11933 * table while the device is running.
11935 tg3_full_lock(tp
, 0);
11936 tg3_rss_write_indir_tbl(tp
);
11937 tg3_full_unlock(tp
);
11942 static void tg3_get_channels(struct net_device
*dev
,
11943 struct ethtool_channels
*channel
)
11945 struct tg3
*tp
= netdev_priv(dev
);
11946 u32 deflt_qs
= netif_get_num_default_rss_queues();
11948 channel
->max_rx
= tp
->rxq_max
;
11949 channel
->max_tx
= tp
->txq_max
;
11951 if (netif_running(dev
)) {
11952 channel
->rx_count
= tp
->rxq_cnt
;
11953 channel
->tx_count
= tp
->txq_cnt
;
11956 channel
->rx_count
= tp
->rxq_req
;
11958 channel
->rx_count
= min(deflt_qs
, tp
->rxq_max
);
11961 channel
->tx_count
= tp
->txq_req
;
11963 channel
->tx_count
= min(deflt_qs
, tp
->txq_max
);
11967 static int tg3_set_channels(struct net_device
*dev
,
11968 struct ethtool_channels
*channel
)
11970 struct tg3
*tp
= netdev_priv(dev
);
11972 if (!tg3_flag(tp
, SUPPORT_MSIX
))
11973 return -EOPNOTSUPP
;
11975 if (channel
->rx_count
> tp
->rxq_max
||
11976 channel
->tx_count
> tp
->txq_max
)
11979 tp
->rxq_req
= channel
->rx_count
;
11980 tp
->txq_req
= channel
->tx_count
;
11982 if (!netif_running(dev
))
11987 tg3_carrier_off(tp
);
11989 tg3_start(tp
, true, false, false);
11994 static void tg3_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
11996 switch (stringset
) {
11998 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
12001 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
12004 WARN_ON(1); /* we need a WARN() */
12009 static int tg3_set_phys_id(struct net_device
*dev
,
12010 enum ethtool_phys_id_state state
)
12012 struct tg3
*tp
= netdev_priv(dev
);
12014 if (!netif_running(tp
->dev
))
12018 case ETHTOOL_ID_ACTIVE
:
12019 return 1; /* cycle on/off once per second */
12021 case ETHTOOL_ID_ON
:
12022 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12023 LED_CTRL_1000MBPS_ON
|
12024 LED_CTRL_100MBPS_ON
|
12025 LED_CTRL_10MBPS_ON
|
12026 LED_CTRL_TRAFFIC_OVERRIDE
|
12027 LED_CTRL_TRAFFIC_BLINK
|
12028 LED_CTRL_TRAFFIC_LED
);
12031 case ETHTOOL_ID_OFF
:
12032 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12033 LED_CTRL_TRAFFIC_OVERRIDE
);
12036 case ETHTOOL_ID_INACTIVE
:
12037 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
12044 static void tg3_get_ethtool_stats(struct net_device
*dev
,
12045 struct ethtool_stats
*estats
, u64
*tmp_stats
)
12047 struct tg3
*tp
= netdev_priv(dev
);
12050 tg3_get_estats(tp
, (struct tg3_ethtool_stats
*)tmp_stats
);
12052 memset(tmp_stats
, 0, sizeof(struct tg3_ethtool_stats
));
12055 static __be32
*tg3_vpd_readblock(struct tg3
*tp
, u32
*vpdlen
)
12059 u32 offset
= 0, len
= 0;
12062 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
12065 if (magic
== TG3_EEPROM_MAGIC
) {
12066 for (offset
= TG3_NVM_DIR_START
;
12067 offset
< TG3_NVM_DIR_END
;
12068 offset
+= TG3_NVM_DIRENT_SIZE
) {
12069 if (tg3_nvram_read(tp
, offset
, &val
))
12072 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
12073 TG3_NVM_DIRTYPE_EXTVPD
)
12077 if (offset
!= TG3_NVM_DIR_END
) {
12078 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
12079 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
12082 offset
= tg3_nvram_logical_addr(tp
, offset
);
12086 if (!offset
|| !len
) {
12087 offset
= TG3_NVM_VPD_OFF
;
12088 len
= TG3_NVM_VPD_LEN
;
12091 buf
= kmalloc(len
, GFP_KERNEL
);
12095 if (magic
== TG3_EEPROM_MAGIC
) {
12096 for (i
= 0; i
< len
; i
+= 4) {
12097 /* The data is in little-endian format in NVRAM.
12098 * Use the big-endian read routines to preserve
12099 * the byte order as it exists in NVRAM.
12101 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
12107 unsigned int pos
= 0;
12109 ptr
= (u8
*)&buf
[0];
12110 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
12111 cnt
= pci_read_vpd(tp
->pdev
, pos
,
12113 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
12131 #define NVRAM_TEST_SIZE 0x100
12132 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12133 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12134 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12135 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12136 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12137 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12138 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12139 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12141 static int tg3_test_nvram(struct tg3
*tp
)
12143 u32 csum
, magic
, len
;
12145 int i
, j
, k
, err
= 0, size
;
12147 if (tg3_flag(tp
, NO_NVRAM
))
12150 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
12153 if (magic
== TG3_EEPROM_MAGIC
)
12154 size
= NVRAM_TEST_SIZE
;
12155 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
12156 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
12157 TG3_EEPROM_SB_FORMAT_1
) {
12158 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
12159 case TG3_EEPROM_SB_REVISION_0
:
12160 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
12162 case TG3_EEPROM_SB_REVISION_2
:
12163 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
12165 case TG3_EEPROM_SB_REVISION_3
:
12166 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
12168 case TG3_EEPROM_SB_REVISION_4
:
12169 size
= NVRAM_SELFBOOT_FORMAT1_4_SIZE
;
12171 case TG3_EEPROM_SB_REVISION_5
:
12172 size
= NVRAM_SELFBOOT_FORMAT1_5_SIZE
;
12174 case TG3_EEPROM_SB_REVISION_6
:
12175 size
= NVRAM_SELFBOOT_FORMAT1_6_SIZE
;
12182 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
12183 size
= NVRAM_SELFBOOT_HW_SIZE
;
12187 buf
= kmalloc(size
, GFP_KERNEL
);
12192 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
12193 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
12200 /* Selfboot format */
12201 magic
= be32_to_cpu(buf
[0]);
12202 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
12203 TG3_EEPROM_MAGIC_FW
) {
12204 u8
*buf8
= (u8
*) buf
, csum8
= 0;
12206 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
12207 TG3_EEPROM_SB_REVISION_2
) {
12208 /* For rev 2, the csum doesn't include the MBA. */
12209 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
12211 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
12214 for (i
= 0; i
< size
; i
++)
12227 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
12228 TG3_EEPROM_MAGIC_HW
) {
12229 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
12230 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
12231 u8
*buf8
= (u8
*) buf
;
12233 /* Separate the parity bits and the data bytes. */
12234 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
12235 if ((i
== 0) || (i
== 8)) {
12239 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
12240 parity
[k
++] = buf8
[i
] & msk
;
12242 } else if (i
== 16) {
12246 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
12247 parity
[k
++] = buf8
[i
] & msk
;
12250 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
12251 parity
[k
++] = buf8
[i
] & msk
;
12254 data
[j
++] = buf8
[i
];
12258 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
12259 u8 hw8
= hweight8(data
[i
]);
12261 if ((hw8
& 0x1) && parity
[i
])
12263 else if (!(hw8
& 0x1) && !parity
[i
])
12272 /* Bootstrap checksum at offset 0x10 */
12273 csum
= calc_crc((unsigned char *) buf
, 0x10);
12274 if (csum
!= le32_to_cpu(buf
[0x10/4]))
12277 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12278 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
12279 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
12284 buf
= tg3_vpd_readblock(tp
, &len
);
12288 i
= pci_vpd_find_tag((u8
*)buf
, 0, len
, PCI_VPD_LRDT_RO_DATA
);
12290 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
12294 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> len
)
12297 i
+= PCI_VPD_LRDT_TAG_SIZE
;
12298 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
12299 PCI_VPD_RO_KEYWORD_CHKSUM
);
12303 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
12305 for (i
= 0; i
<= j
; i
++)
12306 csum8
+= ((u8
*)buf
)[i
];
12320 #define TG3_SERDES_TIMEOUT_SEC 2
12321 #define TG3_COPPER_TIMEOUT_SEC 6
12323 static int tg3_test_link(struct tg3
*tp
)
12327 if (!netif_running(tp
->dev
))
12330 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
12331 max
= TG3_SERDES_TIMEOUT_SEC
;
12333 max
= TG3_COPPER_TIMEOUT_SEC
;
12335 for (i
= 0; i
< max
; i
++) {
12339 if (msleep_interruptible(1000))
12346 /* Only test the commonly used registers */
12347 static int tg3_test_registers(struct tg3
*tp
)
12349 int i
, is_5705
, is_5750
;
12350 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
12354 #define TG3_FL_5705 0x1
12355 #define TG3_FL_NOT_5705 0x2
12356 #define TG3_FL_NOT_5788 0x4
12357 #define TG3_FL_NOT_5750 0x8
12361 /* MAC Control Registers */
12362 { MAC_MODE
, TG3_FL_NOT_5705
,
12363 0x00000000, 0x00ef6f8c },
12364 { MAC_MODE
, TG3_FL_5705
,
12365 0x00000000, 0x01ef6b8c },
12366 { MAC_STATUS
, TG3_FL_NOT_5705
,
12367 0x03800107, 0x00000000 },
12368 { MAC_STATUS
, TG3_FL_5705
,
12369 0x03800100, 0x00000000 },
12370 { MAC_ADDR_0_HIGH
, 0x0000,
12371 0x00000000, 0x0000ffff },
12372 { MAC_ADDR_0_LOW
, 0x0000,
12373 0x00000000, 0xffffffff },
12374 { MAC_RX_MTU_SIZE
, 0x0000,
12375 0x00000000, 0x0000ffff },
12376 { MAC_TX_MODE
, 0x0000,
12377 0x00000000, 0x00000070 },
12378 { MAC_TX_LENGTHS
, 0x0000,
12379 0x00000000, 0x00003fff },
12380 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
12381 0x00000000, 0x000007fc },
12382 { MAC_RX_MODE
, TG3_FL_5705
,
12383 0x00000000, 0x000007dc },
12384 { MAC_HASH_REG_0
, 0x0000,
12385 0x00000000, 0xffffffff },
12386 { MAC_HASH_REG_1
, 0x0000,
12387 0x00000000, 0xffffffff },
12388 { MAC_HASH_REG_2
, 0x0000,
12389 0x00000000, 0xffffffff },
12390 { MAC_HASH_REG_3
, 0x0000,
12391 0x00000000, 0xffffffff },
12393 /* Receive Data and Receive BD Initiator Control Registers. */
12394 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
12395 0x00000000, 0xffffffff },
12396 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
12397 0x00000000, 0xffffffff },
12398 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
12399 0x00000000, 0x00000003 },
12400 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
12401 0x00000000, 0xffffffff },
12402 { RCVDBDI_STD_BD
+0, 0x0000,
12403 0x00000000, 0xffffffff },
12404 { RCVDBDI_STD_BD
+4, 0x0000,
12405 0x00000000, 0xffffffff },
12406 { RCVDBDI_STD_BD
+8, 0x0000,
12407 0x00000000, 0xffff0002 },
12408 { RCVDBDI_STD_BD
+0xc, 0x0000,
12409 0x00000000, 0xffffffff },
12411 /* Receive BD Initiator Control Registers. */
12412 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
12413 0x00000000, 0xffffffff },
12414 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
12415 0x00000000, 0x000003ff },
12416 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
12417 0x00000000, 0xffffffff },
12419 /* Host Coalescing Control Registers. */
12420 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
12421 0x00000000, 0x00000004 },
12422 { HOSTCC_MODE
, TG3_FL_5705
,
12423 0x00000000, 0x000000f6 },
12424 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
12425 0x00000000, 0xffffffff },
12426 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
12427 0x00000000, 0x000003ff },
12428 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
12429 0x00000000, 0xffffffff },
12430 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
12431 0x00000000, 0x000003ff },
12432 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
12433 0x00000000, 0xffffffff },
12434 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12435 0x00000000, 0x000000ff },
12436 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
12437 0x00000000, 0xffffffff },
12438 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12439 0x00000000, 0x000000ff },
12440 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12441 0x00000000, 0xffffffff },
12442 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12443 0x00000000, 0xffffffff },
12444 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12445 0x00000000, 0xffffffff },
12446 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12447 0x00000000, 0x000000ff },
12448 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12449 0x00000000, 0xffffffff },
12450 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12451 0x00000000, 0x000000ff },
12452 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
12453 0x00000000, 0xffffffff },
12454 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
12455 0x00000000, 0xffffffff },
12456 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
12457 0x00000000, 0xffffffff },
12458 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
12459 0x00000000, 0xffffffff },
12460 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
12461 0x00000000, 0xffffffff },
12462 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
12463 0xffffffff, 0x00000000 },
12464 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
12465 0xffffffff, 0x00000000 },
12467 /* Buffer Manager Control Registers. */
12468 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
12469 0x00000000, 0x007fff80 },
12470 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
12471 0x00000000, 0x007fffff },
12472 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
12473 0x00000000, 0x0000003f },
12474 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
12475 0x00000000, 0x000001ff },
12476 { BUFMGR_MB_HIGH_WATER
, 0x0000,
12477 0x00000000, 0x000001ff },
12478 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
12479 0xffffffff, 0x00000000 },
12480 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
12481 0xffffffff, 0x00000000 },
12483 /* Mailbox Registers */
12484 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
12485 0x00000000, 0x000001ff },
12486 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
12487 0x00000000, 0x000001ff },
12488 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
12489 0x00000000, 0x000007ff },
12490 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
12491 0x00000000, 0x000001ff },
12493 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12496 is_5705
= is_5750
= 0;
12497 if (tg3_flag(tp
, 5705_PLUS
)) {
12499 if (tg3_flag(tp
, 5750_PLUS
))
12503 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
12504 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
12507 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
12510 if (tg3_flag(tp
, IS_5788
) &&
12511 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
12514 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
12517 offset
= (u32
) reg_tbl
[i
].offset
;
12518 read_mask
= reg_tbl
[i
].read_mask
;
12519 write_mask
= reg_tbl
[i
].write_mask
;
12521 /* Save the original register content */
12522 save_val
= tr32(offset
);
12524 /* Determine the read-only value. */
12525 read_val
= save_val
& read_mask
;
12527 /* Write zero to the register, then make sure the read-only bits
12528 * are not changed and the read/write bits are all zeros.
12532 val
= tr32(offset
);
12534 /* Test the read-only and read/write bits. */
12535 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
12538 /* Write ones to all the bits defined by RdMask and WrMask, then
12539 * make sure the read-only bits are not changed and the
12540 * read/write bits are all ones.
12542 tw32(offset
, read_mask
| write_mask
);
12544 val
= tr32(offset
);
12546 /* Test the read-only bits. */
12547 if ((val
& read_mask
) != read_val
)
12550 /* Test the read/write bits. */
12551 if ((val
& write_mask
) != write_mask
)
12554 tw32(offset
, save_val
);
12560 if (netif_msg_hw(tp
))
12561 netdev_err(tp
->dev
,
12562 "Register test failed at offset %x\n", offset
);
12563 tw32(offset
, save_val
);
12567 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
12569 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12573 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
12574 for (j
= 0; j
< len
; j
+= 4) {
12577 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
12578 tg3_read_mem(tp
, offset
+ j
, &val
);
12579 if (val
!= test_pattern
[i
])
12586 static int tg3_test_memory(struct tg3
*tp
)
12588 static struct mem_entry
{
12591 } mem_tbl_570x
[] = {
12592 { 0x00000000, 0x00b50},
12593 { 0x00002000, 0x1c000},
12594 { 0xffffffff, 0x00000}
12595 }, mem_tbl_5705
[] = {
12596 { 0x00000100, 0x0000c},
12597 { 0x00000200, 0x00008},
12598 { 0x00004000, 0x00800},
12599 { 0x00006000, 0x01000},
12600 { 0x00008000, 0x02000},
12601 { 0x00010000, 0x0e000},
12602 { 0xffffffff, 0x00000}
12603 }, mem_tbl_5755
[] = {
12604 { 0x00000200, 0x00008},
12605 { 0x00004000, 0x00800},
12606 { 0x00006000, 0x00800},
12607 { 0x00008000, 0x02000},
12608 { 0x00010000, 0x0c000},
12609 { 0xffffffff, 0x00000}
12610 }, mem_tbl_5906
[] = {
12611 { 0x00000200, 0x00008},
12612 { 0x00004000, 0x00400},
12613 { 0x00006000, 0x00400},
12614 { 0x00008000, 0x01000},
12615 { 0x00010000, 0x01000},
12616 { 0xffffffff, 0x00000}
12617 }, mem_tbl_5717
[] = {
12618 { 0x00000200, 0x00008},
12619 { 0x00010000, 0x0a000},
12620 { 0x00020000, 0x13c00},
12621 { 0xffffffff, 0x00000}
12622 }, mem_tbl_57765
[] = {
12623 { 0x00000200, 0x00008},
12624 { 0x00004000, 0x00800},
12625 { 0x00006000, 0x09800},
12626 { 0x00010000, 0x0a000},
12627 { 0xffffffff, 0x00000}
12629 struct mem_entry
*mem_tbl
;
12633 if (tg3_flag(tp
, 5717_PLUS
))
12634 mem_tbl
= mem_tbl_5717
;
12635 else if (tg3_flag(tp
, 57765_CLASS
) ||
12636 tg3_asic_rev(tp
) == ASIC_REV_5762
)
12637 mem_tbl
= mem_tbl_57765
;
12638 else if (tg3_flag(tp
, 5755_PLUS
))
12639 mem_tbl
= mem_tbl_5755
;
12640 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
12641 mem_tbl
= mem_tbl_5906
;
12642 else if (tg3_flag(tp
, 5705_PLUS
))
12643 mem_tbl
= mem_tbl_5705
;
12645 mem_tbl
= mem_tbl_570x
;
12647 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
12648 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
12656 #define TG3_TSO_MSS 500
12658 #define TG3_TSO_IP_HDR_LEN 20
12659 #define TG3_TSO_TCP_HDR_LEN 20
12660 #define TG3_TSO_TCP_OPT_LEN 12
12662 static const u8 tg3_tso_header
[] = {
12664 0x45, 0x00, 0x00, 0x00,
12665 0x00, 0x00, 0x40, 0x00,
12666 0x40, 0x06, 0x00, 0x00,
12667 0x0a, 0x00, 0x00, 0x01,
12668 0x0a, 0x00, 0x00, 0x02,
12669 0x0d, 0x00, 0xe0, 0x00,
12670 0x00, 0x00, 0x01, 0x00,
12671 0x00, 0x00, 0x02, 0x00,
12672 0x80, 0x10, 0x10, 0x00,
12673 0x14, 0x09, 0x00, 0x00,
12674 0x01, 0x01, 0x08, 0x0a,
12675 0x11, 0x11, 0x11, 0x11,
12676 0x11, 0x11, 0x11, 0x11,
12679 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, bool tso_loopback
)
12681 u32 rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
12682 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
12684 struct sk_buff
*skb
;
12685 u8
*tx_data
, *rx_data
;
12687 int num_pkts
, tx_len
, rx_len
, i
, err
;
12688 struct tg3_rx_buffer_desc
*desc
;
12689 struct tg3_napi
*tnapi
, *rnapi
;
12690 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
12692 tnapi
= &tp
->napi
[0];
12693 rnapi
= &tp
->napi
[0];
12694 if (tp
->irq_cnt
> 1) {
12695 if (tg3_flag(tp
, ENABLE_RSS
))
12696 rnapi
= &tp
->napi
[1];
12697 if (tg3_flag(tp
, ENABLE_TSS
))
12698 tnapi
= &tp
->napi
[1];
12700 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
12705 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
12709 tx_data
= skb_put(skb
, tx_len
);
12710 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
12711 memset(tx_data
+ 6, 0x0, 8);
12713 tw32(MAC_RX_MTU_SIZE
, tx_len
+ ETH_FCS_LEN
);
12715 if (tso_loopback
) {
12716 struct iphdr
*iph
= (struct iphdr
*)&tx_data
[ETH_HLEN
];
12718 u32 hdr_len
= TG3_TSO_IP_HDR_LEN
+ TG3_TSO_TCP_HDR_LEN
+
12719 TG3_TSO_TCP_OPT_LEN
;
12721 memcpy(tx_data
+ ETH_ALEN
* 2, tg3_tso_header
,
12722 sizeof(tg3_tso_header
));
12725 val
= tx_len
- ETH_ALEN
* 2 - sizeof(tg3_tso_header
);
12726 num_pkts
= DIV_ROUND_UP(val
, TG3_TSO_MSS
);
12728 /* Set the total length field in the IP header */
12729 iph
->tot_len
= htons((u16
)(mss
+ hdr_len
));
12731 base_flags
= (TXD_FLAG_CPU_PRE_DMA
|
12732 TXD_FLAG_CPU_POST_DMA
);
12734 if (tg3_flag(tp
, HW_TSO_1
) ||
12735 tg3_flag(tp
, HW_TSO_2
) ||
12736 tg3_flag(tp
, HW_TSO_3
)) {
12738 val
= ETH_HLEN
+ TG3_TSO_IP_HDR_LEN
;
12739 th
= (struct tcphdr
*)&tx_data
[val
];
12742 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
12744 if (tg3_flag(tp
, HW_TSO_3
)) {
12745 mss
|= (hdr_len
& 0xc) << 12;
12746 if (hdr_len
& 0x10)
12747 base_flags
|= 0x00000010;
12748 base_flags
|= (hdr_len
& 0x3e0) << 5;
12749 } else if (tg3_flag(tp
, HW_TSO_2
))
12750 mss
|= hdr_len
<< 9;
12751 else if (tg3_flag(tp
, HW_TSO_1
) ||
12752 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
12753 mss
|= (TG3_TSO_TCP_OPT_LEN
<< 9);
12755 base_flags
|= (TG3_TSO_TCP_OPT_LEN
<< 10);
12758 data_off
= ETH_ALEN
* 2 + sizeof(tg3_tso_header
);
12761 data_off
= ETH_HLEN
;
12763 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
12764 tx_len
> VLAN_ETH_FRAME_LEN
)
12765 base_flags
|= TXD_FLAG_JMB_PKT
;
12768 for (i
= data_off
; i
< tx_len
; i
++)
12769 tx_data
[i
] = (u8
) (i
& 0xff);
12771 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
12772 if (pci_dma_mapping_error(tp
->pdev
, map
)) {
12773 dev_kfree_skb(skb
);
12777 val
= tnapi
->tx_prod
;
12778 tnapi
->tx_buffers
[val
].skb
= skb
;
12779 dma_unmap_addr_set(&tnapi
->tx_buffers
[val
], mapping
, map
);
12781 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
12786 rx_start_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
12788 budget
= tg3_tx_avail(tnapi
);
12789 if (tg3_tx_frag_set(tnapi
, &val
, &budget
, map
, tx_len
,
12790 base_flags
| TXD_FLAG_END
, mss
, 0)) {
12791 tnapi
->tx_buffers
[val
].skb
= NULL
;
12792 dev_kfree_skb(skb
);
12798 /* Sync BD data before updating mailbox */
12801 tw32_tx_mbox(tnapi
->prodmbox
, tnapi
->tx_prod
);
12802 tr32_mailbox(tnapi
->prodmbox
);
12806 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12807 for (i
= 0; i
< 35; i
++) {
12808 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
12813 tx_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
12814 rx_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
12815 if ((tx_idx
== tnapi
->tx_prod
) &&
12816 (rx_idx
== (rx_start_idx
+ num_pkts
)))
12820 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
- 1, -1);
12821 dev_kfree_skb(skb
);
12823 if (tx_idx
!= tnapi
->tx_prod
)
12826 if (rx_idx
!= rx_start_idx
+ num_pkts
)
12830 while (rx_idx
!= rx_start_idx
) {
12831 desc
= &rnapi
->rx_rcb
[rx_start_idx
++];
12832 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
12833 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
12835 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
12836 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
12839 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
)
12842 if (!tso_loopback
) {
12843 if (rx_len
!= tx_len
)
12846 if (pktsz
<= TG3_RX_STD_DMA_SZ
- ETH_FCS_LEN
) {
12847 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
12850 if (opaque_key
!= RXD_OPAQUE_RING_JUMBO
)
12853 } else if ((desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
12854 (desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
12855 >> RXD_TCPCSUM_SHIFT
!= 0xffff) {
12859 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
12860 rx_data
= tpr
->rx_std_buffers
[desc_idx
].data
;
12861 map
= dma_unmap_addr(&tpr
->rx_std_buffers
[desc_idx
],
12863 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
12864 rx_data
= tpr
->rx_jmb_buffers
[desc_idx
].data
;
12865 map
= dma_unmap_addr(&tpr
->rx_jmb_buffers
[desc_idx
],
12870 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
,
12871 PCI_DMA_FROMDEVICE
);
12873 rx_data
+= TG3_RX_OFFSET(tp
);
12874 for (i
= data_off
; i
< rx_len
; i
++, val
++) {
12875 if (*(rx_data
+ i
) != (u8
) (val
& 0xff))
12882 /* tg3_free_rings will unmap and free the rx_data */
12887 #define TG3_STD_LOOPBACK_FAILED 1
12888 #define TG3_JMB_LOOPBACK_FAILED 2
12889 #define TG3_TSO_LOOPBACK_FAILED 4
12890 #define TG3_LOOPBACK_FAILED \
12891 (TG3_STD_LOOPBACK_FAILED | \
12892 TG3_JMB_LOOPBACK_FAILED | \
12893 TG3_TSO_LOOPBACK_FAILED)
12895 static int tg3_test_loopback(struct tg3
*tp
, u64
*data
, bool do_extlpbk
)
12899 u32 jmb_pkt_sz
= 9000;
12902 jmb_pkt_sz
= tp
->dma_limit
- ETH_HLEN
;
12904 eee_cap
= tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
;
12905 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
12907 if (!netif_running(tp
->dev
)) {
12908 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12909 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12911 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12915 err
= tg3_reset_hw(tp
, 1);
12917 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12918 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12920 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12924 if (tg3_flag(tp
, ENABLE_RSS
)) {
12927 /* Reroute all rx packets to the 1st queue */
12928 for (i
= MAC_RSS_INDIR_TBL_0
;
12929 i
< MAC_RSS_INDIR_TBL_0
+ TG3_RSS_INDIR_TBL_SIZE
; i
+= 4)
12933 /* HW errata - mac loopback fails in some cases on 5780.
12934 * Normal traffic and PHY loopback are not affected by
12935 * errata. Also, the MAC loopback test is deprecated for
12936 * all newer ASIC revisions.
12938 if (tg3_asic_rev(tp
) != ASIC_REV_5780
&&
12939 !tg3_flag(tp
, CPMU_PRESENT
)) {
12940 tg3_mac_loopback(tp
, true);
12942 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12943 data
[TG3_MAC_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
12945 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12946 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12947 data
[TG3_MAC_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
12949 tg3_mac_loopback(tp
, false);
12952 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
12953 !tg3_flag(tp
, USE_PHYLIB
)) {
12956 tg3_phy_lpbk_set(tp
, 0, false);
12958 /* Wait for link */
12959 for (i
= 0; i
< 100; i
++) {
12960 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
12965 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12966 data
[TG3_PHY_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
12967 if (tg3_flag(tp
, TSO_CAPABLE
) &&
12968 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
12969 data
[TG3_PHY_LOOPB_TEST
] |= TG3_TSO_LOOPBACK_FAILED
;
12970 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12971 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12972 data
[TG3_PHY_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
12975 tg3_phy_lpbk_set(tp
, 0, true);
12977 /* All link indications report up, but the hardware
12978 * isn't really ready for about 20 msec. Double it
12983 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12984 data
[TG3_EXT_LOOPB_TEST
] |=
12985 TG3_STD_LOOPBACK_FAILED
;
12986 if (tg3_flag(tp
, TSO_CAPABLE
) &&
12987 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
12988 data
[TG3_EXT_LOOPB_TEST
] |=
12989 TG3_TSO_LOOPBACK_FAILED
;
12990 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12991 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12992 data
[TG3_EXT_LOOPB_TEST
] |=
12993 TG3_JMB_LOOPBACK_FAILED
;
12996 /* Re-enable gphy autopowerdown. */
12997 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
12998 tg3_phy_toggle_apd(tp
, true);
13001 err
= (data
[TG3_MAC_LOOPB_TEST
] | data
[TG3_PHY_LOOPB_TEST
] |
13002 data
[TG3_EXT_LOOPB_TEST
]) ? -EIO
: 0;
13005 tp
->phy_flags
|= eee_cap
;
13010 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
13013 struct tg3
*tp
= netdev_priv(dev
);
13014 bool doextlpbk
= etest
->flags
& ETH_TEST_FL_EXTERNAL_LB
;
13016 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
13017 tg3_power_up(tp
)) {
13018 etest
->flags
|= ETH_TEST_FL_FAILED
;
13019 memset(data
, 1, sizeof(u64
) * TG3_NUM_TEST
);
13023 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
13025 if (tg3_test_nvram(tp
) != 0) {
13026 etest
->flags
|= ETH_TEST_FL_FAILED
;
13027 data
[TG3_NVRAM_TEST
] = 1;
13029 if (!doextlpbk
&& tg3_test_link(tp
)) {
13030 etest
->flags
|= ETH_TEST_FL_FAILED
;
13031 data
[TG3_LINK_TEST
] = 1;
13033 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
13034 int err
, err2
= 0, irq_sync
= 0;
13036 if (netif_running(dev
)) {
13038 tg3_netif_stop(tp
);
13042 tg3_full_lock(tp
, irq_sync
);
13043 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
13044 err
= tg3_nvram_lock(tp
);
13045 tg3_halt_cpu(tp
, RX_CPU_BASE
);
13046 if (!tg3_flag(tp
, 5705_PLUS
))
13047 tg3_halt_cpu(tp
, TX_CPU_BASE
);
13049 tg3_nvram_unlock(tp
);
13051 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
13054 if (tg3_test_registers(tp
) != 0) {
13055 etest
->flags
|= ETH_TEST_FL_FAILED
;
13056 data
[TG3_REGISTER_TEST
] = 1;
13059 if (tg3_test_memory(tp
) != 0) {
13060 etest
->flags
|= ETH_TEST_FL_FAILED
;
13061 data
[TG3_MEMORY_TEST
] = 1;
13065 etest
->flags
|= ETH_TEST_FL_EXTERNAL_LB_DONE
;
13067 if (tg3_test_loopback(tp
, data
, doextlpbk
))
13068 etest
->flags
|= ETH_TEST_FL_FAILED
;
13070 tg3_full_unlock(tp
);
13072 if (tg3_test_interrupt(tp
) != 0) {
13073 etest
->flags
|= ETH_TEST_FL_FAILED
;
13074 data
[TG3_INTERRUPT_TEST
] = 1;
13077 tg3_full_lock(tp
, 0);
13079 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
13080 if (netif_running(dev
)) {
13081 tg3_flag_set(tp
, INIT_COMPLETE
);
13082 err2
= tg3_restart_hw(tp
, 1);
13084 tg3_netif_start(tp
);
13087 tg3_full_unlock(tp
);
13089 if (irq_sync
&& !err2
)
13092 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
13093 tg3_power_down(tp
);
13097 static int tg3_hwtstamp_ioctl(struct net_device
*dev
,
13098 struct ifreq
*ifr
, int cmd
)
13100 struct tg3
*tp
= netdev_priv(dev
);
13101 struct hwtstamp_config stmpconf
;
13103 if (!tg3_flag(tp
, PTP_CAPABLE
))
13106 if (copy_from_user(&stmpconf
, ifr
->ifr_data
, sizeof(stmpconf
)))
13109 if (stmpconf
.flags
)
13112 switch (stmpconf
.tx_type
) {
13113 case HWTSTAMP_TX_ON
:
13114 tg3_flag_set(tp
, TX_TSTAMP_EN
);
13116 case HWTSTAMP_TX_OFF
:
13117 tg3_flag_clear(tp
, TX_TSTAMP_EN
);
13123 switch (stmpconf
.rx_filter
) {
13124 case HWTSTAMP_FILTER_NONE
:
13127 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
13128 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13129 TG3_RX_PTP_CTL_ALL_V1_EVENTS
;
13131 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
13132 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13133 TG3_RX_PTP_CTL_SYNC_EVNT
;
13135 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
13136 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13137 TG3_RX_PTP_CTL_DELAY_REQ
;
13139 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
13140 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13141 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13143 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
13144 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13145 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13147 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
13148 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13149 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13151 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
13152 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13153 TG3_RX_PTP_CTL_SYNC_EVNT
;
13155 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
13156 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13157 TG3_RX_PTP_CTL_SYNC_EVNT
;
13159 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
13160 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13161 TG3_RX_PTP_CTL_SYNC_EVNT
;
13163 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
13164 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13165 TG3_RX_PTP_CTL_DELAY_REQ
;
13167 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
13168 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13169 TG3_RX_PTP_CTL_DELAY_REQ
;
13171 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
13172 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13173 TG3_RX_PTP_CTL_DELAY_REQ
;
13179 if (netif_running(dev
) && tp
->rxptpctl
)
13180 tw32(TG3_RX_PTP_CTL
,
13181 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
13183 return copy_to_user(ifr
->ifr_data
, &stmpconf
, sizeof(stmpconf
)) ?
13187 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
13189 struct mii_ioctl_data
*data
= if_mii(ifr
);
13190 struct tg3
*tp
= netdev_priv(dev
);
13193 if (tg3_flag(tp
, USE_PHYLIB
)) {
13194 struct phy_device
*phydev
;
13195 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
13197 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
13198 return phy_mii_ioctl(phydev
, ifr
, cmd
);
13203 data
->phy_id
= tp
->phy_addr
;
13206 case SIOCGMIIREG
: {
13209 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
13210 break; /* We have no PHY */
13212 if (!netif_running(dev
))
13215 spin_lock_bh(&tp
->lock
);
13216 err
= __tg3_readphy(tp
, data
->phy_id
& 0x1f,
13217 data
->reg_num
& 0x1f, &mii_regval
);
13218 spin_unlock_bh(&tp
->lock
);
13220 data
->val_out
= mii_regval
;
13226 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
13227 break; /* We have no PHY */
13229 if (!netif_running(dev
))
13232 spin_lock_bh(&tp
->lock
);
13233 err
= __tg3_writephy(tp
, data
->phy_id
& 0x1f,
13234 data
->reg_num
& 0x1f, data
->val_in
);
13235 spin_unlock_bh(&tp
->lock
);
13239 case SIOCSHWTSTAMP
:
13240 return tg3_hwtstamp_ioctl(dev
, ifr
, cmd
);
13246 return -EOPNOTSUPP
;
13249 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
13251 struct tg3
*tp
= netdev_priv(dev
);
13253 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
13257 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
13259 struct tg3
*tp
= netdev_priv(dev
);
13260 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
13261 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
13263 if (!tg3_flag(tp
, 5705_PLUS
)) {
13264 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
13265 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
13266 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
13267 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
13270 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
13271 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
13272 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
13273 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
13274 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
13275 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
13276 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
13277 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
13278 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
13279 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
13282 /* No rx interrupts will be generated if both are zero */
13283 if ((ec
->rx_coalesce_usecs
== 0) &&
13284 (ec
->rx_max_coalesced_frames
== 0))
13287 /* No tx interrupts will be generated if both are zero */
13288 if ((ec
->tx_coalesce_usecs
== 0) &&
13289 (ec
->tx_max_coalesced_frames
== 0))
13292 /* Only copy relevant parameters, ignore all others. */
13293 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
13294 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
13295 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
13296 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
13297 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
13298 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
13299 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
13300 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
13301 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
13303 if (netif_running(dev
)) {
13304 tg3_full_lock(tp
, 0);
13305 __tg3_set_coalesce(tp
, &tp
->coal
);
13306 tg3_full_unlock(tp
);
13311 static const struct ethtool_ops tg3_ethtool_ops
= {
13312 .get_settings
= tg3_get_settings
,
13313 .set_settings
= tg3_set_settings
,
13314 .get_drvinfo
= tg3_get_drvinfo
,
13315 .get_regs_len
= tg3_get_regs_len
,
13316 .get_regs
= tg3_get_regs
,
13317 .get_wol
= tg3_get_wol
,
13318 .set_wol
= tg3_set_wol
,
13319 .get_msglevel
= tg3_get_msglevel
,
13320 .set_msglevel
= tg3_set_msglevel
,
13321 .nway_reset
= tg3_nway_reset
,
13322 .get_link
= ethtool_op_get_link
,
13323 .get_eeprom_len
= tg3_get_eeprom_len
,
13324 .get_eeprom
= tg3_get_eeprom
,
13325 .set_eeprom
= tg3_set_eeprom
,
13326 .get_ringparam
= tg3_get_ringparam
,
13327 .set_ringparam
= tg3_set_ringparam
,
13328 .get_pauseparam
= tg3_get_pauseparam
,
13329 .set_pauseparam
= tg3_set_pauseparam
,
13330 .self_test
= tg3_self_test
,
13331 .get_strings
= tg3_get_strings
,
13332 .set_phys_id
= tg3_set_phys_id
,
13333 .get_ethtool_stats
= tg3_get_ethtool_stats
,
13334 .get_coalesce
= tg3_get_coalesce
,
13335 .set_coalesce
= tg3_set_coalesce
,
13336 .get_sset_count
= tg3_get_sset_count
,
13337 .get_rxnfc
= tg3_get_rxnfc
,
13338 .get_rxfh_indir_size
= tg3_get_rxfh_indir_size
,
13339 .get_rxfh_indir
= tg3_get_rxfh_indir
,
13340 .set_rxfh_indir
= tg3_set_rxfh_indir
,
13341 .get_channels
= tg3_get_channels
,
13342 .set_channels
= tg3_set_channels
,
13343 .get_ts_info
= tg3_get_ts_info
,
13346 static struct rtnl_link_stats64
*tg3_get_stats64(struct net_device
*dev
,
13347 struct rtnl_link_stats64
*stats
)
13349 struct tg3
*tp
= netdev_priv(dev
);
13351 spin_lock_bh(&tp
->lock
);
13352 if (!tp
->hw_stats
) {
13353 spin_unlock_bh(&tp
->lock
);
13354 return &tp
->net_stats_prev
;
13357 tg3_get_nstats(tp
, stats
);
13358 spin_unlock_bh(&tp
->lock
);
13363 static void tg3_set_rx_mode(struct net_device
*dev
)
13365 struct tg3
*tp
= netdev_priv(dev
);
13367 if (!netif_running(dev
))
13370 tg3_full_lock(tp
, 0);
13371 __tg3_set_rx_mode(dev
);
13372 tg3_full_unlock(tp
);
13375 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
13378 dev
->mtu
= new_mtu
;
13380 if (new_mtu
> ETH_DATA_LEN
) {
13381 if (tg3_flag(tp
, 5780_CLASS
)) {
13382 netdev_update_features(dev
);
13383 tg3_flag_clear(tp
, TSO_CAPABLE
);
13385 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
13388 if (tg3_flag(tp
, 5780_CLASS
)) {
13389 tg3_flag_set(tp
, TSO_CAPABLE
);
13390 netdev_update_features(dev
);
13392 tg3_flag_clear(tp
, JUMBO_RING_ENABLE
);
13396 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
13398 struct tg3
*tp
= netdev_priv(dev
);
13399 int err
, reset_phy
= 0;
13401 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
13404 if (!netif_running(dev
)) {
13405 /* We'll just catch it later when the
13408 tg3_set_mtu(dev
, tp
, new_mtu
);
13414 tg3_netif_stop(tp
);
13416 tg3_full_lock(tp
, 1);
13418 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
13420 tg3_set_mtu(dev
, tp
, new_mtu
);
13422 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13423 * breaks all requests to 256 bytes.
13425 if (tg3_asic_rev(tp
) == ASIC_REV_57766
)
13428 err
= tg3_restart_hw(tp
, reset_phy
);
13431 tg3_netif_start(tp
);
13433 tg3_full_unlock(tp
);
13441 static const struct net_device_ops tg3_netdev_ops
= {
13442 .ndo_open
= tg3_open
,
13443 .ndo_stop
= tg3_close
,
13444 .ndo_start_xmit
= tg3_start_xmit
,
13445 .ndo_get_stats64
= tg3_get_stats64
,
13446 .ndo_validate_addr
= eth_validate_addr
,
13447 .ndo_set_rx_mode
= tg3_set_rx_mode
,
13448 .ndo_set_mac_address
= tg3_set_mac_addr
,
13449 .ndo_do_ioctl
= tg3_ioctl
,
13450 .ndo_tx_timeout
= tg3_tx_timeout
,
13451 .ndo_change_mtu
= tg3_change_mtu
,
13452 .ndo_fix_features
= tg3_fix_features
,
13453 .ndo_set_features
= tg3_set_features
,
13454 #ifdef CONFIG_NET_POLL_CONTROLLER
13455 .ndo_poll_controller
= tg3_poll_controller
,
13459 static void tg3_get_eeprom_size(struct tg3
*tp
)
13461 u32 cursize
, val
, magic
;
13463 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
13465 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
13468 if ((magic
!= TG3_EEPROM_MAGIC
) &&
13469 ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
) &&
13470 ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) != TG3_EEPROM_MAGIC_HW
))
13474 * Size the chip by reading offsets at increasing powers of two.
13475 * When we encounter our validation signature, we know the addressing
13476 * has wrapped around, and thus have our chip size.
13480 while (cursize
< tp
->nvram_size
) {
13481 if (tg3_nvram_read(tp
, cursize
, &val
) != 0)
13490 tp
->nvram_size
= cursize
;
13493 static void tg3_get_nvram_size(struct tg3
*tp
)
13497 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &val
) != 0)
13500 /* Selfboot format */
13501 if (val
!= TG3_EEPROM_MAGIC
) {
13502 tg3_get_eeprom_size(tp
);
13506 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
13508 /* This is confusing. We want to operate on the
13509 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13510 * call will read from NVRAM and byteswap the data
13511 * according to the byteswapping settings for all
13512 * other register accesses. This ensures the data we
13513 * want will always reside in the lower 16-bits.
13514 * However, the data in NVRAM is in LE format, which
13515 * means the data from the NVRAM read will always be
13516 * opposite the endianness of the CPU. The 16-bit
13517 * byteswap then brings the data to CPU endianness.
13519 tp
->nvram_size
= swab16((u16
)(val
& 0x0000ffff)) * 1024;
13523 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
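/* Illustrative sketch (not part of the driver): the linear
 * vendor/device table scan used by tg3_lookup_by_subsys(), reduced to
 * standard C.  The table rows and the lookup() helper are made up for
 * the demonstration; the point is that a miss returns NULL and the
 * caller must handle it, as tg3_phy_probe() does.
 */
#if 0	/* example only, not compiled into tg3 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ent { uint16_t vendor, devid; uint32_t phy_id; };

static const struct ent table[] = {
	{ 0x14e4, 0x1644, 0x60008002 },	/* hypothetical rows */
	{ 0x10b7, 0x1000, 0x60008012 },
};

static const struct ent *lookup(uint16_t vendor, uint16_t devid)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].vendor == vendor && table[i].devid == devid)
			return &table[i];
	return NULL;
}

int main(void)
{
	const struct ent *p = lookup(0x10b7, 0x1000);

	printf("%s\n", p ? "hit" : "miss");	/* prints "hit" */
	return 0;
}
#endif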
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
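/* Illustrative sketch (not part of the driver): the PHY ID packing
 * used above and again in tg3_phy_probe().  Two 16-bit ID words are
 * folded into one 32-bit value so OUI, model and revision can be
 * compared against the TG3_PHY_ID_* constants.  The register values
 * below are made up for the demonstration.
 */
#if 0	/* example only, not compiled into tg3 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t physid1 = 0x0020;	/* hypothetical MII_PHYSID1 word */
	uint32_t physid2 = 0x60b0;	/* hypothetical MII_PHYSID2 word */
	uint32_t phy_id;

	phy_id  = (physid1 & 0xffff) << 10;	/* OUI high bits */
	phy_id |= (physid2 & 0xfc00) << 16;	/* OUI low bits  */
	phy_id |= (physid2 & 0x03ff) << 0;	/* model + revision */

	printf("packed phy_id = 0x%08x\n", (unsigned)phy_id);
	return 0;
}
#endif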
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
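/* Illustrative sketch (not part of the driver): the bounded-poll idiom
 * shared by tg3_issue_otp_command() and tg3_ape_otp_read() -- spin on a
 * status predicate with a fixed per-iteration delay and report a busy
 * error when the budget (here 100 * 10 us = 1 ms) runs out.  The
 * hardware is replaced by a counter so the sketch runs anywhere;
 * fake_cmd_done() is hypothetical.
 */
#if 0	/* example only, not compiled into tg3 */
#include <stdio.h>

static int fake_ticks;

static int fake_cmd_done(void)
{
	return ++fake_ticks >= 7;	/* "hardware" finishes on poll 7 */
}

static int poll_until_done(void)
{
	int i, done = 0;

	for (i = 0; i < 100; i++) {
		done = fake_cmd_done();
		if (done)
			break;
		/* udelay(10) would sit here in the driver */
	}

	return done ? 0 : -1;		/* -EBUSY in the driver */
}

int main(void)
{
	printf("result=%d after %d polls\n", poll_until_done(), fake_ticks);
	return 0;
}
#endif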
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
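/* Illustrative sketch (not part of the driver): the shift-and-merge in
 * tg3_read_otp_phycfg().  A 32-bit field straddles two aligned OTP
 * words; the low half of the first word supplies the top 16 bits and
 * the high half of the second word supplies the bottom 16 bits.  The
 * word contents below are made up for the demonstration.
 */
#if 0	/* example only, not compiled into tg3 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t thalf = 0xaaaa1234;	/* word at MAGIC1: low half is ours  */
	uint32_t bhalf = 0x5678bbbb;	/* word at MAGIC2: high half is ours */
	uint32_t cfg = ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);

	printf("merged gphy cfg = 0x%08x\n", (unsigned)cfg);  /* 0x12345678 */
	return 0;
}
#endif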
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785 and there are
				 * probably more.  Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0x20000000)
		return 0;

	return 1;
}
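/* Illustrative sketch (not part of the driver): the mask-and-compare
 * idiom used by tg3_fw_img_is_valid().  Only the top bits of the image
 * word are significant; a word is plausible when they decode to the
 * expected pattern.  The test values below are made up.
 */
#if 0	/* example only, not compiled into tg3 */
#include <stdint.h>
#include <stdio.h>

static int img_word_ok(uint32_t w)
{
	return (w & 0xfc000000) == 0x0c000000;
}

int main(void)
{
	printf("%d %d\n", img_word_ok(0x0c12abcd), img_word_ok(0x1c12abcd));
	/* prints "1 0" */
	return 0;
}
#endif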
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
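/* Illustrative sketch (not part of the driver): how the peer scan
 * enumerates sibling functions.  A PCI devfn packs the device number
 * in the upper 5 bits and the function number in the lower 3 bits;
 * masking with ~7 keeps the device and OR-ing in 0..7 walks every
 * function in that slot.  Standalone demonstration:
 */
#if 0	/* example only, not compiled into tg3 */
#include <stdio.h>

int main(void)
{
	unsigned int devfn = (3 << 3) | 1;	/* device 3, function 1 */
	unsigned int devnr = devfn & ~7;	/* device 3, function 0 */
	unsigned int func;

	for (func = 0; func < 8; func++)
		printf("probe devfn 0x%02x\n", devnr | func);
	return 0;
}
#endif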
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
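/* Illustrative sketch (not part of the driver): deriving successively
 * coarser revision views from MISC_HOST_CTRL.  The chip revision id
 * sits in the top 16 bits of the register; dropping the low hex digit
 * gives the chip rev and dropping one more gives the asic rev.  The
 * shift widths below are modeled on this driver's accessors but should
 * be treated as assumptions for the purposes of this demo.
 */
#if 0	/* example only, not compiled into tg3 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t misc_host_ctrl = 0x40010000;	/* rev id in top 16 bits */
	uint32_t chip_rev_id = misc_host_ctrl >> 16;	/* 0x4001 */
	uint32_t chip_rev = chip_rev_id >> 8;		/* 0x40   */
	uint32_t asic_rev = chip_rev_id >> 12;		/* 0x4    */

	printf("chip_rev_id=0x%04x chip_rev=0x%02x asic_rev=0x%x\n",
	       (unsigned)chip_rev_id, (unsigned)chip_rev, (unsigned)asic_rev);
	return 0;
}
#endif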
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
15281 static int tg3_get_invariants(struct tg3
*tp
, const struct pci_device_id
*ent
)
15284 u32 pci_state_reg
, grc_misc_cfg
;
15289 /* Force memory write invalidate off. If we leave it on,
15290 * then on 5700_BX chips we have to enable a workaround.
15291 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15292 * to match the cacheline size. The Broadcom driver have this
15293 * workaround but turns MWI off all the times so never uses
15294 * it. This seems to suggest that the workaround is insufficient.
15296 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15297 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
15298 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15300 /* Important! -- Make sure register accesses are byteswapped
15301 * correctly. Also, for those chips that require it, make
15302 * sure that indirect register accesses are enabled before
15303 * the first operation.
15305 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15307 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
15308 MISC_HOST_CTRL_CHIPREV
);
15309 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15310 tp
->misc_host_ctrl
);
15312 tg3_detect_asic_rev(tp
, misc_ctrl_reg
);
15314 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15315 * we need to disable memory and use config. cycles
15316 * only to access all registers. The 5702/03 chips
15317 * can mistakenly decode the special cycles from the
15318 * ICH chipsets as memory write cycles, causing corruption
15319 * of register and memory space. Only certain ICH bridges
15320 * will drive special cycles with non-zero data during the
15321 * address phase which can fall within the 5703's address
15322 * range. This is not an ICH bug as the PCI spec allows
15323 * non-zero address during special cycles. However, only
15324 * these ICH bridges are known to drive non-zero addresses
15325 * during special cycles.
15327 * Since special cycles do not cross PCI bridges, we only
15328 * enable this workaround if the 5703 is on the secondary
15329 * bus of these ICH bridges.
15331 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
) ||
15332 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A2
)) {
15333 static struct tg3_dev_id
{
15337 } ich_chipsets
[] = {
15338 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
15340 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
15342 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
15344 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
15348 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
15349 struct pci_dev
*bridge
= NULL
;
15351 while (pci_id
->vendor
!= 0) {
15352 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
15358 if (pci_id
->rev
!= PCI_ANY_ID
) {
15359 if (bridge
->revision
> pci_id
->rev
)
15362 if (bridge
->subordinate
&&
15363 (bridge
->subordinate
->number
==
15364 tp
->pdev
->bus
->number
)) {
15365 tg3_flag_set(tp
, ICH_WORKAROUND
);
15366 pci_dev_put(bridge
);
15372 if (tg3_asic_rev(tp
) == ASIC_REV_5701
) {
15373 static struct tg3_dev_id
{
15376 } bridge_chipsets
[] = {
15377 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
15378 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
15381 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
15382 struct pci_dev
*bridge
= NULL
;
15384 while (pci_id
->vendor
!= 0) {
15385 bridge
= pci_get_device(pci_id
->vendor
,
15392 if (bridge
->subordinate
&&
15393 (bridge
->subordinate
->number
<=
15394 tp
->pdev
->bus
->number
) &&
15395 (bridge
->subordinate
->busn_res
.end
>=
15396 tp
->pdev
->bus
->number
)) {
15397 tg3_flag_set(tp
, 5701_DMA_BUG
);
15398 pci_dev_put(bridge
);
15404 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15405 * DMA addresses > 40-bit. This bridge may have other additional
15406 * 57xx devices behind it in some 4-port NIC designs for example.
15407 * Any tg3 device found behind the bridge will also need the 40-bit
15410 if (tg3_flag(tp
, 5780_CLASS
)) {
15411 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15412 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
15414 struct pci_dev
*bridge
= NULL
;
15417 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
15418 PCI_DEVICE_ID_SERVERWORKS_EPB
,
15420 if (bridge
&& bridge
->subordinate
&&
15421 (bridge
->subordinate
->number
<=
15422 tp
->pdev
->bus
->number
) &&
15423 (bridge
->subordinate
->busn_res
.end
>=
15424 tp
->pdev
->bus
->number
)) {
15425 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15426 pci_dev_put(bridge
);
15432 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
15433 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15434 tp
->pdev_peer
= tg3_find_peer(tp
);
15436 /* Determine TSO capabilities */
15437 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
)
15438 ; /* Do nothing. HW bug. */
15439 else if (tg3_flag(tp
, 57765_PLUS
))
15440 tg3_flag_set(tp
, HW_TSO_3
);
15441 else if (tg3_flag(tp
, 5755_PLUS
) ||
15442 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15443 tg3_flag_set(tp
, HW_TSO_2
);
15444 else if (tg3_flag(tp
, 5750_PLUS
)) {
15445 tg3_flag_set(tp
, HW_TSO_1
);
15446 tg3_flag_set(tp
, TSO_BUG
);
15447 if (tg3_asic_rev(tp
) == ASIC_REV_5750
&&
15448 tg3_chip_rev_id(tp
) >= CHIPREV_ID_5750_C2
)
15449 tg3_flag_clear(tp
, TSO_BUG
);
15450 } else if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
15451 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
15452 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
15453 tg3_flag_set(tp
, FW_TSO
);
15454 tg3_flag_set(tp
, TSO_BUG
);
15455 if (tg3_asic_rev(tp
) == ASIC_REV_5705
)
15456 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
15458 tp
->fw_needed
= FIRMWARE_TG3TSO
;
15461 /* Selectively allow TSO based on operating conditions */
15462 if (tg3_flag(tp
, HW_TSO_1
) ||
15463 tg3_flag(tp
, HW_TSO_2
) ||
15464 tg3_flag(tp
, HW_TSO_3
) ||
15465 tg3_flag(tp
, FW_TSO
)) {
15466 /* For firmware TSO, assume ASF is disabled.
15467 * We'll disable TSO later if we discover ASF
15468 * is enabled in tg3_get_eeprom_hw_cfg().
15470 tg3_flag_set(tp
, TSO_CAPABLE
);
15472 tg3_flag_clear(tp
, TSO_CAPABLE
);
15473 tg3_flag_clear(tp
, TSO_BUG
);
15474 tp
->fw_needed
= NULL
;
15477 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
)
15478 tp
->fw_needed
= FIRMWARE_TG3
;
15480 if (tg3_asic_rev(tp
) == ASIC_REV_57766
)
15481 tp
->fw_needed
= FIRMWARE_TG357766
;
15485 if (tg3_flag(tp
, 5750_PLUS
)) {
15486 tg3_flag_set(tp
, SUPPORT_MSI
);
15487 if (tg3_chip_rev(tp
) == CHIPREV_5750_AX
||
15488 tg3_chip_rev(tp
) == CHIPREV_5750_BX
||
15489 (tg3_asic_rev(tp
) == ASIC_REV_5714
&&
15490 tg3_chip_rev_id(tp
) <= CHIPREV_ID_5714_A2
&&
15491 tp
->pdev_peer
== tp
->pdev
))
15492 tg3_flag_clear(tp
, SUPPORT_MSI
);
15494 if (tg3_flag(tp
, 5755_PLUS
) ||
15495 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15496 tg3_flag_set(tp
, 1SHOT_MSI
);
15499 if (tg3_flag(tp
, 57765_PLUS
)) {
15500 tg3_flag_set(tp
, SUPPORT_MSIX
);
15501 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
15507 if (tp
->irq_max
> 1) {
15508 tp
->rxq_max
= TG3_RSS_MAX_NUM_QS
;
15509 tg3_rss_init_dflt_indir_tbl(tp
, TG3_RSS_MAX_NUM_QS
);
15511 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
15512 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15513 tp
->txq_max
= tp
->irq_max
- 1;
	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe
		 * capabilities section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;
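
	/* For reference, the fast-path accessors installed above are the
	 * trivial MMIO helpers defined earlier in this file, essentially:
	 *
	 *	static u32 tg3_read32(struct tg3 *tp, u32 off)
	 *	{
	 *		return readl(tp->regs + off);
	 *	}
	 *
	 * The workaround variants selected below swap these pointers out
	 * when the chip cannot be trusted with direct, unflushed MMIO.
	 */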
	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
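
	/* The indirect accessors selected above avoid MMIO entirely and
	 * tunnel register accesses through PCI config space instead.
	 * Sketched (the real helpers earlier in this file also take
	 * tp->indirect_lock and special-case mailbox offsets):
	 *
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	 */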
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
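
/* MAC address discovery below, in decreasing order of preference: an
 * OpenFirmware "local-mac-address" property (SPARC), the SSB host (for
 * SSB GigE cores), the SRAM mailbox written by bootcode, NVRAM, and
 * finally the MAC_ADDR_0 registers themselves.
 */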
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif

static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
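
/* DMA read/write boundary tuning.  tg3_calc_dma_bndry() folds the host
 * cacheline size and bus type into the DMA_RWCTRL boundary bits:
 * BOUNDARY_SINGLE_CACHELINE asks the chip to break bursts at one
 * cacheline, BOUNDARY_MULTI_CACHELINE allows larger bursts, and a goal
 * of 0 leaves the chip defaults untouched.  The result is programmed
 * into TG3PCI_DMA_RW_CTRL by tg3_test_dma() below.
 */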
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
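
/* tg3_do_test_dma() exercises one DMA transfer end to end: the test
 * descriptor is written into NIC SRAM through the config-space memory
 * window, the appropriate FTQ is kicked, and completion is detected by
 * polling the FIFO enqueue/dequeue register for the descriptor address.
 * A nonzero return means the transfer never completed.
 */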
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
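
/* Outcome summary for tg3_test_dma(): on 5700/5701 the test runs with
 * the maximum write burst to provoke the write-DMA bug; if corruption
 * is seen, or a chipset from tg3_dma_wait_state_chipsets (Apple
 * UniNorth) is present, the write boundary is clamped to 16 bytes,
 * otherwise the boundary computed by tg3_calc_dma_bndry() is kept.
 */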
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
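
/* The three tiers above mirror the hardware generations: 57765+ parts,
 * 5705+ parts (with 5906 overrides), and the original 570x parts.
 * Note the 5705+ jumbo watermarks reuse the 5780-family defaults.
 */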
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
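
/* tg3_bus_string() formats into a caller-supplied buffer; tg3_init_one()
 * below passes a small stack buffer and prints the result in the probe
 * banner, e.g. "PCIX:133MHz:64-bit" or "PCI Express".
 */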
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
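
/* These defaults are what "ethtool -c <iface>" reports once the device
 * is registered; they can be tuned at runtime with "ethtool -C",
 * subject to the 5705+ restrictions applied above (per-IRQ and
 * statistics coalescing are unavailable there).
 */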
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
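
/* The error paths above unwind strictly in reverse order of
 * acquisition: APE mapping, register mapping, netdev, power state,
 * PCI regions, and finally the PCI device itself.
 */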
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else /* CONFIG_PM_SLEEP */

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
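
/* SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops that routes the
 * system sleep callbacks (suspend/resume, freeze/thaw, poweroff/restore)
 * to the two handlers above; runtime-PM callbacks are left unset.
 */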
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
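
/* Usage note: the driver binds to the devices in tg3_pci_tbl at module
 * load time ("modprobe tg3").  Any firmware image recorded in
 * tp->fw_needed by tg3_get_invariants() is fetched with
 * request_firmware() when the interface is first opened, not at probe.
 */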