/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include <asm/idprom.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)			\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)			\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)		\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
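
/* Illustrative usage sketch (editorial note, not part of the original file):
 * the wrappers above let callers test and modify feature bits by name, with
 * the TG3_FLAG_ prefix pasted on by the macros, e.g.:
 *
 *	if (tg3_flag(tp, ENABLE_APE))		// tests TG3_FLAG_ENABLE_APE
 *		tg3_flag_set(tp, TAGGED_STATUS);
 */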
#define DRV_MODULE_NAME		"tg3"

#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
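
/* Worked example (editorial note): because TG3_TX_RING_SIZE is a power of
 * two, NEXT_TX() wraps with a mask instead of a modulo, e.g.
 * NEXT_TX(510) == 511 and NEXT_TX(511) == (512 & 511) == 0.
 */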
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
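
/* Editorial note (hedged): these are paths under the system firmware
 * directory (typically /lib/firmware); the blobs are fetched at run time
 * elsewhere in the driver via the request_firmware() interface, and the
 * MODULE_FIRMWARE() tags below let userspace tooling discover them.
 */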
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
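
/* Editorial note (hedged): when one of these entries matches at probe time,
 * the PCI core hands the matched pci_device_id back to the driver's probe
 * routine, so the .driver_data flags above are how the 10/100-only board
 * variants are recognized later in the driver.
 */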
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },
	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },
	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
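
/* Illustrative sketch (editorial note, not part of the original file):
 * register access below funnels through these macros, so the indirect and
 * flushing variants can be swapped in per chip at probe time, e.g.:
 *
 *	tw32(GRC_MODE, tp->grc_mode);	// plain posted write
 *	tw32_f(MAC_MODE, tp->mac_mode);	// write followed by read-back flush
 *	val = tr32(MAC_STATUS);		// register read
 */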
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off = 4 * locknum;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
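
/* Editorial note on the timing: the loop above polls once every 10 usec,
 * so a timeout_us of 30000 (as used by tg3_ape_scratchpad_read() below)
 * allows 3000 polls, i.e. roughly a 30 msec budget before giving up.
 */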
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
*tp
, unsigned int phy_addr
, int reg
,
1166 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
1167 (reg
== MII_CTRL1000
|| reg
== MII_TG3_AUX_CTRL
))
1170 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1172 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1176 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1178 frame_val
= ((phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1179 MI_COM_PHY_ADDR_MASK
);
1180 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1181 MI_COM_REG_ADDR_MASK
);
1182 frame_val
|= (val
& MI_COM_DATA_MASK
);
1183 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
1185 tw32_f(MAC_MI_COM
, frame_val
);
1187 loops
= PHY_BUSY_LOOPS
;
1188 while (loops
!= 0) {
1190 frame_val
= tr32(MAC_MI_COM
);
1191 if ((frame_val
& MI_COM_BUSY
) == 0) {
1193 frame_val
= tr32(MAC_MI_COM
);
1203 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1204 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1208 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1213 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
1215 return __tg3_writephy(tp
, tp
->phy_addr
, reg
, val
);
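
/* Illustrative read-modify-write sketch (editorial, not part of the
 * original file): most PHY tweaks in this driver layer this pattern on
 * top of the two helpers above, e.g. to set a bit in a MII register:
 *
 *	u32 val;
 *	if (!tg3_readphy(tp, MII_BMCR, &val))
 *		tg3_writephy(tp, MII_BMCR, val | BMCR_LOOPBACK);
 */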
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
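
/* Editorial note: the DSP registers sit behind an address/data window --
 * each access is a two-step sequence of writing the target register number
 * to MII_TG3_DSP_ADDRESS and then moving the data through
 * MII_TG3_DSP_RW_PORT, which is exactly what the two helpers above wrap.
 */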
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
*tp
)
1333 /* OK, reset it, and poll the BMCR_RESET bit until it
1334 * clears or we time out.
1336 phy_control
= BMCR_RESET
;
1337 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1343 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1347 if ((phy_control
& BMCR_RESET
) == 0) {
1359 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1361 struct tg3
*tp
= bp
->priv
;
1364 spin_lock_bh(&tp
->lock
);
1366 if (tg3_readphy(tp
, reg
, &val
))
1369 spin_unlock_bh(&tp
->lock
);
1374 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1376 struct tg3
*tp
= bp
->priv
;
1379 spin_lock_bh(&tp
->lock
);
1381 if (tg3_writephy(tp
, reg
, val
))
1384 spin_unlock_bh(&tp
->lock
);
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
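
/* Worked example (editorial): with TG3_FW_EVENT_TIMEOUT_USEC == 2500 the
 * clamp above yields delay_cnt = (2500 >> 3) + 1 = 313 polls of udelay(8),
 * i.e. about 2.5 msec of waiting in the worst case.
 */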
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1677 static void tg3_ump_link_report(struct tg3
*tp
)
1681 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1684 tg3_phy_gather_ump_data(tp
, data
);
1686 tg3_wait_for_event_ack(tp
);
1688 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1689 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1690 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1691 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1692 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1693 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1695 tg3_generate_fw_event(tp
);
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
*tp
)
1848 if (!netif_carrier_ok(tp
->dev
)) {
1849 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1850 tg3_ump_link_report(tp
);
1851 } else if (netif_msg_link(tp
)) {
1852 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1853 (tp
->link_config
.active_speed
== SPEED_1000
?
1855 (tp
->link_config
.active_speed
== SPEED_100
?
1857 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1860 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1861 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1863 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1866 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
1867 netdev_info(tp
->dev
, "EEE is %s\n",
1868 tp
->setlpicnt
? "enabled" : "disabled");
1870 tg3_ump_link_report(tp
);
1874 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1878 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1879 miireg
= ADVERTISE_1000XPAUSE
;
1880 else if (flow_ctrl
& FLOW_CTRL_TX
)
1881 miireg
= ADVERTISE_1000XPSE_ASYM
;
1882 else if (flow_ctrl
& FLOW_CTRL_RX
)
1883 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
*tp
, u32 lcladv
, u32 rmtadv
)
1910 u32 old_rx_mode
= tp
->rx_mode
;
1911 u32 old_tx_mode
= tp
->tx_mode
;
1913 if (tg3_flag(tp
, USE_PHYLIB
))
1914 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1916 autoneg
= tp
->link_config
.autoneg
;
1918 if (autoneg
== AUTONEG_ENABLE
&& tg3_flag(tp
, PAUSE_AUTONEG
)) {
1919 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1920 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1922 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1924 flowctrl
= tp
->link_config
.flowctrl
;
1926 tp
->link_config
.active_flowctrl
= flowctrl
;
1928 if (flowctrl
& FLOW_CTRL_RX
)
1929 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1931 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1933 if (old_rx_mode
!= tp
->rx_mode
)
1934 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1936 if (flowctrl
& FLOW_CTRL_TX
)
1937 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1939 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1941 if (old_tx_mode
!= tp
->tx_mode
)
1942 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1945 static void tg3_adjust_link(struct net_device
*dev
)
1947 u8 oldflowctrl
, linkmesg
= 0;
1948 u32 mac_mode
, lcl_adv
, rmt_adv
;
1949 struct tg3
*tp
= netdev_priv(dev
);
1950 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1952 spin_lock_bh(&tp
->lock
);
1954 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
1955 MAC_MODE_HALF_DUPLEX
);
1957 oldflowctrl
= tp
->link_config
.active_flowctrl
;
1963 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
1964 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1965 else if (phydev
->speed
== SPEED_1000
||
1966 tg3_asic_rev(tp
) != ASIC_REV_5785
)
1967 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1969 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1971 if (phydev
->duplex
== DUPLEX_HALF
)
1972 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1974 lcl_adv
= mii_advertise_flowctrl(
1975 tp
->link_config
.flowctrl
);
1978 rmt_adv
= LPA_PAUSE_CAP
;
1979 if (phydev
->asym_pause
)
1980 rmt_adv
|= LPA_PAUSE_ASYM
;
1983 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
1985 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1987 if (mac_mode
!= tp
->mac_mode
) {
1988 tp
->mac_mode
= mac_mode
;
1989 tw32_f(MAC_MODE
, tp
->mac_mode
);
1993 if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
1994 if (phydev
->speed
== SPEED_10
)
1996 MAC_MI_STAT_10MBPS_MODE
|
1997 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1999 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
2002 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
2003 tw32(MAC_TX_LENGTHS
,
2004 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2005 (6 << TX_LENGTHS_IPG_SHIFT
) |
2006 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2008 tw32(MAC_TX_LENGTHS
,
2009 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2010 (6 << TX_LENGTHS_IPG_SHIFT
) |
2011 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2013 if (phydev
->link
!= tp
->old_link
||
2014 phydev
->speed
!= tp
->link_config
.active_speed
||
2015 phydev
->duplex
!= tp
->link_config
.active_duplex
||
2016 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
2019 tp
->old_link
= phydev
->link
;
2020 tp
->link_config
.active_speed
= phydev
->speed
;
2021 tp
->link_config
.active_duplex
= phydev
->duplex
;
2023 spin_unlock_bh(&tp
->lock
);
2026 tg3_link_report(tp
);
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
    u32 phy;

    if (!tg3_flag(tp, 5705_PLUS) ||
        (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
        return;

    if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
        u32 ephy;

        if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
            u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

            tg3_writephy(tp, MII_TG3_FET_TEST,
                         ephy | MII_TG3_FET_SHADOW_EN);
            if (!tg3_readphy(tp, reg, &phy)) {
                if (enable)
                    phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                else
                    phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                tg3_writephy(tp, reg, phy);
            }
            tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
        }
    } else {
        int ret;

        ret = tg3_phy_auxctl_read(tp,
                                  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
        if (!ret) {
            if (enable)
                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
            else
                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
            tg3_phy_auxctl_write(tp,
                                 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
        }
    }
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
    int ret;
    u32 val;

    if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
        return;

    ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
    if (!ret)
        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
                             val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
    u32 otp, phy;

    if (!tp->phy_otp)
        return;

    otp = tp->phy_otp;

    if (tg3_phy_toggle_auxctl_smdsp(tp, true))
        return;

    phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
    phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
    tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

    phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
          ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
    tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

    phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
    phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
    tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

    phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
    tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

    phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
    tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

    phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
          ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
    tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

    tg3_phy_toggle_auxctl_smdsp(tp, false);
}
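/* Note (added): tp->phy_otp packs several PHY DSP calibration fields into
 * one 32-bit word.  Each block above follows the same pattern: mask the
 * field out, shift it down, OR in any required default bits, then write
 * the result to the corresponding DSP tap via tg3_phydsp_write().
 */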
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
    u32 val;

    if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
        return;

    tp->setlpicnt = 0;

    if (tp->link_config.autoneg == AUTONEG_ENABLE &&
        current_link_up == 1 &&
        tp->link_config.active_duplex == DUPLEX_FULL &&
        (tp->link_config.active_speed == SPEED_100 ||
         tp->link_config.active_speed == SPEED_1000)) {
        u32 eeectl;

        if (tp->link_config.active_speed == SPEED_1000)
            eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
        else
            eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

        tw32(TG3_CPMU_EEE_CTRL, eeectl);

        tg3_phy_cl45_read(tp, MDIO_MMD_AN,
                          TG3_CL45_D7_EEERES_STAT, &val);

        if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
            val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
            tp->setlpicnt = 2;
    }

    if (!tp->setlpicnt) {
        if (current_link_up == 1 &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
            tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
            tg3_phy_toggle_auxctl_smdsp(tp, false);
        }

        val = tr32(TG3_CPMU_EEE_MODE);
        tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
    }
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
    u32 val;

    if (tp->link_config.active_speed == SPEED_1000 &&
        (tg3_asic_rev(tp) == ASIC_REV_5717 ||
         tg3_asic_rev(tp) == ASIC_REV_5719 ||
         tg3_flag(tp, 57765_CLASS)) &&
        !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
        val = MII_TG3_DSP_TAP26_ALNOKO |
              MII_TG3_DSP_TAP26_RMRXSTO;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
        tg3_phy_toggle_auxctl_smdsp(tp, false);
    }

    val = tr32(TG3_CPMU_EEE_MODE);
    tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
    int limit = 100;

    while (limit--) {
        u32 tmp32;

        if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
            if ((tmp32 & 0x1000) == 0)
                break;
        }
    }
    if (limit < 0)
        return -EBUSY;

    return 0;
}
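/* Usage note (added, not from the original sources): the DSP macro
 * sequencer handshake above is polled rather than interrupt driven.
 * Callers such as tg3_phy_write_and_check_testpat() issue a command via
 * MII_TG3_DSP_CONTROL and then spin here until the busy bit (0x1000)
 * clears, e.g.:
 *
 *    tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 *    if (tg3_wait_macro_done(tp))
 *        return -EBUSY;    // sequencer never went idle
 */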
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
    static const u32 test_pat[4][6] = {
    { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
    { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
    { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
    { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
    };
    int chan;

    for (chan = 0; chan < 4; chan++) {
        int i;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                     (chan * 0x2000) | 0x0200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

        for (i = 0; i < 6; i++)
            tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                         test_pat[chan][i]);

        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
        if (tg3_wait_macro_done(tp)) {
            *resetp = 1;
            return -EBUSY;
        }

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                     (chan * 0x2000) | 0x0200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
        if (tg3_wait_macro_done(tp)) {
            *resetp = 1;
            return -EBUSY;
        }

        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
        if (tg3_wait_macro_done(tp)) {
            *resetp = 1;
            return -EBUSY;
        }

        for (i = 0; i < 6; i += 2) {
            u32 low, high;

            if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                tg3_wait_macro_done(tp)) {
                *resetp = 1;
                return -EBUSY;
            }
            low &= 0x7fff;
            high &= 0x000f;
            if (low != test_pat[chan][i] ||
                high != test_pat[chan][i+1]) {
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                return -EBUSY;
            }
        }
    }

    return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
    int chan;

    for (chan = 0; chan < 4; chan++) {
        int i;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                     (chan * 0x2000) | 0x0200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
        for (i = 0; i < 6; i++)
            tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
        if (tg3_wait_macro_done(tp))
            return -EBUSY;
    }

    return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
    u32 reg32, phy9_orig;
    int retries, do_phy_reset, err;

    retries = 10;
    do_phy_reset = 1;
    do {
        if (do_phy_reset) {
            err = tg3_bmcr_reset(tp);
            if (err)
                return err;
            do_phy_reset = 0;
        }

        /* Disable transmitter and interrupt. */
        if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
            continue;

        reg32 |= 0x3000;
        tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

        /* Set full-duplex, 1000 mbps. */
        tg3_writephy(tp, MII_BMCR,
                     BMCR_FULLDPLX | BMCR_SPEED1000);

        /* Set to master mode. */
        if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
            continue;

        tg3_writephy(tp, MII_CTRL1000,
                     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

        err = tg3_phy_toggle_auxctl_smdsp(tp, true);
        if (err)
            return err;

        /* Block the PHY control access. */
        tg3_phydsp_write(tp, 0x8005, 0x0800);

        err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
        if (!err)
            break;
    } while (--retries);

    err = tg3_phy_reset_chanpat(tp);
    if (err)
        return err;

    tg3_phydsp_write(tp, 0x8005, 0x0000);

    tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
    tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

    tg3_phy_toggle_auxctl_smdsp(tp, false);

    tg3_writephy(tp, MII_CTRL1000, phy9_orig);

    if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
        reg32 &= ~0x3000;
        tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
    } else if (!err)
        err = -EBUSY;

    return err;
}
static void tg3_carrier_on(struct tg3 *tp)
{
    netif_carrier_on(tp->dev);
    tp->link_up = true;
}

static void tg3_carrier_off(struct tg3 *tp)
{
    netif_carrier_off(tp->dev);
    tp->link_up = false;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
    u32 val, cpmuctrl;
    int err;

    if (tg3_asic_rev(tp) == ASIC_REV_5906) {
        val = tr32(GRC_MISC_CFG);
        tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
        udelay(40);
    }
    err  = tg3_readphy(tp, MII_BMSR, &val);
    err |= tg3_readphy(tp, MII_BMSR, &val);
    if (err != 0)
        return -EBUSY;

    if (netif_running(tp->dev) && tp->link_up) {
        tg3_carrier_off(tp);
        tg3_link_report(tp);
    }

    if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
        tg3_asic_rev(tp) == ASIC_REV_5704 ||
        tg3_asic_rev(tp) == ASIC_REV_5705) {
        err = tg3_phy_reset_5703_4_5(tp);
        if (err)
            return err;
        goto out;
    }

    cpmuctrl = 0;
    if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
        tg3_chip_rev(tp) != CHIPREV_5784_AX) {
        cpmuctrl = tr32(TG3_CPMU_CTRL);
        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
            tw32(TG3_CPMU_CTRL,
                 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
    }

    err = tg3_bmcr_reset(tp);
    if (err)
        return err;

    if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
        val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

        tw32(TG3_CPMU_CTRL, cpmuctrl);
    }

    if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
        tg3_chip_rev(tp) == CHIPREV_5761_AX) {
        val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
        if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
            CPMU_LSPD_1000MB_MACCLK_12_5) {
            val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
            udelay(40);
            tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }
    }

    if (tg3_flag(tp, 5717_PLUS) &&
        (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
        return 0;

    tg3_phy_apply_otp(tp);

    if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
        tg3_phy_toggle_apd(tp, true);
    else
        tg3_phy_toggle_apd(tp, false);

out:
    if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
        !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
        tg3_phydsp_write(tp, 0x201f, 0x2aaa);
        tg3_phydsp_write(tp, 0x000a, 0x0323);
        tg3_phy_toggle_auxctl_smdsp(tp, false);
    }

    if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
        tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
    }

    if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
        if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
            tg3_phydsp_write(tp, 0x000a, 0x310b);
            tg3_phydsp_write(tp, 0x201f, 0x9506);
            tg3_phydsp_write(tp, 0x401f, 0x14e2);
            tg3_phy_toggle_auxctl_smdsp(tp, false);
        }
    } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
        if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
            tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
            if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                tg3_writephy(tp, MII_TG3_TEST1,
                             MII_TG3_TEST1_TRIM_EN | 0x4);
            } else
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

            tg3_phy_toggle_auxctl_smdsp(tp, false);
        }
    }

    /* Set Extended packet length bit (bit 14) on all chips that
     * support jumbo frames.
     */
    if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
        /* Cannot do read-modify-write on 5401 */
        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
    } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
        /* Set bit 14 with read-modify-write to preserve other bits */
        err = tg3_phy_auxctl_read(tp,
                                  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (!err)
            tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
    }

    /* Set phy register 0x10 bit 0 to high fifo elasticity to support
     * jumbo frames transmission.
     */
    if (tg3_flag(tp, JUMBO_CAPABLE)) {
        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
            tg3_writephy(tp, MII_TG3_EXT_CTRL,
                         val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
    }

    if (tg3_asic_rev(tp) == ASIC_REV_5906) {
        /* adjust output voltage */
        tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
    }

    if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
        tg3_phydsp_write(tp, 0xffb, 0x4000);

    tg3_phy_toggle_automdix(tp, 1);
    tg3_phy_set_wirespeed(tp);
    return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES          0x00000001
#define TG3_GPIO_MSG_NEED_VAUX          0x00000002
#define TG3_GPIO_MSG_MASK               (TG3_GPIO_MSG_DRVR_PRES | \
                                         TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
    ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
     (TG3_GPIO_MSG_DRVR_PRES << 4) | \
     (TG3_GPIO_MSG_DRVR_PRES << 8) | \
     (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
    ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
     (TG3_GPIO_MSG_NEED_VAUX << 4) | \
     (TG3_GPIO_MSG_NEED_VAUX << 8) | \
     (TG3_GPIO_MSG_NEED_VAUX << 12))
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
    u32 status, shift;

    if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
        tg3_asic_rev(tp) == ASIC_REV_5719)
        status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
    else
        status = tr32(TG3_CPMU_DRV_STATUS);

    shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
    status &= ~(TG3_GPIO_MSG_MASK << shift);
    status |= (newstat << shift);

    if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
        tg3_asic_rev(tp) == ASIC_REV_5719)
        tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
    else
        tw32(TG3_CPMU_DRV_STATUS, status);

    return status >> TG3_APE_GPIO_MSG_SHIFT;
}
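/* Note (added): the GPIO message word holds one 4-bit status nibble per
 * PCI function, at bit offset TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn.  For
 * example, function 2 reporting TG3_GPIO_MSG_NEED_VAUX sets that flag in
 * the nibble 8 bits above the base shift, which is exactly what the
 * TG3_GPIO_MSG_ALL_* masks test for across all four functions.
 */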
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
    if (!tg3_flag(tp, IS_NIC))
        return 0;

    if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
        tg3_asic_rev(tp) == ASIC_REV_5719 ||
        tg3_asic_rev(tp) == ASIC_REV_5720) {
        if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
            return -EIO;

        tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
    } else {
        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);
    }

    return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
    u32 grc_local_ctrl;

    if (!tg3_flag(tp, IS_NIC) ||
        tg3_asic_rev(tp) == ASIC_REV_5700 ||
        tg3_asic_rev(tp) == ASIC_REV_5701)
        return;

    grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

    tw32_wait_f(GRC_LOCAL_CTRL,
                grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                TG3_GRC_LCLCTL_PWRSW_DELAY);

    tw32_wait_f(GRC_LOCAL_CTRL,
                grc_local_ctrl,
                TG3_GRC_LCLCTL_PWRSW_DELAY);

    tw32_wait_f(GRC_LOCAL_CTRL,
                grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
    if (!tg3_flag(tp, IS_NIC))
        return;

    if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
        tg3_asic_rev(tp) == ASIC_REV_5701) {
        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                    (GRC_LCLCTRL_GPIO_OE0 |
                     GRC_LCLCTRL_GPIO_OE1 |
                     GRC_LCLCTRL_GPIO_OE2 |
                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                     GRC_LCLCTRL_GPIO_OUTPUT1),
                    TG3_GRC_LCLCTL_PWRSW_DELAY);
    } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
               tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
        /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
        u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                             GRC_LCLCTRL_GPIO_OE1 |
                             GRC_LCLCTRL_GPIO_OE2 |
                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                             GRC_LCLCTRL_GPIO_OUTPUT1 |
                             tp->grc_local_ctrl;
        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);
    } else {
        u32 no_gpio2;
        u32 grc_local_ctrl = 0;

        /* Workaround to prevent overdrawing Amps. */
        if (tg3_asic_rev(tp) == ASIC_REV_5714) {
            grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
            tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                        grc_local_ctrl,
                        TG3_GRC_LCLCTL_PWRSW_DELAY);
        }

        /* On 5753 and variants, GPIO2 cannot be used. */
        no_gpio2 = tp->nic_sram_data_cfg &
                   NIC_SRAM_DATA_CFG_NO_GPIO2;

        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                          GRC_LCLCTRL_GPIO_OE1 |
                          GRC_LCLCTRL_GPIO_OE2 |
                          GRC_LCLCTRL_GPIO_OUTPUT1 |
                          GRC_LCLCTRL_GPIO_OUTPUT2;
        if (no_gpio2) {
            grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                GRC_LCLCTRL_GPIO_OUTPUT2);
        }
        tw32_wait_f(GRC_LOCAL_CTRL,
                    tp->grc_local_ctrl | grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

        tw32_wait_f(GRC_LOCAL_CTRL,
                    tp->grc_local_ctrl | grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        if (!no_gpio2) {
            grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
            tw32_wait_f(GRC_LOCAL_CTRL,
                        tp->grc_local_ctrl | grc_local_ctrl,
                        TG3_GRC_LCLCTL_PWRSW_DELAY);
        }
    }
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
    u32 msg = 0;

    /* Serialize power state transitions */
    if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
        return;

    if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
        msg = TG3_GPIO_MSG_NEED_VAUX;

    msg = tg3_set_function_status(tp, msg);

    if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
        goto done;

    if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
        tg3_pwrsrc_switch_to_vaux(tp);
    else
        tg3_pwrsrc_die_with_vmain(tp);

done:
    tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
    bool need_vaux = false;

    /* The GPIOs do something completely different on 57765. */
    if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
        return;

    if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
        tg3_asic_rev(tp) == ASIC_REV_5719 ||
        tg3_asic_rev(tp) == ASIC_REV_5720) {
        tg3_frob_aux_power_5717(tp, include_wol ?
                                tg3_flag(tp, WOL_ENABLE) != 0 : 0);
        return;
    }

    if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
        struct net_device *dev_peer;

        dev_peer = pci_get_drvdata(tp->pdev_peer);

        /* remove_one() may have been run on the peer. */
        if (dev_peer) {
            struct tg3 *tp_peer = netdev_priv(dev_peer);

            if (tg3_flag(tp_peer, INIT_COMPLETE))
                return;

            if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
                tg3_flag(tp_peer, ENABLE_ASF))
                need_vaux = true;
        }
    }

    if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
        tg3_flag(tp, ENABLE_ASF))
        need_vaux = true;

    if (need_vaux)
        tg3_pwrsrc_switch_to_vaux(tp);
    else
        tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
    if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
        return 1;
    else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
        if (speed != SPEED_10)
            return 1;
    } else if (speed == SPEED_10)
        return 1;

    return 0;
}
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
    u32 val;

    if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
        if (tg3_asic_rev(tp) == ASIC_REV_5704) {
            u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
            u32 serdes_cfg = tr32(MAC_SERDES_CFG);

            sg_dig_ctrl |=
                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
            tw32(SG_DIG_CTRL, sg_dig_ctrl);
            tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
        }
        return;
    }

    if (tg3_asic_rev(tp) == ASIC_REV_5906) {
        tg3_bmcr_reset(tp);
        val = tr32(GRC_MISC_CFG);
        tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
        udelay(40);
        return;
    } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
        u32 phytest;

        if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
            u32 phy;

            tg3_writephy(tp, MII_ADVERTISE, 0);
            tg3_writephy(tp, MII_BMCR,
                         BMCR_ANENABLE | BMCR_ANRESTART);

            tg3_writephy(tp, MII_TG3_FET_TEST,
                         phytest | MII_TG3_FET_SHADOW_EN);
            if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                tg3_writephy(tp,
                             MII_TG3_FET_SHDW_AUXMODE4,
                             phy);
            }
            tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
        }
        return;
    } else if (do_low_power) {
        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

        val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
              MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
              MII_TG3_AUXCTL_PCTL_VREG_11V;
        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
    }

    /* The PHY should not be powered down on some chips because
     * of bugs.
     */
    if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
        tg3_asic_rev(tp) == ASIC_REV_5704 ||
        (tg3_asic_rev(tp) == ASIC_REV_5780 &&
         (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
        (tg3_asic_rev(tp) == ASIC_REV_5717 &&
         !tp->pci_fn))
        return;

    if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
        tg3_chip_rev(tp) == CHIPREV_5761_AX) {
        val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
        val |= CPMU_LSPD_1000MB_MACCLK_12_5;
        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
    }

    tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
    if (tg3_flag(tp, NVRAM)) {
        int i;

        if (tp->nvram_lock_cnt == 0) {
            tw32(NVRAM_SWARB, SWARB_REQ_SET1);
            for (i = 0; i < 8000; i++) {
                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
                    break;
                udelay(20);
            }
            if (i == 8000) {
                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
                return -ENODEV;
            }
        }
        tp->nvram_lock_cnt++;
    }
    return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
    if (tg3_flag(tp, NVRAM)) {
        if (tp->nvram_lock_cnt > 0)
            tp->nvram_lock_cnt--;
        if (tp->nvram_lock_cnt == 0)
            tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
    }
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
    if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
        u32 nvaccess = tr32(NVRAM_ACCESS);

        tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
    }
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
    if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
        u32 nvaccess = tr32(NVRAM_ACCESS);

        tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
    }
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                       u32 offset, u32 *val)
{
    u32 tmp;
    int i;

    if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
        return -EINVAL;

    tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                    EEPROM_ADDR_DEVID_MASK |
                                    EEPROM_ADDR_READ);
    tw32(GRC_EEPROM_ADDR,
         tmp |
         (0 << EEPROM_ADDR_DEVID_SHIFT) |
         ((offset << EEPROM_ADDR_ADDR_SHIFT) &
          EEPROM_ADDR_ADDR_MASK) |
         EEPROM_ADDR_READ | EEPROM_ADDR_START);

    for (i = 0; i < 1000; i++) {
        tmp = tr32(GRC_EEPROM_ADDR);

        if (tmp & EEPROM_ADDR_COMPLETE)
            break;
        msleep(1);
    }
    if (!(tmp & EEPROM_ADDR_COMPLETE))
        return -EBUSY;

    tmp = tr32(GRC_EEPROM_DATA);

    /*
     * The data will always be opposite the native endian
     * format.  Perform a blind byteswap to compensate.
     */
    *val = swab32(tmp);

    return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
    int i;

    tw32(NVRAM_CMD, nvram_cmd);
    for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
        udelay(10);
        if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
            udelay(10);
            break;
        }
    }

    if (i == NVRAM_CMD_TIMEOUT)
        return -EBUSY;

    return 0;
}
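/* Usage sketch (added): every NVRAM operation in this file is built from a
 * combination of NVRAM_CMD_* flags and funneled through this helper, e.g.
 * a one-word read:
 *
 *    tw32(NVRAM_ADDR, offset);
 *    tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *                       NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
 *                       NVRAM_CMD_DONE);
 *
 * The poll loop above bounds the wait at NVRAM_CMD_TIMEOUT iterations.
 */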
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
    if (tg3_flag(tp, NVRAM) &&
        tg3_flag(tp, NVRAM_BUFFERED) &&
        tg3_flag(tp, FLASH) &&
        !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
        (tp->nvram_jedecnum == JEDEC_ATMEL))

        addr = ((addr / tp->nvram_pagesize) <<
                ATMEL_AT45DB0X1B_PAGE_POS) +
               (addr % tp->nvram_pagesize);

    return addr;
}
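/* Worked example (added): Atmel AT45DB-class flash pages are 264 bytes,
 * not a power of two, so a linear offset must be split into
 * page * pagesize + rest and repacked with the page index at
 * ATMEL_AT45DB0X1B_PAGE_POS.  E.g. for pagesize 264 and addr 1000:
 * page = 1000 / 264 = 3, offset-in-page = 1000 % 264 = 208, giving
 * (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.  tg3_nvram_logical_addr()
 * below performs the inverse mapping.
 */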
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
    if (tg3_flag(tp, NVRAM) &&
        tg3_flag(tp, NVRAM_BUFFERED) &&
        tg3_flag(tp, FLASH) &&
        !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
        (tp->nvram_jedecnum == JEDEC_ATMEL))

        addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
                tp->nvram_pagesize) +
               (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

    return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
    int ret;

    if (!tg3_flag(tp, NVRAM))
        return tg3_nvram_read_using_eeprom(tp, offset, val);

    offset = tg3_nvram_phys_addr(tp, offset);

    if (offset > NVRAM_ADDR_MSK)
        return -EINVAL;

    ret = tg3_nvram_lock(tp);
    if (ret)
        return ret;

    tg3_enable_nvram_access(tp);

    tw32(NVRAM_ADDR, offset);
    ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

    if (ret == 0)
        *val = tr32(NVRAM_RDDATA);

    tg3_disable_nvram_access(tp);

    tg3_nvram_unlock(tp);

    return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
    u32 v;
    int res = tg3_nvram_read(tp, offset, &v);

    if (!res)
        *val = cpu_to_be32(v);
    return res;
}
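/* Note (added): tg3_nvram_read() returns the word in host order after the
 * chip's register byteswapping, so the cpu_to_be32() here is what restores
 * the raw bytestream view: on both little- and big-endian hosts, *val ends
 * up holding the bytes exactly as they sit in the flash.
 */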
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                              u32 offset, u32 len, u8 *buf)
{
    int i, j, rc = 0;
    u32 val;

    for (i = 0; i < len; i += 4) {
        u32 addr;
        __be32 data;

        addr = offset + i;

        memcpy(&data, buf + i, 4);

        /*
         * The SEEPROM interface expects the data to always be opposite
         * the native endian format.  We accomplish this by reversing
         * all the operations that would have been performed on the
         * data from a call to tg3_nvram_read_be32().
         */
        tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

        val = tr32(GRC_EEPROM_ADDR);
        tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

        val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                 EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR, val |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             (addr & EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_START |
             EEPROM_ADDR_WRITE);

        for (j = 0; j < 1000; j++) {
            val = tr32(GRC_EEPROM_ADDR);

            if (val & EEPROM_ADDR_COMPLETE)
                break;
            msleep(1);
        }
        if (!(val & EEPROM_ADDR_COMPLETE)) {
            rc = -EBUSY;
            break;
        }
    }

    return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                                            u8 *buf)
{
    int ret = 0;
    u32 pagesize = tp->nvram_pagesize;
    u32 pagemask = pagesize - 1;
    u32 nvram_cmd;
    u8 *tmp;

    tmp = kmalloc(pagesize, GFP_KERNEL);
    if (tmp == NULL)
        return -ENOMEM;

    while (len) {
        int j;
        u32 phy_addr, page_off, size;

        phy_addr = offset & ~pagemask;

        for (j = 0; j < pagesize; j += 4) {
            ret = tg3_nvram_read_be32(tp, phy_addr + j,
                                      (__be32 *) (tmp + j));
            if (ret)
                break;
        }
        if (ret)
            break;

        page_off = offset & pagemask;
        size = pagesize;
        if (len < size)
            size = len;

        len -= size;

        memcpy(tmp + page_off, buf, size);

        offset = offset + (pagesize - page_off);

        tg3_enable_nvram_access(tp);

        /*
         * Before we can erase the flash page, we need
         * to issue a special "write enable" command.
         */
        nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

        if (tg3_nvram_exec_cmd(tp, nvram_cmd))
            break;

        /* Erase the target page */
        tw32(NVRAM_ADDR, phy_addr);

        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                    NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

        if (tg3_nvram_exec_cmd(tp, nvram_cmd))
            break;

        /* Issue another write enable to start the write. */
        nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

        if (tg3_nvram_exec_cmd(tp, nvram_cmd))
            break;

        for (j = 0; j < pagesize; j += 4) {
            __be32 data;

            data = *((__be32 *) (tmp + j));

            tw32(NVRAM_WRDATA, be32_to_cpu(data));

            tw32(NVRAM_ADDR, phy_addr + j);

            nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                        NVRAM_CMD_WR;

            if (j == 0)
                nvram_cmd |= NVRAM_CMD_FIRST;
            else if (j == (pagesize - 4))
                nvram_cmd |= NVRAM_CMD_LAST;

            ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
            if (ret)
                break;
        }
        if (ret)
            break;
    }

    nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
    tg3_nvram_exec_cmd(tp, nvram_cmd);

    kfree(tmp);

    return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                                          u8 *buf)
{
    int i, ret = 0;

    for (i = 0; i < len; i += 4, offset += 4) {
        u32 page_off, phy_addr, nvram_cmd;
        __be32 data;

        memcpy(&data, buf + i, 4);
        tw32(NVRAM_WRDATA, be32_to_cpu(data));

        page_off = offset % tp->nvram_pagesize;

        phy_addr = tg3_nvram_phys_addr(tp, offset);

        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

        if (page_off == 0 || i == 0)
            nvram_cmd |= NVRAM_CMD_FIRST;
        if (page_off == (tp->nvram_pagesize - 4))
            nvram_cmd |= NVRAM_CMD_LAST;

        if (i == (len - 4))
            nvram_cmd |= NVRAM_CMD_LAST;

        if ((nvram_cmd & NVRAM_CMD_FIRST) ||
            !tg3_flag(tp, FLASH) ||
            !tg3_flag(tp, 57765_PLUS))
            tw32(NVRAM_ADDR, phy_addr);

        if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
            !tg3_flag(tp, 5755_PLUS) &&
            (tp->nvram_jedecnum == JEDEC_ST) &&
            (nvram_cmd & NVRAM_CMD_FIRST)) {
            u32 cmd;

            cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
            ret = tg3_nvram_exec_cmd(tp, cmd);
            if (ret)
                break;
        }
        if (!tg3_flag(tp, FLASH)) {
            /* We always do complete word writes to eeprom. */
            nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
        }

        ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
        if (ret)
            break;
    }
    return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
    int ret;

    if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
               ~GRC_LCLCTRL_GPIO_OUTPUT1);
        udelay(40);
    }

    if (!tg3_flag(tp, NVRAM)) {
        ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
    } else {
        u32 grc_mode;

        ret = tg3_nvram_lock(tp);
        if (ret)
            return ret;

        tg3_enable_nvram_access(tp);
        if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
            tw32(NVRAM_WRITE1, 0x406);

        grc_mode = tr32(GRC_MODE);
        tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

        if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
            ret = tg3_nvram_write_block_buffered(tp, offset, len,
                                                 buf);
        } else {
            ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
                                                   buf);
        }

        grc_mode = tr32(GRC_MODE);
        tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

        tg3_disable_nvram_access(tp);
        tg3_nvram_unlock(tp);
    }

    if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(40);
    }

    return ret;
}
#define RX_CPU_SCRATCH_BASE 0x30000
#define RX_CPU_SCRATCH_SIZE 0x04000
#define TX_CPU_SCRATCH_BASE 0x34000
#define TX_CPU_SCRATCH_SIZE 0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
    int i;

    BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

    if (tg3_asic_rev(tp) == ASIC_REV_5906) {
        u32 val = tr32(GRC_VCPU_EXT_CTRL);

        tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
        return 0;
    }
    if (offset == RX_CPU_BASE) {
        for (i = 0; i < 10000; i++) {
            tw32(offset + CPU_STATE, 0xffffffff);
            tw32(offset + CPU_MODE,  CPU_MODE_HALT);
            if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                break;
        }

        tw32(offset + CPU_STATE, 0xffffffff);
        tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
        udelay(10);
    } else {
        /*
         * There is only an Rx CPU for the 5750 derivative in the
         * BCM4785.
         */
        if (tg3_flag(tp, IS_SSB_CORE))
            return 0;

        for (i = 0; i < 10000; i++) {
            tw32(offset + CPU_STATE, 0xffffffff);
            tw32(offset + CPU_MODE,  CPU_MODE_HALT);
            if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                break;
        }
    }

    if (i >= 10000) {
        netdev_err(tp->dev, "%s timed out, %s CPU\n",
                   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
        return -ENODEV;
    }

    /* Clear firmware's nvram arbitration. */
    if (tg3_flag(tp, NVRAM))
        tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
    return 0;
}
struct fw_info {
    unsigned int fw_base;
    unsigned int fw_len;
    const __be32 *fw_data;
};
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
                                 u32 cpu_scratch_base, int cpu_scratch_size,
                                 struct fw_info *info)
{
    int err, lock_err, i;
    void (*write_op)(struct tg3 *, u32, u32);

    if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
        netdev_err(tp->dev,
                   "%s: Trying to load TX cpu firmware which is 5705\n",
                   __func__);
        return -EINVAL;
    }

    if (tg3_flag(tp, 5705_PLUS))
        write_op = tg3_write_mem;
    else
        write_op = tg3_write_indirect_reg32;

    /* It is possible that bootcode is still loading at this point.
     * Get the nvram lock first before halting the cpu.
     */
    lock_err = tg3_nvram_lock(tp);
    err = tg3_halt_cpu(tp, cpu_base);
    if (!lock_err)
        tg3_nvram_unlock(tp);
    if (err)
        goto out;

    for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
        write_op(tp, cpu_scratch_base + i, 0);
    tw32(cpu_base + CPU_STATE, 0xffffffff);
    tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
    for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
        write_op(tp, (cpu_scratch_base +
                      (info->fw_base & 0xffff) +
                      (i * sizeof(u32))),
                 be32_to_cpu(info->fw_data[i]));

    err = 0;

out:
    return err;
}
3559 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
3561 struct fw_info info
;
3562 const __be32
*fw_data
;
3565 fw_data
= (void *)tp
->fw
->data
;
3567 /* Firmware blob starts with version numbers, followed by
3568 start address and length. We are setting complete length.
3569 length = end_address_of_bss - start_address_of_text.
3570 Remainder is the blob to be loaded contiguously
3571 from start address. */
3573 info
.fw_base
= be32_to_cpu(fw_data
[1]);
3574 info
.fw_len
= tp
->fw
->size
- 12;
3575 info
.fw_data
= &fw_data
[3];
3577 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
3578 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
3583 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
3584 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
3589 /* Now startup only the RX cpu. */
3590 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3591 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
3593 for (i
= 0; i
< 5; i
++) {
3594 if (tr32(RX_CPU_BASE
+ CPU_PC
) == info
.fw_base
)
3596 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3597 tw32(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
3598 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
3602 netdev_err(tp
->dev
, "%s fails to set RX CPU PC, is %08x "
3603 "should be %08x\n", __func__
,
3604 tr32(RX_CPU_BASE
+ CPU_PC
), info
.fw_base
);
3607 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3608 tw32_f(RX_CPU_BASE
+ CPU_MODE
, 0x00000000);
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
    struct fw_info info;
    const __be32 *fw_data;
    unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
    int err, i;

    if (tg3_flag(tp, HW_TSO_1) ||
        tg3_flag(tp, HW_TSO_2) ||
        tg3_flag(tp, HW_TSO_3))
        return 0;

    fw_data = (void *)tp->fw->data;

    /* Firmware blob starts with version numbers, followed by
       start address and length. We are setting complete length.
       length = end_address_of_bss - start_address_of_text.
       Remainder is the blob to be loaded contiguously
       from start address. */

    info.fw_base = be32_to_cpu(fw_data[1]);
    cpu_scratch_size = tp->fw_len;
    info.fw_len = tp->fw->size - 12;
    info.fw_data = &fw_data[3];

    if (tg3_asic_rev(tp) == ASIC_REV_5705) {
        cpu_base = RX_CPU_BASE;
        cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
    } else {
        cpu_base = TX_CPU_BASE;
        cpu_scratch_base = TX_CPU_SCRATCH_BASE;
        cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
    }

    err = tg3_load_firmware_cpu(tp, cpu_base,
                                cpu_scratch_base, cpu_scratch_size,
                                &info);
    if (err)
        return err;

    /* Now startup the cpu. */
    tw32(cpu_base + CPU_STATE, 0xffffffff);
    tw32_f(cpu_base + CPU_PC, info.fw_base);

    for (i = 0; i < 5; i++) {
        if (tr32(cpu_base + CPU_PC) == info.fw_base)
            break;
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
        tw32_f(cpu_base + CPU_PC, info.fw_base);
        udelay(1000);
    }
    if (i >= 5) {
        netdev_err(tp->dev,
                   "%s fails to set CPU PC, is %08x should be %08x\n",
                   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
        return -ENODEV;
    }
    tw32(cpu_base + CPU_STATE, 0xffffffff);
    tw32_f(cpu_base + CPU_MODE,  0x00000000);
    return 0;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
    u32 addr_high, addr_low;
    int i;

    addr_high = ((tp->dev->dev_addr[0] << 8) |
                 tp->dev->dev_addr[1]);
    addr_low = ((tp->dev->dev_addr[2] << 24) |
                (tp->dev->dev_addr[3] << 16) |
                (tp->dev->dev_addr[4] <<  8) |
                (tp->dev->dev_addr[5] <<  0));
    for (i = 0; i < 4; i++) {
        if (i == 1 && skip_mac_1)
            continue;
        tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
        tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
    }

    if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
        tg3_asic_rev(tp) == ASIC_REV_5704) {
        for (i = 0; i < 12; i++) {
            tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
            tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
        }
    }

    addr_high = (tp->dev->dev_addr[0] +
                 tp->dev->dev_addr[1] +
                 tp->dev->dev_addr[2] +
                 tp->dev->dev_addr[3] +
                 tp->dev->dev_addr[4] +
                 tp->dev->dev_addr[5]) &
        TX_BACKOFF_SEED_MASK;
    tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
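/* Worked example (added, address hypothetical): for 00:10:18:aa:bb:cc the
 * registers are packed big-endian style:
 *    addr_high = 0x00000010   (bytes 0-1)
 *    addr_low  = 0x18aabbcc   (bytes 2-5)
 * and the same value is mirrored into all four MAC_ADDR_* slots (plus the
 * extended slots on 5703/5704).  The backoff seed is simply the byte sum
 * masked with TX_BACKOFF_SEED_MASK.
 */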
static void tg3_enable_register_access(struct tg3 *tp)
{
    /*
     * Make sure register accesses (indirect or otherwise) will function
     * correctly.
     */
    pci_write_config_dword(tp->pdev,
                           TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
    int err;

    tg3_enable_register_access(tp);

    err = pci_set_power_state(tp->pdev, PCI_D0);
    if (!err) {
        /* Switch out of Vaux if it is a NIC */
        tg3_pwrsrc_switch_to_vmain(tp);
    } else {
        netdev_err(tp->dev, "Transition to D0 failed\n");
    }

    return err;
}
static int tg3_setup_phy(struct tg3 *, int);

static int tg3_power_down_prepare(struct tg3 *tp)
{
    u32 misc_host_ctrl;
    bool device_should_wake, do_low_power;

    tg3_enable_register_access(tp);

    /* Restore the CLKREQ setting. */
    if (tg3_flag(tp, CLKREQ_BUG))
        pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
                                 PCI_EXP_LNKCTL_CLKREQ_EN);

    misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
    tw32(TG3PCI_MISC_HOST_CTRL,
         misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

    device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
                         tg3_flag(tp, WOL_ENABLE);

    if (tg3_flag(tp, USE_PHYLIB)) {
        do_low_power = false;
        if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
            !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
            struct phy_device *phydev;
            u32 phyid, advertising;

            phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

            tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

            tp->link_config.speed = phydev->speed;
            tp->link_config.duplex = phydev->duplex;
            tp->link_config.autoneg = phydev->autoneg;
            tp->link_config.advertising = phydev->advertising;

            advertising = ADVERTISED_TP |
                          ADVERTISED_Pause |
                          ADVERTISED_Autoneg |
                          ADVERTISED_10baseT_Half;

            if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
                if (tg3_flag(tp, WOL_SPEED_100MB))
                    advertising |=
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full |
                        ADVERTISED_10baseT_Full;
                else
                    advertising |= ADVERTISED_10baseT_Full;
            }

            phydev->advertising = advertising;

            phy_start_aneg(phydev);

            phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
            if (phyid != PHY_ID_BCMAC131) {
                phyid &= PHY_BCM_OUI_MASK;
                if (phyid == PHY_BCM_OUI_1 ||
                    phyid == PHY_BCM_OUI_2 ||
                    phyid == PHY_BCM_OUI_3)
                    do_low_power = true;
            }
        }
    } else {
        do_low_power = true;

        if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
            tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
            tg3_setup_phy(tp, 0);
    }

    if (tg3_asic_rev(tp) == ASIC_REV_5906) {
        u32 val;

        val = tr32(GRC_VCPU_EXT_CTRL);
        tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
    } else if (!tg3_flag(tp, ENABLE_ASF)) {
        int i;
        u32 val;

        for (i = 0; i < 200; i++) {
            tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
            if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                break;
            msleep(1);
        }
    }
    if (tg3_flag(tp, WOL_CAP))
        tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                             WOL_DRV_STATE_SHUTDOWN |
                                             WOL_DRV_WOL |
                                             WOL_SET_MAGIC_PKT);

    if (device_should_wake) {
        u32 mac_mode;

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
            if (do_low_power &&
                !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                tg3_phy_auxctl_write(tp,
                               MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
                               MII_TG3_AUXCTL_PCTL_WOL_EN |
                               MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                               MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
                udelay(40);
            }

            if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                mac_mode = MAC_MODE_PORT_MODE_GMII;
            else
                mac_mode = MAC_MODE_PORT_MODE_MII;

            mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
            if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
                            SPEED_100 : SPEED_10;
                if (tg3_5700_link_polarity(tp, speed))
                    mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                    mac_mode &= ~MAC_MODE_LINK_POLARITY;
            }
        } else {
            mac_mode = MAC_MODE_PORT_MODE_TBI;
        }

        if (!tg3_flag(tp, 5750_PLUS))
            tw32(MAC_LED_CTRL, tp->led_ctrl);

        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
        if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
            (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
            mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

        if (tg3_flag(tp, ENABLE_APE))
            mac_mode |= MAC_MODE_APE_TX_EN |
                        MAC_MODE_APE_RX_EN |
                        MAC_MODE_TDE_ENABLE;

        tw32_f(MAC_MODE, mac_mode);
        udelay(100);

        tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
        udelay(10);
    }

    if (!tg3_flag(tp, WOL_SPEED_100MB) &&
        (tg3_asic_rev(tp) == ASIC_REV_5700 ||
         tg3_asic_rev(tp) == ASIC_REV_5701)) {
        u32 base_val;

        base_val = tp->pci_clock_ctrl;
        base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                     CLOCK_CTRL_TXCLK_DISABLE);

        tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                    CLOCK_CTRL_PWRDOWN_PLL133, 40);
    } else if (tg3_flag(tp, 5780_CLASS) ||
               tg3_flag(tp, CPMU_PRESENT) ||
               tg3_asic_rev(tp) == ASIC_REV_5906) {
        /* do nothing */
    } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
        u32 newbits1, newbits2;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
            newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                        CLOCK_CTRL_TXCLK_DISABLE |
                        CLOCK_CTRL_ALTCLK);
            newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
        } else if (tg3_flag(tp, 5705_PLUS)) {
            newbits1 = CLOCK_CTRL_625_CORE;
            newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
        } else {
            newbits1 = CLOCK_CTRL_ALTCLK;
            newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
        }

        tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                    40);

        tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                    40);

        if (!tg3_flag(tp, 5705_PLUS)) {
            u32 newbits3;

            if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                tg3_asic_rev(tp) == ASIC_REV_5701) {
                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                            CLOCK_CTRL_TXCLK_DISABLE |
                            CLOCK_CTRL_44MHZ_CORE);
            } else {
                newbits3 = CLOCK_CTRL_44MHZ_CORE;
            }

            tw32_wait_f(TG3PCI_CLOCK_CTRL,
                        tp->pci_clock_ctrl | newbits3, 40);
        }
    }

    if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
        tg3_power_down_phy(tp, do_low_power);

    tg3_frob_aux_power(tp, true);

    /* Workaround for unstable PLL clock */
    if ((!tg3_flag(tp, IS_SSB_CORE)) &&
        ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
         (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
        u32 val = tr32(0x7d00);

        val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
        tw32(0x7d00, val);
        if (!tg3_flag(tp, ENABLE_ASF)) {
            int err;

            err = tg3_nvram_lock(tp);
            tg3_halt_cpu(tp, RX_CPU_BASE);
            if (!err)
                tg3_nvram_unlock(tp);
        }
    }

    tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

    return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
    tg3_power_down_prepare(tp);

    pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
    pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
                                         u16 *speed, u8 *duplex)
{
    switch (val & MII_TG3_AUX_STAT_SPDMASK) {
    case MII_TG3_AUX_STAT_10HALF:
        *speed = SPEED_10;
        *duplex = DUPLEX_HALF;
        break;

    case MII_TG3_AUX_STAT_10FULL:
        *speed = SPEED_10;
        *duplex = DUPLEX_FULL;
        break;

    case MII_TG3_AUX_STAT_100HALF:
        *speed = SPEED_100;
        *duplex = DUPLEX_HALF;
        break;

    case MII_TG3_AUX_STAT_100FULL:
        *speed = SPEED_100;
        *duplex = DUPLEX_FULL;
        break;

    case MII_TG3_AUX_STAT_1000HALF:
        *speed = SPEED_1000;
        *duplex = DUPLEX_HALF;
        break;

    case MII_TG3_AUX_STAT_1000FULL:
        *speed = SPEED_1000;
        *duplex = DUPLEX_FULL;
        break;

    default:
        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
            *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
                     SPEED_10;
            *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
                      DUPLEX_HALF;
            break;
        }
        *speed = SPEED_UNKNOWN;
        *duplex = DUPLEX_UNKNOWN;
        break;
    }
}
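/* Note (added): MII_TG3_AUX_STAT_SPDMASK encodes one of six speed/duplex
 * combinations; anything else falls through to the default case, where
 * FET-class PHYs expose separate 100/FULL status bits and all other PHYs
 * report SPEED_UNKNOWN/DUPLEX_UNKNOWN until the next poll.
 */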
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
    int err = 0;
    u32 val, new_adv;

    new_adv = ADVERTISE_CSMA;
    new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
    new_adv |= mii_advertise_flowctrl(flowctrl);

    err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
    if (err)
        goto done;

    if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
        new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
            tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
            new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

        err = tg3_writephy(tp, MII_CTRL1000, new_adv);
        if (err)
            goto done;
    }

    if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
        goto done;

    tw32(TG3_CPMU_EEE_MODE,
         tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

    err = tg3_phy_toggle_auxctl_smdsp(tp, true);
    if (!err) {
        u32 err2;

        val = 0;
        /* Advertise 100-BaseTX EEE ability */
        if (advertise & ADVERTISED_100baseT_Full)
            val |= MDIO_AN_EEE_ADV_100TX;
        /* Advertise 1000-BaseT EEE ability */
        if (advertise & ADVERTISED_1000baseT_Full)
            val |= MDIO_AN_EEE_ADV_1000T;
        err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
        if (err)
            val = 0;

        switch (tg3_asic_rev(tp)) {
        case ASIC_REV_5717:
        case ASIC_REV_57765:
        case ASIC_REV_57766:
        case ASIC_REV_5719:
            /* If we advertised any eee advertisements above... */
            if (val)
                val = MII_TG3_DSP_TAP26_ALNOKO |
                      MII_TG3_DSP_TAP26_RMRXSTO |
                      MII_TG3_DSP_TAP26_OPCSINPT;
            tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
            /* fall through */
        case ASIC_REV_5720:
            if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
                tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
                                 MII_TG3_DSP_CH34TP2_HIBW01);
        }

        err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
        if (!err)
            err = err2;
    }

done:
    return err;
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
    if (tp->link_config.autoneg == AUTONEG_ENABLE ||
        (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
        u32 adv, fc;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
            adv = ADVERTISED_10baseT_Half |
                  ADVERTISED_10baseT_Full;
            if (tg3_flag(tp, WOL_SPEED_100MB))
                adv |= ADVERTISED_100baseT_Half |
                       ADVERTISED_100baseT_Full;

            fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
        } else {
            adv = tp->link_config.advertising;
            if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                adv &= ~(ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full);

            fc = tp->link_config.flowctrl;
        }

        tg3_phy_autoneg_cfg(tp, adv, fc);

        tg3_writephy(tp, MII_BMCR,
                     BMCR_ANENABLE | BMCR_ANRESTART);
    } else {
        int i;
        u32 bmcr, orig_bmcr;

        tp->link_config.active_speed = tp->link_config.speed;
        tp->link_config.active_duplex = tp->link_config.duplex;

        bmcr = 0;
        switch (tp->link_config.speed) {
        default:
        case SPEED_10:
            break;

        case SPEED_100:
            bmcr |= BMCR_SPEED100;
            break;

        case SPEED_1000:
            bmcr |= BMCR_SPEED1000;
            break;
        }

        if (tp->link_config.duplex == DUPLEX_FULL)
            bmcr |= BMCR_FULLDPLX;

        if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
            (bmcr != orig_bmcr)) {
            tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
            for (i = 0; i < 1500; i++) {
                u32 tmp;

                udelay(10);
                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                    tg3_readphy(tp, MII_BMSR, &tmp))
                    continue;
                if (!(tmp & BMSR_LSTATUS)) {
                    udelay(40);
                    break;
                }
            }
            tg3_writephy(tp, MII_BMCR, bmcr);
            udelay(40);
        }
    }
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
    int err;

    /* Turn off tap power management. */
    /* Set Extended packet length bit */
    err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

    err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
    err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
    err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
    err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
    err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

    udelay(40);

    return err;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
    u32 advmsk, tgtadv, advertising;

    advertising = tp->link_config.advertising;
    tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

    advmsk = ADVERTISE_ALL;
    if (tp->link_config.active_duplex == DUPLEX_FULL) {
        tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
        advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
    }

    if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
        return false;

    if ((*lcladv & advmsk) != tgtadv)
        return false;

    if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
        u32 tg3_ctrl;

        tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

        if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
            return false;

        if (tgtadv &&
            (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
            tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
            tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
                         CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
        } else {
            tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
        }

        if (tg3_ctrl != tgtadv)
            return false;
    }

    return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
    u32 lpeth = 0;

    if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
        u32 val;

        if (tg3_readphy(tp, MII_STAT1000, &val))
            return false;

        lpeth = mii_stat1000_to_ethtool_lpa_t(val);
    }

    if (tg3_readphy(tp, MII_LPA, rmtadv))
        return false;

    lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
    tp->link_config.rmt_adv = lpeth;

    return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
{
    if (curr_link_up != tp->link_up) {
        if (curr_link_up) {
            tg3_carrier_on(tp);
        } else {
            tg3_carrier_off(tp);
            if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

        tg3_link_report(tp);
        return true;
    }

    return false;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
    int current_link_up;
    u32 bmsr, val;
    u32 lcl_adv, rmt_adv;
    u16 current_speed;
    u8 current_duplex;
    int i, err;

    tw32(MAC_EVENT, 0);

    tw32_f(MAC_STATUS,
           (MAC_STATUS_SYNC_CHANGED |
            MAC_STATUS_CFG_CHANGED |
            MAC_STATUS_MI_COMPLETION |
            MAC_STATUS_LNKSTATE_CHANGED));
    udelay(40);

    if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
        tw32_f(MAC_MI_MODE,
               (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
        udelay(80);
    }

    tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

    /* Some third-party PHYs need to be reset on link going
     * down.
     */
    if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
         tg3_asic_rev(tp) == ASIC_REV_5704 ||
         tg3_asic_rev(tp) == ASIC_REV_5705) &&
        tp->link_up) {
        tg3_readphy(tp, MII_BMSR, &bmsr);
        if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
            !(bmsr & BMSR_LSTATUS))
            force_reset = 1;
    }
    if (force_reset)
        tg3_phy_reset(tp);

    if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
        tg3_readphy(tp, MII_BMSR, &bmsr);
        if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
            !tg3_flag(tp, INIT_COMPLETE))
            bmsr = 0;

        if (!(bmsr & BMSR_LSTATUS)) {
            err = tg3_init_5401phy_dsp(tp);
            if (err)
                return err;

            tg3_readphy(tp, MII_BMSR, &bmsr);
            for (i = 0; i < 1000; i++) {
                udelay(10);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS)) {
                    udelay(40);
                    break;
                }
            }

            if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                TG3_PHY_REV_BCM5401_B0 &&
                !(bmsr & BMSR_LSTATUS) &&
                tp->link_config.active_speed == SPEED_1000) {
                err = tg3_phy_reset(tp);
                if (!err)
                    err = tg3_init_5401phy_dsp(tp);
                if (err)
                    return err;
            }
        }
    } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
               tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
        /* 5701 {A0,B0} CRC bug workaround */
        tg3_writephy(tp, 0x15, 0x0a75);
        tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
    }

    /* Clear pending interrupts... */
    tg3_readphy(tp, MII_TG3_ISTAT, &val);
    tg3_readphy(tp, MII_TG3_ISTAT, &val);

    if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
        tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
    else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
        tg3_writephy(tp, MII_TG3_IMASK, ~0);

    if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
        tg3_asic_rev(tp) == ASIC_REV_5701) {
        if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
            tg3_writephy(tp, MII_TG3_EXT_CTRL,
                         MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        else
            tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
    }

    current_link_up = 0;
    current_speed = SPEED_UNKNOWN;
    current_duplex = DUPLEX_UNKNOWN;
    tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
    tp->link_config.rmt_adv = 0;

    if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
        err = tg3_phy_auxctl_read(tp,
                                  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                  &val);
        if (!err && !(val & (1 << 10))) {
            tg3_phy_auxctl_write(tp,
                                 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                 val | (1 << 10));
            goto relink;
        }
    }

    bmsr = 0;
    for (i = 0; i < 100; i++) {
        tg3_readphy(tp, MII_BMSR, &bmsr);
        if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
            (bmsr & BMSR_LSTATUS))
            break;
        udelay(40);
    }

    if (bmsr & BMSR_LSTATUS) {
        u32 aux_stat, bmcr;

        tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
        for (i = 0; i < 2000; i++) {
            udelay(10);
            if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                aux_stat)
                break;
        }

        tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                     &current_speed,
                                     &current_duplex);

        bmcr = 0;
        for (i = 0; i < 200; i++) {
            tg3_readphy(tp, MII_BMCR, &bmcr);
            if (tg3_readphy(tp, MII_BMCR, &bmcr))
                continue;
            if (bmcr && bmcr != 0x7fff)
                break;
            udelay(10);
        }

        lcl_adv = 0;
        rmt_adv = 0;

        tp->link_config.active_speed = current_speed;
        tp->link_config.active_duplex = current_duplex;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
            if ((bmcr & BMCR_ANENABLE) &&
                tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
                tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
                current_link_up = 1;
        } else {
            if (!(bmcr & BMCR_ANENABLE) &&
                tp->link_config.speed == current_speed &&
                tp->link_config.duplex == current_duplex &&
                tp->link_config.flowctrl ==
                tp->link_config.active_flowctrl) {
                current_link_up = 1;
            }
        }

        if (current_link_up == 1 &&
            tp->link_config.active_duplex == DUPLEX_FULL) {
            u32 reg, bit;

            if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                reg = MII_TG3_FET_GEN_STAT;
                bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
            } else {
                reg = MII_TG3_EXT_STAT;
                bit = MII_TG3_EXT_STAT_MDIX;
            }

            if (!tg3_readphy(tp, reg, &val) && (val & bit))
                tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

            tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        }
    }

relink:
    if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
        tg3_phy_copper_begin(tp);

        if (tg3_flag(tp, ROBOSWITCH)) {
            current_link_up = 1;
            /* FIXME: when BCM5325 switch is used use 100 MBit/s */
            current_speed = SPEED_1000;
            current_duplex = DUPLEX_FULL;
            tp->link_config.active_speed = current_speed;
            tp->link_config.active_duplex = current_duplex;
        }

        tg3_readphy(tp, MII_BMSR, &bmsr);
        if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
            (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
            current_link_up = 1;
    }

    tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
    if (current_link_up == 1) {
        if (tp->link_config.active_speed == SPEED_100 ||
            tp->link_config.active_speed == SPEED_10)
            tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
            tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
    } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
    else
        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

    /* In order for the 5750 core in BCM4785 chip to work properly
     * in RGMII mode, the Led Control Register must be set up.
     */
    if (tg3_flag(tp, RGMII_MODE)) {
        u32 led_ctrl = tr32(MAC_LED_CTRL);
        led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

        if (tp->link_config.active_speed == SPEED_10)
            led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
        else if (tp->link_config.active_speed == SPEED_100)
            led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
                         LED_CTRL_100MBPS_ON);
        else if (tp->link_config.active_speed == SPEED_1000)
            led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
                         LED_CTRL_1000MBPS_ON);

        tw32(MAC_LED_CTRL, led_ctrl);
        udelay(40);
    }

    tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
    if (tp->link_config.active_duplex == DUPLEX_HALF)
        tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

    if (tg3_asic_rev(tp) == ASIC_REV_5700) {
        if (current_link_up == 1 &&
            tg3_5700_link_polarity(tp, tp->link_config.active_speed))
            tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        else
            tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
    }

    /* ??? Without this setting Netgear GA302T PHY does not
     * ??? send/receive packets...
     */
    if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
        tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
        tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);
    }

    tw32_f(MAC_MODE, tp->mac_mode);
    udelay(40);

    tg3_phy_eee_adjust(tp, current_link_up);

    if (tg3_flag(tp, USE_LINKCHG_REG)) {
        /* Polled via timer. */
        tw32_f(MAC_EVENT, 0);
    } else {
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
    }
    udelay(40);

    if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
        current_link_up == 1 &&
        tp->link_config.active_speed == SPEED_1000 &&
        (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
        udelay(120);
        tw32_f(MAC_STATUS,
               (MAC_STATUS_SYNC_CHANGED |
                MAC_STATUS_CFG_CHANGED));
        udelay(40);
        tg3_write_mem(tp,
                      NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
    }

    /* Prevent send BD corruption. */
    if (tg3_flag(tp, CLKREQ_BUG)) {
        if (tp->link_config.active_speed == SPEED_100 ||
            tp->link_config.active_speed == SPEED_10)
            pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
                                       PCI_EXP_LNKCTL_CLKREQ_EN);
        else
            pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
                                     PCI_EXP_LNKCTL_CLKREQ_EN);
    }

    tg3_test_and_report_link_chg(tp, current_link_up);

    return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
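/* A sketch of the usual flow through the states below, assuming a
 * well-behaved link partner: AN_ENABLE -> RESTART_INIT -> RESTART ->
 * ABILITY_DETECT_INIT -> ABILITY_DETECT -> ACK_DETECT_INIT ->
 * ACK_DETECT -> COMPLETE_ACK_INIT -> COMPLETE_ACK ->
 * IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK.  A mismatched or zero
 * rxconfig word bounces the machine back to AN_ENABLE, and the
 * NEXT_PAGE_WAIT states are declared but left unimplemented.
 */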
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
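/* Each pass through the polling loop above costs roughly one
 * microsecond (udelay(1)), so the 195000-tick bound gives the state
 * machine on the order of 195 ms to reach ANEG_DONE or ANEG_FAILED
 * before we give up.
 */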
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
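/* The write sequence above presumably parks the counter (CTL_STOP),
 * loads the two 32-bit halves, then restarts it (CTL_RESUME), so a
 * concurrent reader never observes a half-updated value.  For
 * example, newval = 0x100000005 loads LSB 0x00000005 and
 * MSB 0x00000001.
 */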
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE    |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
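/* Everything reported above is what userspace sees through the
 * ETHTOOL_GET_TS_INFO request; e.g. "ethtool -T ethX" lists these
 * timestamping capabilities, rx filters, and the PHC index.
 */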
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
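/* Worked example for the conversion above: ppb = 1000 gives
 * correction = 1000 * (1 << 24) / 10^9 = 16 (truncated), i.e. the
 * accumulator gains 16/2^24 per clock, roughly 954 ppb realized.
 * The granularity of the 24-bit fraction is 10^9 / 2^24, about
 * 59.6 ppb per correction LSB.
 */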
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}
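/* Only the bits covered by TG3_TSTAMP_MASK are taken from the raw
 * hardware counter above; adding tp->ptp_adjust folds in the offset
 * accumulated by tg3_ptp_adjtime(), so packet stamps stay on the
 * same timescale as the PHC that userspace reads.
 */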
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
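/* The index math above relies on TG3_TX_RING_SIZE being a power of
 * two: (tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1) is the number of
 * descriptors in flight even across wraparound.  E.g. with a
 * 512-entry ring, tx_prod = 5 and tx_cons = 510 gives
 * (5 - 510) & 511 = 7 in flight, so tx_avail is tx_pending - 7.
 */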
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
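/* The skb_size computed above matches the layout build_skb() expects:
 * headroom (TG3_RX_OFFSET) plus the DMA buffer, then room for the
 * struct skb_shared_info tail, each rounded up by SKB_DATA_ALIGN.
 * While that total fits in one page the buffer comes from the cheap
 * per-cpu page-frag allocator; otherwise we fall back to kmalloc()
 * and report *frag_size = 0 so the free path picks kfree().
 */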
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
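/* In short, the ownership rules described above look like this:
 *
 *    host --posts buffers--> [ std / jumbo producer rings ] --> chip
 *    chip --writes status--> [ rx return (status) ring ]    --> host
 *
 * Each ring has exactly one writer, so its cache lines never need to
 * ping-pong between the host and the chip in an exclusive state.
 */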
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
*tp
)
6694 bool real_error
= false;
6696 if (tg3_flag(tp
, ERROR_PROCESSED
))
6699 /* Check Flow Attention register */
6700 val
= tr32(HOSTCC_FLOW_ATTN
);
6701 if (val
& ~HOSTCC_FLOW_ATTN_MBUF_LWM
) {
6702 netdev_err(tp
->dev
, "FLOW Attention error. Resetting chip.\n");
6706 if (tr32(MSGINT_STATUS
) & ~MSGINT_STATUS_MSI_REQ
) {
6707 netdev_err(tp
->dev
, "MSI Status error. Resetting chip.\n");
6711 if (tr32(RDMAC_STATUS
) || tr32(WDMAC_STATUS
)) {
6712 netdev_err(tp
->dev
, "DMA Status error. Resetting chip.\n");
6721 tg3_flag_set(tp
, ERROR_PROCESSED
);
6722 tg3_reset_task_schedule(tp
);
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
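
/* Sketch of the mailbox handshake described above (illustrative, using
 * the driver's own values): writing any value to the interrupt mailbox
 * acks the interrupt, and a non-zero value also masks it --
 *
 *	tw32_mailbox(tnapi->int_mbox, 0x00000001);	ack + mask
 *	... napi poll drains the rings ...
 *	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);	unmask
 *
 * The second write is what tg3_int_reenable() issues once polling is
 * done; on tagged-status chips the tag reports how far processing got.
 */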
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
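
/* Worked example (hypothetical mapping): a 0x4000-byte buffer mapped at
 * 0x2fffff000 has base = 0xfffff000, and base + len + 8 = 0x100003008
 * wraps to 0x00003008 in 32-bit arithmetic, i.e. less than base, so the
 * test reports a 4GB-boundary crossing.  The first clause is a cheap
 * filter: 0xffffffff - 0xffffdcc0 is about 9KB, roughly a jumbo frame,
 * so buffers starting farther below a boundary can never wrap and skip
 * the add entirely.
 */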
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
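
/* Field packing example (hypothetical values): len = 1514,
 * flags = TXD_FLAG_END, mss = 0, vlan = 5 produces
 *	len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END
 *	vlan_tag  = (0 << TXD_MSS_SHIFT) | (5 << TXD_VLAN_TAG_SHIFT)
 * so the upper half of len_flags carries the fragment length, the low
 * 16 bits carry the TXD_FLAG_* bits, and mss/vlan share the final
 * descriptor word.
 */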
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
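
/* Worked example of the split above (hypothetical sizes): with
 * tp->dma_limit = 4096 and an 8200-byte fragment, the first pass emits
 * a 4096-byte BD, leaving 4104.  Another full-size BD would leave an
 * 8-byte tail and re-trigger the short-DMA bug, so the loop emits a
 * half-size 2048-byte BD instead and carries 2056 bytes into the final
 * descriptor: 4096 + 2048 + 2056 = 8200.
 */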
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(*pskb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
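
/* Rough timing bound implied above: MAX_WAIT_CNT iterations of
 * udelay(100) give a block about 1000 * 100us = 100ms to quiesce before
 * tg3_stop_block() gives up (silently, if the caller asked for that).
 */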
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this work reliably (actually, this is no
	 * longer the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	for (; i < tp->txq_cnt; i++) {
		u32 reg;

		reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->tx_coalesce_usecs);
		reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames);
		reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
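
/* Threshold arithmetic example (hypothetical config): with
 * tp->rx_pending = 200, host_rep_thresh = max(200 / 8, 1) = 25 buffers;
 * RCVBDI_STD_THRESH then gets the smaller of that and the NIC-side
 * limit min(bdcache_maxcnt / 2, tp->rx_std_max_post).
 */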
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
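
/* Hash example (hypothetical address): if calc_crc() returns
 * 0xffffffb5, then bit = ~crc & 0x7f = 0x4a, regidx = (0x4a & 0x60) >> 5
 * = 2, and bit &= 0x1f leaves 10, so the address sets bit 10 of
 * MAC_HASH_REG_2.  The four registers together form a 128-bucket
 * multicast hash filter.
 */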
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
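
/* Packing example: with four rx queues the default table from
 * ethtool_rxfh_indir_default() repeats 0,1,2,3, so each group of eight
 * 4-bit entries collapses into a single register write of 0x01230123;
 * TG3_RSS_INDIR_TBL_SIZE / 8 such writes cover the whole table.
 */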
9055 /* tp->lock is held. */
9056 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
9058 u32 val
, rdmac_mode
;
9060 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
9062 tg3_disable_ints(tp
);
9066 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
9068 if (tg3_flag(tp
, INIT_COMPLETE
))
9069 tg3_abort_hw(tp
, 1);
9071 /* Enable MAC control of LPI */
9072 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
9073 val
= TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
9074 TG3_CPMU_EEE_LNKIDL_UART_IDL
;
9075 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
)
9076 val
|= TG3_CPMU_EEE_LNKIDL_APE_TX_MT
;
9078 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
, val
);
9080 tw32_f(TG3_CPMU_EEE_CTRL
,
9081 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
9083 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
9084 TG3_CPMU_EEEMD_LPI_IN_TX
|
9085 TG3_CPMU_EEEMD_LPI_IN_RX
|
9086 TG3_CPMU_EEEMD_EEE_ENABLE
;
9088 if (tg3_asic_rev(tp
) != ASIC_REV_5717
)
9089 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
9091 if (tg3_flag(tp
, ENABLE_APE
))
9092 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
9094 tw32_f(TG3_CPMU_EEE_MODE
, val
);
9096 tw32_f(TG3_CPMU_EEE_DBTMR1
,
9097 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
9098 TG3_CPMU_DBTMR1_LNKIDLE_2047US
);
9100 tw32_f(TG3_CPMU_EEE_DBTMR2
,
9101 TG3_CPMU_DBTMR2_APE_TX_2047US
|
9102 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
9108 err
= tg3_chip_reset(tp
);
9112 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
9114 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
) {
9115 val
= tr32(TG3_CPMU_CTRL
);
9116 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
9117 tw32(TG3_CPMU_CTRL
, val
);
9119 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
9120 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
9121 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
9122 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
9124 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
9125 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
9126 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
9127 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
9129 val
= tr32(TG3_CPMU_HST_ACC
);
9130 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
9131 val
|= CPMU_HST_ACC_MACCLK_6_25
;
9132 tw32(TG3_CPMU_HST_ACC
, val
);
9135 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
9136 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
9137 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
9138 PCIE_PWR_MGMT_L1_THRESH_4MS
;
9139 tw32(PCIE_PWR_MGMT_THRESH
, val
);
9141 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
9142 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
9144 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
9146 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
9147 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
9150 if (tg3_flag(tp
, L1PLLPD_EN
)) {
9151 u32 grc_mode
= tr32(GRC_MODE
);
9153 /* Access the lower 1K of PL PCIE block registers. */
9154 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9155 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
9157 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
9158 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
9159 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
9161 tw32(GRC_MODE
, grc_mode
);
	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);
	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
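	/* Presumably the prescaler divides by N + 1: with the fixed 66 MHz
	 * core clock, 66 MHz / (65 + 1) = 1 MHz, i.e. a 1 usec timer tick.
	 */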
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}
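	/* The rounding above, (fw_len + 0x7f) & ~0x7f, aligns the firmware
	 * length up to the next 128-byte boundary; e.g. a 0x1234-byte image
	 * reserves 0x1280 bytes of SRAM ahead of the MBUF pool.
	 */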
	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);
	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);
	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);
	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
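	/* With the default 1500-byte MTU this programs 1500 + 14 (ETH_HLEN)
	 * + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
	 */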
	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32))
			tg3_write_mem(tp, i, 0);
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}
	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
	if (tg3_asic_rev(tp) == ASIC_REV_5719) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_RDMA_BUG);
		}
	}
	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);
	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}
	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
	}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
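/* The macro above folds a free-running 32-bit hardware counter into a
 * 64-bit software counter: after adding the register value to ->low, an
 * unsigned wrap (the sum ending up smaller than the addend) signals a
 * carry into ->high.  For example, low = 0xffffff00 plus __val = 0x200
 * gives low = 0x100, which is less than 0x200, so high is incremented.
 */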
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
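/* Worked example: in the HZ / 10 case the timer fires every 100 ms, so
 * timer_multiplier = HZ / (HZ / 10) = 10 ticks per second of
 * once-per-second work in tg3_timer, and asf_multiplier = 10 *
 * TG3_FW_UPDATE_FREQ_SEC ticks between ASF heartbeats.
 */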
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
*tp
)
10569 const __be32
*fw_data
;
10571 if (request_firmware(&tp
->fw
, tp
->fw_needed
, &tp
->pdev
->dev
)) {
10572 netdev_err(tp
->dev
, "Failed to load firmware \"%s\"\n",
10577 fw_data
= (void *)tp
->fw
->data
;
10579 /* Firmware blob starts with version numbers, followed by
10580 * start address and _full_ length including BSS sections
10581 * (which must be longer than the actual data, of course
10584 tp
->fw_len
= be32_to_cpu(fw_data
[2]); /* includes bss */
10585 if (tp
->fw_len
< (tp
->fw
->size
- 12)) {
10586 netdev_err(tp
->dev
, "bogus length %d in \"%s\"\n",
10587 tp
->fw_len
, tp
->fw_needed
);
10588 release_firmware(tp
->fw
);
10593 /* We no longer need firmware; we have it. */
10594 tp
->fw_needed
= NULL
;
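/* Per the comment in the function above, the blob apparently begins with
 * a 12-byte header of three be32 words -- version, load address, full
 * length including BSS -- so fw_data[2] must cover at least the file
 * payload, i.e. tp->fw->size minus those 12 header bytes.
 */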
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
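/* Example: with rxq_cnt = 4 and txq_cnt = 1 this requests
 * min(4 + 1, irq_max) vectors -- four rx rings plus the extra vector
 * reserved for link and other non-ring interrupts.
 */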
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
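/* Interrupt setup thus falls back in order: MSI-X (multi-vector, RSS/TSS
 * capable), then single-vector MSI, then legacy INTx on tp->pdev->irq,
 * with everything forced to one rx/tx queue in the single-vector cases.
 */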
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
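/* This is the inverse of the TG3_STAT_ADD32 carry logic: e.g.
 * high = 0x1 and low = 0x100 combine to 0x0000000100000100.
 */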
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
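/* ESTAT_ADD(rx_octets), for instance, expands to
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 * folding the snapshot saved across close/open into the live counters.
 */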
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);

	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);

	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);

	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);

	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);

	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
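/* Worked example of the alignment handling above: a request with
 * offset = 1 and len = 2 reads the aligned word at offset 0, copies
 * bytes 1 and 2 out of it (b_offset = 1, b_count clamped from 3 down to
 * len = 2), and never enters the aligned-word loop since len drops to 0.
 */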
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	return 0;
}
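/* "ethtool -s ethX speed 100 duplex full autoneg off" lands here.  The
 * requested advertising mask is validated against what the attached
 * PHY can actually do before the link is reconfigured under the full
 * lock.
 */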
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
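/* Flow control ("ethtool -a/-A ethX"): the rx/tx pause booleans mirror
 * tp->link_config.flowctrl, and the autoneg field mirrors the
 * PAUSE_AUTONEG device flag.
 */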
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}

static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
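/* NVRAM self test: three layouts are recognized by their magic -
 * legacy TG3_EEPROM_MAGIC images (CRC checked at fixed offsets),
 * firmware selfboot images (simple 8-bit checksum), and hardware
 * selfboot images (per-byte parity bits).  The VPD block is verified
 * last through its read-only-section checksum keyword.
 */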
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			int l;
			u8 msk;

			if ((i == 0) || (i == 8)) {
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 24) {
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else {
				data[j++] = buf8[i];
			}
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
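/* Each entry in reg_tbl below carries a read_mask (bits that must keep
 * their value across writes) and a write_mask (bits that must accept
 * both 0 and 1); the test writes all-zeros and then all-ones through
 * each register and checks both masks.
 */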
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
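/* tg3_tso_header above is a pre-cooked Ethertype/IPv4/TCP header
 * (10.0.0.1 -> 10.0.0.2) used to build the TSO loopback frame.
 * tg3_run_loopback() below transmits one frame and verifies that it
 * comes back on the expected rx ring with an intact payload.
 */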
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
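/* Loopback results are reported as a bitmask per test slot, so one
 * data[] entry can record standard, jumbo and TSO failures
 * independently for each loopback mode (MAC, PHY, external).
 */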
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
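/* SIOCSHWTSTAMP handler: maps the hwtstamp_config tx_type and
 * rx_filter requested by userspace onto the TX_TSTAMP_EN flag and the
 * TG3_RX_PTP_CTL register bits.
 */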
static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EINVAL;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
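
/* Boards whose NVRAM carries no usable PHY ID are identified by their
 * PCI subsystem vendor/device IDs via the table below.
 */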
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
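
/* Harvest the configuration the bootcode left in NIC SRAM: PHY type
 * and ID, LED mode, WOL/ASF/APE enables and assorted workaround bits.
 */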
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
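
/* Read one 32-bit word from the OTP region through the APE.  The
 * NVRAM lock serializes access with the APE firmware; the read is
 * started via TG3_APE_OTP_CTRL and polled for up to ~1 ms.
 */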
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
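
/* Seed link_config with an everything-advertised autoneg default;
 * the serdes and 10/100-only PHY flags trim the advertisement mask.
 */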
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
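
/* Identify the PHY: prefer the ID read over MDIO, then the ID cached
 * from NVRAM by tg3_get_eeprom_hw_cfg(), then the subsystem-ID table.
 */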
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
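
/* Extract the board part number, and the bootcode version on boards
 * whose VPD manufacturer ID is "1028", from the read-only VPD section.
 * Fall back to names hard-coded per PCI device ID.
 */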
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
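
/* APE firmware helpers: detect NCSI-capable firmware and report the
 * APE firmware version in fw_ver.
 */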
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
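
/* Host bridges known to reorder posted writes to the mailbox
 * registers; see the MBOX_WRITE_REORDER handling in
 * tg3_get_invariants().
 */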
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
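
/* One-time, probe-path discovery of the chip revision, bus type and
 * the long tail of per-revision workarounds.  Must run before the
 * first MMIO register access is relied upon.
 */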
15124 static int tg3_get_invariants(struct tg3
*tp
, const struct pci_device_id
*ent
)
15127 u32 pci_state_reg
, grc_misc_cfg
;
15132 /* Force memory write invalidate off. If we leave it on,
15133 * then on 5700_BX chips we have to enable a workaround.
15134 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15135 * to match the cacheline size. The Broadcom driver have this
15136 * workaround but turns MWI off all the times so never uses
15137 * it. This seems to suggest that the workaround is insufficient.
15139 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15140 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
15141 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15143 /* Important! -- Make sure register accesses are byteswapped
15144 * correctly. Also, for those chips that require it, make
15145 * sure that indirect register accesses are enabled before
15146 * the first operation.
15148 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15150 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
15151 MISC_HOST_CTRL_CHIPREV
);
15152 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15153 tp
->misc_host_ctrl
);
15155 tg3_detect_asic_rev(tp
, misc_ctrl_reg
);
15157 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15158 * we need to disable memory and use config. cycles
15159 * only to access all registers. The 5702/03 chips
15160 * can mistakenly decode the special cycles from the
15161 * ICH chipsets as memory write cycles, causing corruption
15162 * of register and memory space. Only certain ICH bridges
15163 * will drive special cycles with non-zero data during the
15164 * address phase which can fall within the 5703's address
15165 * range. This is not an ICH bug as the PCI spec allows
15166 * non-zero address during special cycles. However, only
15167 * these ICH bridges are known to drive non-zero addresses
15168 * during special cycles.
15170 * Since special cycles do not cross PCI bridges, we only
15171 * enable this workaround if the 5703 is on the secondary
15172 * bus of these ICH bridges.
15174 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
) ||
15175 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A2
)) {
15176 static struct tg3_dev_id
{
15180 } ich_chipsets
[] = {
15181 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
15183 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
15185 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
15187 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
15191 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
15192 struct pci_dev
*bridge
= NULL
;
15194 while (pci_id
->vendor
!= 0) {
15195 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
15201 if (pci_id
->rev
!= PCI_ANY_ID
) {
15202 if (bridge
->revision
> pci_id
->rev
)
15205 if (bridge
->subordinate
&&
15206 (bridge
->subordinate
->number
==
15207 tp
->pdev
->bus
->number
)) {
15208 tg3_flag_set(tp
, ICH_WORKAROUND
);
15209 pci_dev_put(bridge
);
15215 if (tg3_asic_rev(tp
) == ASIC_REV_5701
) {
15216 static struct tg3_dev_id
{
15219 } bridge_chipsets
[] = {
15220 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
15221 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
15224 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
15225 struct pci_dev
*bridge
= NULL
;
15227 while (pci_id
->vendor
!= 0) {
15228 bridge
= pci_get_device(pci_id
->vendor
,
15235 if (bridge
->subordinate
&&
15236 (bridge
->subordinate
->number
<=
15237 tp
->pdev
->bus
->number
) &&
15238 (bridge
->subordinate
->busn_res
.end
>=
15239 tp
->pdev
->bus
->number
)) {
15240 tg3_flag_set(tp
, 5701_DMA_BUG
);
15241 pci_dev_put(bridge
);
15247 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15248 * DMA addresses > 40-bit. This bridge may have other additional
15249 * 57xx devices behind it in some 4-port NIC designs for example.
15250 * Any tg3 device found behind the bridge will also need the 40-bit
15253 if (tg3_flag(tp
, 5780_CLASS
)) {
15254 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15255 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
15257 struct pci_dev
*bridge
= NULL
;
15260 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
15261 PCI_DEVICE_ID_SERVERWORKS_EPB
,
15263 if (bridge
&& bridge
->subordinate
&&
15264 (bridge
->subordinate
->number
<=
15265 tp
->pdev
->bus
->number
) &&
15266 (bridge
->subordinate
->busn_res
.end
>=
15267 tp
->pdev
->bus
->number
)) {
15268 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15269 pci_dev_put(bridge
);
15275 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
15276 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15277 tp
->pdev_peer
= tg3_find_peer(tp
);
15279 /* Determine TSO capabilities */
15280 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
)
15281 ; /* Do nothing. HW bug. */
15282 else if (tg3_flag(tp
, 57765_PLUS
))
15283 tg3_flag_set(tp
, HW_TSO_3
);
15284 else if (tg3_flag(tp
, 5755_PLUS
) ||
15285 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15286 tg3_flag_set(tp
, HW_TSO_2
);
15287 else if (tg3_flag(tp
, 5750_PLUS
)) {
15288 tg3_flag_set(tp
, HW_TSO_1
);
15289 tg3_flag_set(tp
, TSO_BUG
);
15290 if (tg3_asic_rev(tp
) == ASIC_REV_5750
&&
15291 tg3_chip_rev_id(tp
) >= CHIPREV_ID_5750_C2
)
15292 tg3_flag_clear(tp
, TSO_BUG
);
15293 } else if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
15294 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
15295 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
15296 tg3_flag_set(tp
, TSO_BUG
);
15297 if (tg3_asic_rev(tp
) == ASIC_REV_5705
)
15298 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
15300 tp
->fw_needed
= FIRMWARE_TG3TSO
;
15303 /* Selectively allow TSO based on operating conditions */
15304 if (tg3_flag(tp
, HW_TSO_1
) ||
15305 tg3_flag(tp
, HW_TSO_2
) ||
15306 tg3_flag(tp
, HW_TSO_3
) ||
15308 /* For firmware TSO, assume ASF is disabled.
15309 * We'll disable TSO later if we discover ASF
15310 * is enabled in tg3_get_eeprom_hw_cfg().
15312 tg3_flag_set(tp
, TSO_CAPABLE
);
15314 tg3_flag_clear(tp
, TSO_CAPABLE
);
15315 tg3_flag_clear(tp
, TSO_BUG
);
15316 tp
->fw_needed
= NULL
;
15319 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
)
15320 tp
->fw_needed
= FIRMWARE_TG3
;
15324 if (tg3_flag(tp
, 5750_PLUS
)) {
15325 tg3_flag_set(tp
, SUPPORT_MSI
);
15326 if (tg3_chip_rev(tp
) == CHIPREV_5750_AX
||
15327 tg3_chip_rev(tp
) == CHIPREV_5750_BX
||
15328 (tg3_asic_rev(tp
) == ASIC_REV_5714
&&
15329 tg3_chip_rev_id(tp
) <= CHIPREV_ID_5714_A2
&&
15330 tp
->pdev_peer
== tp
->pdev
))
15331 tg3_flag_clear(tp
, SUPPORT_MSI
);
15333 if (tg3_flag(tp
, 5755_PLUS
) ||
15334 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15335 tg3_flag_set(tp
, 1SHOT_MSI
);
15338 if (tg3_flag(tp
, 57765_PLUS
)) {
15339 tg3_flag_set(tp
, SUPPORT_MSIX
);
15340 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
15346 if (tp
->irq_max
> 1) {
15347 tp
->rxq_max
= TG3_RSS_MAX_NUM_QS
;
15348 tg3_rss_init_dflt_indir_tbl(tp
, TG3_RSS_MAX_NUM_QS
);
15350 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
15351 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15352 tp
->txq_max
= tp
->irq_max
- 1;
15355 if (tg3_flag(tp
, 5755_PLUS
) ||
15356 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15357 tg3_flag_set(tp
, SHORT_DMA_BUG
);
15359 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
15360 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_4K
;
15362 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
15363 tg3_asic_rev(tp
) == ASIC_REV_5719
||
15364 tg3_asic_rev(tp
) == ASIC_REV_5720
||
15365 tg3_asic_rev(tp
) == ASIC_REV_5762
)
15366 tg3_flag_set(tp
, LRG_PROD_RING_CAP
);
15368 if (tg3_flag(tp
, 57765_PLUS
) &&
15369 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
)
15370 tg3_flag_set(tp
, USE_JUMBO_BDFLAG
);
15372 if (!tg3_flag(tp
, 5705_PLUS
) ||
15373 tg3_flag(tp
, 5780_CLASS
) ||
15374 tg3_flag(tp
, USE_JUMBO_BDFLAG
))
15375 tg3_flag_set(tp
, JUMBO_CAPABLE
);
15377 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
15380 if (pci_is_pcie(tp
->pdev
)) {
15383 tg3_flag_set(tp
, PCI_EXPRESS
);
15385 pcie_capability_read_word(tp
->pdev
, PCI_EXP_LNKCTL
, &lnkctl
);
15386 if (lnkctl
& PCI_EXP_LNKCTL_CLKREQ_EN
) {
15387 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15388 tg3_flag_clear(tp
, HW_TSO_2
);
15389 tg3_flag_clear(tp
, TSO_CAPABLE
);
15391 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
15392 tg3_asic_rev(tp
) == ASIC_REV_5761
||
15393 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A0
||
15394 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A1
)
15395 tg3_flag_set(tp
, CLKREQ_BUG
);
15396 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5717_A0
) {
15397 tg3_flag_set(tp
, L1PLLPD_EN
);
15399 } else if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
15400 /* BCM5785 devices are effectively PCIe devices, and should
15401 * follow PCIe codepaths, but do not have a PCIe capabilities
15404 tg3_flag_set(tp
, PCI_EXPRESS
);
15405 } else if (!tg3_flag(tp
, 5705_PLUS
) ||
15406 tg3_flag(tp
, 5780_CLASS
)) {
15407 tp
->pcix_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_PCIX
);
15408 if (!tp
->pcix_cap
) {
15409 dev_err(&tp
->pdev
->dev
,
15410 "Cannot find PCI-X capability, aborting\n");
15414 if (!(pci_state_reg
& PCISTATE_CONV_PCI_MODE
))
15415 tg3_flag_set(tp
, PCIX_MODE
);
15418 /* If we have an AMD 762 or VIA K8T800 chipset, write
15419 * reordering to the mailbox registers done by the host
15420 * controller can cause major troubles. We read back from
15421 * every mailbox register write to force the writes to be
15422 * posted to the chip in order.
15424 if (pci_dev_present(tg3_write_reorder_chipsets
) &&
15425 !tg3_flag(tp
, PCI_EXPRESS
))
15426 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
15428 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
15429 &tp
->pci_cacheline_sz
);
15430 pci_read_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15431 &tp
->pci_lat_timer
);
15432 if (tg3_asic_rev(tp
) == ASIC_REV_5703
&&
15433 tp
->pci_lat_timer
< 64) {
15434 tp
->pci_lat_timer
= 64;
15435 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15436 tp
->pci_lat_timer
);
15439 /* Important! -- It is critical that the PCI-X hw workaround
15440 * situation is decided before the first MMIO register access.
15442 if (tg3_chip_rev(tp
) == CHIPREV_5700_BX
) {
15443 /* 5700 BX chips need to have their TX producer index
15444 * mailboxes written twice to workaround a bug.
15446 tg3_flag_set(tp
, TXD_MBOX_HWBUG
);
15448 /* If we are in PCI-X mode, enable register write workaround.
15450 * The workaround is to use indirect register accesses
15451 * for all chip writes not to mailbox registers.
15453 if (tg3_flag(tp
, PCIX_MODE
)) {
15456 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
15458 /* The chip can have it's power management PCI config
15459 * space registers clobbered due to this bug.
15460 * So explicitly force the chip into D0 here.
15462 pci_read_config_dword(tp
->pdev
,
15463 tp
->pm_cap
+ PCI_PM_CTRL
,
15465 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
15466 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
15467 pci_write_config_dword(tp
->pdev
,
15468 tp
->pm_cap
+ PCI_PM_CTRL
,
15471 /* Also, force SERR#/PERR# in PCI command. */
15472 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15473 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
15474 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15478 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
15479 tg3_flag_set(tp
, PCI_HIGH_SPEED
);
15480 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
15481 tg3_flag_set(tp
, PCI_32BIT
);
15483 /* Chip-specific fixup from Broadcom driver */
15484 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
) &&
15485 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
15486 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
15487 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
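
	/* How the accessors above are consumed: the tw32()/tr32() wrappers
	 * (defined in tg3.h, roughly as sketched below) funnel every MMIO
	 * access through these function pointers, so the per-chip
	 * workarounds selected here apply uniformly to the whole driver:
	 *
	 *	#define tw32(reg, val)	tp->write32(tp, reg, val)
	 *	#define tr32(reg)	tp->read32(tp, reg)
	 */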
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE +
				    NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}
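
	/* Note on the probe above (an inference from the write/readback
	 * pattern, not chip documentation): with the PCI-X target bug
	 * present, the back-to-back writes to sram_base + 4 can corrupt
	 * the neighboring word at sram_base, so a non-zero readback of
	 * the first word is taken as evidence that the indirect-access
	 * workaround must be force-enabled.
	 */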
	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif

static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;
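
	/* mac_offset above is the NVRAM byte offset of the factory MAC
	 * address: 0x7c for the first port/function, 0xcc for the second,
	 * with 5717-class parts adding a further 0x18c stride per extra
	 * function.  These constants are read off the assignments above,
	 * not from a separate layout document.
	 */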
	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
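
	/* The 0x484b signature checked above is ASCII "HK" ('H' = 0x48,
	 * 'K' = 0x4b) in the top 16 bits of the high mailbox word; the
	 * bootcode stores it to mark the mailbox contents as a valid MAC
	 * address, with the two leading address bytes packed below it and
	 * the remaining four in the low mailbox word.
	 */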
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
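
/* Worked example of the selection above, under stated assumptions: on a
 * plain-PCI (neither PCI-X nor PCIe) sparc64 host reporting a cache line
 * size of 64 bytes, goal is BOUNDARY_SINGLE_CACHELINE, so the function
 * ORs in DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, telling
 * the chip to break its DMA bursts at 64-byte boundaries.
 */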
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
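
/* Usage note, inferred from tg3_test_dma() below: one call with
 * to_device == 1 pushes TEST_BUFFER_SIZE bytes from the host buffer into
 * NIC SRAM through the read-DMA engine, and a second call with
 * to_device == 0 pulls them back through the write-DMA engine; the
 * caller then compares the round-tripped data word by word.
 */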
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
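
/* Why DMA_RWCTRL_WRITE_BNDRY_16 is the fallback: the 5700/5701 write-DMA
 * bug this test provokes is avoided by forcing the chip to break write
 * bursts at 16-byte boundaries, so that value is applied both when the
 * pattern miscompares and when a known-problematic host bridge (the
 * Apple UniNorth entry above) is present even though the test passed.
 */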
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
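
/* Rough interpretation, not taken from chip documentation: the mbuf
 * low-water marks are the free-buffer levels at which the buffer manager
 * begins receive flow control, the high-water marks are where normal
 * operation resumes, and the jumbo variants use deeper thresholds to
 * absorb larger frames.
 */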
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");

	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
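
/* These defaults are what the ethtool coalesce interface reports and
 * adjusts at runtime (e.g. "ethtool -c eth0" / "ethtool -C eth0
 * rx-usecs N"), assuming the usual get/set coalesce hooks in
 * tg3_ethtool_ops; "eth0" is just a placeholder interface name.
 */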
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}
	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
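
	/* Concrete effect of the masks above, assuming a 5780-class part
	 * (40BIT_DMA_BUG set) with CONFIG_HIGHMEM: the streaming mask is
	 * raised to 64 bits while the coherent mask stays at 40 bits, and
	 * buffers that map above the 40-bit limit are expected to be
	 * caught later in tg3_start_xmit(), per the comment above.
	 */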
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);
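
	/* Mailbox spacing, as encoded in the loop above: interrupt
	 * mailboxes for vectors 0-4 sit 8 bytes apart and later ones 4
	 * bytes apart, receive-return mailboxes advance by 8 bytes per
	 * vector, and the send mailboxes alternate -0x4/+0xc so they
	 * interleave between two register banks.
	 */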
	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
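
/* Recovery flow, per the kernel's PCI error-recovery framework
 * (Documentation/PCI/pci-error-recovery.txt in kernels of this era):
 * the error-recovery core calls ->error_detected() above, then resets
 * the slot and calls ->slot_reset() below, and finally ->resume() once
 * traffic may restart.
 */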
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);