/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
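
/*
 * Illustrative usage only (not part of the original source): callers
 * refer to a feature bit by its short name and the macros paste on the
 * TG3_FLAG_ prefix, e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 *
 * so every access goes through the type-checked helpers above.
 */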
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
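
/*
 * Because TG3_TX_RING_SIZE is a compile-time power of two, NEXT_TX()
 * wraps a producer index with a mask instead of a modulo.  A minimal
 * sketch of a ring walk (illustrative only, not taken from this file):
 *
 *	u32 entry = tnapi->tx_prod;
 *	...fill the descriptor at 'entry'...
 *	entry = NEXT_TX(entry);
 */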
#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{ }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
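
/*
 * tw32()/tr32() and the mailbox variants dispatch through per-device
 * function pointers, so one call site works whether the register is
 * reached by direct MMIO or by the config-space indirection above.
 * Illustrative use (a sketch, assuming a register offset from tg3.h):
 *
 *	tw32(GRC_MODE, tp->grc_mode);
 *	val = tr32(GRC_MODE);
 */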
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int ret = 0;
	u32 status, req, gnt, bit;
	int i, off;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
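
/*
 * A typical caller brackets APE shared-memory access with the lock
 * helpers; a minimal sketch (illustrative, not from this file):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;
 *	...touch APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */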
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
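
/*
 * MII registers are reached through the MAC's MI_COM interface above.
 * Illustrative read-modify-write of the basic mode control register
 * (a sketch, not taken from this file):
 *
 *	u32 bmcr;
 *	if (!tg3_readphy(tp, MII_BMCR, &bmcr))
 *		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
 */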
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
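
/*
 * The DSP registers are indirect: MII_TG3_DSP_ADDRESS selects the
 * register and MII_TG3_DSP_RW_PORT carries the data, which is what the
 * two helpers above wrap.  Illustrative call (a sketch):
 *
 *	tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
 */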
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
*tp
)
1333 /* OK, reset it, and poll the BMCR_RESET bit until it
1334 * clears or we time out.
1336 phy_control
= BMCR_RESET
;
1337 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1343 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1347 if ((phy_control
& BMCR_RESET
) == 0) {
1359 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1361 struct tg3
*tp
= bp
->priv
;
1364 spin_lock_bh(&tp
->lock
);
1366 if (tg3_readphy(tp
, reg
, &val
))
1369 spin_unlock_bh(&tp
->lock
);
1374 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1376 struct tg3
*tp
= bp
->priv
;
1379 spin_lock_bh(&tp
->lock
);
1381 if (tg3_writephy(tp
, reg
, val
))
1384 spin_unlock_bh(&tp
->lock
);
1389 static int tg3_mdio_reset(struct mii_bus
*bp
)
1394 static void tg3_mdio_config_5785(struct tg3
*tp
)
1397 struct phy_device
*phydev
;
1399 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1400 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1401 case PHY_ID_BCM50610
:
1402 case PHY_ID_BCM50610M
:
1403 val
= MAC_PHYCFG2_50610_LED_MODES
;
1405 case PHY_ID_BCMAC131
:
1406 val
= MAC_PHYCFG2_AC131_LED_MODES
;
1408 case PHY_ID_RTL8211C
:
1409 val
= MAC_PHYCFG2_RTL8211C_LED_MODES
;
1411 case PHY_ID_RTL8201E
:
1412 val
= MAC_PHYCFG2_RTL8201E_LED_MODES
;
1418 if (phydev
->interface
!= PHY_INTERFACE_MODE_RGMII
) {
1419 tw32(MAC_PHYCFG2
, val
);
1421 val
= tr32(MAC_PHYCFG1
);
1422 val
&= ~(MAC_PHYCFG1_RGMII_INT
|
1423 MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
);
1424 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
;
1425 tw32(MAC_PHYCFG1
, val
);
1430 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1431 val
|= MAC_PHYCFG2_EMODE_MASK_MASK
|
1432 MAC_PHYCFG2_FMODE_MASK_MASK
|
1433 MAC_PHYCFG2_GMODE_MASK_MASK
|
1434 MAC_PHYCFG2_ACT_MASK_MASK
|
1435 MAC_PHYCFG2_QUAL_MASK_MASK
|
1436 MAC_PHYCFG2_INBAND_ENABLE
;
1438 tw32(MAC_PHYCFG2
, val
);
1440 val
= tr32(MAC_PHYCFG1
);
1441 val
&= ~(MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
|
1442 MAC_PHYCFG1_RGMII_EXT_RX_DEC
| MAC_PHYCFG1_RGMII_SND_STAT_EN
);
1443 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1444 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1445 val
|= MAC_PHYCFG1_RGMII_EXT_RX_DEC
;
1446 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1447 val
|= MAC_PHYCFG1_RGMII_SND_STAT_EN
;
1449 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
|
1450 MAC_PHYCFG1_RGMII_INT
| MAC_PHYCFG1_TXC_DRV
;
1451 tw32(MAC_PHYCFG1
, val
);
1453 val
= tr32(MAC_EXT_RGMII_MODE
);
1454 val
&= ~(MAC_RGMII_MODE_RX_INT_B
|
1455 MAC_RGMII_MODE_RX_QUALITY
|
1456 MAC_RGMII_MODE_RX_ACTIVITY
|
1457 MAC_RGMII_MODE_RX_ENG_DET
|
1458 MAC_RGMII_MODE_TX_ENABLE
|
1459 MAC_RGMII_MODE_TX_LOWPWR
|
1460 MAC_RGMII_MODE_TX_RESET
);
1461 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1462 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1463 val
|= MAC_RGMII_MODE_RX_INT_B
|
1464 MAC_RGMII_MODE_RX_QUALITY
|
1465 MAC_RGMII_MODE_RX_ACTIVITY
|
1466 MAC_RGMII_MODE_RX_ENG_DET
;
1467 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1468 val
|= MAC_RGMII_MODE_TX_ENABLE
|
1469 MAC_RGMII_MODE_TX_LOWPWR
|
1470 MAC_RGMII_MODE_TX_RESET
;
1472 tw32(MAC_EXT_RGMII_MODE
, val
);
1475 static void tg3_mdio_start(struct tg3
*tp
)
1477 tp
->mi_mode
&= ~MAC_MI_MODE_AUTO_POLL
;
1478 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1481 if (tg3_flag(tp
, MDIOBUS_INITED
) &&
1482 tg3_asic_rev(tp
) == ASIC_REV_5785
)
1483 tg3_mdio_config_5785(tp
);
1486 static int tg3_mdio_init(struct tg3
*tp
)
1490 struct phy_device
*phydev
;
1492 if (tg3_flag(tp
, 5717_PLUS
)) {
1495 tp
->phy_addr
= tp
->pci_fn
+ 1;
1497 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5717_A0
)
1498 is_serdes
= tr32(SG_DIG_STATUS
) & SG_DIG_IS_SERDES
;
1500 is_serdes
= tr32(TG3_CPMU_PHY_STRAP
) &
1501 TG3_CPMU_PHY_STRAP_IS_SERDES
;
1505 tp
->phy_addr
= TG3_PHY_MII_ADDR
;
1509 if (!tg3_flag(tp
, USE_PHYLIB
) || tg3_flag(tp
, MDIOBUS_INITED
))
1512 tp
->mdio_bus
= mdiobus_alloc();
1513 if (tp
->mdio_bus
== NULL
)
1516 tp
->mdio_bus
->name
= "tg3 mdio bus";
1517 snprintf(tp
->mdio_bus
->id
, MII_BUS_ID_SIZE
, "%x",
1518 (tp
->pdev
->bus
->number
<< 8) | tp
->pdev
->devfn
);
1519 tp
->mdio_bus
->priv
= tp
;
1520 tp
->mdio_bus
->parent
= &tp
->pdev
->dev
;
1521 tp
->mdio_bus
->read
= &tg3_mdio_read
;
1522 tp
->mdio_bus
->write
= &tg3_mdio_write
;
1523 tp
->mdio_bus
->reset
= &tg3_mdio_reset
;
1524 tp
->mdio_bus
->phy_mask
= ~(1 << TG3_PHY_MII_ADDR
);
1525 tp
->mdio_bus
->irq
= &tp
->mdio_irq
[0];
1527 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1528 tp
->mdio_bus
->irq
[i
] = PHY_POLL
;
1530 /* The bus registration will look for all the PHYs on the mdio bus.
1531 * Unfortunately, it does not ensure the PHY is powered up before
1532 * accessing the PHY ID registers. A chip reset is the
1533 * quickest way to bring the device back to an operational state..
1535 if (tg3_readphy(tp
, MII_BMCR
, ®
) || (reg
& BMCR_PDOWN
))
1538 i
= mdiobus_register(tp
->mdio_bus
);
1540 dev_warn(&tp
->pdev
->dev
, "mdiobus_reg failed (0x%x)\n", i
);
1541 mdiobus_free(tp
->mdio_bus
);
1545 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1547 if (!phydev
|| !phydev
->drv
) {
1548 dev_warn(&tp
->pdev
->dev
, "No PHY devices\n");
1549 mdiobus_unregister(tp
->mdio_bus
);
1550 mdiobus_free(tp
->mdio_bus
);
1554 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1555 case PHY_ID_BCM57780
:
1556 phydev
->interface
= PHY_INTERFACE_MODE_GMII
;
1557 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1559 case PHY_ID_BCM50610
:
1560 case PHY_ID_BCM50610M
:
1561 phydev
->dev_flags
|= PHY_BRCM_CLEAR_RGMII_MODE
|
1562 PHY_BRCM_RX_REFCLK_UNUSED
|
1563 PHY_BRCM_DIS_TXCRXC_NOENRGY
|
1564 PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1565 if (tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1566 phydev
->dev_flags
|= PHY_BRCM_STD_IBND_DISABLE
;
1567 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1568 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_RX_ENABLE
;
1569 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1570 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_TX_ENABLE
;
1572 case PHY_ID_RTL8211C
:
1573 phydev
->interface
= PHY_INTERFACE_MODE_RGMII
;
1575 case PHY_ID_RTL8201E
:
1576 case PHY_ID_BCMAC131
:
1577 phydev
->interface
= PHY_INTERFACE_MODE_MII
;
1578 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1579 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
1583 tg3_flag_set(tp
, MDIOBUS_INITED
);
1585 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
1586 tg3_mdio_config_5785(tp
);
1591 static void tg3_mdio_fini(struct tg3
*tp
)
1593 if (tg3_flag(tp
, MDIOBUS_INITED
)) {
1594 tg3_flag_clear(tp
, MDIOBUS_INITED
);
1595 mdiobus_unregister(tp
->mdio_bus
);
1596 mdiobus_free(tp
->mdio_bus
);
1600 /* tp->lock is held. */
1601 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1605 val
= tr32(GRC_RX_CPU_EVENT
);
1606 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1607 tw32_f(GRC_RX_CPU_EVENT
, val
);
1609 tp
->last_event_jiffies
= jiffies
;
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614 /* tp->lock is held. */
1615 static void tg3_wait_for_event_ack(struct tg3
*tp
)
1618 unsigned int delay_cnt
;
1621 /* If enough time has passed, no wait is necessary. */
1622 time_remain
= (long)(tp
->last_event_jiffies
+ 1 +
1623 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC
)) -
1625 if (time_remain
< 0)
1628 /* Check if we can shorten the wait time. */
1629 delay_cnt
= jiffies_to_usecs(time_remain
);
1630 if (delay_cnt
> TG3_FW_EVENT_TIMEOUT_USEC
)
1631 delay_cnt
= TG3_FW_EVENT_TIMEOUT_USEC
;
1632 delay_cnt
= (delay_cnt
>> 3) + 1;
1634 for (i
= 0; i
< delay_cnt
; i
++) {
1635 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
1641 /* tp->lock is held. */
1642 static void tg3_phy_gather_ump_data(struct tg3
*tp
, u32
*data
)
1647 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1649 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1650 val
|= (reg
& 0xffff);
1654 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1656 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1657 val
|= (reg
& 0xffff);
1661 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1662 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1664 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1665 val
|= (reg
& 0xffff);
1669 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1676 /* tp->lock is held. */
1677 static void tg3_ump_link_report(struct tg3
*tp
)
1681 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1684 tg3_phy_gather_ump_data(tp
, data
);
1686 tg3_wait_for_event_ack(tp
);
1688 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1689 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1690 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1691 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1692 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1693 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1695 tg3_generate_fw_event(tp
);
1698 /* tp->lock is held. */
1699 static void tg3_stop_fw(struct tg3
*tp
)
1701 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
1702 /* Wait for RX cpu to ACK the previous event. */
1703 tg3_wait_for_event_ack(tp
);
1705 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
1707 tg3_generate_fw_event(tp
);
1709 /* Wait for RX cpu to ACK this event. */
1710 tg3_wait_for_event_ack(tp
);
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1717 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1718 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1720 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1722 case RESET_KIND_INIT
:
1723 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1727 case RESET_KIND_SHUTDOWN
:
1728 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1732 case RESET_KIND_SUSPEND
:
1733 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1742 if (kind
== RESET_KIND_INIT
||
1743 kind
== RESET_KIND_SUSPEND
)
1744 tg3_ape_driver_state_change(tp
, kind
);
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1750 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1752 case RESET_KIND_INIT
:
1753 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1754 DRV_STATE_START_DONE
);
1757 case RESET_KIND_SHUTDOWN
:
1758 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1759 DRV_STATE_UNLOAD_DONE
);
1767 if (kind
== RESET_KIND_SHUTDOWN
)
1768 tg3_ape_driver_state_change(tp
, kind
);
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1774 if (tg3_flag(tp
, ENABLE_ASF
)) {
1776 case RESET_KIND_INIT
:
1777 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1781 case RESET_KIND_SHUTDOWN
:
1782 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1786 case RESET_KIND_SUSPEND
:
1787 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1797 static int tg3_poll_fw(struct tg3
*tp
)
1802 if (tg3_flag(tp
, IS_SSB_CORE
)) {
1803 /* We don't use firmware. */
1807 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
1808 /* Wait up to 20ms for init done. */
1809 for (i
= 0; i
< 200; i
++) {
1810 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
1817 /* Wait for firmware initialization to complete. */
1818 for (i
= 0; i
< 100000; i
++) {
1819 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
1820 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1825 /* Chip might not be fitted with firmware. Some Sun onboard
1826 * parts are configured like that. So don't signal the timeout
1827 * of the above loop as an error, but do report the lack of
1828 * running firmware once.
1830 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1831 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1833 netdev_info(tp
->dev
, "No firmware running\n");
1836 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
) {
1837 /* The 57765 A0 needs a little more
1838 * time to do some important work.
1846 static void tg3_link_report(struct tg3
*tp
)
1848 if (!netif_carrier_ok(tp
->dev
)) {
1849 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1850 tg3_ump_link_report(tp
);
1851 } else if (netif_msg_link(tp
)) {
1852 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1853 (tp
->link_config
.active_speed
== SPEED_1000
?
1855 (tp
->link_config
.active_speed
== SPEED_100
?
1857 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1860 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1861 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1863 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1866 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
1867 netdev_info(tp
->dev
, "EEE is %s\n",
1868 tp
->setlpicnt
? "enabled" : "disabled");
1870 tg3_ump_link_report(tp
);
1874 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1878 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1879 miireg
= ADVERTISE_1000XPAUSE
;
1880 else if (flow_ctrl
& FLOW_CTRL_TX
)
1881 miireg
= ADVERTISE_1000XPSE_ASYM
;
1882 else if (flow_ctrl
& FLOW_CTRL_RX
)
1883 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1890 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1894 if (lcladv
& rmtadv
& ADVERTISE_1000XPAUSE
) {
1895 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1896 } else if (lcladv
& rmtadv
& ADVERTISE_1000XPSE_ASYM
) {
1897 if (lcladv
& ADVERTISE_1000XPAUSE
)
1899 if (rmtadv
& ADVERTISE_1000XPAUSE
)
1906 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1910 u32 old_rx_mode
= tp
->rx_mode
;
1911 u32 old_tx_mode
= tp
->tx_mode
;
1913 if (tg3_flag(tp
, USE_PHYLIB
))
1914 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1916 autoneg
= tp
->link_config
.autoneg
;
1918 if (autoneg
== AUTONEG_ENABLE
&& tg3_flag(tp
, PAUSE_AUTONEG
)) {
1919 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1920 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1922 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1924 flowctrl
= tp
->link_config
.flowctrl
;
1926 tp
->link_config
.active_flowctrl
= flowctrl
;
1928 if (flowctrl
& FLOW_CTRL_RX
)
1929 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1931 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1933 if (old_rx_mode
!= tp
->rx_mode
)
1934 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1936 if (flowctrl
& FLOW_CTRL_TX
)
1937 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1939 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1941 if (old_tx_mode
!= tp
->tx_mode
)
1942 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1945 static void tg3_adjust_link(struct net_device
*dev
)
1947 u8 oldflowctrl
, linkmesg
= 0;
1948 u32 mac_mode
, lcl_adv
, rmt_adv
;
1949 struct tg3
*tp
= netdev_priv(dev
);
1950 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1952 spin_lock_bh(&tp
->lock
);
1954 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
1955 MAC_MODE_HALF_DUPLEX
);
1957 oldflowctrl
= tp
->link_config
.active_flowctrl
;
1963 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
1964 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1965 else if (phydev
->speed
== SPEED_1000
||
1966 tg3_asic_rev(tp
) != ASIC_REV_5785
)
1967 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1969 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1971 if (phydev
->duplex
== DUPLEX_HALF
)
1972 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1974 lcl_adv
= mii_advertise_flowctrl(
1975 tp
->link_config
.flowctrl
);
1978 rmt_adv
= LPA_PAUSE_CAP
;
1979 if (phydev
->asym_pause
)
1980 rmt_adv
|= LPA_PAUSE_ASYM
;
1983 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
1985 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1987 if (mac_mode
!= tp
->mac_mode
) {
1988 tp
->mac_mode
= mac_mode
;
1989 tw32_f(MAC_MODE
, tp
->mac_mode
);
1993 if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
1994 if (phydev
->speed
== SPEED_10
)
1996 MAC_MI_STAT_10MBPS_MODE
|
1997 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1999 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
2002 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
2003 tw32(MAC_TX_LENGTHS
,
2004 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2005 (6 << TX_LENGTHS_IPG_SHIFT
) |
2006 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2008 tw32(MAC_TX_LENGTHS
,
2009 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2010 (6 << TX_LENGTHS_IPG_SHIFT
) |
2011 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2013 if (phydev
->link
!= tp
->old_link
||
2014 phydev
->speed
!= tp
->link_config
.active_speed
||
2015 phydev
->duplex
!= tp
->link_config
.active_duplex
||
2016 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
2019 tp
->old_link
= phydev
->link
;
2020 tp
->link_config
.active_speed
= phydev
->speed
;
2021 tp
->link_config
.active_duplex
= phydev
->duplex
;
2023 spin_unlock_bh(&tp
->lock
);
2026 tg3_link_report(tp
);
2029 static int tg3_phy_init(struct tg3
*tp
)
2031 struct phy_device
*phydev
;
2033 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
2036 /* Bring the PHY back to a known state. */
2039 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2041 /* Attach the MAC to the PHY. */
2042 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
),
2043 tg3_adjust_link
, phydev
->interface
);
2044 if (IS_ERR(phydev
)) {
2045 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
2046 return PTR_ERR(phydev
);
2049 /* Mask with MAC supported features. */
2050 switch (phydev
->interface
) {
2051 case PHY_INTERFACE_MODE_GMII
:
2052 case PHY_INTERFACE_MODE_RGMII
:
2053 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
2054 phydev
->supported
&= (PHY_GBIT_FEATURES
|
2056 SUPPORTED_Asym_Pause
);
2060 case PHY_INTERFACE_MODE_MII
:
2061 phydev
->supported
&= (PHY_BASIC_FEATURES
|
2063 SUPPORTED_Asym_Pause
);
2066 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2070 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
2072 phydev
->advertising
= phydev
->supported
;
2077 static void tg3_phy_start(struct tg3
*tp
)
2079 struct phy_device
*phydev
;
2081 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2084 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2086 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
2087 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
2088 phydev
->speed
= tp
->link_config
.speed
;
2089 phydev
->duplex
= tp
->link_config
.duplex
;
2090 phydev
->autoneg
= tp
->link_config
.autoneg
;
2091 phydev
->advertising
= tp
->link_config
.advertising
;
2096 phy_start_aneg(phydev
);
2099 static void tg3_phy_stop(struct tg3
*tp
)
2101 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2104 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2107 static void tg3_phy_fini(struct tg3
*tp
)
2109 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
2110 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2111 tp
->phy_flags
&= ~TG3_PHYFLG_IS_CONNECTED
;
2115 static int tg3_phy_set_extloopbk(struct tg3
*tp
)
2120 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
2123 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2124 /* Cannot do read-modify-write on 5401 */
2125 err
= tg3_phy_auxctl_write(tp
,
2126 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2127 MII_TG3_AUXCTL_ACTL_EXTLOOPBK
|
2132 err
= tg3_phy_auxctl_read(tp
,
2133 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2137 val
|= MII_TG3_AUXCTL_ACTL_EXTLOOPBK
;
2138 err
= tg3_phy_auxctl_write(tp
,
2139 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, val
);
2145 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
2149 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2152 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2153 phytest
| MII_TG3_FET_SHADOW_EN
);
2154 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2156 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2158 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2159 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2161 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
2255 static void tg3_phy_apply_otp(struct tg3
*tp
)
2264 if (tg3_phy_toggle_auxctl_smdsp(tp
, true))
2267 phy
= ((otp
& TG3_OTP_AGCTGT_MASK
) >> TG3_OTP_AGCTGT_SHIFT
);
2268 phy
|= MII_TG3_DSP_TAP1_AGCTGT_DFLT
;
2269 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP1
, phy
);
2271 phy
= ((otp
& TG3_OTP_HPFFLTR_MASK
) >> TG3_OTP_HPFFLTR_SHIFT
) |
2272 ((otp
& TG3_OTP_HPFOVER_MASK
) >> TG3_OTP_HPFOVER_SHIFT
);
2273 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH0
, phy
);
2275 phy
= ((otp
& TG3_OTP_LPFDIS_MASK
) >> TG3_OTP_LPFDIS_SHIFT
);
2276 phy
|= MII_TG3_DSP_AADJ1CH3_ADCCKADJ
;
2277 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH3
, phy
);
2279 phy
= ((otp
& TG3_OTP_VDAC_MASK
) >> TG3_OTP_VDAC_SHIFT
);
2280 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP75
, phy
);
2282 phy
= ((otp
& TG3_OTP_10BTAMP_MASK
) >> TG3_OTP_10BTAMP_SHIFT
);
2283 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP96
, phy
);
2285 phy
= ((otp
& TG3_OTP_ROFF_MASK
) >> TG3_OTP_ROFF_SHIFT
) |
2286 ((otp
& TG3_OTP_RCOFF_MASK
) >> TG3_OTP_RCOFF_SHIFT
);
2287 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP97
, phy
);
2289 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2292 static void tg3_phy_eee_adjust(struct tg3
*tp
, u32 current_link_up
)
2296 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
2301 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
2302 current_link_up
== 1 &&
2303 tp
->link_config
.active_duplex
== DUPLEX_FULL
&&
2304 (tp
->link_config
.active_speed
== SPEED_100
||
2305 tp
->link_config
.active_speed
== SPEED_1000
)) {
2308 if (tp
->link_config
.active_speed
== SPEED_1000
)
2309 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_16_5_US
;
2311 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_36_US
;
2313 tw32(TG3_CPMU_EEE_CTRL
, eeectl
);
2315 tg3_phy_cl45_read(tp
, MDIO_MMD_AN
,
2316 TG3_CL45_D7_EEERES_STAT
, &val
);
2318 if (val
== TG3_CL45_D7_EEERES_STAT_LP_1000T
||
2319 val
== TG3_CL45_D7_EEERES_STAT_LP_100TX
)
2323 if (!tp
->setlpicnt
) {
2324 if (current_link_up
== 1 &&
2325 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2326 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, 0x0000);
2327 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2330 val
= tr32(TG3_CPMU_EEE_MODE
);
2331 tw32(TG3_CPMU_EEE_MODE
, val
& ~TG3_CPMU_EEEMD_LPI_ENABLE
);
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
2372 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
2374 static const u32 test_pat
[4][6] = {
2375 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2376 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2377 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2378 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2382 for (chan
= 0; chan
< 4; chan
++) {
2385 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2386 (chan
* 0x2000) | 0x0200);
2387 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
2389 for (i
= 0; i
< 6; i
++)
2390 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
2393 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
2394 if (tg3_wait_macro_done(tp
)) {
2399 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2400 (chan
* 0x2000) | 0x0200);
2401 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0082);
2402 if (tg3_wait_macro_done(tp
)) {
2407 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0802);
2408 if (tg3_wait_macro_done(tp
)) {
2413 for (i
= 0; i
< 6; i
+= 2) {
2416 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
2417 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
2418 tg3_wait_macro_done(tp
)) {
2424 if (low
!= test_pat
[chan
][i
] ||
2425 high
!= test_pat
[chan
][i
+1]) {
2426 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
2427 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
2428 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
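/* Write zeros back over the DSP test pattern: clear all six
 * MII_TG3_DSP_RW_PORT slots on each of the four channels and wait for the
 * macro engine to acknowledge each one.
 */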
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2458 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
2460 u32 reg32
, phy9_orig
;
2461 int retries
, do_phy_reset
, err
;
2467 err
= tg3_bmcr_reset(tp
);
2473 /* Disable transmitter and interrupt. */
2474 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &reg32
))
2478 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
2480 /* Set full-duplex, 1000 mbps. */
2481 tg3_writephy(tp
, MII_BMCR
,
2482 BMCR_FULLDPLX
| BMCR_SPEED1000
);
2484 /* Set to master mode. */
2485 if (tg3_readphy(tp
, MII_CTRL1000
, &phy9_orig
))
2488 tg3_writephy(tp
, MII_CTRL1000
,
2489 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
2491 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
2495 /* Block the PHY control access. */
2496 tg3_phydsp_write(tp
, 0x8005, 0x0800);
2498 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
2501 } while (--retries
);
2503 err
= tg3_phy_reset_chanpat(tp
);
2507 tg3_phydsp_write(tp
, 0x8005, 0x0000);
2509 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
2510 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0000);
2512 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2514 tg3_writephy(tp
, MII_CTRL1000
, phy9_orig
);
2516 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &reg32
)) {
2518 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
static void tg3_carrier_on(struct tg3 *tp)
{
	netif_carrier_on(tp->dev);
	tp->link_up = true;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2537 /* This will reset the tigon3 PHY if there is no valid
2538 * link unless the FORCE argument is non-zero.
2540 static int tg3_phy_reset(struct tg3
*tp
)
2545 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
2546 val
= tr32(GRC_MISC_CFG
);
2547 tw32_f(GRC_MISC_CFG
, val
& ~GRC_MISC_CFG_EPHY_IDDQ
);
2550 err
= tg3_readphy(tp
, MII_BMSR
, &val
);
2551 err
|= tg3_readphy(tp
, MII_BMSR
, &val
);
2555 if (netif_running(tp
->dev
) && tp
->link_up
) {
2556 tg3_carrier_off(tp
);
2557 tg3_link_report(tp
);
2560 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
2561 tg3_asic_rev(tp
) == ASIC_REV_5704
||
2562 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
2563 err
= tg3_phy_reset_5703_4_5(tp
);
2570 if (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
2571 tg3_chip_rev(tp
) != CHIPREV_5784_AX
) {
2572 cpmuctrl
= tr32(TG3_CPMU_CTRL
);
2573 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
)
2575 cpmuctrl
& ~CPMU_CTRL_GPHY_10MB_RXONLY
);
2578 err
= tg3_bmcr_reset(tp
);
2582 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
) {
2583 val
= MII_TG3_DSP_EXP8_AEDW
| MII_TG3_DSP_EXP8_REJ2MHz
;
2584 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP8
, val
);
2586 tw32(TG3_CPMU_CTRL
, cpmuctrl
);
2589 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
||
2590 tg3_chip_rev(tp
) == CHIPREV_5761_AX
) {
2591 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2592 if ((val
& CPMU_LSPD_1000MB_MACCLK_MASK
) ==
2593 CPMU_LSPD_1000MB_MACCLK_12_5
) {
2594 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2596 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2600 if (tg3_flag(tp
, 5717_PLUS
) &&
2601 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
))
2604 tg3_phy_apply_otp(tp
);
2606 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
2607 tg3_phy_toggle_apd(tp
, true);
2609 tg3_phy_toggle_apd(tp
, false);
2612 if ((tp
->phy_flags
& TG3_PHYFLG_ADC_BUG
) &&
2613 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2614 tg3_phydsp_write(tp
, 0x201f, 0x2aaa);
2615 tg3_phydsp_write(tp
, 0x000a, 0x0323);
2616 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2619 if (tp
->phy_flags
& TG3_PHYFLG_5704_A0_BUG
) {
2620 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2621 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2624 if (tp
->phy_flags
& TG3_PHYFLG_BER_BUG
) {
2625 if (!tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2626 tg3_phydsp_write(tp
, 0x000a, 0x310b);
2627 tg3_phydsp_write(tp
, 0x201f, 0x9506);
2628 tg3_phydsp_write(tp
, 0x401f, 0x14e2);
2629 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2631 } else if (tp
->phy_flags
& TG3_PHYFLG_JITTER_BUG
) {
2632 if (!tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2633 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
2634 if (tp
->phy_flags
& TG3_PHYFLG_ADJUST_TRIM
) {
2635 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x110b);
2636 tg3_writephy(tp
, MII_TG3_TEST1
,
2637 MII_TG3_TEST1_TRIM_EN
| 0x4);
2639 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x010b);
2641 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2645 /* Set Extended packet length bit (bit 14) on all chips that */
2646 /* support jumbo frames */
2647 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2648 /* Cannot do read-modify-write on 5401 */
2649 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
2650 } else if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2651 /* Set bit 14 with read-modify-write to preserve other bits */
2652 err
= tg3_phy_auxctl_read(tp
,
2653 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2655 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2656 val
| MII_TG3_AUXCTL_ACTL_EXTPKTLEN
);
2659 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2660 * jumbo frames transmission.
2662 if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2663 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &val
))
2664 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2665 val
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
2668 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
2669 /* adjust output voltage */
2670 tg3_writephy(tp
, MII_TG3_FET_PTEST
, 0x12);
2673 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5762_A0
)
2674 tg3_phydsp_write(tp
, 0xffb, 0x4000);
2676 tg3_phy_toggle_automdix(tp
, 1);
2677 tg3_phy_set_wirespeed(tp
);
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
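/* Each PCI function owns a 4-bit field in the shared GPIO message word,
 * starting at TG3_APE_GPIO_MSG_SHIFT.  The helper below replaces this
 * function's field with newstat and returns the updated word so callers can
 * see what the other ports have reported.
 */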
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2769 static void tg3_pwrsrc_switch_to_vaux(struct tg3
*tp
)
2771 if (!tg3_flag(tp
, IS_NIC
))
2774 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
2775 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
2776 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2777 (GRC_LCLCTRL_GPIO_OE0
|
2778 GRC_LCLCTRL_GPIO_OE1
|
2779 GRC_LCLCTRL_GPIO_OE2
|
2780 GRC_LCLCTRL_GPIO_OUTPUT0
|
2781 GRC_LCLCTRL_GPIO_OUTPUT1
),
2782 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2783 } else if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
2784 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
2785 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2786 u32 grc_local_ctrl
= GRC_LCLCTRL_GPIO_OE0
|
2787 GRC_LCLCTRL_GPIO_OE1
|
2788 GRC_LCLCTRL_GPIO_OE2
|
2789 GRC_LCLCTRL_GPIO_OUTPUT0
|
2790 GRC_LCLCTRL_GPIO_OUTPUT1
|
2792 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2793 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2795 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT2
;
2796 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2799 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT0
;
2800 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2801 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2804 u32 grc_local_ctrl
= 0;
2806 /* Workaround to prevent overdrawing Amps. */
2807 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
2808 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
2809 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2811 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2814 /* On 5753 and variants, GPIO2 cannot be used. */
2815 no_gpio2
= tp
->nic_sram_data_cfg
&
2816 NIC_SRAM_DATA_CFG_NO_GPIO2
;
2818 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
2819 GRC_LCLCTRL_GPIO_OE1
|
2820 GRC_LCLCTRL_GPIO_OE2
|
2821 GRC_LCLCTRL_GPIO_OUTPUT1
|
2822 GRC_LCLCTRL_GPIO_OUTPUT2
;
2824 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
2825 GRC_LCLCTRL_GPIO_OUTPUT2
);
2827 tw32_wait_f(GRC_LOCAL_CTRL
,
2828 tp
->grc_local_ctrl
| grc_local_ctrl
,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2831 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
2833 tw32_wait_f(GRC_LOCAL_CTRL
,
2834 tp
->grc_local_ctrl
| grc_local_ctrl
,
2835 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2838 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
2839 tw32_wait_f(GRC_LOCAL_CTRL
,
2840 tp
->grc_local_ctrl
| grc_local_ctrl
,
2841 TG3_GRC_LCLCTL_PWRSW_DELAY
);
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2871 static void tg3_frob_aux_power(struct tg3
*tp
, bool include_wol
)
2873 bool need_vaux
= false;
2875 /* The GPIOs do something completely different on 57765. */
2876 if (!tg3_flag(tp
, IS_NIC
) || tg3_flag(tp
, 57765_CLASS
))
2879 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
2880 tg3_asic_rev(tp
) == ASIC_REV_5719
||
2881 tg3_asic_rev(tp
) == ASIC_REV_5720
) {
2882 tg3_frob_aux_power_5717(tp
, include_wol
?
2883 tg3_flag(tp
, WOL_ENABLE
) != 0 : 0);
2887 if (tp
->pdev_peer
&& tp
->pdev_peer
!= tp
->pdev
) {
2888 struct net_device
*dev_peer
;
2890 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
2892 /* remove_one() may have been run on the peer. */
2894 struct tg3
*tp_peer
= netdev_priv(dev_peer
);
2896 if (tg3_flag(tp_peer
, INIT_COMPLETE
))
2899 if ((include_wol
&& tg3_flag(tp_peer
, WOL_ENABLE
)) ||
2900 tg3_flag(tp_peer
, ENABLE_ASF
))
2905 if ((include_wol
&& tg3_flag(tp
, WOL_ENABLE
)) ||
2906 tg3_flag(tp
, ENABLE_ASF
))
2910 tg3_pwrsrc_switch_to_vaux(tp
);
2912 tg3_pwrsrc_die_with_vmain(tp
);
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
2928 static void tg3_power_down_phy(struct tg3
*tp
, bool do_low_power
)
2932 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
2933 if (tg3_asic_rev(tp
) == ASIC_REV_5704
) {
2934 u32 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2935 u32 serdes_cfg
= tr32(MAC_SERDES_CFG
);
2938 SG_DIG_USING_HW_AUTONEG
| SG_DIG_SOFT_RESET
;
2939 tw32(SG_DIG_CTRL
, sg_dig_ctrl
);
2940 tw32(MAC_SERDES_CFG
, serdes_cfg
| (1 << 15));
2945 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
2947 val
= tr32(GRC_MISC_CFG
);
2948 tw32_f(GRC_MISC_CFG
, val
| GRC_MISC_CFG_EPHY_IDDQ
);
2951 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2953 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2956 tg3_writephy(tp
, MII_ADVERTISE
, 0);
2957 tg3_writephy(tp
, MII_BMCR
,
2958 BMCR_ANENABLE
| BMCR_ANRESTART
);
2960 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2961 phytest
| MII_TG3_FET_SHADOW_EN
);
2962 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXMODE4
, &phy
)) {
2963 phy
|= MII_TG3_FET_SHDW_AUXMODE4_SBPD
;
2965 MII_TG3_FET_SHDW_AUXMODE4
,
2968 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
2971 } else if (do_low_power
) {
2972 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2973 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
2975 val
= MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
2976 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE
|
2977 MII_TG3_AUXCTL_PCTL_VREG_11V
;
2978 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, val
);
2981 /* The PHY should not be powered down on some chips because
2984 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
2985 tg3_asic_rev(tp
) == ASIC_REV_5704
||
2986 (tg3_asic_rev(tp
) == ASIC_REV_5780
&&
2987 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) ||
2988 (tg3_asic_rev(tp
) == ASIC_REV_5717
&&
2992 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
||
2993 tg3_chip_rev(tp
) == CHIPREV_5761_AX
) {
2994 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2995 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2996 val
|= CPMU_LSPD_1000MB_MACCLK_12_5
;
2997 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
3000 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
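/* On devices without the NVRAM interface, reads go through the legacy
 * serial EEPROM engine: program GRC_EEPROM_ADDR with the offset plus the
 * READ/START bits, poll for EEPROM_ADDR_COMPLETE, then fetch the word from
 * GRC_EEPROM_DATA.
 */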
3057 static int tg3_nvram_read_using_eeprom(struct tg3
*tp
,
3058 u32 offset
, u32
*val
)
3063 if (offset
> EEPROM_ADDR_ADDR_MASK
|| (offset
% 4) != 0)
3066 tmp
= tr32(GRC_EEPROM_ADDR
) & ~(EEPROM_ADDR_ADDR_MASK
|
3067 EEPROM_ADDR_DEVID_MASK
|
3069 tw32(GRC_EEPROM_ADDR
,
3071 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
3072 ((offset
<< EEPROM_ADDR_ADDR_SHIFT
) &
3073 EEPROM_ADDR_ADDR_MASK
) |
3074 EEPROM_ADDR_READ
| EEPROM_ADDR_START
);
3076 for (i
= 0; i
< 1000; i
++) {
3077 tmp
= tr32(GRC_EEPROM_ADDR
);
3079 if (tmp
& EEPROM_ADDR_COMPLETE
)
3083 if (!(tmp
& EEPROM_ADDR_COMPLETE
))
3086 tmp
= tr32(GRC_EEPROM_DATA
);
3089 * The data will always be opposite the native endian
3090 * format. Perform a blind byteswap to compensate.
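/* NVRAM commands are polled for completion; the timeout below is a number
 * of polling iterations (about 10 usecs each), i.e. roughly a 100 ms ceiling.
 */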
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
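/* Buffered Atmel AT45DB0x1B parts are addressed as page + offset rather
 * than as a flat byte offset.  The two helpers below convert between the
 * flat "logical" view used by callers and the device's "physical" view.
 */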
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
3196 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
3197 u32 offset
, u32 len
, u8
*buf
)
3202 for (i
= 0; i
< len
; i
+= 4) {
3208 memcpy(&data
, buf
+ i
, 4);
3211 * The SEEPROM interface expects the data to always be opposite
3212 * the native endian format. We accomplish this by reversing
3213 * all the operations that would have been performed on the
3214 * data from a call to tg3_nvram_read_be32().
3216 tw32(GRC_EEPROM_DATA
, swab32(be32_to_cpu(data
)));
3218 val
= tr32(GRC_EEPROM_ADDR
);
3219 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
3221 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
3223 tw32(GRC_EEPROM_ADDR
, val
|
3224 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
3225 (addr
& EEPROM_ADDR_ADDR_MASK
) |
3229 for (j
= 0; j
< 1000; j
++) {
3230 val
= tr32(GRC_EEPROM_ADDR
);
3232 if (val
& EEPROM_ADDR_COMPLETE
)
3236 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
3245 /* offset and length are dword aligned */
3246 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
3250 u32 pagesize
= tp
->nvram_pagesize
;
3251 u32 pagemask
= pagesize
- 1;
3255 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
3261 u32 phy_addr
, page_off
, size
;
3263 phy_addr
= offset
& ~pagemask
;
3265 for (j
= 0; j
< pagesize
; j
+= 4) {
3266 ret
= tg3_nvram_read_be32(tp
, phy_addr
+ j
,
3267 (__be32
*) (tmp
+ j
));
3274 page_off
= offset
& pagemask
;
3281 memcpy(tmp
+ page_off
, buf
, size
);
3283 offset
= offset
+ (pagesize
- page_off
);
3285 tg3_enable_nvram_access(tp
);
3288 * Before we can erase the flash page, we need
3289 * to issue a special "write enable" command.
3291 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3293 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3296 /* Erase the target page */
3297 tw32(NVRAM_ADDR
, phy_addr
);
3299 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
3300 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
3302 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3305 /* Issue another write enable to start the write. */
3306 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3308 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3311 for (j
= 0; j
< pagesize
; j
+= 4) {
3314 data
= *((__be32
*) (tmp
+ j
));
3316 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3318 tw32(NVRAM_ADDR
, phy_addr
+ j
);
3320 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
3324 nvram_cmd
|= NVRAM_CMD_FIRST
;
3325 else if (j
== (pagesize
- 4))
3326 nvram_cmd
|= NVRAM_CMD_LAST
;
3328 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3336 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3337 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3344 /* offset and length are dword aligned */
3345 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
3350 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
3351 u32 page_off
, phy_addr
, nvram_cmd
;
3354 memcpy(&data
, buf
+ i
, 4);
3355 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3357 page_off
= offset
% tp
->nvram_pagesize
;
3359 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
3361 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
3363 if (page_off
== 0 || i
== 0)
3364 nvram_cmd
|= NVRAM_CMD_FIRST
;
3365 if (page_off
== (tp
->nvram_pagesize
- 4))
3366 nvram_cmd
|= NVRAM_CMD_LAST
;
3369 nvram_cmd
|= NVRAM_CMD_LAST
;
3371 if ((nvram_cmd
& NVRAM_CMD_FIRST
) ||
3372 !tg3_flag(tp
, FLASH
) ||
3373 !tg3_flag(tp
, 57765_PLUS
))
3374 tw32(NVRAM_ADDR
, phy_addr
);
3376 if (tg3_asic_rev(tp
) != ASIC_REV_5752
&&
3377 !tg3_flag(tp
, 5755_PLUS
) &&
3378 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
3379 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
3382 cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3383 ret
= tg3_nvram_exec_cmd(tp
, cmd
);
3387 if (!tg3_flag(tp
, FLASH
)) {
3388 /* We always do complete word writes to eeprom. */
3389 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
3392 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
3404 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
3405 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
3406 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
3410 if (!tg3_flag(tp
, NVRAM
)) {
3411 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
3415 ret
= tg3_nvram_lock(tp
);
3419 tg3_enable_nvram_access(tp
);
3420 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
))
3421 tw32(NVRAM_WRITE1
, 0x406);
3423 grc_mode
= tr32(GRC_MODE
);
3424 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
3426 if (tg3_flag(tp
, NVRAM_BUFFERED
) || !tg3_flag(tp
, FLASH
)) {
3427 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
3430 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
3434 grc_mode
= tr32(GRC_MODE
);
3435 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
3437 tg3_disable_nvram_access(tp
);
3438 tg3_nvram_unlock(tp
);
3441 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
3442 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
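/* "Pausing" an on-chip CPU means requesting CPU_MODE_HALT and polling until
 * the halt bit actually latches; the helpers below wrap that sequence for
 * the RX and TX processors.
 */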
/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3501 /* tp->lock is held. */
3502 static int tg3_halt_cpu(struct tg3
*tp
, u32 cpu_base
)
3506 BUG_ON(cpu_base
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
));
3508 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
3509 u32 val
= tr32(GRC_VCPU_EXT_CTRL
);
3511 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_HALT_CPU
);
3514 if (cpu_base
== RX_CPU_BASE
) {
3515 rc
= tg3_rxcpu_pause(tp
);
3518 * There is only an Rx CPU for the 5750 derivative in the
3521 if (tg3_flag(tp
, IS_SSB_CORE
))
3524 rc
= tg3_txcpu_pause(tp
);
3528 netdev_err(tp
->dev
, "%s timed out, %s CPU\n",
3529 __func__
, cpu_base
== RX_CPU_BASE
? "RX" : "TX");
3533 /* Clear firmware's nvram arbitration. */
3534 if (tg3_flag(tp
, NVRAM
))
3535 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
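/* fw_base is the load address taken from the firmware header, fw_len the
 * number of bytes to copy, and fw_data points at the big-endian words that
 * follow the header in the firmware blob.
 */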
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};
3545 /* tp->lock is held. */
3546 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
,
3547 u32 cpu_scratch_base
, int cpu_scratch_size
,
3548 struct fw_info
*info
)
3550 int err
, lock_err
, i
;
3551 void (*write_op
)(struct tg3
*, u32
, u32
);
3553 if (cpu_base
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
)) {
3555 "%s: Trying to load TX cpu firmware which is 5705\n",
3560 if (tg3_flag(tp
, 5705_PLUS
))
3561 write_op
= tg3_write_mem
;
3563 write_op
= tg3_write_indirect_reg32
;
3565 /* It is possible that bootcode is still loading at this point.
3566 * Get the nvram lock first before halting the cpu.
3568 lock_err
= tg3_nvram_lock(tp
);
3569 err
= tg3_halt_cpu(tp
, cpu_base
);
3571 tg3_nvram_unlock(tp
);
3575 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
3576 write_op(tp
, cpu_scratch_base
+ i
, 0);
3577 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3578 tw32(cpu_base
+ CPU_MODE
, tr32(cpu_base
+CPU_MODE
)|CPU_MODE_HALT
);
3579 for (i
= 0; i
< (info
->fw_len
/ sizeof(u32
)); i
++)
3580 write_op(tp
, (cpu_scratch_base
+
3581 (info
->fw_base
& 0xffff) +
3583 be32_to_cpu(info
->fw_data
[i
]));
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
3613 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
3615 struct fw_info info
;
3616 const __be32
*fw_data
;
3619 fw_data
= (void *)tp
->fw
->data
;
3621 /* Firmware blob starts with version numbers, followed by
3622 start address and length. We are setting complete length.
3623 length = end_address_of_bss - start_address_of_text.
3624 Remainder is the blob to be loaded contiguously
3625 from start address. */
3627 info
.fw_base
= be32_to_cpu(fw_data
[1]);
3628 info
.fw_len
= tp
->fw
->size
- 12;
3629 info
.fw_data
= &fw_data
[3];
3631 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
3632 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
3637 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
3638 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
3643 /* Now startup only the RX cpu. */
3644 err
= tg3_pause_cpu_and_set_pc(tp
, RX_CPU_BASE
, info
.fw_base
);
3646 netdev_err(tp
->dev
, "%s fails to set RX CPU PC, is %08x "
3647 "should be %08x\n", __func__
,
3648 tr32(RX_CPU_BASE
+ CPU_PC
), info
.fw_base
);
3652 tg3_rxcpu_resume(tp
);
3657 /* tp->lock is held. */
3658 static int tg3_load_tso_firmware(struct tg3
*tp
)
3660 struct fw_info info
;
3661 const __be32
*fw_data
;
3662 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
3665 if (!tg3_flag(tp
, FW_TSO
))
3668 fw_data
= (void *)tp
->fw
->data
;
3670 /* Firmware blob starts with version numbers, followed by
3671 start address and length. We are setting complete length.
3672 length = end_address_of_bss - start_address_of_text.
3673 Remainder is the blob to be loaded contiguously
3674 from start address. */
3676 info
.fw_base
= be32_to_cpu(fw_data
[1]);
3677 cpu_scratch_size
= tp
->fw_len
;
3678 info
.fw_len
= tp
->fw
->size
- 12;
3679 info
.fw_data
= &fw_data
[3];
3681 if (tg3_asic_rev(tp
) == ASIC_REV_5705
) {
3682 cpu_base
= RX_CPU_BASE
;
3683 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
3685 cpu_base
= TX_CPU_BASE
;
3686 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
3687 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
3690 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
3691 cpu_scratch_base
, cpu_scratch_size
,
3696 /* Now startup the cpu. */
3697 err
= tg3_pause_cpu_and_set_pc(tp
, cpu_base
, info
.fw_base
);
3700 "%s fails to set CPU PC, is %08x should be %08x\n",
3701 __func__
, tr32(cpu_base
+ CPU_PC
), info
.fw_base
);
3705 tg3_resume_cpu(tp
, cpu_base
);
3710 /* tp->lock is held. */
3711 static void __tg3_set_mac_addr(struct tg3
*tp
, int skip_mac_1
)
3713 u32 addr_high
, addr_low
;
3716 addr_high
= ((tp
->dev
->dev_addr
[0] << 8) |
3717 tp
->dev
->dev_addr
[1]);
3718 addr_low
= ((tp
->dev
->dev_addr
[2] << 24) |
3719 (tp
->dev
->dev_addr
[3] << 16) |
3720 (tp
->dev
->dev_addr
[4] << 8) |
3721 (tp
->dev
->dev_addr
[5] << 0));
3722 for (i
= 0; i
< 4; i
++) {
3723 if (i
== 1 && skip_mac_1
)
3725 tw32(MAC_ADDR_0_HIGH
+ (i
* 8), addr_high
);
3726 tw32(MAC_ADDR_0_LOW
+ (i
* 8), addr_low
);
3729 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
3730 tg3_asic_rev(tp
) == ASIC_REV_5704
) {
3731 for (i
= 0; i
< 12; i
++) {
3732 tw32(MAC_EXTADDR_0_HIGH
+ (i
* 8), addr_high
);
3733 tw32(MAC_EXTADDR_0_LOW
+ (i
* 8), addr_low
);
3737 addr_high
= (tp
->dev
->dev_addr
[0] +
3738 tp
->dev
->dev_addr
[1] +
3739 tp
->dev
->dev_addr
[2] +
3740 tp
->dev
->dev_addr
[3] +
3741 tp
->dev
->dev_addr
[4] +
3742 tp
->dev
->dev_addr
[5]) &
3743 TX_BACKOFF_SEED_MASK
;
3744 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
3774 static int tg3_setup_phy(struct tg3
*, int);
3776 static int tg3_power_down_prepare(struct tg3
*tp
)
3779 bool device_should_wake
, do_low_power
;
3781 tg3_enable_register_access(tp
);
3783 /* Restore the CLKREQ setting. */
3784 if (tg3_flag(tp
, CLKREQ_BUG
))
3785 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
3786 PCI_EXP_LNKCTL_CLKREQ_EN
);
3788 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
3789 tw32(TG3PCI_MISC_HOST_CTRL
,
3790 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
3792 device_should_wake
= device_may_wakeup(&tp
->pdev
->dev
) &&
3793 tg3_flag(tp
, WOL_ENABLE
);
3795 if (tg3_flag(tp
, USE_PHYLIB
)) {
3796 do_low_power
= false;
3797 if ((tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) &&
3798 !(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
3799 struct phy_device
*phydev
;
3800 u32 phyid
, advertising
;
3802 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
3804 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
3806 tp
->link_config
.speed
= phydev
->speed
;
3807 tp
->link_config
.duplex
= phydev
->duplex
;
3808 tp
->link_config
.autoneg
= phydev
->autoneg
;
3809 tp
->link_config
.advertising
= phydev
->advertising
;
3811 advertising
= ADVERTISED_TP
|
3813 ADVERTISED_Autoneg
|
3814 ADVERTISED_10baseT_Half
;
3816 if (tg3_flag(tp
, ENABLE_ASF
) || device_should_wake
) {
3817 if (tg3_flag(tp
, WOL_SPEED_100MB
))
3819 ADVERTISED_100baseT_Half
|
3820 ADVERTISED_100baseT_Full
|
3821 ADVERTISED_10baseT_Full
;
3823 advertising
|= ADVERTISED_10baseT_Full
;
3826 phydev
->advertising
= advertising
;
3828 phy_start_aneg(phydev
);
3830 phyid
= phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
;
3831 if (phyid
!= PHY_ID_BCMAC131
) {
3832 phyid
&= PHY_BCM_OUI_MASK
;
3833 if (phyid
== PHY_BCM_OUI_1
||
3834 phyid
== PHY_BCM_OUI_2
||
3835 phyid
== PHY_BCM_OUI_3
)
3836 do_low_power
= true;
3840 do_low_power
= true;
3842 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
))
3843 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
3845 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
3846 tg3_setup_phy(tp
, 0);
3849 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
3852 val
= tr32(GRC_VCPU_EXT_CTRL
);
3853 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
3854 } else if (!tg3_flag(tp
, ENABLE_ASF
)) {
3858 for (i
= 0; i
< 200; i
++) {
3859 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
3860 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
3865 if (tg3_flag(tp
, WOL_CAP
))
3866 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
3867 WOL_DRV_STATE_SHUTDOWN
|
3871 if (device_should_wake
) {
3874 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
3876 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
3877 tg3_phy_auxctl_write(tp
,
3878 MII_TG3_AUXCTL_SHDWSEL_PWRCTL
,
3879 MII_TG3_AUXCTL_PCTL_WOL_EN
|
3880 MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
3881 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC
);
3885 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
3886 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
3888 mac_mode
= MAC_MODE_PORT_MODE_MII
;
3890 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
3891 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
3892 u32 speed
= tg3_flag(tp
, WOL_SPEED_100MB
) ?
3893 SPEED_100
: SPEED_10
;
3894 if (tg3_5700_link_polarity(tp
, speed
))
3895 mac_mode
|= MAC_MODE_LINK_POLARITY
;
3897 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
3900 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
3903 if (!tg3_flag(tp
, 5750_PLUS
))
3904 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
3906 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
3907 if ((tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
)) &&
3908 (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)))
3909 mac_mode
|= MAC_MODE_KEEP_FRAME_IN_WOL
;
3911 if (tg3_flag(tp
, ENABLE_APE
))
3912 mac_mode
|= MAC_MODE_APE_TX_EN
|
3913 MAC_MODE_APE_RX_EN
|
3914 MAC_MODE_TDE_ENABLE
;
3916 tw32_f(MAC_MODE
, mac_mode
);
3919 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
3923 if (!tg3_flag(tp
, WOL_SPEED_100MB
) &&
3924 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
3925 tg3_asic_rev(tp
) == ASIC_REV_5701
)) {
3928 base_val
= tp
->pci_clock_ctrl
;
3929 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
3930 CLOCK_CTRL_TXCLK_DISABLE
);
3932 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
3933 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
3934 } else if (tg3_flag(tp
, 5780_CLASS
) ||
3935 tg3_flag(tp
, CPMU_PRESENT
) ||
3936 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
3938 } else if (!(tg3_flag(tp
, 5750_PLUS
) && tg3_flag(tp
, ENABLE_ASF
))) {
3939 u32 newbits1
, newbits2
;
3941 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
3942 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
3943 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
3944 CLOCK_CTRL_TXCLK_DISABLE
|
3946 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
3947 } else if (tg3_flag(tp
, 5705_PLUS
)) {
3948 newbits1
= CLOCK_CTRL_625_CORE
;
3949 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
3951 newbits1
= CLOCK_CTRL_ALTCLK
;
3952 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
3955 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
3958 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
3961 if (!tg3_flag(tp
, 5705_PLUS
)) {
3964 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
3965 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
3966 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
3967 CLOCK_CTRL_TXCLK_DISABLE
|
3968 CLOCK_CTRL_44MHZ_CORE
);
3970 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
3973 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
3974 tp
->pci_clock_ctrl
| newbits3
, 40);
3978 if (!(device_should_wake
) && !tg3_flag(tp
, ENABLE_ASF
))
3979 tg3_power_down_phy(tp
, do_low_power
);
3981 tg3_frob_aux_power(tp
, true);
3983 /* Workaround for unstable PLL clock */
3984 if ((!tg3_flag(tp
, IS_SSB_CORE
)) &&
3985 ((tg3_chip_rev(tp
) == CHIPREV_5750_AX
) ||
3986 (tg3_chip_rev(tp
) == CHIPREV_5750_BX
))) {
3987 u32 val
= tr32(0x7d00);
3989 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3991 if (!tg3_flag(tp
, ENABLE_ASF
)) {
3994 err
= tg3_nvram_lock(tp
);
3995 tg3_halt_cpu(tp
, RX_CPU_BASE
);
3997 tg3_nvram_unlock(tp
);
4001 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4014 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
4016 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
4017 case MII_TG3_AUX_STAT_10HALF
:
4019 *duplex
= DUPLEX_HALF
;
4022 case MII_TG3_AUX_STAT_10FULL
:
4024 *duplex
= DUPLEX_FULL
;
4027 case MII_TG3_AUX_STAT_100HALF
:
4029 *duplex
= DUPLEX_HALF
;
4032 case MII_TG3_AUX_STAT_100FULL
:
4034 *duplex
= DUPLEX_FULL
;
4037 case MII_TG3_AUX_STAT_1000HALF
:
4038 *speed
= SPEED_1000
;
4039 *duplex
= DUPLEX_HALF
;
4042 case MII_TG3_AUX_STAT_1000FULL
:
4043 *speed
= SPEED_1000
;
4044 *duplex
= DUPLEX_FULL
;
4048 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4049 *speed
= (val
& MII_TG3_AUX_STAT_100
) ? SPEED_100
:
4051 *duplex
= (val
& MII_TG3_AUX_STAT_FULL
) ? DUPLEX_FULL
:
4055 *speed
= SPEED_UNKNOWN
;
4056 *duplex
= DUPLEX_UNKNOWN
;
4061 static int tg3_phy_autoneg_cfg(struct tg3
*tp
, u32 advertise
, u32 flowctrl
)
4066 new_adv
= ADVERTISE_CSMA
;
4067 new_adv
|= ethtool_adv_to_mii_adv_t(advertise
) & ADVERTISE_ALL
;
4068 new_adv
|= mii_advertise_flowctrl(flowctrl
);
4070 err
= tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
4074 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4075 new_adv
= ethtool_adv_to_mii_ctrl1000_t(advertise
);
4077 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4078 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
)
4079 new_adv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4081 err
= tg3_writephy(tp
, MII_CTRL1000
, new_adv
);
4086 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
4089 tw32(TG3_CPMU_EEE_MODE
,
4090 tr32(TG3_CPMU_EEE_MODE
) & ~TG3_CPMU_EEEMD_LPI_ENABLE
);
4092 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
4097 /* Advertise 100-BaseTX EEE ability */
4098 if (advertise
& ADVERTISED_100baseT_Full
)
4099 val
|= MDIO_AN_EEE_ADV_100TX
;
4100 /* Advertise 1000-BaseT EEE ability */
4101 if (advertise
& ADVERTISED_1000baseT_Full
)
4102 val
|= MDIO_AN_EEE_ADV_1000T
;
4103 err
= tg3_phy_cl45_write(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
4107 switch (tg3_asic_rev(tp
)) {
4109 case ASIC_REV_57765
:
4110 case ASIC_REV_57766
:
4112 /* If we advertised any eee advertisements above... */
4114 val
= MII_TG3_DSP_TAP26_ALNOKO
|
4115 MII_TG3_DSP_TAP26_RMRXSTO
|
4116 MII_TG3_DSP_TAP26_OPCSINPT
;
4117 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
4121 if (!tg3_phydsp_read(tp
, MII_TG3_DSP_CH34TP2
, &val
))
4122 tg3_phydsp_write(tp
, MII_TG3_DSP_CH34TP2
, val
|
4123 MII_TG3_DSP_CH34TP2_HIBW01
);
4126 err2
= tg3_phy_toggle_auxctl_smdsp(tp
, false);
4135 static void tg3_phy_copper_begin(struct tg3
*tp
)
4137 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
||
4138 (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4141 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
4142 adv
= ADVERTISED_10baseT_Half
|
4143 ADVERTISED_10baseT_Full
;
4144 if (tg3_flag(tp
, WOL_SPEED_100MB
))
4145 adv
|= ADVERTISED_100baseT_Half
|
4146 ADVERTISED_100baseT_Full
;
4148 fc
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
4150 adv
= tp
->link_config
.advertising
;
4151 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
4152 adv
&= ~(ADVERTISED_1000baseT_Half
|
4153 ADVERTISED_1000baseT_Full
);
4155 fc
= tp
->link_config
.flowctrl
;
4158 tg3_phy_autoneg_cfg(tp
, adv
, fc
);
4160 tg3_writephy(tp
, MII_BMCR
,
4161 BMCR_ANENABLE
| BMCR_ANRESTART
);
4164 u32 bmcr
, orig_bmcr
;
4166 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
4167 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
4170 switch (tp
->link_config
.speed
) {
4176 bmcr
|= BMCR_SPEED100
;
4180 bmcr
|= BMCR_SPEED1000
;
4184 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
4185 bmcr
|= BMCR_FULLDPLX
;
4187 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
4188 (bmcr
!= orig_bmcr
)) {
4189 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
4190 for (i
= 0; i
< 1500; i
++) {
4194 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
4195 tg3_readphy(tp
, MII_BMSR
, &tmp
))
4197 if (!(tmp
& BMSR_LSTATUS
)) {
4202 tg3_writephy(tp
, MII_BMCR
, bmcr
);
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
4227 static bool tg3_phy_copper_an_config_ok(struct tg3
*tp
, u32
*lcladv
)
4229 u32 advmsk
, tgtadv
, advertising
;
4231 advertising
= tp
->link_config
.advertising
;
4232 tgtadv
= ethtool_adv_to_mii_adv_t(advertising
) & ADVERTISE_ALL
;
4234 advmsk
= ADVERTISE_ALL
;
4235 if (tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4236 tgtadv
|= mii_advertise_flowctrl(tp
->link_config
.flowctrl
);
4237 advmsk
|= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
4240 if (tg3_readphy(tp
, MII_ADVERTISE
, lcladv
))
4243 if ((*lcladv
& advmsk
) != tgtadv
)
4246 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4249 tgtadv
= ethtool_adv_to_mii_ctrl1000_t(advertising
);
4251 if (tg3_readphy(tp
, MII_CTRL1000
, &tg3_ctrl
))
4255 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4256 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
)) {
4257 tgtadv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4258 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
|
4259 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
4261 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
);
4264 if (tg3_ctrl
!= tgtadv
)
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			tg3_carrier_on(tp);
		} else {
			tg3_carrier_off(tp);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}
4311 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
4313 int current_link_up
;
4315 u32 lcl_adv
, rmt_adv
;
4323 (MAC_STATUS_SYNC_CHANGED
|
4324 MAC_STATUS_CFG_CHANGED
|
4325 MAC_STATUS_MI_COMPLETION
|
4326 MAC_STATUS_LNKSTATE_CHANGED
));
4329 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
4331 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
4335 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, 0);
4337 /* Some third-party PHYs need to be reset on link going
4340 if ((tg3_asic_rev(tp
) == ASIC_REV_5703
||
4341 tg3_asic_rev(tp
) == ASIC_REV_5704
||
4342 tg3_asic_rev(tp
) == ASIC_REV_5705
) &&
4344 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4345 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4346 !(bmsr
& BMSR_LSTATUS
))
4352 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
4353 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4354 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
4355 !tg3_flag(tp
, INIT_COMPLETE
))
4358 if (!(bmsr
& BMSR_LSTATUS
)) {
4359 err
= tg3_init_5401phy_dsp(tp
);
4363 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4364 for (i
= 0; i
< 1000; i
++) {
4366 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4367 (bmsr
& BMSR_LSTATUS
)) {
4373 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
4374 TG3_PHY_REV_BCM5401_B0
&&
4375 !(bmsr
& BMSR_LSTATUS
) &&
4376 tp
->link_config
.active_speed
== SPEED_1000
) {
4377 err
= tg3_phy_reset(tp
);
4379 err
= tg3_init_5401phy_dsp(tp
);
4384 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4385 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
) {
4386 /* 5701 {A0,B0} CRC bug workaround */
4387 tg3_writephy(tp
, 0x15, 0x0a75);
4388 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4389 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
4390 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4393 /* Clear pending interrupts... */
4394 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4395 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4397 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
4398 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
4399 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
4400 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
4402 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4403 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4404 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
4405 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
4406 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
4408 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
4411 current_link_up
= 0;
4412 current_speed
= SPEED_UNKNOWN
;
4413 current_duplex
= DUPLEX_UNKNOWN
;
4414 tp
->phy_flags
&= ~TG3_PHYFLG_MDIX_STATE
;
4415 tp
->link_config
.rmt_adv
= 0;
4417 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
4418 err
= tg3_phy_auxctl_read(tp
,
4419 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4421 if (!err
&& !(val
& (1 << 10))) {
4422 tg3_phy_auxctl_write(tp
,
4423 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4430 for (i
= 0; i
< 100; i
++) {
4431 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4432 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4433 (bmsr
& BMSR_LSTATUS
))
4438 if (bmsr
& BMSR_LSTATUS
) {
4441 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
4442 for (i
= 0; i
< 2000; i
++) {
4444 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
4449 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
4454 for (i
= 0; i
< 200; i
++) {
4455 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4456 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
4458 if (bmcr
&& bmcr
!= 0x7fff)
4466 tp
->link_config
.active_speed
= current_speed
;
4467 tp
->link_config
.active_duplex
= current_duplex
;
4469 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4470 if ((bmcr
& BMCR_ANENABLE
) &&
4471 tg3_phy_copper_an_config_ok(tp
, &lcl_adv
) &&
4472 tg3_phy_copper_fetch_rmtadv(tp
, &rmt_adv
))
4473 current_link_up
= 1;
4475 if (!(bmcr
& BMCR_ANENABLE
) &&
4476 tp
->link_config
.speed
== current_speed
&&
4477 tp
->link_config
.duplex
== current_duplex
&&
4478 tp
->link_config
.flowctrl
==
4479 tp
->link_config
.active_flowctrl
) {
4480 current_link_up
= 1;
4484 if (current_link_up
== 1 &&
4485 tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4488 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4489 reg
= MII_TG3_FET_GEN_STAT
;
4490 bit
= MII_TG3_FET_GEN_STAT_MDIXSTAT
;
4492 reg
= MII_TG3_EXT_STAT
;
4493 bit
= MII_TG3_EXT_STAT_MDIX
;
4496 if (!tg3_readphy(tp
, reg
, &val
) && (val
& bit
))
4497 tp
->phy_flags
|= TG3_PHYFLG_MDIX_STATE
;
4499 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
4504 if (current_link_up
== 0 || (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4505 tg3_phy_copper_begin(tp
);
4507 if (tg3_flag(tp
, ROBOSWITCH
)) {
4508 current_link_up
= 1;
4509 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4510 current_speed
= SPEED_1000
;
4511 current_duplex
= DUPLEX_FULL
;
4512 tp
->link_config
.active_speed
= current_speed
;
4513 tp
->link_config
.active_duplex
= current_duplex
;
4516 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4517 if ((!tg3_readphy(tp
, MII_BMSR
, &bmsr
) && (bmsr
& BMSR_LSTATUS
)) ||
4518 (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
4519 current_link_up
= 1;
4522 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
4523 if (current_link_up
== 1) {
4524 if (tp
->link_config
.active_speed
== SPEED_100
||
4525 tp
->link_config
.active_speed
== SPEED_10
)
4526 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4528 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4529 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
4530 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4532 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4534 /* In order for the 5750 core in BCM4785 chip to work properly
4535 * in RGMII mode, the Led Control Register must be set up.
4537 if (tg3_flag(tp
, RGMII_MODE
)) {
4538 u32 led_ctrl
= tr32(MAC_LED_CTRL
);
4539 led_ctrl
&= ~(LED_CTRL_1000MBPS_ON
| LED_CTRL_100MBPS_ON
);
4541 if (tp
->link_config
.active_speed
== SPEED_10
)
4542 led_ctrl
|= LED_CTRL_LNKLED_OVERRIDE
;
4543 else if (tp
->link_config
.active_speed
== SPEED_100
)
4544 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4545 LED_CTRL_100MBPS_ON
);
4546 else if (tp
->link_config
.active_speed
== SPEED_1000
)
4547 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4548 LED_CTRL_1000MBPS_ON
);
4550 tw32(MAC_LED_CTRL
, led_ctrl
);
4554 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4555 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4556 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4558 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
4559 if (current_link_up
== 1 &&
4560 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
4561 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
4563 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4566 /* ??? Without this setting Netgear GA302T PHY does not
4567 * ??? send/receive packets...
4569 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
4570 tg3_chip_rev_id(tp
) == CHIPREV_ID_5700_ALTIMA
) {
4571 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
4572 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
4576 tw32_f(MAC_MODE
, tp
->mac_mode
);
4579 tg3_phy_eee_adjust(tp
, current_link_up
);
4581 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
4582 /* Polled via timer. */
4583 tw32_f(MAC_EVENT
, 0);
4585 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4589 if (tg3_asic_rev(tp
) == ASIC_REV_5700
&&
4590 current_link_up
== 1 &&
4591 tp
->link_config
.active_speed
== SPEED_1000
&&
4592 (tg3_flag(tp
, PCIX_MODE
) || tg3_flag(tp
, PCI_HIGH_SPEED
))) {
4595 (MAC_STATUS_SYNC_CHANGED
|
4596 MAC_STATUS_CFG_CHANGED
));
4599 NIC_SRAM_FIRMWARE_MBOX
,
4600 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
4603 /* Prevent send BD corruption. */
4604 if (tg3_flag(tp
, CLKREQ_BUG
)) {
4605 if (tp
->link_config
.active_speed
== SPEED_100
||
4606 tp
->link_config
.active_speed
== SPEED_10
)
4607 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4608 PCI_EXP_LNKCTL_CLKREQ_EN
);
4610 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4611 PCI_EXP_LNKCTL_CLKREQ_EN
);
4614 tg3_test_and_report_link_chg(tp
, current_link_up
);
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
4683 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
4684 struct tg3_fiber_aneginfo
*ap
)
4687 unsigned long delta
;
4691 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
4695 ap
->ability_match_cfg
= 0;
4696 ap
->ability_match_count
= 0;
4697 ap
->ability_match
= 0;
4703 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
4704 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
4706 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
4707 ap
->ability_match_cfg
= rx_cfg_reg
;
4708 ap
->ability_match
= 0;
4709 ap
->ability_match_count
= 0;
4711 if (++ap
->ability_match_count
> 1) {
4712 ap
->ability_match
= 1;
4713 ap
->ability_match_cfg
= rx_cfg_reg
;
4716 if (rx_cfg_reg
& ANEG_CFG_ACK
)
4724 ap
->ability_match_cfg
= 0;
4725 ap
->ability_match_count
= 0;
4726 ap
->ability_match
= 0;
4732 ap
->rxconfig
= rx_cfg_reg
;
4735 switch (ap
->state
) {
4736 case ANEG_STATE_UNKNOWN
:
4737 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
4738 ap
->state
= ANEG_STATE_AN_ENABLE
;
4741 case ANEG_STATE_AN_ENABLE
:
4742 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
4743 if (ap
->flags
& MR_AN_ENABLE
) {
4746 ap
->ability_match_cfg
= 0;
4747 ap
->ability_match_count
= 0;
4748 ap
->ability_match
= 0;
4752 ap
->state
= ANEG_STATE_RESTART_INIT
;
4754 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
4758 case ANEG_STATE_RESTART_INIT
:
4759 ap
->link_time
= ap
->cur_time
;
4760 ap
->flags
&= ~(MR_NP_LOADED
);
4762 tw32(MAC_TX_AUTO_NEG
, 0);
4763 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
4764 tw32_f(MAC_MODE
, tp
->mac_mode
);
4767 ret
= ANEG_TIMER_ENAB
;
4768 ap
->state
= ANEG_STATE_RESTART
;
4771 case ANEG_STATE_RESTART
:
4772 delta
= ap
->cur_time
- ap
->link_time
;
4773 if (delta
> ANEG_STATE_SETTLE_TIME
)
4774 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
4776 ret
= ANEG_TIMER_ENAB
;
4779 case ANEG_STATE_DISABLE_LINK_OK
:
4783 case ANEG_STATE_ABILITY_DETECT_INIT
:
4784 ap
->flags
&= ~(MR_TOGGLE_TX
);
4785 ap
->txconfig
= ANEG_CFG_FD
;
4786 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
4787 if (flowctrl
& ADVERTISE_1000XPAUSE
)
4788 ap
->txconfig
|= ANEG_CFG_PS1
;
4789 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
4790 ap
->txconfig
|= ANEG_CFG_PS2
;
4791 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
4792 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
4793 tw32_f(MAC_MODE
, tp
->mac_mode
);
4796 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
4799 case ANEG_STATE_ABILITY_DETECT
:
4800 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0)
4801 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
4804 case ANEG_STATE_ACK_DETECT_INIT
:
4805 ap
->txconfig
|= ANEG_CFG_ACK
;
4806 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
4807 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
4808 tw32_f(MAC_MODE
, tp
->mac_mode
);
4811 ap
->state
= ANEG_STATE_ACK_DETECT
;
4814 case ANEG_STATE_ACK_DETECT
:
4815 if (ap
->ack_match
!= 0) {
4816 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
4817 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
4818 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
4820 ap
->state
= ANEG_STATE_AN_ENABLE
;
4822 } else if (ap
->ability_match
!= 0 &&
4823 ap
->rxconfig
== 0) {
4824 ap
->state
= ANEG_STATE_AN_ENABLE
;
4828 case ANEG_STATE_COMPLETE_ACK_INIT
:
4829 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
4833 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
4834 MR_LP_ADV_HALF_DUPLEX
|
4835 MR_LP_ADV_SYM_PAUSE
|
4836 MR_LP_ADV_ASYM_PAUSE
|
4837 MR_LP_ADV_REMOTE_FAULT1
|
4838 MR_LP_ADV_REMOTE_FAULT2
|
4839 MR_LP_ADV_NEXT_PAGE
|
4842 if (ap
->rxconfig
& ANEG_CFG_FD
)
4843 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
4844 if (ap
->rxconfig
& ANEG_CFG_HD
)
4845 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
4846 if (ap
->rxconfig
& ANEG_CFG_PS1
)
4847 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
4848 if (ap
->rxconfig
& ANEG_CFG_PS2
)
4849 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
4850 if (ap
->rxconfig
& ANEG_CFG_RF1
)
4851 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
4852 if (ap
->rxconfig
& ANEG_CFG_RF2
)
4853 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
4854 if (ap
->rxconfig
& ANEG_CFG_NP
)
4855 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
4857 ap
->link_time
= ap
->cur_time
;
4859 ap
->flags
^= (MR_TOGGLE_TX
);
4860 if (ap
->rxconfig
& 0x0008)
4861 ap
->flags
|= MR_TOGGLE_RX
;
4862 if (ap
->rxconfig
& ANEG_CFG_NP
)
4863 ap
->flags
|= MR_NP_RX
;
4864 ap
->flags
|= MR_PAGE_RX
;
4866 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
4867 ret
= ANEG_TIMER_ENAB
;
4870 case ANEG_STATE_COMPLETE_ACK
:
4871 if (ap
->ability_match
!= 0 &&
4872 ap
->rxconfig
== 0) {
4873 ap
->state
= ANEG_STATE_AN_ENABLE
;
4876 delta
= ap
->cur_time
- ap
->link_time
;
4877 if (delta
> ANEG_STATE_SETTLE_TIME
) {
4878 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
4879 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
4881 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
4882 !(ap
->flags
& MR_NP_RX
)) {
4883 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
4891 case ANEG_STATE_IDLE_DETECT_INIT
:
4892 ap
->link_time
= ap
->cur_time
;
4893 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
4894 tw32_f(MAC_MODE
, tp
->mac_mode
);
4897 ap
->state
= ANEG_STATE_IDLE_DETECT
;
4898 ret
= ANEG_TIMER_ENAB
;
4901 case ANEG_STATE_IDLE_DETECT
:
4902 if (ap
->ability_match
!= 0 &&
4903 ap
->rxconfig
== 0) {
4904 ap
->state
= ANEG_STATE_AN_ENABLE
;
4907 delta
= ap
->cur_time
- ap
->link_time
;
4908 if (delta
> ANEG_STATE_SETTLE_TIME
) {
4909 /* XXX another gem from the Broadcom driver :( */
4910 ap
->state
= ANEG_STATE_LINK_OK
;
4914 case ANEG_STATE_LINK_OK
:
4915 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
4919 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
4920 /* ??? unimplemented */
4923 case ANEG_STATE_NEXT_PAGE_WAIT
:
4924 /* ??? unimplemented */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5030 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
5033 u32 sg_dig_ctrl
, sg_dig_status
;
5034 u32 serdes_cfg
, expected_sg_dig_ctrl
;
5035 int workaround
, port_a
;
5036 int current_link_up
;
5039 expected_sg_dig_ctrl
= 0;
5042 current_link_up
= 0;
5044 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A0
&&
5045 tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A1
) {
5047 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
5050 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5051 /* preserve bits 20-23 for voltage regulator */
5052 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
5055 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
5057 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
5058 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
5060 u32 val
= serdes_cfg
;
5066 tw32_f(MAC_SERDES_CFG
, val
);
5069 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5071 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
5072 tg3_setup_flow_control(tp
, 0, 0);
5073 current_link_up
= 1;
5078 /* Want auto-negotiation. */
5079 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
5081 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5082 if (flowctrl
& ADVERTISE_1000XPAUSE
)
5083 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
5084 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
5085 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
5087 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
5088 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
5089 tp
->serdes_counter
&&
5090 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
5091 MAC_STATUS_RCVD_CFG
)) ==
5092 MAC_STATUS_PCS_SYNCED
)) {
5093 tp
->serdes_counter
--;
5094 current_link_up
= 1;
5099 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
5100 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
5102 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
5104 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5105 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5106 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
5107 MAC_STATUS_SIGNAL_DET
)) {
5108 sg_dig_status
= tr32(SG_DIG_STATUS
);
5109 mac_status
= tr32(MAC_STATUS
);
5111 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
5112 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
5113 u32 local_adv
= 0, remote_adv
= 0;
5115 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
5116 local_adv
|= ADVERTISE_1000XPAUSE
;
5117 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
5118 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5120 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
5121 remote_adv
|= LPA_1000XPAUSE
;
5122 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
5123 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5125 tp
->link_config
.rmt_adv
=
5126 mii_adv_to_ethtool_adv_x(remote_adv
);
5128 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5129 current_link_up
= 1;
5130 tp
->serdes_counter
= 0;
5131 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5132 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
5133 if (tp
->serdes_counter
)
5134 tp
->serdes_counter
--;
5137 u32 val
= serdes_cfg
;
5144 tw32_f(MAC_SERDES_CFG
, val
);
5147 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5150 /* Link parallel detection - link is up */
5151 /* only if we have PCS_SYNC and not */
5152 /* receiving config code words */
5153 mac_status
= tr32(MAC_STATUS
);
5154 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5155 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
5156 tg3_setup_flow_control(tp
, 0, 0);
5157 current_link_up
= 1;
5159 TG3_PHYFLG_PARALLEL_DETECT
;
5160 tp
->serdes_counter
=
5161 SERDES_PARALLEL_DET_TIMEOUT
;
5163 goto restart_autoneg
;
5167 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5168 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5172 return current_link_up
;
5175 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
5177 int current_link_up
= 0;
5179 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
))
5182 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5183 u32 txflags
, rxflags
;
5186 if (fiber_autoneg(tp
, &txflags
, &rxflags
)) {
5187 u32 local_adv
= 0, remote_adv
= 0;
5189 if (txflags
& ANEG_CFG_PS1
)
5190 local_adv
|= ADVERTISE_1000XPAUSE
;
5191 if (txflags
& ANEG_CFG_PS2
)
5192 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5194 if (rxflags
& MR_LP_ADV_SYM_PAUSE
)
5195 remote_adv
|= LPA_1000XPAUSE
;
5196 if (rxflags
& MR_LP_ADV_ASYM_PAUSE
)
5197 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5199 tp
->link_config
.rmt_adv
=
5200 mii_adv_to_ethtool_adv_x(remote_adv
);
5202 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5204 current_link_up
= 1;
5206 for (i
= 0; i
< 30; i
++) {
5209 (MAC_STATUS_SYNC_CHANGED
|
5210 MAC_STATUS_CFG_CHANGED
));
5212 if ((tr32(MAC_STATUS
) &
5213 (MAC_STATUS_SYNC_CHANGED
|
5214 MAC_STATUS_CFG_CHANGED
)) == 0)
5218 mac_status
= tr32(MAC_STATUS
);
5219 if (current_link_up
== 0 &&
5220 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5221 !(mac_status
& MAC_STATUS_RCVD_CFG
))
5222 current_link_up
= 1;
5224 tg3_setup_flow_control(tp
, 0, 0);
5226 /* Forcing 1000FD link up. */
5227 current_link_up
= 1;
5229 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
5232 tw32_f(MAC_MODE
, tp
->mac_mode
);
5237 return current_link_up
;
5240 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
5243 u16 orig_active_speed
;
5244 u8 orig_active_duplex
;
5246 int current_link_up
;
5249 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
5250 orig_active_speed
= tp
->link_config
.active_speed
;
5251 orig_active_duplex
= tp
->link_config
.active_duplex
;
5253 if (!tg3_flag(tp
, HW_AUTONEG
) &&
5255 tg3_flag(tp
, INIT_COMPLETE
)) {
5256 mac_status
= tr32(MAC_STATUS
);
5257 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
5258 MAC_STATUS_SIGNAL_DET
|
5259 MAC_STATUS_CFG_CHANGED
|
5260 MAC_STATUS_RCVD_CFG
);
5261 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
5262 MAC_STATUS_SIGNAL_DET
)) {
5263 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5264 MAC_STATUS_CFG_CHANGED
));
5269 tw32_f(MAC_TX_AUTO_NEG
, 0);
5271 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
5272 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
5273 tw32_f(MAC_MODE
, tp
->mac_mode
);
5276 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
5277 tg3_init_bcm8002(tp
);
5279 /* Enable link change event even when serdes polling. */
5280 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5283 current_link_up
= 0;
5284 tp
->link_config
.rmt_adv
= 0;
5285 mac_status
= tr32(MAC_STATUS
);
5287 if (tg3_flag(tp
, HW_AUTONEG
))
5288 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
5290 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
5292 tp
->napi
[0].hw_status
->status
=
5293 (SD_STATUS_UPDATED
|
5294 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
5296 for (i
= 0; i
< 100; i
++) {
5297 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5298 MAC_STATUS_CFG_CHANGED
));
5300 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
5301 MAC_STATUS_CFG_CHANGED
|
5302 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
5306 mac_status
= tr32(MAC_STATUS
);
5307 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
5308 current_link_up
= 0;
5309 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
5310 tp
->serdes_counter
== 0) {
5311 tw32_f(MAC_MODE
, (tp
->mac_mode
|
5312 MAC_MODE_SEND_CONFIGS
));
5314 tw32_f(MAC_MODE
, tp
->mac_mode
);
5318 if (current_link_up
== 1) {
5319 tp
->link_config
.active_speed
= SPEED_1000
;
5320 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
5321 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5322 LED_CTRL_LNKLED_OVERRIDE
|
5323 LED_CTRL_1000MBPS_ON
));
5325 tp
->link_config
.active_speed
= SPEED_UNKNOWN
;
5326 tp
->link_config
.active_duplex
= DUPLEX_UNKNOWN
;
5327 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5328 LED_CTRL_LNKLED_OVERRIDE
|
5329 LED_CTRL_TRAFFIC_OVERRIDE
));
5332 if (!tg3_test_and_report_link_chg(tp
, current_link_up
)) {
5333 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
5334 if (orig_pause_cfg
!= now_pause_cfg
||
5335 orig_active_speed
!= tp
->link_config
.active_speed
||
5336 orig_active_duplex
!= tp
->link_config
.active_duplex
)
5337 tg3_link_report(tp
);
5343 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
5345 int current_link_up
, err
= 0;
5349 u32 local_adv
, remote_adv
;
5351 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5352 tw32_f(MAC_MODE
, tp
->mac_mode
);
5358 (MAC_STATUS_SYNC_CHANGED
|
5359 MAC_STATUS_CFG_CHANGED
|
5360 MAC_STATUS_MI_COMPLETION
|
5361 MAC_STATUS_LNKSTATE_CHANGED
));
5367 current_link_up
= 0;
5368 current_speed
= SPEED_UNKNOWN
;
5369 current_duplex
= DUPLEX_UNKNOWN
;
5370 tp
->link_config
.rmt_adv
= 0;
5372 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5373 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5374 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5375 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5376 bmsr
|= BMSR_LSTATUS
;
5378 bmsr
&= ~BMSR_LSTATUS
;
5381 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5383 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
5384 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5385 /* do nothing, just check for link up at the end */
5386 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5389 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5390 newadv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
5391 ADVERTISE_1000XPAUSE
|
5392 ADVERTISE_1000XPSE_ASYM
|
5395 newadv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5396 newadv
|= ethtool_adv_to_mii_adv_x(tp
->link_config
.advertising
);
5398 if ((newadv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
5399 tg3_writephy(tp
, MII_ADVERTISE
, newadv
);
5400 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
5401 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5403 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5404 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
5405 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5412 bmcr
&= ~BMCR_SPEED1000
;
5413 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
5415 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
5416 new_bmcr
|= BMCR_FULLDPLX
;
5418 if (new_bmcr
!= bmcr
) {
5419 /* BMCR_SPEED1000 is a reserved bit that needs
5420 * to be set on write.
5422 new_bmcr
|= BMCR_SPEED1000
;
5424 /* Force a linkdown */
5428 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5429 adv
&= ~(ADVERTISE_1000XFULL
|
5430 ADVERTISE_1000XHALF
|
5432 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
5433 tg3_writephy(tp
, MII_BMCR
, bmcr
|
5437 tg3_carrier_off(tp
);
5439 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
5441 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5442 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5443 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5444 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5445 bmsr
|= BMSR_LSTATUS
;
5447 bmsr
&= ~BMSR_LSTATUS
;
5449 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5453 if (bmsr
& BMSR_LSTATUS
) {
5454 current_speed
= SPEED_1000
;
5455 current_link_up
= 1;
5456 if (bmcr
& BMCR_FULLDPLX
)
5457 current_duplex
= DUPLEX_FULL
;
5459 current_duplex
= DUPLEX_HALF
;
5464 if (bmcr
& BMCR_ANENABLE
) {
5467 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
5468 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
5469 common
= local_adv
& remote_adv
;
5470 if (common
& (ADVERTISE_1000XHALF
|
5471 ADVERTISE_1000XFULL
)) {
5472 if (common
& ADVERTISE_1000XFULL
)
5473 current_duplex
= DUPLEX_FULL
;
5475 current_duplex
= DUPLEX_HALF
;
5477 tp
->link_config
.rmt_adv
=
5478 mii_adv_to_ethtool_adv_x(remote_adv
);
5479 } else if (!tg3_flag(tp
, 5780_CLASS
)) {
5480 /* Link is up via parallel detect */
5482 current_link_up
= 0;
5487 if (current_link_up
== 1 && current_duplex
== DUPLEX_FULL
)
5488 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5490 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
5491 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5492 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
5494 tw32_f(MAC_MODE
, tp
->mac_mode
);
5497 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5499 tp
->link_config
.active_speed
= current_speed
;
5500 tp
->link_config
.active_duplex
= current_duplex
;
5502 tg3_test_and_report_link_chg(tp
, current_link_up
);
5506 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
5508 if (tp
->serdes_counter
) {
5509 /* Give autoneg time to complete. */
5510 tp
->serdes_counter
--;
5515 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
5518 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5519 if (bmcr
& BMCR_ANENABLE
) {
5522 /* Select shadow register 0x1f */
5523 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x7c00);
5524 tg3_readphy(tp
, MII_TG3_MISC_SHDW
, &phy1
);
5526 /* Select expansion interrupt status register */
5527 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5528 MII_TG3_DSP_EXP1_INT_STAT
);
5529 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5530 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5532 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
5533 /* We have signal detect and not receiving
5534 * config code words, link is up by parallel
5538 bmcr
&= ~BMCR_ANENABLE
;
5539 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
5540 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5541 tp
->phy_flags
|= TG3_PHYFLG_PARALLEL_DETECT
;
5544 } else if (tp
->link_up
&&
5545 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
5546 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5549 /* Select expansion interrupt status register */
5550 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5551 MII_TG3_DSP_EXP1_INT_STAT
);
5552 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5556 /* Config code words received, turn on autoneg. */
5557 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5558 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
5560 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5566 static int tg3_setup_phy(struct tg3
*tp
, int force_reset
)
5571 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
5572 err
= tg3_setup_fiber_phy(tp
, force_reset
);
5573 else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
5574 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
5576 err
= tg3_setup_copper_phy(tp
, force_reset
);
5578 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
) {
5581 val
= tr32(TG3_CPMU_CLCK_STAT
) & CPMU_CLCK_STAT_MAC_CLCK_MASK
;
5582 if (val
== CPMU_CLCK_STAT_MAC_CLCK_62_5
)
5584 else if (val
== CPMU_CLCK_STAT_MAC_CLCK_6_25
)
5589 val
= tr32(GRC_MISC_CFG
) & ~GRC_MISC_CFG_PRESCALAR_MASK
;
5590 val
|= (scale
<< GRC_MISC_CFG_PRESCALAR_SHIFT
);
5591 tw32(GRC_MISC_CFG
, val
);
5594 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
5595 (6 << TX_LENGTHS_IPG_SHIFT
);
5596 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
5597 tg3_asic_rev(tp
) == ASIC_REV_5762
)
5598 val
|= tr32(MAC_TX_LENGTHS
) &
5599 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
5600 TX_LENGTHS_CNT_DWN_VAL_MSK
);
5602 if (tp
->link_config
.active_speed
== SPEED_1000
&&
5603 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5604 tw32(MAC_TX_LENGTHS
, val
|
5605 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
));
5607 tw32(MAC_TX_LENGTHS
, val
|
5608 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
5610 if (!tg3_flag(tp
, 5705_PLUS
)) {
5612 tw32(HOSTCC_STAT_COAL_TICKS
,
5613 tp
->coal
.stats_block_coalesce_usecs
);
5615 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
5619 if (tg3_flag(tp
, ASPM_WORKAROUND
)) {
5620 val
= tr32(PCIE_PWR_MGMT_THRESH
);
5622 val
= (val
& ~PCIE_PWR_MGMT_L1_THRESH_MSK
) |
5625 val
|= PCIE_PWR_MGMT_L1_THRESH_MSK
;
5626 tw32(PCIE_PWR_MGMT_THRESH
, val
);
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
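/* Added note (illustrative, not from the original source): the EAV
 * reference clock is a free-running 64-bit nanosecond counter exposed
 * through two 32-bit registers.  For example, a counter value of
 * 0x00000123456789ab is written above as 0x456789ab to
 * TG3_EAV_REF_CLCK_LSB and 0x00000123 to TG3_EAV_REF_CLCK_MSB (with the
 * clock stopped around the update), and tg3_refclk_read() reassembles
 * the same value as LSB | ((u64)MSB << 32).
 */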
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);

static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;
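	/* Worked example (added for clarity, not in the original source):
	 * a requested adjustment of ppb = 1000000 (1000 ppm) gives
	 *	correction = (1000000 * (1 << 24)) / 1000000000
	 *		   = 16777216000000 / 1000000000
	 *		   = 16777 (truncated),
	 * which is masked with TG3_EAV_REF_CLK_CORRECT_MASK and programmed
	 * into the correction register below.
	 */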
	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}
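/* Added example (illustrative only): in tg3_ptp_gettime() above, if the
 * adjusted counter reads ns = 1500000123, then
 * div_u64_rem(ns, 1000000000, &remainder) yields tv_sec = 1 and
 * remainder = 500000123, i.e. the timespec { 1, 500000123 }.
 */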
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};

static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}

/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
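/* Added note (illustration only): tg3_rd32_loop() first advances dst by
 * `off` bytes, so a call such as tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0)
 * reads the 0x4f0 bytes of registers starting at offset MAC_MODE and
 * stores each word at regs[(MAC_MODE + i) / sizeof(u32)], keeping the
 * register dump indexed by absolute register offset.
 */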
5830 static void tg3_dump_legacy_regs(struct tg3
*tp
, u32
*regs
)
5832 tg3_rd32_loop(tp
, regs
, TG3PCI_VENDOR
, 0xb0);
5833 tg3_rd32_loop(tp
, regs
, MAILBOX_INTERRUPT_0
, 0x200);
5834 tg3_rd32_loop(tp
, regs
, MAC_MODE
, 0x4f0);
5835 tg3_rd32_loop(tp
, regs
, SNDDATAI_MODE
, 0xe0);
5836 tg3_rd32_loop(tp
, regs
, SNDDATAC_MODE
, 0x04);
5837 tg3_rd32_loop(tp
, regs
, SNDBDS_MODE
, 0x80);
5838 tg3_rd32_loop(tp
, regs
, SNDBDI_MODE
, 0x48);
5839 tg3_rd32_loop(tp
, regs
, SNDBDC_MODE
, 0x04);
5840 tg3_rd32_loop(tp
, regs
, RCVLPC_MODE
, 0x20);
5841 tg3_rd32_loop(tp
, regs
, RCVLPC_SELLST_BASE
, 0x15c);
5842 tg3_rd32_loop(tp
, regs
, RCVDBDI_MODE
, 0x0c);
5843 tg3_rd32_loop(tp
, regs
, RCVDBDI_JUMBO_BD
, 0x3c);
5844 tg3_rd32_loop(tp
, regs
, RCVDBDI_BD_PROD_IDX_0
, 0x44);
5845 tg3_rd32_loop(tp
, regs
, RCVDCC_MODE
, 0x04);
5846 tg3_rd32_loop(tp
, regs
, RCVBDI_MODE
, 0x20);
5847 tg3_rd32_loop(tp
, regs
, RCVCC_MODE
, 0x14);
5848 tg3_rd32_loop(tp
, regs
, RCVLSC_MODE
, 0x08);
5849 tg3_rd32_loop(tp
, regs
, MBFREE_MODE
, 0x08);
5850 tg3_rd32_loop(tp
, regs
, HOSTCC_MODE
, 0x100);
5852 if (tg3_flag(tp
, SUPPORT_MSIX
))
5853 tg3_rd32_loop(tp
, regs
, HOSTCC_RXCOL_TICKS_VEC1
, 0x180);
5855 tg3_rd32_loop(tp
, regs
, MEMARB_MODE
, 0x10);
5856 tg3_rd32_loop(tp
, regs
, BUFMGR_MODE
, 0x58);
5857 tg3_rd32_loop(tp
, regs
, RDMAC_MODE
, 0x08);
5858 tg3_rd32_loop(tp
, regs
, WDMAC_MODE
, 0x08);
5859 tg3_rd32_loop(tp
, regs
, RX_CPU_MODE
, 0x04);
5860 tg3_rd32_loop(tp
, regs
, RX_CPU_STATE
, 0x04);
5861 tg3_rd32_loop(tp
, regs
, RX_CPU_PGMCTR
, 0x04);
5862 tg3_rd32_loop(tp
, regs
, RX_CPU_HWBKPT
, 0x04);
5864 if (!tg3_flag(tp
, 5705_PLUS
)) {
5865 tg3_rd32_loop(tp
, regs
, TX_CPU_MODE
, 0x04);
5866 tg3_rd32_loop(tp
, regs
, TX_CPU_STATE
, 0x04);
5867 tg3_rd32_loop(tp
, regs
, TX_CPU_PGMCTR
, 0x04);
5870 tg3_rd32_loop(tp
, regs
, GRCMBOX_INTERRUPT_0
, 0x110);
5871 tg3_rd32_loop(tp
, regs
, FTQ_RESET
, 0x120);
5872 tg3_rd32_loop(tp
, regs
, MSGINT_MODE
, 0x0c);
5873 tg3_rd32_loop(tp
, regs
, DMAC_MODE
, 0x04);
5874 tg3_rd32_loop(tp
, regs
, GRC_MODE
, 0x4c);
5876 if (tg3_flag(tp
, NVRAM
))
5877 tg3_rd32_loop(tp
, regs
, NVRAM_CMD
, 0x24);
5880 static void tg3_dump_state(struct tg3
*tp
)
5885 regs
= kzalloc(TG3_REG_BLK_SIZE
, GFP_ATOMIC
);
5889 if (tg3_flag(tp
, PCI_EXPRESS
)) {
5890 /* Read up to but not including private PCI registers */
5891 for (i
= 0; i
< TG3_PCIE_TLDLPL_PORT
; i
+= sizeof(u32
))
5892 regs
[i
/ sizeof(u32
)] = tr32(i
);
5894 tg3_dump_legacy_regs(tp
, regs
);
5896 for (i
= 0; i
< TG3_REG_BLK_SIZE
/ sizeof(u32
); i
+= 4) {
5897 if (!regs
[i
+ 0] && !regs
[i
+ 1] &&
5898 !regs
[i
+ 2] && !regs
[i
+ 3])
5901 netdev_err(tp
->dev
, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5903 regs
[i
+ 0], regs
[i
+ 1], regs
[i
+ 2], regs
[i
+ 3]);
5908 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
5909 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
5911 /* SW status block */
5913 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5915 tnapi
->hw_status
->status
,
5916 tnapi
->hw_status
->status_tag
,
5917 tnapi
->hw_status
->rx_jumbo_consumer
,
5918 tnapi
->hw_status
->rx_consumer
,
5919 tnapi
->hw_status
->rx_mini_consumer
,
5920 tnapi
->hw_status
->idx
[0].rx_producer
,
5921 tnapi
->hw_status
->idx
[0].tx_consumer
);
5924 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5926 tnapi
->last_tag
, tnapi
->last_irq_tag
,
5927 tnapi
->tx_prod
, tnapi
->tx_cons
, tnapi
->tx_pending
,
5929 tnapi
->prodring
.rx_std_prod_idx
,
5930 tnapi
->prodring
.rx_std_cons_idx
,
5931 tnapi
->prodring
.rx_jmb_prod_idx
,
5932 tnapi
->prodring
.rx_jmb_cons_idx
);
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
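/* Worked example (added note, not part of the original driver; it assumes
 * the usual TG3_TX_RING_SIZE of 512): with tx_prod = 5 and tx_cons = 510
 * the producer index has wrapped, so (5 - 510) & 511 = 7 descriptors are
 * still in flight and tg3_tx_avail() above reports tx_pending - 7 free
 * slots.
 */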
5966 /* Tigon3 never reports partial packet sends. So we do not
5967 * need special logic to handle SKBs that have not had all
5968 * of their frags sent yet, like SunGEM does.
5970 static void tg3_tx(struct tg3_napi
*tnapi
)
5972 struct tg3
*tp
= tnapi
->tp
;
5973 u32 hw_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
5974 u32 sw_idx
= tnapi
->tx_cons
;
5975 struct netdev_queue
*txq
;
5976 int index
= tnapi
- tp
->napi
;
5977 unsigned int pkts_compl
= 0, bytes_compl
= 0;
5979 if (tg3_flag(tp
, ENABLE_TSS
))
5982 txq
= netdev_get_tx_queue(tp
->dev
, index
);
5984 while (sw_idx
!= hw_idx
) {
5985 struct tg3_tx_ring_info
*ri
= &tnapi
->tx_buffers
[sw_idx
];
5986 struct sk_buff
*skb
= ri
->skb
;
5989 if (unlikely(skb
== NULL
)) {
5994 if (tnapi
->tx_ring
[sw_idx
].len_flags
& TXD_FLAG_HWTSTAMP
) {
5995 struct skb_shared_hwtstamps timestamp
;
5996 u64 hwclock
= tr32(TG3_TX_TSTAMP_LSB
);
5997 hwclock
|= (u64
)tr32(TG3_TX_TSTAMP_MSB
) << 32;
5999 tg3_hwclock_to_timestamp(tp
, hwclock
, ×tamp
);
6001 skb_tstamp_tx(skb
, ×tamp
);
6004 pci_unmap_single(tp
->pdev
,
6005 dma_unmap_addr(ri
, mapping
),
6011 while (ri
->fragmented
) {
6012 ri
->fragmented
= false;
6013 sw_idx
= NEXT_TX(sw_idx
);
6014 ri
= &tnapi
->tx_buffers
[sw_idx
];
6017 sw_idx
= NEXT_TX(sw_idx
);
6019 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
6020 ri
= &tnapi
->tx_buffers
[sw_idx
];
6021 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
6024 pci_unmap_page(tp
->pdev
,
6025 dma_unmap_addr(ri
, mapping
),
6026 skb_frag_size(&skb_shinfo(skb
)->frags
[i
]),
6029 while (ri
->fragmented
) {
6030 ri
->fragmented
= false;
6031 sw_idx
= NEXT_TX(sw_idx
);
6032 ri
= &tnapi
->tx_buffers
[sw_idx
];
6035 sw_idx
= NEXT_TX(sw_idx
);
6039 bytes_compl
+= skb
->len
;
6043 if (unlikely(tx_bug
)) {
6049 netdev_tx_completed_queue(txq
, pkts_compl
, bytes_compl
);
6051 tnapi
->tx_cons
= sw_idx
;
6053 /* Need to make the tx_cons update visible to tg3_start_xmit()
6054 * before checking for netif_queue_stopped(). Without the
6055 * memory barrier, there is a small possibility that tg3_start_xmit()
6056 * will miss it and cause the queue to be stopped forever.
6060 if (unlikely(netif_tx_queue_stopped(txq
) &&
6061 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))) {
6062 __netif_tx_lock(txq
, smp_processor_id());
6063 if (netif_tx_queue_stopped(txq
) &&
6064 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))
6065 netif_tx_wake_queue(txq
);
6066 __netif_tx_unlock(txq
);
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
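/* Added note (not in the original source): the skb_size computed in
 * tg3_rx_data_free() mirrors the allocation in tg3_alloc_rx_data(): the
 * buffer holds map_sz bytes of packet data plus TG3_RX_OFFSET() headroom
 * plus a struct skb_shared_info tail, each rounded up by SKB_DATA_ALIGN().
 * If that total fits within one page the buffer came from the page-fragment
 * allocator and is released with put_page() via tg3_frag_free(true, ...),
 * otherwise it was kmalloc'ed and is simply kfree'd.
 */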
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
6104 static int tg3_alloc_rx_data(struct tg3
*tp
, struct tg3_rx_prodring_set
*tpr
,
6105 u32 opaque_key
, u32 dest_idx_unmasked
,
6106 unsigned int *frag_size
)
6108 struct tg3_rx_buffer_desc
*desc
;
6109 struct ring_info
*map
;
6112 int skb_size
, data_size
, dest_idx
;
6114 switch (opaque_key
) {
6115 case RXD_OPAQUE_RING_STD
:
6116 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
6117 desc
= &tpr
->rx_std
[dest_idx
];
6118 map
= &tpr
->rx_std_buffers
[dest_idx
];
6119 data_size
= tp
->rx_pkt_map_sz
;
6122 case RXD_OPAQUE_RING_JUMBO
:
6123 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
6124 desc
= &tpr
->rx_jmb
[dest_idx
].std
;
6125 map
= &tpr
->rx_jmb_buffers
[dest_idx
];
6126 data_size
= TG3_RX_JMB_MAP_SZ
;
6133 /* Do not overwrite any of the map or rp information
6134 * until we are sure we can commit to a new buffer.
6136 * Callers depend upon this behavior and assume that
6137 * we leave everything unchanged if we fail.
6139 skb_size
= SKB_DATA_ALIGN(data_size
+ TG3_RX_OFFSET(tp
)) +
6140 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
6141 if (skb_size
<= PAGE_SIZE
) {
6142 data
= netdev_alloc_frag(skb_size
);
6143 *frag_size
= skb_size
;
6145 data
= kmalloc(skb_size
, GFP_ATOMIC
);
6151 mapping
= pci_map_single(tp
->pdev
,
6152 data
+ TG3_RX_OFFSET(tp
),
6154 PCI_DMA_FROMDEVICE
);
6155 if (unlikely(pci_dma_mapping_error(tp
->pdev
, mapping
))) {
6156 tg3_frag_free(skb_size
<= PAGE_SIZE
, data
);
6161 dma_unmap_addr_set(map
, mapping
, mapping
);
6163 desc
->addr_hi
= ((u64
)mapping
>> 32);
6164 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
6169 /* We only need to move over in the address because the other
6170 * members of the RX descriptor are invariant. See notes above
6171 * tg3_alloc_rx_data for full details.
6173 static void tg3_recycle_rx(struct tg3_napi
*tnapi
,
6174 struct tg3_rx_prodring_set
*dpr
,
6175 u32 opaque_key
, int src_idx
,
6176 u32 dest_idx_unmasked
)
6178 struct tg3
*tp
= tnapi
->tp
;
6179 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
6180 struct ring_info
*src_map
, *dest_map
;
6181 struct tg3_rx_prodring_set
*spr
= &tp
->napi
[0].prodring
;
6184 switch (opaque_key
) {
6185 case RXD_OPAQUE_RING_STD
:
6186 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
6187 dest_desc
= &dpr
->rx_std
[dest_idx
];
6188 dest_map
= &dpr
->rx_std_buffers
[dest_idx
];
6189 src_desc
= &spr
->rx_std
[src_idx
];
6190 src_map
= &spr
->rx_std_buffers
[src_idx
];
6193 case RXD_OPAQUE_RING_JUMBO
:
6194 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
6195 dest_desc
= &dpr
->rx_jmb
[dest_idx
].std
;
6196 dest_map
= &dpr
->rx_jmb_buffers
[dest_idx
];
6197 src_desc
= &spr
->rx_jmb
[src_idx
].std
;
6198 src_map
= &spr
->rx_jmb_buffers
[src_idx
];
6205 dest_map
->data
= src_map
->data
;
6206 dma_unmap_addr_set(dest_map
, mapping
,
6207 dma_unmap_addr(src_map
, mapping
));
6208 dest_desc
->addr_hi
= src_desc
->addr_hi
;
6209 dest_desc
->addr_lo
= src_desc
->addr_lo
;
6211 /* Ensure that the update to the skb happens after the physical
6212 * addresses have been transferred to the new BD location.
6216 src_map
->data
= NULL
;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
6243 static int tg3_rx(struct tg3_napi
*tnapi
, int budget
)
6245 struct tg3
*tp
= tnapi
->tp
;
6246 u32 work_mask
, rx_std_posted
= 0;
6247 u32 std_prod_idx
, jmb_prod_idx
;
6248 u32 sw_idx
= tnapi
->rx_rcb_ptr
;
6251 struct tg3_rx_prodring_set
*tpr
= &tnapi
->prodring
;
6253 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6255 * We need to order the read of hw_idx and the read of
6256 * the opaque cookie.
6261 std_prod_idx
= tpr
->rx_std_prod_idx
;
6262 jmb_prod_idx
= tpr
->rx_jmb_prod_idx
;
6263 while (sw_idx
!= hw_idx
&& budget
> 0) {
6264 struct ring_info
*ri
;
6265 struct tg3_rx_buffer_desc
*desc
= &tnapi
->rx_rcb
[sw_idx
];
6267 struct sk_buff
*skb
;
6268 dma_addr_t dma_addr
;
6269 u32 opaque_key
, desc_idx
, *post_ptr
;
6273 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
6274 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
6275 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
6276 ri
= &tp
->napi
[0].prodring
.rx_std_buffers
[desc_idx
];
6277 dma_addr
= dma_unmap_addr(ri
, mapping
);
6279 post_ptr
= &std_prod_idx
;
6281 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
6282 ri
= &tp
->napi
[0].prodring
.rx_jmb_buffers
[desc_idx
];
6283 dma_addr
= dma_unmap_addr(ri
, mapping
);
6285 post_ptr
= &jmb_prod_idx
;
6287 goto next_pkt_nopost
;
6289 work_mask
|= opaque_key
;
6291 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
6292 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
6294 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6295 desc_idx
, *post_ptr
);
6297 /* Other statistics kept track of by card. */
6302 prefetch(data
+ TG3_RX_OFFSET(tp
));
6303 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) -
6306 if ((desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6307 RXD_FLAG_PTPSTAT_PTPV1
||
6308 (desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6309 RXD_FLAG_PTPSTAT_PTPV2
) {
6310 tstamp
= tr32(TG3_RX_TSTAMP_LSB
);
6311 tstamp
|= (u64
)tr32(TG3_RX_TSTAMP_MSB
) << 32;
6314 if (len
> TG3_RX_COPY_THRESH(tp
)) {
6316 unsigned int frag_size
;
6318 skb_size
= tg3_alloc_rx_data(tp
, tpr
, opaque_key
,
6319 *post_ptr
, &frag_size
);
6323 pci_unmap_single(tp
->pdev
, dma_addr
, skb_size
,
6324 PCI_DMA_FROMDEVICE
);
6326 skb
= build_skb(data
, frag_size
);
6328 tg3_frag_free(frag_size
!= 0, data
);
6329 goto drop_it_no_recycle
;
6331 skb_reserve(skb
, TG3_RX_OFFSET(tp
));
6332 /* Ensure that the update to the data happens
6333 * after the usage of the old DMA mapping.
6340 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6341 desc_idx
, *post_ptr
);
6343 skb
= netdev_alloc_skb(tp
->dev
,
6344 len
+ TG3_RAW_IP_ALIGN
);
6346 goto drop_it_no_recycle
;
6348 skb_reserve(skb
, TG3_RAW_IP_ALIGN
);
6349 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6351 data
+ TG3_RX_OFFSET(tp
),
6353 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6358 tg3_hwclock_to_timestamp(tp
, tstamp
,
6359 skb_hwtstamps(skb
));
6361 if ((tp
->dev
->features
& NETIF_F_RXCSUM
) &&
6362 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
6363 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
6364 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
6365 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
6367 skb_checksum_none_assert(skb
);
6369 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
6371 if (len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
6372 skb
->protocol
!= htons(ETH_P_8021Q
)) {
6374 goto drop_it_no_recycle
;
6377 if (desc
->type_flags
& RXD_FLAG_VLAN
&&
6378 !(tp
->rx_mode
& RX_MODE_KEEP_VLAN_TAG
))
6379 __vlan_hwaccel_put_tag(skb
,
6380 desc
->err_vlan
& RXD_VLAN_MASK
);
6382 napi_gro_receive(&tnapi
->napi
, skb
);
6390 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
6391 tpr
->rx_std_prod_idx
= std_prod_idx
&
6392 tp
->rx_std_ring_mask
;
6393 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6394 tpr
->rx_std_prod_idx
);
6395 work_mask
&= ~RXD_OPAQUE_RING_STD
;
6400 sw_idx
&= tp
->rx_ret_ring_mask
;
6402 /* Refresh hw_idx to see if there is new work */
6403 if (sw_idx
== hw_idx
) {
6404 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6409 /* ACK the status ring. */
6410 tnapi
->rx_rcb_ptr
= sw_idx
;
6411 tw32_rx_mbox(tnapi
->consmbox
, sw_idx
);
6413 /* Refill RX ring(s). */
6414 if (!tg3_flag(tp
, ENABLE_RSS
)) {
6415 /* Sync BD data before updating mailbox */
6418 if (work_mask
& RXD_OPAQUE_RING_STD
) {
6419 tpr
->rx_std_prod_idx
= std_prod_idx
&
6420 tp
->rx_std_ring_mask
;
6421 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6422 tpr
->rx_std_prod_idx
);
6424 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
6425 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
&
6426 tp
->rx_jmb_ring_mask
;
6427 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6428 tpr
->rx_jmb_prod_idx
);
6431 } else if (work_mask
) {
6432 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6433 * updated before the producer indices can be updated.
6437 tpr
->rx_std_prod_idx
= std_prod_idx
& tp
->rx_std_ring_mask
;
6438 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
& tp
->rx_jmb_ring_mask
;
6440 if (tnapi
!= &tp
->napi
[1]) {
6441 tp
->rx_refill
= true;
6442 napi_schedule(&tp
->napi
[1].napi
);
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6473 static int tg3_rx_prodring_xfer(struct tg3
*tp
,
6474 struct tg3_rx_prodring_set
*dpr
,
6475 struct tg3_rx_prodring_set
*spr
)
6477 u32 si
, di
, cpycnt
, src_prod_idx
;
6481 src_prod_idx
= spr
->rx_std_prod_idx
;
6483 /* Make sure updates to the rx_std_buffers[] entries and the
6484 * standard producer index are seen in the correct order.
6488 if (spr
->rx_std_cons_idx
== src_prod_idx
)
6491 if (spr
->rx_std_cons_idx
< src_prod_idx
)
6492 cpycnt
= src_prod_idx
- spr
->rx_std_cons_idx
;
6494 cpycnt
= tp
->rx_std_ring_mask
+ 1 -
6495 spr
->rx_std_cons_idx
;
6497 cpycnt
= min(cpycnt
,
6498 tp
->rx_std_ring_mask
+ 1 - dpr
->rx_std_prod_idx
);
6500 si
= spr
->rx_std_cons_idx
;
6501 di
= dpr
->rx_std_prod_idx
;
6503 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6504 if (dpr
->rx_std_buffers
[i
].data
) {
6514 /* Ensure that updates to the rx_std_buffers ring and the
6515 * shadowed hardware producer ring from tg3_recycle_skb() are
6516 * ordered correctly WRT the skb check above.
6520 memcpy(&dpr
->rx_std_buffers
[di
],
6521 &spr
->rx_std_buffers
[si
],
6522 cpycnt
* sizeof(struct ring_info
));
6524 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6525 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6526 sbd
= &spr
->rx_std
[si
];
6527 dbd
= &dpr
->rx_std
[di
];
6528 dbd
->addr_hi
= sbd
->addr_hi
;
6529 dbd
->addr_lo
= sbd
->addr_lo
;
6532 spr
->rx_std_cons_idx
= (spr
->rx_std_cons_idx
+ cpycnt
) &
6533 tp
->rx_std_ring_mask
;
6534 dpr
->rx_std_prod_idx
= (dpr
->rx_std_prod_idx
+ cpycnt
) &
6535 tp
->rx_std_ring_mask
;
6539 src_prod_idx
= spr
->rx_jmb_prod_idx
;
6541 /* Make sure updates to the rx_jmb_buffers[] entries and
6542 * the jumbo producer index are seen in the correct order.
6546 if (spr
->rx_jmb_cons_idx
== src_prod_idx
)
6549 if (spr
->rx_jmb_cons_idx
< src_prod_idx
)
6550 cpycnt
= src_prod_idx
- spr
->rx_jmb_cons_idx
;
6552 cpycnt
= tp
->rx_jmb_ring_mask
+ 1 -
6553 spr
->rx_jmb_cons_idx
;
6555 cpycnt
= min(cpycnt
,
6556 tp
->rx_jmb_ring_mask
+ 1 - dpr
->rx_jmb_prod_idx
);
6558 si
= spr
->rx_jmb_cons_idx
;
6559 di
= dpr
->rx_jmb_prod_idx
;
6561 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6562 if (dpr
->rx_jmb_buffers
[i
].data
) {
6572 /* Ensure that updates to the rx_jmb_buffers ring and the
6573 * shadowed hardware producer ring from tg3_recycle_skb() are
6574 * ordered correctly WRT the skb check above.
6578 memcpy(&dpr
->rx_jmb_buffers
[di
],
6579 &spr
->rx_jmb_buffers
[si
],
6580 cpycnt
* sizeof(struct ring_info
));
6582 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6583 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6584 sbd
= &spr
->rx_jmb
[si
].std
;
6585 dbd
= &dpr
->rx_jmb
[di
].std
;
6586 dbd
->addr_hi
= sbd
->addr_hi
;
6587 dbd
->addr_lo
= sbd
->addr_lo
;
6590 spr
->rx_jmb_cons_idx
= (spr
->rx_jmb_cons_idx
+ cpycnt
) &
6591 tp
->rx_jmb_ring_mask
;
6592 dpr
->rx_jmb_prod_idx
= (dpr
->rx_jmb_prod_idx
+ cpycnt
) &
6593 tp
->rx_jmb_ring_mask
;
6599 static int tg3_poll_work(struct tg3_napi
*tnapi
, int work_done
, int budget
)
6601 struct tg3
*tp
= tnapi
->tp
;
6603 /* run TX completion thread */
6604 if (tnapi
->hw_status
->idx
[0].tx_consumer
!= tnapi
->tx_cons
) {
6606 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
6610 if (!tnapi
->rx_rcb_prod_idx
)
6613 /* run RX thread, within the bounds set by NAPI.
6614 * All RX "locking" is done by ensuring outside
6615 * code synchronizes with tg3->napi.poll()
6617 if (*(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
6618 work_done
+= tg3_rx(tnapi
, budget
- work_done
);
6620 if (tg3_flag(tp
, ENABLE_RSS
) && tnapi
== &tp
->napi
[1]) {
6621 struct tg3_rx_prodring_set
*dpr
= &tp
->napi
[0].prodring
;
6623 u32 std_prod_idx
= dpr
->rx_std_prod_idx
;
6624 u32 jmb_prod_idx
= dpr
->rx_jmb_prod_idx
;
6626 tp
->rx_refill
= false;
6627 for (i
= 1; i
<= tp
->rxq_cnt
; i
++)
6628 err
|= tg3_rx_prodring_xfer(tp
, dpr
,
6629 &tp
->napi
[i
].prodring
);
6633 if (std_prod_idx
!= dpr
->rx_std_prod_idx
)
6634 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6635 dpr
->rx_std_prod_idx
);
6637 if (jmb_prod_idx
!= dpr
->rx_jmb_prod_idx
)
6638 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6639 dpr
->rx_jmb_prod_idx
);
6644 tw32_f(HOSTCC_MODE
, tp
->coal_now
);
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6663 static int tg3_poll_msix(struct napi_struct
*napi
, int budget
)
6665 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
6666 struct tg3
*tp
= tnapi
->tp
;
6668 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6671 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
6673 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
6676 if (unlikely(work_done
>= budget
))
6679 /* tp->last_tag is used in tg3_int_reenable() below
6680 * to tell the hw how much work has been processed,
6681 * so we must read it before checking for more work.
6683 tnapi
->last_tag
= sblk
->status_tag
;
6684 tnapi
->last_irq_tag
= tnapi
->last_tag
;
6687 /* check for RX/TX work to do */
6688 if (likely(sblk
->idx
[0].tx_consumer
== tnapi
->tx_cons
&&
6689 *(tnapi
->rx_rcb_prod_idx
) == tnapi
->rx_rcb_ptr
)) {
6691 /* This test here is not race free, but will reduce
6692 * the number of interrupts by looping again.
6694 if (tnapi
== &tp
->napi
[1] && tp
->rx_refill
)
6697 napi_complete(napi
);
6698 /* Reenable interrupts. */
6699 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
6701 /* This test here is synchronized by napi_schedule()
6702 * and napi_complete() to close the race condition.
6704 if (unlikely(tnapi
== &tp
->napi
[1] && tp
->rx_refill
)) {
6705 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
6706 HOSTCC_MODE_ENABLE
|
6717 /* work_done is guaranteed to be less than budget. */
6718 napi_complete(napi
);
6719 tg3_reset_task_schedule(tp
);
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
6757 static int tg3_poll(struct napi_struct
*napi
, int budget
)
6759 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
6760 struct tg3
*tp
= tnapi
->tp
;
6762 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6765 if (sblk
->status
& SD_STATUS_ERROR
)
6766 tg3_process_error(tp
);
6770 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
6772 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
6775 if (unlikely(work_done
>= budget
))
6778 if (tg3_flag(tp
, TAGGED_STATUS
)) {
6779 /* tp->last_tag is used in tg3_int_reenable() below
6780 * to tell the hw how much work has been processed,
6781 * so we must read it before checking for more work.
6783 tnapi
->last_tag
= sblk
->status_tag
;
6784 tnapi
->last_irq_tag
= tnapi
->last_tag
;
6787 sblk
->status
&= ~SD_STATUS_UPDATED
;
6789 if (likely(!tg3_has_work(tnapi
))) {
6790 napi_complete(napi
);
6791 tg3_int_reenable(tnapi
);
6799 /* work_done is guaranteed to be less than budget. */
6800 napi_complete(napi
);
6801 tg3_reset_task_schedule(tp
);
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
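
/* Worked example for the 4GB test above (illustrative): with
 * mapping == 0xffffff00 and len == 0x200, base + len + 8 wraps past
 * 2^32 and truncates to 0x00000108, which is less than base, so
 * tg3_4g_overflow_test() returns true and the buffer must be handled
 * by the DMA-bug workaround path instead of being posted as-is.
 */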
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
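
/* Layout note (derived from the helper above): a send BD carries the
 * 64-bit DMA address split across addr_hi/addr_lo, the length in the
 * upper half of len_flags with the TXD_FLAG_* bits in the lower half,
 * and the TSO MSS and VLAN tag packed together into vlan_tag.
 */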
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	if (!skb)
		return;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	for (; i < tp->txq_cnt; i++) {
		u32 reg;

		reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->tx_coalesce_usecs);
		reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames);
		reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
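
/* Mapping note: the ethtool_coalesce fields written above are the ones
 * user space sets with "ethtool -C" (rx-usecs, rx-frames, tx-usecs,
 * tx-frames, their *-irq variants and stats-block-usecs); each maps
 * one-to-one onto a HOSTCC_* host coalescing register, with the VEC1
 * register blocks repeating every 0x18 bytes for the extra MSI-X vectors.
 */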
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		rxrcb += TG3_BDINFO_SIZE;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
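
/* The four MAC_HASH_REG_* registers above form a single 128-bit
 * multicast hash filter.  __tg3_set_rx_mode() below indexes it with a
 * hash derived from the CRC-32 of each multicast address: two bits of
 * the hash select the register and the low five bits select the bit
 * within it.
 */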
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
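
/* Packing note for tg3_rss_write_indir_tbl(): the indirection table
 * appears to pack eight consecutive ring indexes (4 bits each) into
 * every 32-bit MAC_RSS_INDIR_TBL_* register, which is what the "i % 8"
 * inner loop above assembles before the value is written to the chip.
 */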
9087 /* tp->lock is held. */
9088 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
9090 u32 val
, rdmac_mode
;
9092 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
9094 tg3_disable_ints(tp
);
9098 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
9100 if (tg3_flag(tp
, INIT_COMPLETE
))
9101 tg3_abort_hw(tp
, 1);
9103 /* Enable MAC control of LPI */
9104 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
9105 val
= TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
9106 TG3_CPMU_EEE_LNKIDL_UART_IDL
;
9107 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
)
9108 val
|= TG3_CPMU_EEE_LNKIDL_APE_TX_MT
;
9110 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
, val
);
9112 tw32_f(TG3_CPMU_EEE_CTRL
,
9113 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
9115 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
9116 TG3_CPMU_EEEMD_LPI_IN_TX
|
9117 TG3_CPMU_EEEMD_LPI_IN_RX
|
9118 TG3_CPMU_EEEMD_EEE_ENABLE
;
9120 if (tg3_asic_rev(tp
) != ASIC_REV_5717
)
9121 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
9123 if (tg3_flag(tp
, ENABLE_APE
))
9124 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
9126 tw32_f(TG3_CPMU_EEE_MODE
, val
);
9128 tw32_f(TG3_CPMU_EEE_DBTMR1
,
9129 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
9130 TG3_CPMU_DBTMR1_LNKIDLE_2047US
);
9132 tw32_f(TG3_CPMU_EEE_DBTMR2
,
9133 TG3_CPMU_DBTMR2_APE_TX_2047US
|
9134 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
9140 err
= tg3_chip_reset(tp
);
9144 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
9146 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
) {
9147 val
= tr32(TG3_CPMU_CTRL
);
9148 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
9149 tw32(TG3_CPMU_CTRL
, val
);
9151 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
9152 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
9153 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
9154 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
9156 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
9157 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
9158 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
9159 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
9161 val
= tr32(TG3_CPMU_HST_ACC
);
9162 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
9163 val
|= CPMU_HST_ACC_MACCLK_6_25
;
9164 tw32(TG3_CPMU_HST_ACC
, val
);
9167 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
9168 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
9169 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
9170 PCIE_PWR_MGMT_L1_THRESH_4MS
;
9171 tw32(PCIE_PWR_MGMT_THRESH
, val
);
9173 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
9174 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
9176 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
9178 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
9179 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
9182 if (tg3_flag(tp
, L1PLLPD_EN
)) {
9183 u32 grc_mode
= tr32(GRC_MODE
);
9185 /* Access the lower 1K of PL PCIE block registers. */
9186 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9187 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
9189 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
9190 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
9191 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
9193 tw32(GRC_MODE
, grc_mode
);
	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);
	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_RDMA_BUG);
		}
	}
	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);
	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:
	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
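/* Note (added commentary): the attributes above expose APE scratchpad sensor
 * records through hwmon.  tg3_hwmon_open()/tg3_hwmon_close() below only add
 * the attribute group and register a hwmon device when the scratchpad scan
 * (tg3_sd_scan_scratchpad()) actually finds data.
 */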
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
	}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
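/* Added note: TG3_STAT_ADD32() folds a 32-bit hardware statistics register
 * into a {high,low} 64-bit software counter, carrying into the high word
 * when the low word overflows.  For example, if (PSTAT)->low is 0xfffffff0
 * and the register reads 0x20, low wraps to 0x10 (which is < 0x20), so
 * high is incremented by one.
 */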
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
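/* Added note: the hardware statistics block keeps each counter as a
 * {high,low} pair of 32-bit words (tg3_stat64_t); get_stat64() below simply
 * folds such a pair into a plain u64 for the stats helpers that follow.
 */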
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}

static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}

#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
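/* Added note: ESTAT_ADD() reports each ethtool counter as the previously
 * saved snapshot in tp->estats_prev plus the live hardware counter, so
 * totals carry across chip resets while the interface stays up.  tg3_close()
 * above zeroes the snapshot, which is why counters restart after a
 * down/up cycle.
 */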
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}

static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}

static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}

static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
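/* Added note: the bounds check in tg3_set_ringparam() above rejects a tx ring
 * smaller than MAX_SKB_FRAGS (or three times that on TSO_BUG hardware)
 * because a single maximally-fragmented skb must always fit in the ring; the
 * rx limits come straight from the hardware ring masks rather than from
 * driver policy.
 */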
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}

static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
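/* Added note: the rx/tx pause requests above map onto the 802.3 pause
 * advertisement bits as follows - rx+tx -> Pause, rx only -> Pause plus
 * Asym_Pause, tx only -> Asym_Pause, neither -> no pause bits advertised.
 */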
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}

static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
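/* Added note: each rss_ind_tbl entry names the rx return ring a hashed flow
 * is steered to, so rewriting the table via tg3_rss_write_indir_tbl() simply
 * repoints flows; no chip reset is needed, which is why the update above is
 * allowed while the interface is up.
 */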
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}

static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}

static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}

#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			}

			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
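/* Added note: in the "HW" self-boot image checked above, bytes 0, 8 and 16
 * appear to carry packed parity bits for the data bytes that follow them;
 * the loop unpacks those bits into parity[] and the final check requires
 * that each data byte together with its parity bit has odd overall weight
 * (hweight8), rejecting the image otherwise.
 */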
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705, 0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705, 0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705, 0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705, 0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000, 0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000, 0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000, 0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000, 0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000, 0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705, 0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705, 0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000, 0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000, 0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000, 0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000, 0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000, 0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000, 0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000, 0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000, 0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705, 0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705, 0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705, 0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705, 0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705, 0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, 0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, 0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000, 0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
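/* Added note: the three patterns above (all zeros, all ones, and the
 * alternating word 0xaa55a55a) are a conventional choice for catching
 * stuck-at bits as well as simple coupling faults between neighbouring
 * cells in the on-chip SRAM regions being exercised.
 */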
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
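/* Added note: the canned header above looks like an IPv4 EtherType followed
 * by a 20-byte IP header (10.0.0.1 -> 10.0.0.2, proto TCP) and a 32-byte TCP
 * header carrying a timestamp option; the loopback test patches the IP
 * total-length field (and, for hardware-TSO parts, zeroes the TCP checksum)
 * at run time before handing the frame to the TSO engine.
 */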
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
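/* Added note: the loopback frame built above is addressed to the NIC's own
 * MAC (its address occupies the first six bytes) and is padded with an
 * incrementing byte pattern, so the receive side can verify both that the
 * frame came back on the expected producer ring and that no payload byte
 * was altered on the way through the MAC or PHY loop.
 */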
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)

static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EINVAL;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
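/* Added note: userspace reaches the handler above through the standard
 * SIOCSHWTSTAMP ioctl.  A rough usage sketch (not from the original sources):
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *   strcpy(ifr.ifr_name, "eth0");
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Filters the hardware cannot honour are rejected with -ERANGE rather than
 * being silently widened to a broader filter.
 */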
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
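/* Added note: these limits are what "ethtool -C" runs into in practice; for
 * example (sketch) "ethtool -C eth0 rx-usecs 0 rx-frames 0" is rejected above
 * because a device with both rx thresholds at zero would never raise an rx
 * completion interrupt.
 */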
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
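/* Added note: tg3_get_eeprom_size() above relies on address wrap-around -
 * reads past the end of a small EEPROM alias back to offset 0, so the first
 * power-of-two offset at which the magic signature reappears is the size of
 * the part.
 */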
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}

static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
13615 static void tg3_get_5761_nvram_info(struct tg3
*tp
)
13617 u32 nvcfg1
, protect
= 0;
13619 nvcfg1
= tr32(NVRAM_CFG1
);
13621 /* NVRAM protection for TPM */
13622 if (nvcfg1
& (1 << 27)) {
13623 tg3_flag_set(tp
, PROTECTED_NVRAM
);
13627 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
13629 case FLASH_5761VENDOR_ATMEL_ADB021D
:
13630 case FLASH_5761VENDOR_ATMEL_ADB041D
:
13631 case FLASH_5761VENDOR_ATMEL_ADB081D
:
13632 case FLASH_5761VENDOR_ATMEL_ADB161D
:
13633 case FLASH_5761VENDOR_ATMEL_MDB021D
:
13634 case FLASH_5761VENDOR_ATMEL_MDB041D
:
13635 case FLASH_5761VENDOR_ATMEL_MDB081D
:
13636 case FLASH_5761VENDOR_ATMEL_MDB161D
:
13637 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13638 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13639 tg3_flag_set(tp
, FLASH
);
13640 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13641 tp
->nvram_pagesize
= 256;
13643 case FLASH_5761VENDOR_ST_A_M45PE20
:
13644 case FLASH_5761VENDOR_ST_A_M45PE40
:
13645 case FLASH_5761VENDOR_ST_A_M45PE80
:
13646 case FLASH_5761VENDOR_ST_A_M45PE16
:
13647 case FLASH_5761VENDOR_ST_M_M45PE20
:
13648 case FLASH_5761VENDOR_ST_M_M45PE40
:
13649 case FLASH_5761VENDOR_ST_M_M45PE80
:
13650 case FLASH_5761VENDOR_ST_M_M45PE16
:
13651 tp
->nvram_jedecnum
= JEDEC_ST
;
13652 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13653 tg3_flag_set(tp
, FLASH
);
13654 tp
->nvram_pagesize
= 256;
13659 tp
->nvram_size
= tr32(NVRAM_ADDR_LOCKOUT
);
13662 case FLASH_5761VENDOR_ATMEL_ADB161D
:
13663 case FLASH_5761VENDOR_ATMEL_MDB161D
:
13664 case FLASH_5761VENDOR_ST_A_M45PE16
:
13665 case FLASH_5761VENDOR_ST_M_M45PE16
:
13666 tp
->nvram_size
= TG3_NVRAM_SIZE_2MB
;
13668 case FLASH_5761VENDOR_ATMEL_ADB081D
:
13669 case FLASH_5761VENDOR_ATMEL_MDB081D
:
13670 case FLASH_5761VENDOR_ST_A_M45PE80
:
13671 case FLASH_5761VENDOR_ST_M_M45PE80
:
13672 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
13674 case FLASH_5761VENDOR_ATMEL_ADB041D
:
13675 case FLASH_5761VENDOR_ATMEL_MDB041D
:
13676 case FLASH_5761VENDOR_ST_A_M45PE40
:
13677 case FLASH_5761VENDOR_ST_M_M45PE40
:
13678 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13680 case FLASH_5761VENDOR_ATMEL_ADB021D
:
13681 case FLASH_5761VENDOR_ATMEL_MDB021D
:
13682 case FLASH_5761VENDOR_ST_A_M45PE20
:
13683 case FLASH_5761VENDOR_ST_M_M45PE20
:
13684 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
13697 static void tg3_get_57780_nvram_info(struct tg3
*tp
)
13701 nvcfg1
= tr32(NVRAM_CFG1
);
13703 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13704 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
13705 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
13706 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13707 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13708 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13710 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13711 tw32(NVRAM_CFG1
, nvcfg1
);
13713 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
13714 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
13715 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
13716 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
13717 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
13718 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
13719 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
13720 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13721 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13722 tg3_flag_set(tp
, FLASH
);
13724 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13725 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
13726 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
13727 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
13728 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13730 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
13731 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
13732 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13734 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
13735 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
13736 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13740 case FLASH_5752VENDOR_ST_M45PE10
:
13741 case FLASH_5752VENDOR_ST_M45PE20
:
13742 case FLASH_5752VENDOR_ST_M45PE40
:
13743 tp
->nvram_jedecnum
= JEDEC_ST
;
13744 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13745 tg3_flag_set(tp
, FLASH
);
13747 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13748 case FLASH_5752VENDOR_ST_M45PE10
:
13749 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13751 case FLASH_5752VENDOR_ST_M45PE20
:
13752 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13754 case FLASH_5752VENDOR_ST_M45PE40
:
13755 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13760 tg3_flag_set(tp
, NO_NVRAM
);
13764 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13765 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
13766 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13770 static void tg3_get_5717_nvram_info(struct tg3
*tp
)
13774 nvcfg1
= tr32(NVRAM_CFG1
);
13776 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13777 case FLASH_5717VENDOR_ATMEL_EEPROM
:
13778 case FLASH_5717VENDOR_MICRO_EEPROM
:
13779 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13780 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13781 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13783 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13784 tw32(NVRAM_CFG1
, nvcfg1
);
13786 case FLASH_5717VENDOR_ATMEL_MDB011D
:
13787 case FLASH_5717VENDOR_ATMEL_ADB011B
:
13788 case FLASH_5717VENDOR_ATMEL_ADB011D
:
13789 case FLASH_5717VENDOR_ATMEL_MDB021D
:
13790 case FLASH_5717VENDOR_ATMEL_ADB021B
:
13791 case FLASH_5717VENDOR_ATMEL_ADB021D
:
13792 case FLASH_5717VENDOR_ATMEL_45USPT
:
13793 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13794 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13795 tg3_flag_set(tp
, FLASH
);
13797 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13798 case FLASH_5717VENDOR_ATMEL_MDB021D
:
13799 /* Detect size with tg3_nvram_get_size() */
13801 case FLASH_5717VENDOR_ATMEL_ADB021B
:
13802 case FLASH_5717VENDOR_ATMEL_ADB021D
:
13803 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13806 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13810 case FLASH_5717VENDOR_ST_M_M25PE10
:
13811 case FLASH_5717VENDOR_ST_A_M25PE10
:
13812 case FLASH_5717VENDOR_ST_M_M45PE10
:
13813 case FLASH_5717VENDOR_ST_A_M45PE10
:
13814 case FLASH_5717VENDOR_ST_M_M25PE20
:
13815 case FLASH_5717VENDOR_ST_A_M25PE20
:
13816 case FLASH_5717VENDOR_ST_M_M45PE20
:
13817 case FLASH_5717VENDOR_ST_A_M45PE20
:
13818 case FLASH_5717VENDOR_ST_25USPT
:
13819 case FLASH_5717VENDOR_ST_45USPT
:
13820 tp
->nvram_jedecnum
= JEDEC_ST
;
13821 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13822 tg3_flag_set(tp
, FLASH
);
13824 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13825 case FLASH_5717VENDOR_ST_M_M25PE20
:
13826 case FLASH_5717VENDOR_ST_M_M45PE20
:
13827 /* Detect size with tg3_nvram_get_size() */
13829 case FLASH_5717VENDOR_ST_A_M25PE20
:
13830 case FLASH_5717VENDOR_ST_A_M45PE20
:
13831 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13834 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13839 tg3_flag_set(tp
, NO_NVRAM
);
13843 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13844 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
13845 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13848 static void tg3_get_5720_nvram_info(struct tg3
*tp
)
13850 u32 nvcfg1
, nvmpinstrp
;
13852 nvcfg1
= tr32(NVRAM_CFG1
);
13853 nvmpinstrp
= nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
;
13855 if (tg3_asic_rev(tp
) == ASIC_REV_5762
) {
13856 if (!(nvcfg1
& NVRAM_CFG1_5762VENDOR_MASK
)) {
13857 tg3_flag_set(tp
, NO_NVRAM
);
13861 switch (nvmpinstrp
) {
13862 case FLASH_5762_EEPROM_HD
:
13863 nvmpinstrp
= FLASH_5720_EEPROM_HD
;
13865 case FLASH_5762_EEPROM_LD
:
13866 nvmpinstrp
= FLASH_5720_EEPROM_LD
;
13871 switch (nvmpinstrp
) {
13872 case FLASH_5720_EEPROM_HD
:
13873 case FLASH_5720_EEPROM_LD
:
13874 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13875 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13877 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13878 tw32(NVRAM_CFG1
, nvcfg1
);
13879 if (nvmpinstrp
== FLASH_5720_EEPROM_HD
)
13880 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13882 tp
->nvram_pagesize
= ATMEL_AT24C02_CHIP_SIZE
;
13884 case FLASH_5720VENDOR_M_ATMEL_DB011D
:
13885 case FLASH_5720VENDOR_A_ATMEL_DB011B
:
13886 case FLASH_5720VENDOR_A_ATMEL_DB011D
:
13887 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
13888 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
13889 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
13890 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
13891 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
13892 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
13893 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
13894 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
13895 case FLASH_5720VENDOR_ATMEL_45USPT
:
13896 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13897 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13898 tg3_flag_set(tp
, FLASH
);
13900 switch (nvmpinstrp
) {
13901 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
13902 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
13903 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
13904 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13906 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
13907 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
13908 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
13909 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13911 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
13912 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
13913 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
13916 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
13917 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13921 case FLASH_5720VENDOR_M_ST_M25PE10
:
13922 case FLASH_5720VENDOR_M_ST_M45PE10
:
13923 case FLASH_5720VENDOR_A_ST_M25PE10
:
13924 case FLASH_5720VENDOR_A_ST_M45PE10
:
13925 case FLASH_5720VENDOR_M_ST_M25PE20
:
13926 case FLASH_5720VENDOR_M_ST_M45PE20
:
13927 case FLASH_5720VENDOR_A_ST_M25PE20
:
13928 case FLASH_5720VENDOR_A_ST_M45PE20
:
13929 case FLASH_5720VENDOR_M_ST_M25PE40
:
13930 case FLASH_5720VENDOR_M_ST_M45PE40
:
13931 case FLASH_5720VENDOR_A_ST_M25PE40
:
13932 case FLASH_5720VENDOR_A_ST_M45PE40
:
13933 case FLASH_5720VENDOR_M_ST_M25PE80
:
13934 case FLASH_5720VENDOR_M_ST_M45PE80
:
13935 case FLASH_5720VENDOR_A_ST_M25PE80
:
13936 case FLASH_5720VENDOR_A_ST_M45PE80
:
13937 case FLASH_5720VENDOR_ST_25USPT
:
13938 case FLASH_5720VENDOR_ST_45USPT
:
13939 tp
->nvram_jedecnum
= JEDEC_ST
;
13940 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13941 tg3_flag_set(tp
, FLASH
);
13943 switch (nvmpinstrp
) {
13944 case FLASH_5720VENDOR_M_ST_M25PE20
:
13945 case FLASH_5720VENDOR_M_ST_M45PE20
:
13946 case FLASH_5720VENDOR_A_ST_M25PE20
:
13947 case FLASH_5720VENDOR_A_ST_M45PE20
:
13948 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13950 case FLASH_5720VENDOR_M_ST_M25PE40
:
13951 case FLASH_5720VENDOR_M_ST_M45PE40
:
13952 case FLASH_5720VENDOR_A_ST_M25PE40
:
13953 case FLASH_5720VENDOR_A_ST_M45PE40
:
13954 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13956 case FLASH_5720VENDOR_M_ST_M25PE80
:
13957 case FLASH_5720VENDOR_M_ST_M45PE80
:
13958 case FLASH_5720VENDOR_A_ST_M25PE80
:
13959 case FLASH_5720VENDOR_A_ST_M45PE80
:
13960 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
13963 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
13964 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13969 tg3_flag_set(tp
, NO_NVRAM
);
13973 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13974 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
13975 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13977 if (tg3_asic_rev(tp
) == ASIC_REV_5762
) {
13980 if (tg3_nvram_read(tp
, 0, &val
))
13983 if (val
!= TG3_EEPROM_MAGIC
&&
13984 (val
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
)
13985 tg3_flag_set(tp
, NO_NVRAM
);
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
14147 static void tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
14151 tp
->phy_id
= TG3_PHY_ID_INVALID
;
14152 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14154 /* Assume an onboard device and WOL capable by default. */
14155 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
14156 tg3_flag_set(tp
, WOL_CAP
);
14158 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
14159 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
14160 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14161 tg3_flag_set(tp
, IS_NIC
);
14163 val
= tr32(VCPU_CFGSHDW
);
14164 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
14165 tg3_flag_set(tp
, ASPM_WORKAROUND
);
14166 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
14167 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
14168 tg3_flag_set(tp
, WOL_ENABLE
);
14169 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
14174 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
14175 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
14176 u32 nic_cfg
, led_cfg
;
14177 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
14178 int eeprom_phy_serdes
= 0;
14180 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
14181 tp
->nic_sram_data_cfg
= nic_cfg
;
14183 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
14184 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
14185 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
14186 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
14187 tg3_asic_rev(tp
) != ASIC_REV_5703
&&
14188 (ver
> 0) && (ver
< 0x100))
14189 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
14191 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
14192 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
14194 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
14195 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
14196 eeprom_phy_serdes
= 1;
14198 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
14199 if (nic_phy_id
!= 0) {
14200 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
14201 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
14203 eeprom_phy_id
= (id1
>> 16) << 10;
14204 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
14205 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
14209 tp
->phy_id
= eeprom_phy_id
;
14210 if (eeprom_phy_serdes
) {
14211 if (!tg3_flag(tp
, 5705_PLUS
))
14212 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
14214 tp
->phy_flags
|= TG3_PHYFLG_MII_SERDES
;
14217 if (tg3_flag(tp
, 5750_PLUS
))
14218 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
14219 SHASTA_EXT_LED_MODE_MASK
);
14221 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
14225 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
14226 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14229 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
14230 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14233 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
14234 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
14236 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14237 * read on some older 5700/5701 bootcode.
14239 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
14240 tg3_asic_rev(tp
) == ASIC_REV_5701
)
14241 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14245 case SHASTA_EXT_LED_SHARED
:
14246 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
14247 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
&&
14248 tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A1
)
14249 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14250 LED_CTRL_MODE_PHY_2
);
14253 case SHASTA_EXT_LED_MAC
:
14254 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
14257 case SHASTA_EXT_LED_COMBO
:
14258 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
14259 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
)
14260 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14261 LED_CTRL_MODE_PHY_2
);
14266 if ((tg3_asic_rev(tp
) == ASIC_REV_5700
||
14267 tg3_asic_rev(tp
) == ASIC_REV_5701
) &&
14268 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
14269 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14271 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
)
14272 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14274 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
) {
14275 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
14276 if ((tp
->pdev
->subsystem_vendor
==
14277 PCI_VENDOR_ID_ARIMA
) &&
14278 (tp
->pdev
->subsystem_device
== 0x205a ||
14279 tp
->pdev
->subsystem_device
== 0x2063))
14280 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14282 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14283 tg3_flag_set(tp
, IS_NIC
);
14286 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
14287 tg3_flag_set(tp
, ENABLE_ASF
);
14288 if (tg3_flag(tp
, 5750_PLUS
))
14289 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
14292 if ((nic_cfg
& NIC_SRAM_DATA_CFG_APE_ENABLE
) &&
14293 tg3_flag(tp
, 5750_PLUS
))
14294 tg3_flag_set(tp
, ENABLE_APE
);
14296 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
&&
14297 !(nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
))
14298 tg3_flag_clear(tp
, WOL_CAP
);
14300 if (tg3_flag(tp
, WOL_CAP
) &&
14301 (nic_cfg
& NIC_SRAM_DATA_CFG_WOL_ENABLE
)) {
14302 tg3_flag_set(tp
, WOL_ENABLE
);
14303 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
14306 if (cfg2
& (1 << 17))
14307 tp
->phy_flags
|= TG3_PHYFLG_CAPACITIVE_COUPLING
;
14309 /* serdes signal pre-emphasis in register 0x590 set by */
14310 /* bootcode if bit 18 is set */
14311 if (cfg2
& (1 << 18))
14312 tp
->phy_flags
|= TG3_PHYFLG_SERDES_PREEMPHASIS
;
14314 if ((tg3_flag(tp
, 57765_PLUS
) ||
14315 (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
14316 tg3_chip_rev(tp
) != CHIPREV_5784_AX
)) &&
14317 (cfg2
& NIC_SRAM_DATA_CFG_2_APD_EN
))
14318 tp
->phy_flags
|= TG3_PHYFLG_ENABLE_APD
;
14320 if (tg3_flag(tp
, PCI_EXPRESS
) &&
14321 tg3_asic_rev(tp
) != ASIC_REV_5785
&&
14322 !tg3_flag(tp
, 57765_PLUS
)) {
14325 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &cfg3
);
14326 if (cfg3
& NIC_SRAM_ASPM_DEBOUNCE
)
14327 tg3_flag_set(tp
, ASPM_WORKAROUND
);
14330 if (cfg4
& NIC_SRAM_RGMII_INBAND_DISABLE
)
14331 tg3_flag_set(tp
, RGMII_INBAND_DISABLE
);
14332 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_RX_EN
)
14333 tg3_flag_set(tp
, RGMII_EXT_IBND_RX_EN
);
14334 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_TX_EN
)
14335 tg3_flag_set(tp
, RGMII_EXT_IBND_TX_EN
);
14338 if (tg3_flag(tp
, WOL_CAP
))
14339 device_set_wakeup_enable(&tp
->pdev
->dev
,
14340 tg3_flag(tp
, WOL_ENABLE
));
14342 device_set_wakeup_capable(&tp
->pdev
->dev
, false);
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(100);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
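/* Timing note (added commentary): the poll loop above checks OTP_STATUS up
 * to 100 times with a 10 usec delay between reads, which is where the
 * "up to 1 ms" budget in the comment comes from.
 */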
/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
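/* Worked example (added commentary, values are illustrative only): if the
 * first read returns thalf_otp = 0x1234abcd and the second returns
 * bhalf_otp = 0x5678ef01, the merged gphy config is
 * ((0x1234abcd & 0x0000ffff) << 16) | (0x5678ef01 >> 16) = 0xabcd5678.
 */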
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
}
14454 static int tg3_phy_probe(struct tg3
*tp
)
14456 u32 hw_phy_id_1
, hw_phy_id_2
;
14457 u32 hw_phy_id
, hw_phy_id_masked
;
14460 /* flow control autonegotiation is default behavior */
14461 tg3_flag_set(tp
, PAUSE_AUTONEG
);
14462 tp
->link_config
.flowctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
14464 if (tg3_flag(tp
, ENABLE_APE
)) {
14465 switch (tp
->pci_fn
) {
14467 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY0
;
14470 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY1
;
14473 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY2
;
14476 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY3
;
14481 if (tg3_flag(tp
, USE_PHYLIB
))
14482 return tg3_phy_init(tp
);
14484 /* Reading the PHY ID register can conflict with ASF
14485 * firmware access to the PHY hardware.
14488 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)) {
14489 hw_phy_id
= hw_phy_id_masked
= TG3_PHY_ID_INVALID
;
14491 /* Now read the physical PHY_ID from the chip and verify
14492 * that it is sane. If it doesn't look good, we fall back
14493 * to either the hard-coded table based PHY_ID and failing
14494 * that the value found in the eeprom area.
14496 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
14497 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
14499 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
14500 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
14501 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
14503 hw_phy_id_masked
= hw_phy_id
& TG3_PHY_ID_MASK
;
14506 if (!err
&& TG3_KNOWN_PHY_ID(hw_phy_id_masked
)) {
14507 tp
->phy_id
= hw_phy_id
;
14508 if (hw_phy_id_masked
== TG3_PHY_ID_BCM8002
)
14509 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
14511 tp
->phy_flags
&= ~TG3_PHYFLG_PHY_SERDES
;
14513 if (tp
->phy_id
!= TG3_PHY_ID_INVALID
) {
14514 /* Do nothing, phy ID already set up in
14515 * tg3_get_eeprom_hw_cfg().
14518 struct subsys_tbl_ent
*p
;
14520 /* No eeprom signature? Try the hardcoded
14521 * subsys device table.
14523 p
= tg3_lookup_by_subsys(tp
);
14525 tp
->phy_id
= p
->phy_id
;
14526 } else if (!tg3_flag(tp
, IS_SSB_CORE
)) {
14527 /* For now we saw the IDs 0xbc050cd0,
14528 * 0xbc050f80 and 0xbc050c30 on devices
14529 * connected to an BCM4785 and there are
14530 * probably more. Just assume that the phy is
14531 * supported when it is connected to a SSB core
14538 tp
->phy_id
== TG3_PHY_ID_BCM8002
)
14539 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
14543 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
14544 (tg3_asic_rev(tp
) == ASIC_REV_5719
||
14545 tg3_asic_rev(tp
) == ASIC_REV_5720
||
14546 tg3_asic_rev(tp
) == ASIC_REV_5762
||
14547 (tg3_asic_rev(tp
) == ASIC_REV_5717
&&
14548 tg3_chip_rev_id(tp
) != CHIPREV_ID_5717_A0
) ||
14549 (tg3_asic_rev(tp
) == ASIC_REV_57765
&&
14550 tg3_chip_rev_id(tp
) != CHIPREV_ID_57765_A0
)))
14551 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
14553 tg3_phy_init_link_config(tp
);
14555 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
14556 !tg3_flag(tp
, ENABLE_APE
) &&
14557 !tg3_flag(tp
, ENABLE_ASF
)) {
14560 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
14561 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
14562 (bmsr
& BMSR_LSTATUS
))
14563 goto skip_phy_reset
;
14565 err
= tg3_phy_reset(tp
);
14569 tg3_phy_set_wirespeed(tp
);
14571 if (!tg3_phy_copper_an_config_ok(tp
, &dummy
)) {
14572 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
14573 tp
->link_config
.flowctrl
);
14575 tg3_writephy(tp
, MII_BMCR
,
14576 BMCR_ANENABLE
| BMCR_ANRESTART
);
14581 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
14582 err
= tg3_init_5401phy_dsp(tp
);
14586 err
= tg3_init_5401phy_dsp(tp
);
14592 static void tg3_read_vpd(struct tg3
*tp
)
14595 unsigned int block_end
, rosize
, len
;
14599 vpd_data
= (u8
*)tg3_vpd_readblock(tp
, &vpdlen
);
14603 i
= pci_vpd_find_tag(vpd_data
, 0, vpdlen
, PCI_VPD_LRDT_RO_DATA
);
14605 goto out_not_found
;
14607 rosize
= pci_vpd_lrdt_size(&vpd_data
[i
]);
14608 block_end
= i
+ PCI_VPD_LRDT_TAG_SIZE
+ rosize
;
14609 i
+= PCI_VPD_LRDT_TAG_SIZE
;
14611 if (block_end
> vpdlen
)
14612 goto out_not_found
;
14614 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
14615 PCI_VPD_RO_KEYWORD_MFR_ID
);
14617 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
14619 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
14620 if (j
+ len
> block_end
|| len
!= 4 ||
14621 memcmp(&vpd_data
[j
], "1028", 4))
14624 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
14625 PCI_VPD_RO_KEYWORD_VENDOR0
);
14629 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
14631 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
14632 if (j
+ len
> block_end
)
14635 memcpy(tp
->fw_ver
, &vpd_data
[j
], len
);
14636 strncat(tp
->fw_ver
, " bc ", vpdlen
- len
- 1);
14640 i
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
14641 PCI_VPD_RO_KEYWORD_PARTNO
);
14643 goto out_not_found
;
14645 len
= pci_vpd_info_field_size(&vpd_data
[i
]);
14647 i
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
14648 if (len
> TG3_BPN_SIZE
||
14649 (len
+ i
) > vpdlen
)
14650 goto out_not_found
;
14652 memcpy(tp
->board_part_number
, &vpd_data
[i
], len
);
14656 if (tp
->board_part_number
[0])
14660 if (tg3_asic_rev(tp
) == ASIC_REV_5717
) {
14661 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
14662 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
)
14663 strcpy(tp
->board_part_number
, "BCM5717");
14664 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
)
14665 strcpy(tp
->board_part_number
, "BCM5718");
14668 } else if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
14669 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57780
)
14670 strcpy(tp
->board_part_number
, "BCM57780");
14671 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57760
)
14672 strcpy(tp
->board_part_number
, "BCM57760");
14673 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57790
)
14674 strcpy(tp
->board_part_number
, "BCM57790");
14675 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57788
)
14676 strcpy(tp
->board_part_number
, "BCM57788");
14679 } else if (tg3_asic_rev(tp
) == ASIC_REV_57765
) {
14680 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
)
14681 strcpy(tp
->board_part_number
, "BCM57761");
14682 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
)
14683 strcpy(tp
->board_part_number
, "BCM57765");
14684 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
)
14685 strcpy(tp
->board_part_number
, "BCM57781");
14686 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
)
14687 strcpy(tp
->board_part_number
, "BCM57785");
14688 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
)
14689 strcpy(tp
->board_part_number
, "BCM57791");
14690 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
)
14691 strcpy(tp
->board_part_number
, "BCM57795");
14694 } else if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
14695 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
)
14696 strcpy(tp
->board_part_number
, "BCM57762");
14697 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
)
14698 strcpy(tp
->board_part_number
, "BCM57766");
14699 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
)
14700 strcpy(tp
->board_part_number
, "BCM57782");
14701 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
14702 strcpy(tp
->board_part_number
, "BCM57786");
14705 } else if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
14706 strcpy(tp
->board_part_number
, "BCM95906");
14709 strcpy(tp
->board_part_number
, "none");
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
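/* Cross-reference (added commentary): this table is consulted later in
 * tg3_get_invariants() via pci_dev_present(); when one of these host
 * bridges is present and the NIC is not PCI Express, the MBOX_WRITE_REORDER
 * flag is set so that mailbox register writes are read back to force them
 * to be posted in order.
 */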
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 * half.
	 */
	pci_dev_put(peer);

	return peer;
}
15049 static void tg3_detect_asic_rev(struct tg3
*tp
, u32 misc_ctrl_reg
)
15051 tp
->pci_chip_rev_id
= misc_ctrl_reg
>> MISC_HOST_CTRL_CHIPREV_SHIFT
;
15052 if (tg3_asic_rev(tp
) == ASIC_REV_USE_PROD_ID_REG
) {
15055 /* All devices that use the alternate
15056 * ASIC REV location have a CPMU.
15058 tg3_flag_set(tp
, CPMU_PRESENT
);
15060 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
15061 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
||
15062 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
15063 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
15064 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
||
15065 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5762
||
15066 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
||
15067 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5727
)
15068 reg
= TG3PCI_GEN2_PRODID_ASICREV
;
15069 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
||
15070 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
||
15071 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
||
15072 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
||
15073 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
15074 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
||
15075 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
||
15076 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
||
15077 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
||
15078 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
15079 reg
= TG3PCI_GEN15_PRODID_ASICREV
;
15081 reg
= TG3PCI_PRODID_ASICREV
;
15083 pci_read_config_dword(tp
->pdev
, reg
, &tp
->pci_chip_rev_id
);
15086 /* Wrong chip ID in 5752 A0. This code can be removed later
15087 * as A0 is not in production.
15089 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5752_A0_HW
)
15090 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
15092 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5717_C0
)
15093 tp
->pci_chip_rev_id
= CHIPREV_ID_5720_A0
;
15095 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
15096 tg3_asic_rev(tp
) == ASIC_REV_5719
||
15097 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15098 tg3_flag_set(tp
, 5717_PLUS
);
15100 if (tg3_asic_rev(tp
) == ASIC_REV_57765
||
15101 tg3_asic_rev(tp
) == ASIC_REV_57766
)
15102 tg3_flag_set(tp
, 57765_CLASS
);
15104 if (tg3_flag(tp
, 57765_CLASS
) || tg3_flag(tp
, 5717_PLUS
) ||
15105 tg3_asic_rev(tp
) == ASIC_REV_5762
)
15106 tg3_flag_set(tp
, 57765_PLUS
);
15108 /* Intentionally exclude ASIC_REV_5906 */
15109 if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
15110 tg3_asic_rev(tp
) == ASIC_REV_5787
||
15111 tg3_asic_rev(tp
) == ASIC_REV_5784
||
15112 tg3_asic_rev(tp
) == ASIC_REV_5761
||
15113 tg3_asic_rev(tp
) == ASIC_REV_5785
||
15114 tg3_asic_rev(tp
) == ASIC_REV_57780
||
15115 tg3_flag(tp
, 57765_PLUS
))
15116 tg3_flag_set(tp
, 5755_PLUS
);
15118 if (tg3_asic_rev(tp
) == ASIC_REV_5780
||
15119 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15120 tg3_flag_set(tp
, 5780_CLASS
);
15122 if (tg3_asic_rev(tp
) == ASIC_REV_5750
||
15123 tg3_asic_rev(tp
) == ASIC_REV_5752
||
15124 tg3_asic_rev(tp
) == ASIC_REV_5906
||
15125 tg3_flag(tp
, 5755_PLUS
) ||
15126 tg3_flag(tp
, 5780_CLASS
))
15127 tg3_flag_set(tp
, 5750_PLUS
);
15129 if (tg3_asic_rev(tp
) == ASIC_REV_5705
||
15130 tg3_flag(tp
, 5750_PLUS
))
15131 tg3_flag_set(tp
, 5705_PLUS
);
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
15156 static int tg3_get_invariants(struct tg3
*tp
, const struct pci_device_id
*ent
)
15159 u32 pci_state_reg
, grc_misc_cfg
;
15164 /* Force memory write invalidate off. If we leave it on,
15165 * then on 5700_BX chips we have to enable a workaround.
15166 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15167 * to match the cacheline size. The Broadcom driver have this
15168 * workaround but turns MWI off all the times so never uses
15169 * it. This seems to suggest that the workaround is insufficient.
15171 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15172 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
15173 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15175 /* Important! -- Make sure register accesses are byteswapped
15176 * correctly. Also, for those chips that require it, make
15177 * sure that indirect register accesses are enabled before
15178 * the first operation.
15180 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15182 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
15183 MISC_HOST_CTRL_CHIPREV
);
15184 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15185 tp
->misc_host_ctrl
);
15187 tg3_detect_asic_rev(tp
, misc_ctrl_reg
);
15189 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15190 * we need to disable memory and use config. cycles
15191 * only to access all registers. The 5702/03 chips
15192 * can mistakenly decode the special cycles from the
15193 * ICH chipsets as memory write cycles, causing corruption
15194 * of register and memory space. Only certain ICH bridges
15195 * will drive special cycles with non-zero data during the
15196 * address phase which can fall within the 5703's address
15197 * range. This is not an ICH bug as the PCI spec allows
15198 * non-zero address during special cycles. However, only
15199 * these ICH bridges are known to drive non-zero addresses
15200 * during special cycles.
15202 * Since special cycles do not cross PCI bridges, we only
15203 * enable this workaround if the 5703 is on the secondary
15204 * bus of these ICH bridges.
15206 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
) ||
15207 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A2
)) {
15208 static struct tg3_dev_id
{
15212 } ich_chipsets
[] = {
15213 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
15215 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
15217 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
15219 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
15223 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
15224 struct pci_dev
*bridge
= NULL
;
15226 while (pci_id
->vendor
!= 0) {
15227 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
15233 if (pci_id
->rev
!= PCI_ANY_ID
) {
15234 if (bridge
->revision
> pci_id
->rev
)
15237 if (bridge
->subordinate
&&
15238 (bridge
->subordinate
->number
==
15239 tp
->pdev
->bus
->number
)) {
15240 tg3_flag_set(tp
, ICH_WORKAROUND
);
15241 pci_dev_put(bridge
);
15247 if (tg3_asic_rev(tp
) == ASIC_REV_5701
) {
15248 static struct tg3_dev_id
{
15251 } bridge_chipsets
[] = {
15252 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
15253 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
15256 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
15257 struct pci_dev
*bridge
= NULL
;
15259 while (pci_id
->vendor
!= 0) {
15260 bridge
= pci_get_device(pci_id
->vendor
,
15267 if (bridge
->subordinate
&&
15268 (bridge
->subordinate
->number
<=
15269 tp
->pdev
->bus
->number
) &&
15270 (bridge
->subordinate
->busn_res
.end
>=
15271 tp
->pdev
->bus
->number
)) {
15272 tg3_flag_set(tp
, 5701_DMA_BUG
);
15273 pci_dev_put(bridge
);
15279 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15280 * DMA addresses > 40-bit. This bridge may have other additional
15281 * 57xx devices behind it in some 4-port NIC designs for example.
15282 * Any tg3 device found behind the bridge will also need the 40-bit
15285 if (tg3_flag(tp
, 5780_CLASS
)) {
15286 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15287 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
15289 struct pci_dev
*bridge
= NULL
;
15292 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
15293 PCI_DEVICE_ID_SERVERWORKS_EPB
,
15295 if (bridge
&& bridge
->subordinate
&&
15296 (bridge
->subordinate
->number
<=
15297 tp
->pdev
->bus
->number
) &&
15298 (bridge
->subordinate
->busn_res
.end
>=
15299 tp
->pdev
->bus
->number
)) {
15300 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15301 pci_dev_put(bridge
);
15307 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
15308 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15309 tp
->pdev_peer
= tg3_find_peer(tp
);
15311 /* Determine TSO capabilities */
15312 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
)
15313 ; /* Do nothing. HW bug. */
15314 else if (tg3_flag(tp
, 57765_PLUS
))
15315 tg3_flag_set(tp
, HW_TSO_3
);
15316 else if (tg3_flag(tp
, 5755_PLUS
) ||
15317 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15318 tg3_flag_set(tp
, HW_TSO_2
);
15319 else if (tg3_flag(tp
, 5750_PLUS
)) {
15320 tg3_flag_set(tp
, HW_TSO_1
);
15321 tg3_flag_set(tp
, TSO_BUG
);
15322 if (tg3_asic_rev(tp
) == ASIC_REV_5750
&&
15323 tg3_chip_rev_id(tp
) >= CHIPREV_ID_5750_C2
)
15324 tg3_flag_clear(tp
, TSO_BUG
);
15325 } else if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
15326 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
15327 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
15328 tg3_flag_set(tp
, FW_TSO
);
15329 tg3_flag_set(tp
, TSO_BUG
);
15330 if (tg3_asic_rev(tp
) == ASIC_REV_5705
)
15331 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
15333 tp
->fw_needed
= FIRMWARE_TG3TSO
;
15336 /* Selectively allow TSO based on operating conditions */
15337 if (tg3_flag(tp
, HW_TSO_1
) ||
15338 tg3_flag(tp
, HW_TSO_2
) ||
15339 tg3_flag(tp
, HW_TSO_3
) ||
15340 tg3_flag(tp
, FW_TSO
)) {
15341 /* For firmware TSO, assume ASF is disabled.
15342 * We'll disable TSO later if we discover ASF
15343 * is enabled in tg3_get_eeprom_hw_cfg().
15345 tg3_flag_set(tp
, TSO_CAPABLE
);
15347 tg3_flag_clear(tp
, TSO_CAPABLE
);
15348 tg3_flag_clear(tp
, TSO_BUG
);
15349 tp
->fw_needed
= NULL
;
15352 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
)
15353 tp
->fw_needed
= FIRMWARE_TG3
;
15357 if (tg3_flag(tp
, 5750_PLUS
)) {
15358 tg3_flag_set(tp
, SUPPORT_MSI
);
15359 if (tg3_chip_rev(tp
) == CHIPREV_5750_AX
||
15360 tg3_chip_rev(tp
) == CHIPREV_5750_BX
||
15361 (tg3_asic_rev(tp
) == ASIC_REV_5714
&&
15362 tg3_chip_rev_id(tp
) <= CHIPREV_ID_5714_A2
&&
15363 tp
->pdev_peer
== tp
->pdev
))
15364 tg3_flag_clear(tp
, SUPPORT_MSI
);
15366 if (tg3_flag(tp
, 5755_PLUS
) ||
15367 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15368 tg3_flag_set(tp
, 1SHOT_MSI
);
15371 if (tg3_flag(tp
, 57765_PLUS
)) {
15372 tg3_flag_set(tp
, SUPPORT_MSIX
);
15373 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
15379 if (tp
->irq_max
> 1) {
15380 tp
->rxq_max
= TG3_RSS_MAX_NUM_QS
;
15381 tg3_rss_init_dflt_indir_tbl(tp
, TG3_RSS_MAX_NUM_QS
);
15383 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
15384 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15385 tp
->txq_max
= tp
->irq_max
- 1;
15388 if (tg3_flag(tp
, 5755_PLUS
) ||
15389 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15390 tg3_flag_set(tp
, SHORT_DMA_BUG
);
15392 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
15393 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_4K
;
15395 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
15396 tg3_asic_rev(tp
) == ASIC_REV_5719
||
15397 tg3_asic_rev(tp
) == ASIC_REV_5720
||
15398 tg3_asic_rev(tp
) == ASIC_REV_5762
)
15399 tg3_flag_set(tp
, LRG_PROD_RING_CAP
);
15401 if (tg3_flag(tp
, 57765_PLUS
) &&
15402 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
)
15403 tg3_flag_set(tp
, USE_JUMBO_BDFLAG
);
15405 if (!tg3_flag(tp
, 5705_PLUS
) ||
15406 tg3_flag(tp
, 5780_CLASS
) ||
15407 tg3_flag(tp
, USE_JUMBO_BDFLAG
))
15408 tg3_flag_set(tp
, JUMBO_CAPABLE
);
15410 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
15413 if (pci_is_pcie(tp
->pdev
)) {
15416 tg3_flag_set(tp
, PCI_EXPRESS
);
15418 pcie_capability_read_word(tp
->pdev
, PCI_EXP_LNKCTL
, &lnkctl
);
15419 if (lnkctl
& PCI_EXP_LNKCTL_CLKREQ_EN
) {
15420 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15421 tg3_flag_clear(tp
, HW_TSO_2
);
15422 tg3_flag_clear(tp
, TSO_CAPABLE
);
15424 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
15425 tg3_asic_rev(tp
) == ASIC_REV_5761
||
15426 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A0
||
15427 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A1
)
15428 tg3_flag_set(tp
, CLKREQ_BUG
);
15429 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5717_A0
) {
15430 tg3_flag_set(tp
, L1PLLPD_EN
);
15432 } else if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
15433 /* BCM5785 devices are effectively PCIe devices, and should
15434 * follow PCIe codepaths, but do not have a PCIe capabilities
15437 tg3_flag_set(tp
, PCI_EXPRESS
);
15438 } else if (!tg3_flag(tp
, 5705_PLUS
) ||
15439 tg3_flag(tp
, 5780_CLASS
)) {
15440 tp
->pcix_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_PCIX
);
15441 if (!tp
->pcix_cap
) {
15442 dev_err(&tp
->pdev
->dev
,
15443 "Cannot find PCI-X capability, aborting\n");
15447 if (!(pci_state_reg
& PCISTATE_CONV_PCI_MODE
))
15448 tg3_flag_set(tp
, PCIX_MODE
);
15451 /* If we have an AMD 762 or VIA K8T800 chipset, write
15452 * reordering to the mailbox registers done by the host
15453 * controller can cause major troubles. We read back from
15454 * every mailbox register write to force the writes to be
15455 * posted to the chip in order.
15457 if (pci_dev_present(tg3_write_reorder_chipsets
) &&
15458 !tg3_flag(tp
, PCI_EXPRESS
))
15459 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
15461 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
15462 &tp
->pci_cacheline_sz
);
15463 pci_read_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15464 &tp
->pci_lat_timer
);
15465 if (tg3_asic_rev(tp
) == ASIC_REV_5703
&&
15466 tp
->pci_lat_timer
< 64) {
15467 tp
->pci_lat_timer
= 64;
15468 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15469 tp
->pci_lat_timer
);
15472 /* Important! -- It is critical that the PCI-X hw workaround
15473 * situation is decided before the first MMIO register access.
15475 if (tg3_chip_rev(tp
) == CHIPREV_5700_BX
) {
15476 /* 5700 BX chips need to have their TX producer index
15477 * mailboxes written twice to workaround a bug.
15479 tg3_flag_set(tp
, TXD_MBOX_HWBUG
);
15481 /* If we are in PCI-X mode, enable register write workaround.
15483 * The workaround is to use indirect register accesses
15484 * for all chip writes not to mailbox registers.
15486 if (tg3_flag(tp
, PCIX_MODE
)) {
15489 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
15491 /* The chip can have it's power management PCI config
15492 * space registers clobbered due to this bug.
15493 * So explicitly force the chip into D0 here.
15495 pci_read_config_dword(tp
->pdev
,
15496 tp
->pm_cap
+ PCI_PM_CTRL
,
15498 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
15499 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
15500 pci_write_config_dword(tp
->pdev
,
15501 tp
->pm_cap
+ PCI_PM_CTRL
,
15504 /* Also, force SERR#/PERR# in PCI command. */
15505 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15506 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
15507 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15511 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
15512 tg3_flag_set(tp
, PCI_HIGH_SPEED
);
15513 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
15514 tg3_flag_set(tp
, PCI_32BIT
);
15516 /* Chip-specific fixup from Broadcom driver */
15517 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
) &&
15518 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
15519 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
15520 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
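	/* The indirect accessors above avoid MMIO entirely: they tunnel
	 * register reads and writes through the PCI configuration-space
	 * window (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA) while holding
	 * tp->indirect_lock, which is why memory-space decoding can be
	 * turned off in PCI_COMMAND here.
	 */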
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
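	/* On these multi-port/multi-function parts the function number in
	 * devfn does not always identify the port this MAC serves, so
	 * tp->pci_fn is re-derived above from the PCI-X status register or
	 * the CPMU status word instead.
	 */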
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}
	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	tg3_nvram_init(tp);
	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
	}

	tg3_read_fw_ver(tp);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);
	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
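/* The rx ring masks set up above rely on the ring sizes being powers of
 * two, so "size - 1" acts as an index mask; for example a 512-entry
 * standard ring yields rx_std_ring_mask = 0x1ff, and producer/consumer
 * indices are wrapped with "idx & mask" on the fast path.
 */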
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
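/* Layout used by the register fallback above: MAC_ADDR_0_HIGH holds the
 * two most significant address bytes in its low 16 bits and
 * MAC_ADDR_0_LOW holds the remaining four, so for 00:10:18:aa:bb:cc the
 * registers read 0x00000010 and 0x18aabbcc respectively.
 */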
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
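/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, hence the "* 4"
 * above: a config value of 16 corresponds to a 64-byte cache line, and a
 * value of 0 (not programmed) is treated as the 1024-byte worst case.
 */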
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * unload.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}
	}

	return ret;
}
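/* tg3_do_test_dma() above stages one internal buffer descriptor in NIC
 * SRAM through the config-space memory window, kicks either the read or
 * the write DMA FIFO depending on "to_device", and then polls the
 * matching completion FIFO to decide whether the host/device DMA really
 * finished.
 */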
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				0x00800000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
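/* Net effect of tg3_test_dma(): keep the boundary computed by
 * tg3_calc_dma_bndry() when the unrestricted-burst write test passes and
 * the host bridge is not on the known-bad list, otherwise fall back to
 * the conservative 16-byte DMA write boundary.
 */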
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
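/* These defaults are what ethtool reports before any tuning; for example
 * "ethtool -c eth0" prints rx-usecs/tx-usecs and friends straight from
 * tp->coal, and "ethtool -C" feeds new values back into the same
 * structure through the driver's coalesce ops.
 */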
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);
	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}
	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;
	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}
	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;
	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
16871 intmbx
= MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
;
16872 rcvmbx
= MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
;
16873 sndmbx
= MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
16874 for (i
= 0; i
< tp
->irq_max
; i
++) {
16875 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
16878 tnapi
->tx_pending
= TG3_DEF_TX_RING_PENDING
;
16880 tnapi
->int_mbox
= intmbx
;
16886 tnapi
->consmbox
= rcvmbx
;
16887 tnapi
->prodmbox
= sndmbx
;
16890 tnapi
->coal_now
= HOSTCC_MODE_COAL_VEC1_NOW
<< (i
- 1);
16892 tnapi
->coal_now
= HOSTCC_MODE_NOW
;
16894 if (!tg3_flag(tp
, SUPPORT_MSIX
))
16898 * If we support MSIX, we'll be using RSS. If we're using
16899 * RSS, the first vector only handles link interrupts and the
16900 * remaining vectors handle rx and tx interrupts. Reuse the
16901 * mailbox values for the next iteration. The values we setup
16902 * above are still useful for the single vectored mode.
	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
16939 netdev_info(dev
, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16940 tp
->board_part_number
,
16941 tg3_chip_rev_id(tp
),
16942 tg3_bus_string(tp
, str
),
16945 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
16946 struct phy_device
*phydev
;
16947 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
16949 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16950 phydev
->drv
->name
, dev_name(&phydev
->dev
));
16954 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
16955 ethtype
= "10/100Base-TX";
16956 else if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
16957 ethtype
= "1000Base-SX";
16959 ethtype
= "10/100/1000Base-T";
16961 netdev_info(dev
, "attached PHY is %s (%s Ethernet) "
16962 "(WireSpeed[%d], EEE[%d])\n",
16963 tg3_phy_string(tp
), ethtype
,
16964 (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
) == 0,
16965 (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) != 0);
16968 netdev_info(dev
, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16969 (dev
->features
& NETIF_F_RXCSUM
) != 0,
16970 tg3_flag(tp
, USE_LINKCHG_REG
) != 0,
16971 (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) != 0,
16972 tg3_flag(tp
, ENABLE_ASF
) != 0,
16973 tg3_flag(tp
, TSO_CAPABLE
) != 0);
16974 netdev_info(dev
, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16976 pdev
->dma_mask
== DMA_BIT_MASK(32) ? 32 :
16977 ((u64
)pdev
->dma_mask
) == DMA_BIT_MASK(40) ? 40 : 64);
16979 pci_save_state(pdev
);
	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS	(&tg3_pm_ops)

#else

#define TG3_PM_OPS	NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

done:
	rtnl_unlock();
}
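/* The three callbacks above are wired into tg3_err_handler below and are
 * invoked by the PCI error recovery core in order: error_detected()
 * quiesces the driver, slot_reset() runs once the slot has been reset so
 * the device can be re-enabled and its config space restored, and
 * resume() restarts traffic after successful recovery.
 */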
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);