/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include <asm/idprom.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
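
/* Usage: tg3_flag(tp, ENABLE_APE) tests TG3_FLAG_ENABLE_APE in tp->tg3_flags;
 * tg3_flag_set()/tg3_flag_clear() update the same bit atomically via the
 * bitops helpers above.
 */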
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
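
/* PCI IDs of all devices handled by this driver.  Entries whose driver_data
 * carries TG3_DRV_DATA_FLAG_10_100_ONLY are Fast Ethernet-only parts.
 */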
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
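
/* Strings reported for the ethtool -S statistics counters. */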
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
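
/* Low-level register accessors.  The chip is normally programmed through the
 * memory-mapped window at tp->regs; the indirect variants below go through
 * PCI configuration space instead.  Which accessor a given chip uses is
 * selected through the tp->write32/read32 function pointers (see the tw32/tr32
 * macros further down).
 */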
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
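
/* Indirect register access: the target offset is written to
 * TG3PCI_REG_BASE_ADDR and the data moves through TG3PCI_REG_DATA in PCI
 * configuration space, serialized by tp->indirect_lock.
 */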
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) {
		/* Non-posted methods */
		tp->write32(tp, off, val);
	} else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
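
/* NIC on-chip SRAM is accessed through a sliding memory window: the target
 * offset goes to TG3PCI_MEM_WIN_BASE_ADDR and the data moves through
 * TG3PCI_MEM_WIN_DATA, either via MMIO or via PCI config space when
 * SRAM_USE_CONFIG is set.  The window base is always restored to zero.
 */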
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669 static void tg3_ape_lock_init(struct tg3
*tp
)
674 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
675 regbase
= TG3_APE_LOCK_GRANT
;
677 regbase
= TG3_APE_PER_LOCK_GRANT
;
679 /* Make sure the driver hasn't any stale locks. */
680 for (i
= TG3_APE_LOCK_PHY0
; i
<= TG3_APE_LOCK_GPIO
; i
++) {
682 case TG3_APE_LOCK_PHY0
:
683 case TG3_APE_LOCK_PHY1
:
684 case TG3_APE_LOCK_PHY2
:
685 case TG3_APE_LOCK_PHY3
:
686 bit
= APE_LOCK_GRANT_DRIVER
;
690 bit
= APE_LOCK_GRANT_DRIVER
;
692 bit
= 1 << tp
->pci_fn
;
694 tg3_ape_write32(tp
, regbase
+ 4 * i
, bit
);
699 static int tg3_ape_lock(struct tg3
*tp
, int locknum
)
703 u32 status
, req
, gnt
, bit
;
705 if (!tg3_flag(tp
, ENABLE_APE
))
709 case TG3_APE_LOCK_GPIO
:
710 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
712 case TG3_APE_LOCK_GRC
:
713 case TG3_APE_LOCK_MEM
:
715 bit
= APE_LOCK_REQ_DRIVER
;
717 bit
= 1 << tp
->pci_fn
;
719 case TG3_APE_LOCK_PHY0
:
720 case TG3_APE_LOCK_PHY1
:
721 case TG3_APE_LOCK_PHY2
:
722 case TG3_APE_LOCK_PHY3
:
723 bit
= APE_LOCK_REQ_DRIVER
;
729 if (tg3_asic_rev(tp
) == ASIC_REV_5761
) {
730 req
= TG3_APE_LOCK_REQ
;
731 gnt
= TG3_APE_LOCK_GRANT
;
733 req
= TG3_APE_PER_LOCK_REQ
;
734 gnt
= TG3_APE_PER_LOCK_GRANT
;
739 tg3_ape_write32(tp
, req
+ off
, bit
);
741 /* Wait for up to 1 millisecond to acquire lock. */
742 for (i
= 0; i
< 100; i
++) {
743 status
= tg3_ape_read32(tp
, gnt
+ off
);
750 /* Revoke the lock request. */
751 tg3_ape_write32(tp
, gnt
+ off
, bit
);
758 static void tg3_ape_unlock(struct tg3
*tp
, int locknum
)
762 if (!tg3_flag(tp
, ENABLE_APE
))
766 case TG3_APE_LOCK_GPIO
:
767 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
769 case TG3_APE_LOCK_GRC
:
770 case TG3_APE_LOCK_MEM
:
772 bit
= APE_LOCK_GRANT_DRIVER
;
774 bit
= 1 << tp
->pci_fn
;
776 case TG3_APE_LOCK_PHY0
:
777 case TG3_APE_LOCK_PHY1
:
778 case TG3_APE_LOCK_PHY2
:
779 case TG3_APE_LOCK_PHY3
:
780 bit
= APE_LOCK_GRANT_DRIVER
;
786 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
787 gnt
= TG3_APE_LOCK_GRANT
;
789 gnt
= TG3_APE_PER_LOCK_GRANT
;
791 tg3_ape_write32(tp
, gnt
+ 4 * locknum
, bit
);
794 static int tg3_ape_event_lock(struct tg3
*tp
, u32 timeout_us
)
799 if (tg3_ape_lock(tp
, TG3_APE_LOCK_MEM
))
802 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
803 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
806 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
809 timeout_us
-= (timeout_us
> 10) ? 10 : timeout_us
;
812 return timeout_us
? 0 : -EBUSY
;
815 static int tg3_ape_wait_for_event(struct tg3
*tp
, u32 timeout_us
)
819 for (i
= 0; i
< timeout_us
/ 10; i
++) {
820 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
822 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
828 return i
== timeout_us
/ 10;
831 static int tg3_ape_scratchpad_read(struct tg3
*tp
, u32
*data
, u32 base_off
,
835 u32 i
, bufoff
, msgoff
, maxlen
, apedata
;
837 if (!tg3_flag(tp
, APE_HAS_NCSI
))
840 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
841 if (apedata
!= APE_SEG_SIG_MAGIC
)
844 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
845 if (!(apedata
& APE_FW_STATUS_READY
))
848 bufoff
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_OFF
) +
850 msgoff
= bufoff
+ 2 * sizeof(u32
);
851 maxlen
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_LEN
);
856 /* Cap xfer sizes to scratchpad limits. */
857 length
= (len
> maxlen
) ? maxlen
: len
;
860 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
861 if (!(apedata
& APE_FW_STATUS_READY
))
864 /* Wait for up to 1 msec for APE to service previous event. */
865 err
= tg3_ape_event_lock(tp
, 1000);
869 apedata
= APE_EVENT_STATUS_DRIVER_EVNT
|
870 APE_EVENT_STATUS_SCRTCHPD_READ
|
871 APE_EVENT_STATUS_EVENT_PENDING
;
872 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
, apedata
);
874 tg3_ape_write32(tp
, bufoff
, base_off
);
875 tg3_ape_write32(tp
, bufoff
+ sizeof(u32
), length
);
877 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
878 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
882 if (tg3_ape_wait_for_event(tp
, 30000))
885 for (i
= 0; length
; i
+= 4, length
-= 4) {
886 u32 val
= tg3_ape_read32(tp
, msgoff
+ i
);
887 memcpy(data
, &val
, sizeof(u32
));
895 static int tg3_ape_send_event(struct tg3
*tp
, u32 event
)
900 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
901 if (apedata
!= APE_SEG_SIG_MAGIC
)
904 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
905 if (!(apedata
& APE_FW_STATUS_READY
))
908 /* Wait for up to 1 millisecond for APE to service previous event. */
909 err
= tg3_ape_event_lock(tp
, 1000);
913 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
,
914 event
| APE_EVENT_STATUS_EVENT_PENDING
);
916 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
917 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
922 static void tg3_ape_driver_state_change(struct tg3
*tp
, int kind
)
927 if (!tg3_flag(tp
, ENABLE_APE
))
931 case RESET_KIND_INIT
:
932 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
,
933 APE_HOST_SEG_SIG_MAGIC
);
934 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_LEN
,
935 APE_HOST_SEG_LEN_MAGIC
);
936 apedata
= tg3_ape_read32(tp
, TG3_APE_HOST_INIT_COUNT
);
937 tg3_ape_write32(tp
, TG3_APE_HOST_INIT_COUNT
, ++apedata
);
938 tg3_ape_write32(tp
, TG3_APE_HOST_DRIVER_ID
,
939 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM
, TG3_MIN_NUM
));
940 tg3_ape_write32(tp
, TG3_APE_HOST_BEHAVIOR
,
941 APE_HOST_BEHAV_NO_PHYLOCK
);
942 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
,
943 TG3_APE_HOST_DRVR_STATE_START
);
945 event
= APE_EVENT_STATUS_STATE_START
;
947 case RESET_KIND_SHUTDOWN
:
948 /* With the interface we are currently using,
949 * APE does not track driver state. Wiping
950 * out the HOST SEGMENT SIGNATURE forces
951 * the APE to assume OS absent status.
953 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
, 0x0);
955 if (device_may_wakeup(&tp
->pdev
->dev
) &&
956 tg3_flag(tp
, WOL_ENABLE
)) {
957 tg3_ape_write32(tp
, TG3_APE_HOST_WOL_SPEED
,
958 TG3_APE_HOST_WOL_SPEED_AUTO
);
959 apedata
= TG3_APE_HOST_DRVR_STATE_WOL
;
961 apedata
= TG3_APE_HOST_DRVR_STATE_UNLOAD
;
963 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
, apedata
);
965 event
= APE_EVENT_STATUS_STATE_UNLOAD
;
967 case RESET_KIND_SUSPEND
:
968 event
= APE_EVENT_STATUS_STATE_SUSPEND
;
974 event
|= APE_EVENT_STATUS_DRIVER_EVNT
| APE_EVENT_STATUS_STATE_CHNGE
;
976 tg3_ape_send_event(tp
, event
);
979 static void tg3_disable_ints(struct tg3
*tp
)
983 tw32(TG3PCI_MISC_HOST_CTRL
,
984 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
985 for (i
= 0; i
< tp
->irq_max
; i
++)
986 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 0x00000001);
989 static void tg3_enable_ints(struct tg3
*tp
)
996 tw32(TG3PCI_MISC_HOST_CTRL
,
997 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
999 tp
->coal_now
= tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
;
1000 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
1001 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
1003 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1004 if (tg3_flag(tp
, 1SHOT_MSI
))
1005 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1007 tp
->coal_now
|= tnapi
->coal_now
;
1010 /* Force an initial interrupt */
1011 if (!tg3_flag(tp
, TAGGED_STATUS
) &&
1012 (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
))
1013 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
1015 tw32(HOSTCC_MODE
, tp
->coal_now
);
1017 tp
->coal_now
&= ~(tp
->napi
[0].coal_now
| tp
->napi
[1].coal_now
);
1020 static inline unsigned int tg3_has_work(struct tg3_napi
*tnapi
)
1022 struct tg3
*tp
= tnapi
->tp
;
1023 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
1024 unsigned int work_exists
= 0;
1026 /* check for phy events */
1027 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
1028 if (sblk
->status
& SD_STATUS_LINK_CHG
)
1032 /* check for TX work to do */
1033 if (sblk
->idx
[0].tx_consumer
!= tnapi
->tx_cons
)
1036 /* check for RX work to do */
1037 if (tnapi
->rx_rcb_prod_idx
&&
1038 *(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
1045 * similar to tg3_enable_ints, but it accurately determines whether there
1046 * is new work pending and can return without flushing the PIO write
1047 * which reenables interrupts
1049 static void tg3_int_reenable(struct tg3_napi
*tnapi
)
1051 struct tg3
*tp
= tnapi
->tp
;
1053 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1056 /* When doing tagged status, this work check is unnecessary.
1057 * The last_tag we write above tells the chip which piece of
1058 * work we've completed.
1060 if (!tg3_flag(tp
, TAGGED_STATUS
) && tg3_has_work(tnapi
))
1061 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
1062 HOSTCC_MODE_ENABLE
| tnapi
->coal_now
);
1065 static void tg3_switch_clocks(struct tg3
*tp
)
1068 u32 orig_clock_ctrl
;
1070 if (tg3_flag(tp
, CPMU_PRESENT
) || tg3_flag(tp
, 5780_CLASS
))
1073 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
1075 orig_clock_ctrl
= clock_ctrl
;
1076 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
1077 CLOCK_CTRL_CLKRUN_OENABLE
|
1079 tp
->pci_clock_ctrl
= clock_ctrl
;
1081 if (tg3_flag(tp
, 5705_PLUS
)) {
1082 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
1083 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1084 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
1086 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
1087 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1089 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
1091 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1092 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
1095 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
1098 #define PHY_BUSY_LOOPS 5000
1100 static int __tg3_readphy(struct tg3
*tp
, unsigned int phy_addr
, int reg
,
1107 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1109 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1113 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1117 frame_val
= ((phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1118 MI_COM_PHY_ADDR_MASK
);
1119 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1120 MI_COM_REG_ADDR_MASK
);
1121 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
1123 tw32_f(MAC_MI_COM
, frame_val
);
1125 loops
= PHY_BUSY_LOOPS
;
1126 while (loops
!= 0) {
1128 frame_val
= tr32(MAC_MI_COM
);
1130 if ((frame_val
& MI_COM_BUSY
) == 0) {
1132 frame_val
= tr32(MAC_MI_COM
);
1140 *val
= frame_val
& MI_COM_DATA_MASK
;
1144 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1145 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1149 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1154 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
1156 return __tg3_readphy(tp
, tp
->phy_addr
, reg
, val
);
1159 static int __tg3_writephy(struct tg3
*tp
, unsigned int phy_addr
, int reg
,
1166 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
1167 (reg
== MII_CTRL1000
|| reg
== MII_TG3_AUX_CTRL
))
1170 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1172 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1176 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1178 frame_val
= ((phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1179 MI_COM_PHY_ADDR_MASK
);
1180 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1181 MI_COM_REG_ADDR_MASK
);
1182 frame_val
|= (val
& MI_COM_DATA_MASK
);
1183 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
1185 tw32_f(MAC_MI_COM
, frame_val
);
1187 loops
= PHY_BUSY_LOOPS
;
1188 while (loops
!= 0) {
1190 frame_val
= tr32(MAC_MI_COM
);
1191 if ((frame_val
& MI_COM_BUSY
) == 0) {
1193 frame_val
= tr32(MAC_MI_COM
);
1203 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1204 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1208 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1213 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
1215 return __tg3_writephy(tp
, tp
->phy_addr
, reg
, val
);
1218 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
1222 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1226 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1230 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1231 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1235 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1241 static int tg3_phy_cl45_read(struct tg3
*tp
, u32 devad
, u32 addr
, u32
*val
)
1245 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1249 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1253 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1254 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1258 err
= tg3_readphy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1264 static int tg3_phydsp_read(struct tg3
*tp
, u32 reg
, u32
*val
)
1268 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1270 err
= tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1275 static int tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
1279 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1281 err
= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1286 static int tg3_phy_auxctl_read(struct tg3
*tp
, int reg
, u32
*val
)
1290 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
1291 (reg
<< MII_TG3_AUXCTL_MISC_RDSEL_SHIFT
) |
1292 MII_TG3_AUXCTL_SHDWSEL_MISC
);
1294 err
= tg3_readphy(tp
, MII_TG3_AUX_CTRL
, val
);
1299 static int tg3_phy_auxctl_write(struct tg3
*tp
, int reg
, u32 set
)
1301 if (reg
== MII_TG3_AUXCTL_SHDWSEL_MISC
)
1302 set
|= MII_TG3_AUXCTL_MISC_WREN
;
1304 return tg3_writephy(tp
, MII_TG3_AUX_CTRL
, set
| reg
);
1307 static int tg3_phy_toggle_auxctl_smdsp(struct tg3
*tp
, bool enable
)
1312 err
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
1318 val
|= MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1320 val
&= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1322 err
= tg3_phy_auxctl_write((tp
), MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
1323 val
| MII_TG3_AUXCTL_ACTL_TX_6DB
);
1328 static int tg3_bmcr_reset(struct tg3
*tp
)
1333 /* OK, reset it, and poll the BMCR_RESET bit until it
1334 * clears or we time out.
1336 phy_control
= BMCR_RESET
;
1337 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1343 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1347 if ((phy_control
& BMCR_RESET
) == 0) {
1359 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1361 struct tg3
*tp
= bp
->priv
;
1364 spin_lock_bh(&tp
->lock
);
1366 if (tg3_readphy(tp
, reg
, &val
))
1369 spin_unlock_bh(&tp
->lock
);
1374 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1376 struct tg3
*tp
= bp
->priv
;
1379 spin_lock_bh(&tp
->lock
);
1381 if (tg3_writephy(tp
, reg
, val
))
1384 spin_unlock_bh(&tp
->lock
);
1389 static int tg3_mdio_reset(struct mii_bus
*bp
)
1394 static void tg3_mdio_config_5785(struct tg3
*tp
)
1397 struct phy_device
*phydev
;
1399 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1400 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1401 case PHY_ID_BCM50610
:
1402 case PHY_ID_BCM50610M
:
1403 val
= MAC_PHYCFG2_50610_LED_MODES
;
1405 case PHY_ID_BCMAC131
:
1406 val
= MAC_PHYCFG2_AC131_LED_MODES
;
1408 case PHY_ID_RTL8211C
:
1409 val
= MAC_PHYCFG2_RTL8211C_LED_MODES
;
1411 case PHY_ID_RTL8201E
:
1412 val
= MAC_PHYCFG2_RTL8201E_LED_MODES
;
1418 if (phydev
->interface
!= PHY_INTERFACE_MODE_RGMII
) {
1419 tw32(MAC_PHYCFG2
, val
);
1421 val
= tr32(MAC_PHYCFG1
);
1422 val
&= ~(MAC_PHYCFG1_RGMII_INT
|
1423 MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
);
1424 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
;
1425 tw32(MAC_PHYCFG1
, val
);
1430 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1431 val
|= MAC_PHYCFG2_EMODE_MASK_MASK
|
1432 MAC_PHYCFG2_FMODE_MASK_MASK
|
1433 MAC_PHYCFG2_GMODE_MASK_MASK
|
1434 MAC_PHYCFG2_ACT_MASK_MASK
|
1435 MAC_PHYCFG2_QUAL_MASK_MASK
|
1436 MAC_PHYCFG2_INBAND_ENABLE
;
1438 tw32(MAC_PHYCFG2
, val
);
1440 val
= tr32(MAC_PHYCFG1
);
1441 val
&= ~(MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
|
1442 MAC_PHYCFG1_RGMII_EXT_RX_DEC
| MAC_PHYCFG1_RGMII_SND_STAT_EN
);
1443 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1444 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1445 val
|= MAC_PHYCFG1_RGMII_EXT_RX_DEC
;
1446 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1447 val
|= MAC_PHYCFG1_RGMII_SND_STAT_EN
;
1449 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
|
1450 MAC_PHYCFG1_RGMII_INT
| MAC_PHYCFG1_TXC_DRV
;
1451 tw32(MAC_PHYCFG1
, val
);
1453 val
= tr32(MAC_EXT_RGMII_MODE
);
1454 val
&= ~(MAC_RGMII_MODE_RX_INT_B
|
1455 MAC_RGMII_MODE_RX_QUALITY
|
1456 MAC_RGMII_MODE_RX_ACTIVITY
|
1457 MAC_RGMII_MODE_RX_ENG_DET
|
1458 MAC_RGMII_MODE_TX_ENABLE
|
1459 MAC_RGMII_MODE_TX_LOWPWR
|
1460 MAC_RGMII_MODE_TX_RESET
);
1461 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1462 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1463 val
|= MAC_RGMII_MODE_RX_INT_B
|
1464 MAC_RGMII_MODE_RX_QUALITY
|
1465 MAC_RGMII_MODE_RX_ACTIVITY
|
1466 MAC_RGMII_MODE_RX_ENG_DET
;
1467 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1468 val
|= MAC_RGMII_MODE_TX_ENABLE
|
1469 MAC_RGMII_MODE_TX_LOWPWR
|
1470 MAC_RGMII_MODE_TX_RESET
;
1472 tw32(MAC_EXT_RGMII_MODE
, val
);
1475 static void tg3_mdio_start(struct tg3
*tp
)
1477 tp
->mi_mode
&= ~MAC_MI_MODE_AUTO_POLL
;
1478 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1481 if (tg3_flag(tp
, MDIOBUS_INITED
) &&
1482 tg3_asic_rev(tp
) == ASIC_REV_5785
)
1483 tg3_mdio_config_5785(tp
);
1486 static int tg3_mdio_init(struct tg3
*tp
)
1490 struct phy_device
*phydev
;
1492 if (tg3_flag(tp
, 5717_PLUS
)) {
1495 tp
->phy_addr
= tp
->pci_fn
+ 1;
1497 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5717_A0
)
1498 is_serdes
= tr32(SG_DIG_STATUS
) & SG_DIG_IS_SERDES
;
1500 is_serdes
= tr32(TG3_CPMU_PHY_STRAP
) &
1501 TG3_CPMU_PHY_STRAP_IS_SERDES
;
1505 tp
->phy_addr
= TG3_PHY_MII_ADDR
;
1509 if (!tg3_flag(tp
, USE_PHYLIB
) || tg3_flag(tp
, MDIOBUS_INITED
))
1512 tp
->mdio_bus
= mdiobus_alloc();
1513 if (tp
->mdio_bus
== NULL
)
1516 tp
->mdio_bus
->name
= "tg3 mdio bus";
1517 snprintf(tp
->mdio_bus
->id
, MII_BUS_ID_SIZE
, "%x",
1518 (tp
->pdev
->bus
->number
<< 8) | tp
->pdev
->devfn
);
1519 tp
->mdio_bus
->priv
= tp
;
1520 tp
->mdio_bus
->parent
= &tp
->pdev
->dev
;
1521 tp
->mdio_bus
->read
= &tg3_mdio_read
;
1522 tp
->mdio_bus
->write
= &tg3_mdio_write
;
1523 tp
->mdio_bus
->reset
= &tg3_mdio_reset
;
1524 tp
->mdio_bus
->phy_mask
= ~(1 << TG3_PHY_MII_ADDR
);
1525 tp
->mdio_bus
->irq
= &tp
->mdio_irq
[0];
1527 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1528 tp
->mdio_bus
->irq
[i
] = PHY_POLL
;
1530 /* The bus registration will look for all the PHYs on the mdio bus.
1531 * Unfortunately, it does not ensure the PHY is powered up before
1532 * accessing the PHY ID registers. A chip reset is the
1533 * quickest way to bring the device back to an operational state..
1535 if (tg3_readphy(tp
, MII_BMCR
, ®
) || (reg
& BMCR_PDOWN
))
1538 i
= mdiobus_register(tp
->mdio_bus
);
1540 dev_warn(&tp
->pdev
->dev
, "mdiobus_reg failed (0x%x)\n", i
);
1541 mdiobus_free(tp
->mdio_bus
);
1545 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1547 if (!phydev
|| !phydev
->drv
) {
1548 dev_warn(&tp
->pdev
->dev
, "No PHY devices\n");
1549 mdiobus_unregister(tp
->mdio_bus
);
1550 mdiobus_free(tp
->mdio_bus
);
1554 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1555 case PHY_ID_BCM57780
:
1556 phydev
->interface
= PHY_INTERFACE_MODE_GMII
;
1557 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1559 case PHY_ID_BCM50610
:
1560 case PHY_ID_BCM50610M
:
1561 phydev
->dev_flags
|= PHY_BRCM_CLEAR_RGMII_MODE
|
1562 PHY_BRCM_RX_REFCLK_UNUSED
|
1563 PHY_BRCM_DIS_TXCRXC_NOENRGY
|
1564 PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1565 if (tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1566 phydev
->dev_flags
|= PHY_BRCM_STD_IBND_DISABLE
;
1567 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1568 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_RX_ENABLE
;
1569 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1570 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_TX_ENABLE
;
1572 case PHY_ID_RTL8211C
:
1573 phydev
->interface
= PHY_INTERFACE_MODE_RGMII
;
1575 case PHY_ID_RTL8201E
:
1576 case PHY_ID_BCMAC131
:
1577 phydev
->interface
= PHY_INTERFACE_MODE_MII
;
1578 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1579 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
1583 tg3_flag_set(tp
, MDIOBUS_INITED
);
1585 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
1586 tg3_mdio_config_5785(tp
);
1591 static void tg3_mdio_fini(struct tg3
*tp
)
1593 if (tg3_flag(tp
, MDIOBUS_INITED
)) {
1594 tg3_flag_clear(tp
, MDIOBUS_INITED
);
1595 mdiobus_unregister(tp
->mdio_bus
);
1596 mdiobus_free(tp
->mdio_bus
);
1600 /* tp->lock is held. */
1601 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1605 val
= tr32(GRC_RX_CPU_EVENT
);
1606 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1607 tw32_f(GRC_RX_CPU_EVENT
, val
);
1609 tp
->last_event_jiffies
= jiffies
;
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614 /* tp->lock is held. */
1615 static void tg3_wait_for_event_ack(struct tg3
*tp
)
1618 unsigned int delay_cnt
;
1621 /* If enough time has passed, no wait is necessary. */
1622 time_remain
= (long)(tp
->last_event_jiffies
+ 1 +
1623 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC
)) -
1625 if (time_remain
< 0)
1628 /* Check if we can shorten the wait time. */
1629 delay_cnt
= jiffies_to_usecs(time_remain
);
1630 if (delay_cnt
> TG3_FW_EVENT_TIMEOUT_USEC
)
1631 delay_cnt
= TG3_FW_EVENT_TIMEOUT_USEC
;
1632 delay_cnt
= (delay_cnt
>> 3) + 1;
1634 for (i
= 0; i
< delay_cnt
; i
++) {
1635 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
1641 /* tp->lock is held. */
1642 static void tg3_phy_gather_ump_data(struct tg3
*tp
, u32
*data
)
1647 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1649 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1650 val
|= (reg
& 0xffff);
1654 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1656 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1657 val
|= (reg
& 0xffff);
1661 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1662 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1664 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1665 val
|= (reg
& 0xffff);
1669 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1676 /* tp->lock is held. */
1677 static void tg3_ump_link_report(struct tg3
*tp
)
1681 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1684 tg3_phy_gather_ump_data(tp
, data
);
1686 tg3_wait_for_event_ack(tp
);
1688 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1689 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1690 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1691 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1692 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1693 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1695 tg3_generate_fw_event(tp
);
1698 /* tp->lock is held. */
1699 static void tg3_stop_fw(struct tg3
*tp
)
1701 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
1702 /* Wait for RX cpu to ACK the previous event. */
1703 tg3_wait_for_event_ack(tp
);
1705 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
1707 tg3_generate_fw_event(tp
);
1709 /* Wait for RX cpu to ACK this event. */
1710 tg3_wait_for_event_ack(tp
);
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1717 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1718 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1720 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1722 case RESET_KIND_INIT
:
1723 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1727 case RESET_KIND_SHUTDOWN
:
1728 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1732 case RESET_KIND_SUSPEND
:
1733 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1742 if (kind
== RESET_KIND_INIT
||
1743 kind
== RESET_KIND_SUSPEND
)
1744 tg3_ape_driver_state_change(tp
, kind
);
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1750 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1752 case RESET_KIND_INIT
:
1753 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1754 DRV_STATE_START_DONE
);
1757 case RESET_KIND_SHUTDOWN
:
1758 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1759 DRV_STATE_UNLOAD_DONE
);
1767 if (kind
== RESET_KIND_SHUTDOWN
)
1768 tg3_ape_driver_state_change(tp
, kind
);
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1774 if (tg3_flag(tp
, ENABLE_ASF
)) {
1776 case RESET_KIND_INIT
:
1777 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1781 case RESET_KIND_SHUTDOWN
:
1782 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1786 case RESET_KIND_SUSPEND
:
1787 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1797 static int tg3_poll_fw(struct tg3
*tp
)
1802 if (tg3_flag(tp
, IS_SSB_CORE
)) {
1803 /* We don't use firmware. */
1807 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
1808 /* Wait up to 20ms for init done. */
1809 for (i
= 0; i
< 200; i
++) {
1810 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
1817 /* Wait for firmware initialization to complete. */
1818 for (i
= 0; i
< 100000; i
++) {
1819 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
1820 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1825 /* Chip might not be fitted with firmware. Some Sun onboard
1826 * parts are configured like that. So don't signal the timeout
1827 * of the above loop as an error, but do report the lack of
1828 * running firmware once.
1830 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1831 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1833 netdev_info(tp
->dev
, "No firmware running\n");
1836 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
) {
1837 /* The 57765 A0 needs a little more
1838 * time to do some important work.
1846 static void tg3_link_report(struct tg3
*tp
)
1848 if (!netif_carrier_ok(tp
->dev
)) {
1849 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1850 tg3_ump_link_report(tp
);
1851 } else if (netif_msg_link(tp
)) {
1852 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1853 (tp
->link_config
.active_speed
== SPEED_1000
?
1855 (tp
->link_config
.active_speed
== SPEED_100
?
1857 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1860 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1861 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1863 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1866 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
1867 netdev_info(tp
->dev
, "EEE is %s\n",
1868 tp
->setlpicnt
? "enabled" : "disabled");
1870 tg3_ump_link_report(tp
);
1873 tp
->link_up
= netif_carrier_ok(tp
->dev
);
1876 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1880 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1881 miireg
= ADVERTISE_1000XPAUSE
;
1882 else if (flow_ctrl
& FLOW_CTRL_TX
)
1883 miireg
= ADVERTISE_1000XPSE_ASYM
;
1884 else if (flow_ctrl
& FLOW_CTRL_RX
)
1885 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1892 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1896 if (lcladv
& rmtadv
& ADVERTISE_1000XPAUSE
) {
1897 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1898 } else if (lcladv
& rmtadv
& ADVERTISE_1000XPSE_ASYM
) {
1899 if (lcladv
& ADVERTISE_1000XPAUSE
)
1901 if (rmtadv
& ADVERTISE_1000XPAUSE
)
1908 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1912 u32 old_rx_mode
= tp
->rx_mode
;
1913 u32 old_tx_mode
= tp
->tx_mode
;
1915 if (tg3_flag(tp
, USE_PHYLIB
))
1916 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1918 autoneg
= tp
->link_config
.autoneg
;
1920 if (autoneg
== AUTONEG_ENABLE
&& tg3_flag(tp
, PAUSE_AUTONEG
)) {
1921 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1922 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1924 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1926 flowctrl
= tp
->link_config
.flowctrl
;
1928 tp
->link_config
.active_flowctrl
= flowctrl
;
1930 if (flowctrl
& FLOW_CTRL_RX
)
1931 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1933 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1935 if (old_rx_mode
!= tp
->rx_mode
)
1936 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1938 if (flowctrl
& FLOW_CTRL_TX
)
1939 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1941 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1943 if (old_tx_mode
!= tp
->tx_mode
)
1944 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1947 static void tg3_adjust_link(struct net_device
*dev
)
1949 u8 oldflowctrl
, linkmesg
= 0;
1950 u32 mac_mode
, lcl_adv
, rmt_adv
;
1951 struct tg3
*tp
= netdev_priv(dev
);
1952 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1954 spin_lock_bh(&tp
->lock
);
1956 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
1957 MAC_MODE_HALF_DUPLEX
);
1959 oldflowctrl
= tp
->link_config
.active_flowctrl
;
1965 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
1966 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1967 else if (phydev
->speed
== SPEED_1000
||
1968 tg3_asic_rev(tp
) != ASIC_REV_5785
)
1969 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1971 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1973 if (phydev
->duplex
== DUPLEX_HALF
)
1974 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1976 lcl_adv
= mii_advertise_flowctrl(
1977 tp
->link_config
.flowctrl
);
1980 rmt_adv
= LPA_PAUSE_CAP
;
1981 if (phydev
->asym_pause
)
1982 rmt_adv
|= LPA_PAUSE_ASYM
;
1985 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
1987 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1989 if (mac_mode
!= tp
->mac_mode
) {
1990 tp
->mac_mode
= mac_mode
;
1991 tw32_f(MAC_MODE
, tp
->mac_mode
);
1995 if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
1996 if (phydev
->speed
== SPEED_10
)
1998 MAC_MI_STAT_10MBPS_MODE
|
1999 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
2001 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
2004 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
2005 tw32(MAC_TX_LENGTHS
,
2006 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2007 (6 << TX_LENGTHS_IPG_SHIFT
) |
2008 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2010 tw32(MAC_TX_LENGTHS
,
2011 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2012 (6 << TX_LENGTHS_IPG_SHIFT
) |
2013 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2015 if (phydev
->link
!= tp
->old_link
||
2016 phydev
->speed
!= tp
->link_config
.active_speed
||
2017 phydev
->duplex
!= tp
->link_config
.active_duplex
||
2018 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
2021 tp
->old_link
= phydev
->link
;
2022 tp
->link_config
.active_speed
= phydev
->speed
;
2023 tp
->link_config
.active_duplex
= phydev
->duplex
;
2025 spin_unlock_bh(&tp
->lock
);
2028 tg3_link_report(tp
);
2031 static int tg3_phy_init(struct tg3
*tp
)
2033 struct phy_device
*phydev
;
2035 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
2038 /* Bring the PHY back to a known state. */
2041 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2043 /* Attach the MAC to the PHY. */
2044 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
),
2045 tg3_adjust_link
, phydev
->interface
);
2046 if (IS_ERR(phydev
)) {
2047 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
2048 return PTR_ERR(phydev
);
2051 /* Mask with MAC supported features. */
2052 switch (phydev
->interface
) {
2053 case PHY_INTERFACE_MODE_GMII
:
2054 case PHY_INTERFACE_MODE_RGMII
:
2055 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
2056 phydev
->supported
&= (PHY_GBIT_FEATURES
|
2058 SUPPORTED_Asym_Pause
);
2062 case PHY_INTERFACE_MODE_MII
:
2063 phydev
->supported
&= (PHY_BASIC_FEATURES
|
2065 SUPPORTED_Asym_Pause
);
2068 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2072 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
2074 phydev
->advertising
= phydev
->supported
;
2079 static void tg3_phy_start(struct tg3
*tp
)
2081 struct phy_device
*phydev
;
2083 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2086 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2088 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
2089 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
2090 phydev
->speed
= tp
->link_config
.speed
;
2091 phydev
->duplex
= tp
->link_config
.duplex
;
2092 phydev
->autoneg
= tp
->link_config
.autoneg
;
2093 phydev
->advertising
= tp
->link_config
.advertising
;
2098 phy_start_aneg(phydev
);
2101 static void tg3_phy_stop(struct tg3
*tp
)
2103 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2106 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2109 static void tg3_phy_fini(struct tg3
*tp
)
2111 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
2112 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2113 tp
->phy_flags
&= ~TG3_PHYFLG_IS_CONNECTED
;
2117 static int tg3_phy_set_extloopbk(struct tg3
*tp
)
2122 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
2125 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2126 /* Cannot do read-modify-write on 5401 */
2127 err
= tg3_phy_auxctl_write(tp
,
2128 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2129 MII_TG3_AUXCTL_ACTL_EXTLOOPBK
|
2134 err
= tg3_phy_auxctl_read(tp
,
2135 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2139 val
|= MII_TG3_AUXCTL_ACTL_EXTLOOPBK
;
2140 err
= tg3_phy_auxctl_write(tp
,
2141 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, val
);
2147 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
2151 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2154 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2155 phytest
| MII_TG3_FET_SHADOW_EN
);
2156 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2158 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2160 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2161 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2163 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
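/* EEE (Energy Efficient Ethernet) handling: when the link is up at 100 or
 * 1000 Mbps full duplex with autoneg, program the CPMU LPI exit timer and
 * check whether the link partner resolved EEE; otherwise make sure LPI is
 * kept disabled in the CPMU EEE mode register.
 */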
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
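/* The next two helpers drive the PHY DSP through MII_TG3_DSP_ADDRESS,
 * MII_TG3_DSP_CONTROL and MII_TG3_DSP_RW_PORT: a test pattern is written
 * to each of the four channels, read back and compared, and the channel
 * registers are cleared again afterwards.  Any mismatch or macro timeout
 * asks the caller (via *resetp) to reset the PHY and retry.
 */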
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
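/* Each PCI function owns a 4-bit nibble within the shared GPIO message
 * field (function 0 in bits 0-3, function 1 in bits 4-7, and so on),
 * holding the DRVR_PRES and NEED_VAUX flags defined above.
 * tg3_set_function_status() replaces only this function's nibble and
 * returns the combined field so the caller can see what the other ports
 * have requested.
 */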
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2741 static void tg3_pwrsrc_die_with_vmain(struct tg3
*tp
)
2745 if (!tg3_flag(tp
, IS_NIC
) ||
2746 tg3_asic_rev(tp
) == ASIC_REV_5700
||
2747 tg3_asic_rev(tp
) == ASIC_REV_5701
)
2750 grc_local_ctrl
= tp
->grc_local_ctrl
| GRC_LCLCTRL_GPIO_OE1
;
2752 tw32_wait_f(GRC_LOCAL_CTRL
,
2753 grc_local_ctrl
| GRC_LCLCTRL_GPIO_OUTPUT1
,
2754 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2756 tw32_wait_f(GRC_LOCAL_CTRL
,
2758 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2760 tw32_wait_f(GRC_LOCAL_CTRL
,
2761 grc_local_ctrl
| GRC_LCLCTRL_GPIO_OUTPUT1
,
2762 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2765 static void tg3_pwrsrc_switch_to_vaux(struct tg3
*tp
)
2767 if (!tg3_flag(tp
, IS_NIC
))
2770 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
2771 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
2772 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2773 (GRC_LCLCTRL_GPIO_OE0
|
2774 GRC_LCLCTRL_GPIO_OE1
|
2775 GRC_LCLCTRL_GPIO_OE2
|
2776 GRC_LCLCTRL_GPIO_OUTPUT0
|
2777 GRC_LCLCTRL_GPIO_OUTPUT1
),
2778 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2779 } else if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
2780 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
2781 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2782 u32 grc_local_ctrl
= GRC_LCLCTRL_GPIO_OE0
|
2783 GRC_LCLCTRL_GPIO_OE1
|
2784 GRC_LCLCTRL_GPIO_OE2
|
2785 GRC_LCLCTRL_GPIO_OUTPUT0
|
2786 GRC_LCLCTRL_GPIO_OUTPUT1
|
2788 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2789 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2791 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT2
;
2792 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2793 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2795 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT0
;
2796 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2800 u32 grc_local_ctrl
= 0;
2802 /* Workaround to prevent overdrawing Amps. */
2803 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
2804 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
2805 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2807 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2810 /* On 5753 and variants, GPIO2 cannot be used. */
2811 no_gpio2
= tp
->nic_sram_data_cfg
&
2812 NIC_SRAM_DATA_CFG_NO_GPIO2
;
2814 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
2815 GRC_LCLCTRL_GPIO_OE1
|
2816 GRC_LCLCTRL_GPIO_OE2
|
2817 GRC_LCLCTRL_GPIO_OUTPUT1
|
2818 GRC_LCLCTRL_GPIO_OUTPUT2
;
2820 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
2821 GRC_LCLCTRL_GPIO_OUTPUT2
);
2823 tw32_wait_f(GRC_LOCAL_CTRL
,
2824 tp
->grc_local_ctrl
| grc_local_ctrl
,
2825 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2827 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
2829 tw32_wait_f(GRC_LOCAL_CTRL
,
2830 tp
->grc_local_ctrl
| grc_local_ctrl
,
2831 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2834 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
2835 tw32_wait_f(GRC_LOCAL_CTRL
,
2836 tp
->grc_local_ctrl
| grc_local_ctrl
,
2837 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2842 static void tg3_frob_aux_power_5717(struct tg3
*tp
, bool wol_enable
)
2846 /* Serialize power state transitions */
2847 if (tg3_ape_lock(tp
, TG3_APE_LOCK_GPIO
))
2850 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
) || wol_enable
)
2851 msg
= TG3_GPIO_MSG_NEED_VAUX
;
2853 msg
= tg3_set_function_status(tp
, msg
);
2855 if (msg
& TG3_GPIO_MSG_ALL_DRVR_PRES_MASK
)
2858 if (msg
& TG3_GPIO_MSG_ALL_NEED_VAUX_MASK
)
2859 tg3_pwrsrc_switch_to_vaux(tp
);
2861 tg3_pwrsrc_die_with_vmain(tp
);
2864 tg3_ape_unlock(tp
, TG3_APE_LOCK_GPIO
);
2867 static void tg3_frob_aux_power(struct tg3
*tp
, bool include_wol
)
2869 bool need_vaux
= false;
2871 /* The GPIOs do something completely different on 57765. */
2872 if (!tg3_flag(tp
, IS_NIC
) || tg3_flag(tp
, 57765_CLASS
))
2875 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
2876 tg3_asic_rev(tp
) == ASIC_REV_5719
||
2877 tg3_asic_rev(tp
) == ASIC_REV_5720
) {
2878 tg3_frob_aux_power_5717(tp
, include_wol
?
2879 tg3_flag(tp
, WOL_ENABLE
) != 0 : 0);
2883 if (tp
->pdev_peer
&& tp
->pdev_peer
!= tp
->pdev
) {
2884 struct net_device
*dev_peer
;
2886 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
2888 /* remove_one() may have been run on the peer. */
2890 struct tg3
*tp_peer
= netdev_priv(dev_peer
);
2892 if (tg3_flag(tp_peer
, INIT_COMPLETE
))
2895 if ((include_wol
&& tg3_flag(tp_peer
, WOL_ENABLE
)) ||
2896 tg3_flag(tp_peer
, ENABLE_ASF
))
2901 if ((include_wol
&& tg3_flag(tp
, WOL_ENABLE
)) ||
2902 tg3_flag(tp
, ENABLE_ASF
))
2906 tg3_pwrsrc_switch_to_vaux(tp
);
2908 tg3_pwrsrc_die_with_vmain(tp
);
2911 static int tg3_5700_link_polarity(struct tg3
*tp
, u32 speed
)
2913 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_2
)
2915 else if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
) {
2916 if (speed
!= SPEED_10
)
2918 } else if (speed
== SPEED_10
)
2924 static void tg3_power_down_phy(struct tg3
*tp
, bool do_low_power
)
2928 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
2929 if (tg3_asic_rev(tp
) == ASIC_REV_5704
) {
2930 u32 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2931 u32 serdes_cfg
= tr32(MAC_SERDES_CFG
);
2934 SG_DIG_USING_HW_AUTONEG
| SG_DIG_SOFT_RESET
;
2935 tw32(SG_DIG_CTRL
, sg_dig_ctrl
);
2936 tw32(MAC_SERDES_CFG
, serdes_cfg
| (1 << 15));
2941 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
2943 val
= tr32(GRC_MISC_CFG
);
2944 tw32_f(GRC_MISC_CFG
, val
| GRC_MISC_CFG_EPHY_IDDQ
);
2947 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2949 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2952 tg3_writephy(tp
, MII_ADVERTISE
, 0);
2953 tg3_writephy(tp
, MII_BMCR
,
2954 BMCR_ANENABLE
| BMCR_ANRESTART
);
2956 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2957 phytest
| MII_TG3_FET_SHADOW_EN
);
2958 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXMODE4
, &phy
)) {
2959 phy
|= MII_TG3_FET_SHDW_AUXMODE4_SBPD
;
2961 MII_TG3_FET_SHDW_AUXMODE4
,
2964 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
2967 } else if (do_low_power
) {
2968 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2969 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
2971 val
= MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
2972 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE
|
2973 MII_TG3_AUXCTL_PCTL_VREG_11V
;
2974 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, val
);
2977 /* The PHY should not be powered down on some chips because
2980 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
2981 tg3_asic_rev(tp
) == ASIC_REV_5704
||
2982 (tg3_asic_rev(tp
) == ASIC_REV_5780
&&
2983 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) ||
2984 (tg3_asic_rev(tp
) == ASIC_REV_5717
&&
2988 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
||
2989 tg3_chip_rev(tp
) == CHIPREV_5761_AX
) {
2990 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2991 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2992 val
|= CPMU_LSPD_1000MB_MACCLK_12_5
;
2993 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2996 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
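/* NVRAM access is arbitrated between the driver and the bootcode/ASF
 * firmware through the NVRAM_SWARB register.  tg3_nvram_lock() and
 * tg3_nvram_unlock() keep a nesting count in tp->nvram_lock_cnt so the
 * hardware arbitration request is only issued and cleared at the
 * outermost level.
 */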
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
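/* Boards without a serial-flash style NVRAM fall back to the legacy
 * SEEPROM interface: the offset is written to GRC_EEPROM_ADDR together
 * with a START bit, the driver polls for EEPROM_ADDR_COMPLETE, and the
 * 32-bit result is taken from GRC_EEPROM_DATA.
 */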
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
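/* The Atmel AT45DB0x1B parts are not linearly byte addressed: the NVRAM
 * offset has to be split into a page number and an offset within the page,
 * with the page number shifted up to ATMEL_AT45DB0X1B_PAGE_POS.  As an
 * illustration (assuming the 264-byte page size these parts report via
 * tp->nvram_pagesize), offset 1000 maps to page 3, byte 208, i.e.
 * (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.  tg3_nvram_logical_addr() below
 * performs the inverse mapping.
 */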
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
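/* Callers that need a whole block in bytestream order typically just loop
 * over tg3_nvram_read_be32(), e.g. (illustrative sketch only, not a helper
 * defined in this driver):
 *
 *	__be32 *p = (__be32 *)buf;
 *	for (i = 0; i < len; i += 4, p++)
 *		if (tg3_nvram_read_be32(tp, offset + i, p))
 *			break;
 */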
3192 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
3193 u32 offset
, u32 len
, u8
*buf
)
3198 for (i
= 0; i
< len
; i
+= 4) {
3204 memcpy(&data
, buf
+ i
, 4);
3207 * The SEEPROM interface expects the data to always be opposite
3208 * the native endian format. We accomplish this by reversing
3209 * all the operations that would have been performed on the
3210 * data from a call to tg3_nvram_read_be32().
3212 tw32(GRC_EEPROM_DATA
, swab32(be32_to_cpu(data
)));
3214 val
= tr32(GRC_EEPROM_ADDR
);
3215 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
3217 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
3219 tw32(GRC_EEPROM_ADDR
, val
|
3220 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
3221 (addr
& EEPROM_ADDR_ADDR_MASK
) |
3225 for (j
= 0; j
< 1000; j
++) {
3226 val
= tr32(GRC_EEPROM_ADDR
);
3228 if (val
& EEPROM_ADDR_COMPLETE
)
3232 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
3241 /* offset and length are dword aligned */
3242 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
3246 u32 pagesize
= tp
->nvram_pagesize
;
3247 u32 pagemask
= pagesize
- 1;
3251 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
3257 u32 phy_addr
, page_off
, size
;
3259 phy_addr
= offset
& ~pagemask
;
3261 for (j
= 0; j
< pagesize
; j
+= 4) {
3262 ret
= tg3_nvram_read_be32(tp
, phy_addr
+ j
,
3263 (__be32
*) (tmp
+ j
));
3270 page_off
= offset
& pagemask
;
3277 memcpy(tmp
+ page_off
, buf
, size
);
3279 offset
= offset
+ (pagesize
- page_off
);
3281 tg3_enable_nvram_access(tp
);
3284 * Before we can erase the flash page, we need
3285 * to issue a special "write enable" command.
3287 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3289 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3292 /* Erase the target page */
3293 tw32(NVRAM_ADDR
, phy_addr
);
3295 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
3296 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
3298 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3301 /* Issue another write enable to start the write. */
3302 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3304 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3307 for (j
= 0; j
< pagesize
; j
+= 4) {
3310 data
= *((__be32
*) (tmp
+ j
));
3312 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3314 tw32(NVRAM_ADDR
, phy_addr
+ j
);
3316 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
3320 nvram_cmd
|= NVRAM_CMD_FIRST
;
3321 else if (j
== (pagesize
- 4))
3322 nvram_cmd
|= NVRAM_CMD_LAST
;
3324 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3332 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3333 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3340 /* offset and length are dword aligned */
3341 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
3346 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
3347 u32 page_off
, phy_addr
, nvram_cmd
;
3350 memcpy(&data
, buf
+ i
, 4);
3351 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3353 page_off
= offset
% tp
->nvram_pagesize
;
3355 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
3357 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
3359 if (page_off
== 0 || i
== 0)
3360 nvram_cmd
|= NVRAM_CMD_FIRST
;
3361 if (page_off
== (tp
->nvram_pagesize
- 4))
3362 nvram_cmd
|= NVRAM_CMD_LAST
;
3365 nvram_cmd
|= NVRAM_CMD_LAST
;
3367 if ((nvram_cmd
& NVRAM_CMD_FIRST
) ||
3368 !tg3_flag(tp
, FLASH
) ||
3369 !tg3_flag(tp
, 57765_PLUS
))
3370 tw32(NVRAM_ADDR
, phy_addr
);
3372 if (tg3_asic_rev(tp
) != ASIC_REV_5752
&&
3373 !tg3_flag(tp
, 5755_PLUS
) &&
3374 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
3375 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
3378 cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3379 ret
= tg3_nvram_exec_cmd(tp
, cmd
);
3383 if (!tg3_flag(tp
, FLASH
)) {
3384 /* We always do complete word writes to eeprom. */
3385 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
3388 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3395 /* offset and length are dword aligned */
3396 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
3400 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
3401 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
3402 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
3406 if (!tg3_flag(tp
, NVRAM
)) {
3407 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
3411 ret
= tg3_nvram_lock(tp
);
3415 tg3_enable_nvram_access(tp
);
3416 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
))
3417 tw32(NVRAM_WRITE1
, 0x406);
3419 grc_mode
= tr32(GRC_MODE
);
3420 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
3422 if (tg3_flag(tp
, NVRAM_BUFFERED
) || !tg3_flag(tp
, FLASH
)) {
3423 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
3426 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
3430 grc_mode
= tr32(GRC_MODE
);
3431 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
3433 tg3_disable_nvram_access(tp
);
3434 tg3_nvram_unlock(tp
);
3437 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
3438 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
3445 #define RX_CPU_SCRATCH_BASE 0x30000
3446 #define RX_CPU_SCRATCH_SIZE 0x04000
3447 #define TX_CPU_SCRATCH_BASE 0x34000
3448 #define TX_CPU_SCRATCH_SIZE 0x04000
3450 /* tp->lock is held. */
3451 static int tg3_halt_cpu(struct tg3
*tp
, u32 offset
)
3455 BUG_ON(offset
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
));
3457 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
3458 u32 val
= tr32(GRC_VCPU_EXT_CTRL
);
3460 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_HALT_CPU
);
3463 if (offset
== RX_CPU_BASE
) {
3464 for (i
= 0; i
< 10000; i
++) {
3465 tw32(offset
+ CPU_STATE
, 0xffffffff);
3466 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
3467 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
3471 tw32(offset
+ CPU_STATE
, 0xffffffff);
3472 tw32_f(offset
+ CPU_MODE
, CPU_MODE_HALT
);
3476 * There is only an Rx CPU for the 5750 derivative in the
3479 if (tg3_flag(tp
, IS_SSB_CORE
))
3482 for (i
= 0; i
< 10000; i
++) {
3483 tw32(offset
+ CPU_STATE
, 0xffffffff);
3484 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
3485 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
3491 netdev_err(tp
->dev
, "%s timed out, %s CPU\n",
3492 __func__
, offset
== RX_CPU_BASE
? "RX" : "TX");
3496 /* Clear firmware's nvram arbitration. */
3497 if (tg3_flag(tp
, NVRAM
))
3498 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
3503 unsigned int fw_base
;
3504 unsigned int fw_len
;
3505 const __be32
*fw_data
;
3508 /* tp->lock is held. */
3509 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
,
3510 u32 cpu_scratch_base
, int cpu_scratch_size
,
3511 struct fw_info
*info
)
3513 int err
, lock_err
, i
;
3514 void (*write_op
)(struct tg3
*, u32
, u32
);
3516 if (cpu_base
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
)) {
3518 "%s: Trying to load TX cpu firmware which is 5705\n",
3523 if (tg3_flag(tp
, 5705_PLUS
))
3524 write_op
= tg3_write_mem
;
3526 write_op
= tg3_write_indirect_reg32
;
3528 /* It is possible that bootcode is still loading at this point.
3529 * Get the nvram lock first before halting the cpu.
3531 lock_err
= tg3_nvram_lock(tp
);
3532 err
= tg3_halt_cpu(tp
, cpu_base
);
3534 tg3_nvram_unlock(tp
);
3538 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
3539 write_op(tp
, cpu_scratch_base
+ i
, 0);
3540 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3541 tw32(cpu_base
+ CPU_MODE
, tr32(cpu_base
+CPU_MODE
)|CPU_MODE_HALT
);
3542 for (i
= 0; i
< (info
->fw_len
/ sizeof(u32
)); i
++)
3543 write_op(tp
, (cpu_scratch_base
+
3544 (info
->fw_base
& 0xffff) +
3546 be32_to_cpu(info
->fw_data
[i
]));
3554 /* tp->lock is held. */
3555 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
3557 struct fw_info info
;
3558 const __be32
*fw_data
;
3561 fw_data
= (void *)tp
->fw
->data
;
3563 /* Firmware blob starts with version numbers, followed by
3564 start address and length. We are setting complete length.
3565 length = end_address_of_bss - start_address_of_text.
3566 Remainder is the blob to be loaded contiguously
3567 from start address. */
3569 info
.fw_base
= be32_to_cpu(fw_data
[1]);
3570 info
.fw_len
= tp
->fw
->size
- 12;
3571 info
.fw_data
= &fw_data
[3];
3573 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
3574 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
3579 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
3580 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
3585 /* Now startup only the RX cpu. */
3586 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3587 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
3589 for (i
= 0; i
< 5; i
++) {
3590 if (tr32(RX_CPU_BASE
+ CPU_PC
) == info
.fw_base
)
3592 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3593 tw32(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
3594 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
3598 netdev_err(tp
->dev
, "%s fails to set RX CPU PC, is %08x "
3599 "should be %08x\n", __func__
,
3600 tr32(RX_CPU_BASE
+ CPU_PC
), info
.fw_base
);
3603 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3604 tw32_f(RX_CPU_BASE
+ CPU_MODE
, 0x00000000);
3609 /* tp->lock is held. */
3610 static int tg3_load_tso_firmware(struct tg3
*tp
)
3612 struct fw_info info
;
3613 const __be32
*fw_data
;
3614 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
3617 if (tg3_flag(tp
, HW_TSO_1
) ||
3618 tg3_flag(tp
, HW_TSO_2
) ||
3619 tg3_flag(tp
, HW_TSO_3
))
3622 fw_data
= (void *)tp
->fw
->data
;
3624 /* Firmware blob starts with version numbers, followed by
3625 start address and length. We are setting complete length.
3626 length = end_address_of_bss - start_address_of_text.
3627 Remainder is the blob to be loaded contiguously
3628 from start address. */
3630 info
.fw_base
= be32_to_cpu(fw_data
[1]);
3631 cpu_scratch_size
= tp
->fw_len
;
3632 info
.fw_len
= tp
->fw
->size
- 12;
3633 info
.fw_data
= &fw_data
[3];
3635 if (tg3_asic_rev(tp
) == ASIC_REV_5705
) {
3636 cpu_base
= RX_CPU_BASE
;
3637 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
3639 cpu_base
= TX_CPU_BASE
;
3640 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
3641 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
3644 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
3645 cpu_scratch_base
, cpu_scratch_size
,
3650 /* Now startup the cpu. */
3651 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3652 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
3654 for (i
= 0; i
< 5; i
++) {
3655 if (tr32(cpu_base
+ CPU_PC
) == info
.fw_base
)
3657 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3658 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
3659 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
3664 "%s fails to set CPU PC, is %08x should be %08x\n",
3665 __func__
, tr32(cpu_base
+ CPU_PC
), info
.fw_base
);
3668 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3669 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
3674 /* tp->lock is held. */
3675 static void __tg3_set_mac_addr(struct tg3
*tp
, int skip_mac_1
)
3677 u32 addr_high
, addr_low
;
3680 addr_high
= ((tp
->dev
->dev_addr
[0] << 8) |
3681 tp
->dev
->dev_addr
[1]);
3682 addr_low
= ((tp
->dev
->dev_addr
[2] << 24) |
3683 (tp
->dev
->dev_addr
[3] << 16) |
3684 (tp
->dev
->dev_addr
[4] << 8) |
3685 (tp
->dev
->dev_addr
[5] << 0));
3686 for (i
= 0; i
< 4; i
++) {
3687 if (i
== 1 && skip_mac_1
)
3689 tw32(MAC_ADDR_0_HIGH
+ (i
* 8), addr_high
);
3690 tw32(MAC_ADDR_0_LOW
+ (i
* 8), addr_low
);
3693 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
3694 tg3_asic_rev(tp
) == ASIC_REV_5704
) {
3695 for (i
= 0; i
< 12; i
++) {
3696 tw32(MAC_EXTADDR_0_HIGH
+ (i
* 8), addr_high
);
3697 tw32(MAC_EXTADDR_0_LOW
+ (i
* 8), addr_low
);
3701 addr_high
= (tp
->dev
->dev_addr
[0] +
3702 tp
->dev
->dev_addr
[1] +
3703 tp
->dev
->dev_addr
[2] +
3704 tp
->dev
->dev_addr
[3] +
3705 tp
->dev
->dev_addr
[4] +
3706 tp
->dev
->dev_addr
[5]) &
3707 TX_BACKOFF_SEED_MASK
;
3708 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
3711 static void tg3_enable_register_access(struct tg3
*tp
)
3714 * Make sure register accesses (indirect or otherwise) will function
3717 pci_write_config_dword(tp
->pdev
,
3718 TG3PCI_MISC_HOST_CTRL
, tp
->misc_host_ctrl
);
3721 static int tg3_power_up(struct tg3
*tp
)
3725 tg3_enable_register_access(tp
);
3727 err
= pci_set_power_state(tp
->pdev
, PCI_D0
);
3729 /* Switch out of Vaux if it is a NIC */
3730 tg3_pwrsrc_switch_to_vmain(tp
);
3732 netdev_err(tp
->dev
, "Transition to D0 failed\n");
3738 static int tg3_setup_phy(struct tg3
*, int);
3740 static int tg3_power_down_prepare(struct tg3
*tp
)
3743 bool device_should_wake
, do_low_power
;
3745 tg3_enable_register_access(tp
);
3747 /* Restore the CLKREQ setting. */
3748 if (tg3_flag(tp
, CLKREQ_BUG
))
3749 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
3750 PCI_EXP_LNKCTL_CLKREQ_EN
);
3752 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
3753 tw32(TG3PCI_MISC_HOST_CTRL
,
3754 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
3756 device_should_wake
= device_may_wakeup(&tp
->pdev
->dev
) &&
3757 tg3_flag(tp
, WOL_ENABLE
);
3759 if (tg3_flag(tp
, USE_PHYLIB
)) {
3760 do_low_power
= false;
3761 if ((tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) &&
3762 !(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
3763 struct phy_device
*phydev
;
3764 u32 phyid
, advertising
;
3766 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
3768 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
3770 tp
->link_config
.speed
= phydev
->speed
;
3771 tp
->link_config
.duplex
= phydev
->duplex
;
3772 tp
->link_config
.autoneg
= phydev
->autoneg
;
3773 tp
->link_config
.advertising
= phydev
->advertising
;
3775 advertising
= ADVERTISED_TP
|
3777 ADVERTISED_Autoneg
|
3778 ADVERTISED_10baseT_Half
;
3780 if (tg3_flag(tp
, ENABLE_ASF
) || device_should_wake
) {
3781 if (tg3_flag(tp
, WOL_SPEED_100MB
))
3783 ADVERTISED_100baseT_Half
|
3784 ADVERTISED_100baseT_Full
|
3785 ADVERTISED_10baseT_Full
;
3787 advertising
|= ADVERTISED_10baseT_Full
;
3790 phydev
->advertising
= advertising
;
3792 phy_start_aneg(phydev
);
3794 phyid
= phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
;
3795 if (phyid
!= PHY_ID_BCMAC131
) {
3796 phyid
&= PHY_BCM_OUI_MASK
;
3797 if (phyid
== PHY_BCM_OUI_1
||
3798 phyid
== PHY_BCM_OUI_2
||
3799 phyid
== PHY_BCM_OUI_3
)
3800 do_low_power
= true;
3804 do_low_power
= true;
3806 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
))
3807 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
3809 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
3810 tg3_setup_phy(tp
, 0);
3813 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
3816 val
= tr32(GRC_VCPU_EXT_CTRL
);
3817 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
3818 } else if (!tg3_flag(tp
, ENABLE_ASF
)) {
3822 for (i
= 0; i
< 200; i
++) {
3823 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
3824 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
3829 if (tg3_flag(tp
, WOL_CAP
))
3830 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
3831 WOL_DRV_STATE_SHUTDOWN
|
3835 if (device_should_wake
) {
3838 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
3840 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
3841 tg3_phy_auxctl_write(tp
,
3842 MII_TG3_AUXCTL_SHDWSEL_PWRCTL
,
3843 MII_TG3_AUXCTL_PCTL_WOL_EN
|
3844 MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
3845 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC
);
3849 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
3850 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
3852 mac_mode
= MAC_MODE_PORT_MODE_MII
;
3854 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
3855 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
3856 u32 speed
= tg3_flag(tp
, WOL_SPEED_100MB
) ?
3857 SPEED_100
: SPEED_10
;
3858 if (tg3_5700_link_polarity(tp
, speed
))
3859 mac_mode
|= MAC_MODE_LINK_POLARITY
;
3861 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
3864 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
3867 if (!tg3_flag(tp
, 5750_PLUS
))
3868 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
3870 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
3871 if ((tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
)) &&
3872 (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)))
3873 mac_mode
|= MAC_MODE_KEEP_FRAME_IN_WOL
;
3875 if (tg3_flag(tp
, ENABLE_APE
))
3876 mac_mode
|= MAC_MODE_APE_TX_EN
|
3877 MAC_MODE_APE_RX_EN
|
3878 MAC_MODE_TDE_ENABLE
;
3880 tw32_f(MAC_MODE
, mac_mode
);
3883 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
3887 if (!tg3_flag(tp
, WOL_SPEED_100MB
) &&
3888 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
3889 tg3_asic_rev(tp
) == ASIC_REV_5701
)) {
3892 base_val
= tp
->pci_clock_ctrl
;
3893 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
3894 CLOCK_CTRL_TXCLK_DISABLE
);
3896 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
3897 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
3898 } else if (tg3_flag(tp
, 5780_CLASS
) ||
3899 tg3_flag(tp
, CPMU_PRESENT
) ||
3900 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
3902 } else if (!(tg3_flag(tp
, 5750_PLUS
) && tg3_flag(tp
, ENABLE_ASF
))) {
3903 u32 newbits1
, newbits2
;
3905 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
3906 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
3907 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
3908 CLOCK_CTRL_TXCLK_DISABLE
|
3910 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
3911 } else if (tg3_flag(tp
, 5705_PLUS
)) {
3912 newbits1
= CLOCK_CTRL_625_CORE
;
3913 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
3915 newbits1
= CLOCK_CTRL_ALTCLK
;
3916 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
3919 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
3922 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
3925 if (!tg3_flag(tp
, 5705_PLUS
)) {
3928 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
3929 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
3930 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
3931 CLOCK_CTRL_TXCLK_DISABLE
|
3932 CLOCK_CTRL_44MHZ_CORE
);
3934 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
3937 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
3938 tp
->pci_clock_ctrl
| newbits3
, 40);
3942 if (!(device_should_wake
) && !tg3_flag(tp
, ENABLE_ASF
))
3943 tg3_power_down_phy(tp
, do_low_power
);
3945 tg3_frob_aux_power(tp
, true);
3947 /* Workaround for unstable PLL clock */
3948 if ((!tg3_flag(tp
, IS_SSB_CORE
)) &&
3949 ((tg3_chip_rev(tp
) == CHIPREV_5750_AX
) ||
3950 (tg3_chip_rev(tp
) == CHIPREV_5750_BX
))) {
3951 u32 val
= tr32(0x7d00);
3953 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3955 if (!tg3_flag(tp
, ENABLE_ASF
)) {
3958 err
= tg3_nvram_lock(tp
);
3959 tg3_halt_cpu(tp
, RX_CPU_BASE
);
3961 tg3_nvram_unlock(tp
);
3965 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
3970 static void tg3_power_down(struct tg3
*tp
)
3972 tg3_power_down_prepare(tp
);
3974 pci_wake_from_d3(tp
->pdev
, tg3_flag(tp
, WOL_ENABLE
));
3975 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
3978 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
3980 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
3981 case MII_TG3_AUX_STAT_10HALF
:
3983 *duplex
= DUPLEX_HALF
;
3986 case MII_TG3_AUX_STAT_10FULL
:
3988 *duplex
= DUPLEX_FULL
;
3991 case MII_TG3_AUX_STAT_100HALF
:
3993 *duplex
= DUPLEX_HALF
;
3996 case MII_TG3_AUX_STAT_100FULL
:
3998 *duplex
= DUPLEX_FULL
;
4001 case MII_TG3_AUX_STAT_1000HALF
:
4002 *speed
= SPEED_1000
;
4003 *duplex
= DUPLEX_HALF
;
4006 case MII_TG3_AUX_STAT_1000FULL
:
4007 *speed
= SPEED_1000
;
4008 *duplex
= DUPLEX_FULL
;
4012 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4013 *speed
= (val
& MII_TG3_AUX_STAT_100
) ? SPEED_100
:
4015 *duplex
= (val
& MII_TG3_AUX_STAT_FULL
) ? DUPLEX_FULL
:
4019 *speed
= SPEED_UNKNOWN
;
4020 *duplex
= DUPLEX_UNKNOWN
;
4025 static int tg3_phy_autoneg_cfg(struct tg3
*tp
, u32 advertise
, u32 flowctrl
)
4030 new_adv
= ADVERTISE_CSMA
;
4031 new_adv
|= ethtool_adv_to_mii_adv_t(advertise
) & ADVERTISE_ALL
;
4032 new_adv
|= mii_advertise_flowctrl(flowctrl
);
4034 err
= tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
4038 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4039 new_adv
= ethtool_adv_to_mii_ctrl1000_t(advertise
);
4041 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4042 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
)
4043 new_adv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4045 err
= tg3_writephy(tp
, MII_CTRL1000
, new_adv
);
4050 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
4053 tw32(TG3_CPMU_EEE_MODE
,
4054 tr32(TG3_CPMU_EEE_MODE
) & ~TG3_CPMU_EEEMD_LPI_ENABLE
);
4056 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
4061 /* Advertise 100-BaseTX EEE ability */
4062 if (advertise
& ADVERTISED_100baseT_Full
)
4063 val
|= MDIO_AN_EEE_ADV_100TX
;
4064 /* Advertise 1000-BaseT EEE ability */
4065 if (advertise
& ADVERTISED_1000baseT_Full
)
4066 val
|= MDIO_AN_EEE_ADV_1000T
;
4067 err
= tg3_phy_cl45_write(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
4071 switch (tg3_asic_rev(tp
)) {
4073 case ASIC_REV_57765
:
4074 case ASIC_REV_57766
:
4076 /* If we advertised any eee advertisements above... */
4078 val
= MII_TG3_DSP_TAP26_ALNOKO
|
4079 MII_TG3_DSP_TAP26_RMRXSTO
|
4080 MII_TG3_DSP_TAP26_OPCSINPT
;
4081 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
4085 if (!tg3_phydsp_read(tp
, MII_TG3_DSP_CH34TP2
, &val
))
4086 tg3_phydsp_write(tp
, MII_TG3_DSP_CH34TP2
, val
|
4087 MII_TG3_DSP_CH34TP2_HIBW01
);
4090 err2
= tg3_phy_toggle_auxctl_smdsp(tp
, false);
4099 static void tg3_phy_copper_begin(struct tg3
*tp
)
4101 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
||
4102 (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4105 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
4106 adv
= ADVERTISED_10baseT_Half
|
4107 ADVERTISED_10baseT_Full
;
4108 if (tg3_flag(tp
, WOL_SPEED_100MB
))
4109 adv
|= ADVERTISED_100baseT_Half
|
4110 ADVERTISED_100baseT_Full
;
4112 fc
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
4114 adv
= tp
->link_config
.advertising
;
4115 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
4116 adv
&= ~(ADVERTISED_1000baseT_Half
|
4117 ADVERTISED_1000baseT_Full
);
4119 fc
= tp
->link_config
.flowctrl
;
4122 tg3_phy_autoneg_cfg(tp
, adv
, fc
);
4124 tg3_writephy(tp
, MII_BMCR
,
4125 BMCR_ANENABLE
| BMCR_ANRESTART
);
4128 u32 bmcr
, orig_bmcr
;
4130 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
4131 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
4134 switch (tp
->link_config
.speed
) {
4140 bmcr
|= BMCR_SPEED100
;
4144 bmcr
|= BMCR_SPEED1000
;
4148 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
4149 bmcr
|= BMCR_FULLDPLX
;
4151 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
4152 (bmcr
!= orig_bmcr
)) {
4153 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
4154 for (i
= 0; i
< 1500; i
++) {
4158 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
4159 tg3_readphy(tp
, MII_BMSR
, &tmp
))
4161 if (!(tmp
& BMSR_LSTATUS
)) {
4166 tg3_writephy(tp
, MII_BMCR
, bmcr
);
4172 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
4176 /* Turn off tap power management. */
4177 /* Set Extended packet length bit */
4178 err
= tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
4180 err
|= tg3_phydsp_write(tp
, 0x0012, 0x1804);
4181 err
|= tg3_phydsp_write(tp
, 0x0013, 0x1204);
4182 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0132);
4183 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0232);
4184 err
|= tg3_phydsp_write(tp
, 0x201f, 0x0a20);
4191 static bool tg3_phy_copper_an_config_ok(struct tg3
*tp
, u32
*lcladv
)
4193 u32 advmsk
, tgtadv
, advertising
;
4195 advertising
= tp
->link_config
.advertising
;
4196 tgtadv
= ethtool_adv_to_mii_adv_t(advertising
) & ADVERTISE_ALL
;
4198 advmsk
= ADVERTISE_ALL
;
4199 if (tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4200 tgtadv
|= mii_advertise_flowctrl(tp
->link_config
.flowctrl
);
4201 advmsk
|= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
4204 if (tg3_readphy(tp
, MII_ADVERTISE
, lcladv
))
4207 if ((*lcladv
& advmsk
) != tgtadv
)
4210 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4213 tgtadv
= ethtool_adv_to_mii_ctrl1000_t(advertising
);
4215 if (tg3_readphy(tp
, MII_CTRL1000
, &tg3_ctrl
))
4219 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4220 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
)) {
4221 tgtadv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4222 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
|
4223 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
4225 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
);
4228 if (tg3_ctrl
!= tgtadv
)
4235 static bool tg3_phy_copper_fetch_rmtadv(struct tg3
*tp
, u32
*rmtadv
)
4239 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4242 if (tg3_readphy(tp
, MII_STAT1000
, &val
))
4245 lpeth
= mii_stat1000_to_ethtool_lpa_t(val
);
4248 if (tg3_readphy(tp
, MII_LPA
, rmtadv
))
4251 lpeth
|= mii_lpa_to_ethtool_lpa_t(*rmtadv
);
4252 tp
->link_config
.rmt_adv
= lpeth
;
4257 static bool tg3_test_and_report_link_chg(struct tg3
*tp
, int curr_link_up
)
4259 if (curr_link_up
!= tp
->link_up
) {
4261 netif_carrier_on(tp
->dev
);
4263 netif_carrier_off(tp
->dev
);
4264 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
4265 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4268 tg3_link_report(tp
);
4275 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
4277 int current_link_up
;
4279 u32 lcl_adv
, rmt_adv
;
4287 (MAC_STATUS_SYNC_CHANGED
|
4288 MAC_STATUS_CFG_CHANGED
|
4289 MAC_STATUS_MI_COMPLETION
|
4290 MAC_STATUS_LNKSTATE_CHANGED
));
4293 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
4295 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
4299 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, 0);
4301 /* Some third-party PHYs need to be reset on link going
4304 if ((tg3_asic_rev(tp
) == ASIC_REV_5703
||
4305 tg3_asic_rev(tp
) == ASIC_REV_5704
||
4306 tg3_asic_rev(tp
) == ASIC_REV_5705
) &&
4308 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4309 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4310 !(bmsr
& BMSR_LSTATUS
))
4316 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
4317 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4318 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
4319 !tg3_flag(tp
, INIT_COMPLETE
))
4322 if (!(bmsr
& BMSR_LSTATUS
)) {
4323 err
= tg3_init_5401phy_dsp(tp
);
4327 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4328 for (i
= 0; i
< 1000; i
++) {
4330 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4331 (bmsr
& BMSR_LSTATUS
)) {
4337 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
4338 TG3_PHY_REV_BCM5401_B0
&&
4339 !(bmsr
& BMSR_LSTATUS
) &&
4340 tp
->link_config
.active_speed
== SPEED_1000
) {
4341 err
= tg3_phy_reset(tp
);
4343 err
= tg3_init_5401phy_dsp(tp
);
4348 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4349 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
) {
4350 /* 5701 {A0,B0} CRC bug workaround */
4351 tg3_writephy(tp
, 0x15, 0x0a75);
4352 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4353 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
4354 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4357 /* Clear pending interrupts... */
4358 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4359 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4361 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
4362 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
4363 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
4364 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
4366 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4367 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4368 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
4369 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
4370 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
4372 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
4375 current_link_up
= 0;
4376 current_speed
= SPEED_UNKNOWN
;
4377 current_duplex
= DUPLEX_UNKNOWN
;
4378 tp
->phy_flags
&= ~TG3_PHYFLG_MDIX_STATE
;
4379 tp
->link_config
.rmt_adv
= 0;
4381 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
4382 err
= tg3_phy_auxctl_read(tp
,
4383 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4385 if (!err
&& !(val
& (1 << 10))) {
4386 tg3_phy_auxctl_write(tp
,
4387 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4394 for (i
= 0; i
< 100; i
++) {
4395 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4396 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4397 (bmsr
& BMSR_LSTATUS
))
4402 if (bmsr
& BMSR_LSTATUS
) {
4405 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
4406 for (i
= 0; i
< 2000; i
++) {
4408 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
4413 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
4418 for (i
= 0; i
< 200; i
++) {
4419 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4420 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
4422 if (bmcr
&& bmcr
!= 0x7fff)
4430 tp
->link_config
.active_speed
= current_speed
;
4431 tp
->link_config
.active_duplex
= current_duplex
;
4433 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4434 if ((bmcr
& BMCR_ANENABLE
) &&
4435 tg3_phy_copper_an_config_ok(tp
, &lcl_adv
) &&
4436 tg3_phy_copper_fetch_rmtadv(tp
, &rmt_adv
))
4437 current_link_up
= 1;
4439 if (!(bmcr
& BMCR_ANENABLE
) &&
4440 tp
->link_config
.speed
== current_speed
&&
4441 tp
->link_config
.duplex
== current_duplex
&&
4442 tp
->link_config
.flowctrl
==
4443 tp
->link_config
.active_flowctrl
) {
4444 current_link_up
= 1;
4448 if (current_link_up
== 1 &&
4449 tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4452 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4453 reg
= MII_TG3_FET_GEN_STAT
;
4454 bit
= MII_TG3_FET_GEN_STAT_MDIXSTAT
;
4456 reg
= MII_TG3_EXT_STAT
;
4457 bit
= MII_TG3_EXT_STAT_MDIX
;
4460 if (!tg3_readphy(tp
, reg
, &val
) && (val
& bit
))
4461 tp
->phy_flags
|= TG3_PHYFLG_MDIX_STATE
;
4463 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
4468 if (current_link_up
== 0 || (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4469 tg3_phy_copper_begin(tp
);
4471 if (tg3_flag(tp
, ROBOSWITCH
)) {
4472 current_link_up
= 1;
4473 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4474 current_speed
= SPEED_1000
;
4475 current_duplex
= DUPLEX_FULL
;
4476 tp
->link_config
.active_speed
= current_speed
;
4477 tp
->link_config
.active_duplex
= current_duplex
;
4480 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4481 if ((!tg3_readphy(tp
, MII_BMSR
, &bmsr
) && (bmsr
& BMSR_LSTATUS
)) ||
4482 (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
4483 current_link_up
= 1;
4486 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
4487 if (current_link_up
== 1) {
4488 if (tp
->link_config
.active_speed
== SPEED_100
||
4489 tp
->link_config
.active_speed
== SPEED_10
)
4490 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4492 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4493 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
4494 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4496 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4498 /* In order for the 5750 core in BCM4785 chip to work properly
4499 * in RGMII mode, the Led Control Register must be set up.
4501 if (tg3_flag(tp
, RGMII_MODE
)) {
4502 u32 led_ctrl
= tr32(MAC_LED_CTRL
);
4503 led_ctrl
&= ~(LED_CTRL_1000MBPS_ON
| LED_CTRL_100MBPS_ON
);
4505 if (tp
->link_config
.active_speed
== SPEED_10
)
4506 led_ctrl
|= LED_CTRL_LNKLED_OVERRIDE
;
4507 else if (tp
->link_config
.active_speed
== SPEED_100
)
4508 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4509 LED_CTRL_100MBPS_ON
);
4510 else if (tp
->link_config
.active_speed
== SPEED_1000
)
4511 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4512 LED_CTRL_1000MBPS_ON
);
4514 tw32(MAC_LED_CTRL
, led_ctrl
);
4518 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4519 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4520 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4522 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
4523 if (current_link_up
== 1 &&
4524 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
4525 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
4527 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4530 /* ??? Without this setting Netgear GA302T PHY does not
4531 * ??? send/receive packets...
4533 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
4534 tg3_chip_rev_id(tp
) == CHIPREV_ID_5700_ALTIMA
) {
4535 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
4536 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
4540 tw32_f(MAC_MODE
, tp
->mac_mode
);
4543 tg3_phy_eee_adjust(tp
, current_link_up
);
4545 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
4546 /* Polled via timer. */
4547 tw32_f(MAC_EVENT
, 0);
4549 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4553 if (tg3_asic_rev(tp
) == ASIC_REV_5700
&&
4554 current_link_up
== 1 &&
4555 tp
->link_config
.active_speed
== SPEED_1000
&&
4556 (tg3_flag(tp
, PCIX_MODE
) || tg3_flag(tp
, PCI_HIGH_SPEED
))) {
4559 (MAC_STATUS_SYNC_CHANGED
|
4560 MAC_STATUS_CFG_CHANGED
));
4563 NIC_SRAM_FIRMWARE_MBOX
,
4564 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
4567 /* Prevent send BD corruption. */
4568 if (tg3_flag(tp
, CLKREQ_BUG
)) {
4569 if (tp
->link_config
.active_speed
== SPEED_100
||
4570 tp
->link_config
.active_speed
== SPEED_10
)
4571 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4572 PCI_EXP_LNKCTL_CLKREQ_EN
);
4574 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4575 PCI_EXP_LNKCTL_CLKREQ_EN
);
4578 tg3_test_and_report_link_chg(tp
, current_link_up
);
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK			0
#define ANEG_DONE		1
#define ANEG_TIMER_ENAB		2
#define ANEG_FAILED		-1

#define ANEG_STATE_SETTLE_TIME	10000
4647 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
4648 struct tg3_fiber_aneginfo
*ap
)
4651 unsigned long delta
;
4655 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
4659 ap
->ability_match_cfg
= 0;
4660 ap
->ability_match_count
= 0;
4661 ap
->ability_match
= 0;
4667 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
4668 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
4670 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
4671 ap
->ability_match_cfg
= rx_cfg_reg
;
4672 ap
->ability_match
= 0;
4673 ap
->ability_match_count
= 0;
4675 if (++ap
->ability_match_count
> 1) {
4676 ap
->ability_match
= 1;
4677 ap
->ability_match_cfg
= rx_cfg_reg
;
4680 if (rx_cfg_reg
& ANEG_CFG_ACK
)
4688 ap
->ability_match_cfg
= 0;
4689 ap
->ability_match_count
= 0;
4690 ap
->ability_match
= 0;
4696 ap
->rxconfig
= rx_cfg_reg
;
4699 switch (ap
->state
) {
4700 case ANEG_STATE_UNKNOWN
:
4701 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
4702 ap
->state
= ANEG_STATE_AN_ENABLE
;
4705 case ANEG_STATE_AN_ENABLE
:
4706 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
4707 if (ap
->flags
& MR_AN_ENABLE
) {
4710 ap
->ability_match_cfg
= 0;
4711 ap
->ability_match_count
= 0;
4712 ap
->ability_match
= 0;
4716 ap
->state
= ANEG_STATE_RESTART_INIT
;
4718 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
4722 case ANEG_STATE_RESTART_INIT
:
4723 ap
->link_time
= ap
->cur_time
;
4724 ap
->flags
&= ~(MR_NP_LOADED
);
4726 tw32(MAC_TX_AUTO_NEG
, 0);
4727 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
4728 tw32_f(MAC_MODE
, tp
->mac_mode
);
4731 ret
= ANEG_TIMER_ENAB
;
4732 ap
->state
= ANEG_STATE_RESTART
;
4735 case ANEG_STATE_RESTART
:
4736 delta
= ap
->cur_time
- ap
->link_time
;
4737 if (delta
> ANEG_STATE_SETTLE_TIME
)
4738 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
4740 ret
= ANEG_TIMER_ENAB
;
4743 case ANEG_STATE_DISABLE_LINK_OK
:
4747 case ANEG_STATE_ABILITY_DETECT_INIT
:
4748 ap
->flags
&= ~(MR_TOGGLE_TX
);
4749 ap
->txconfig
= ANEG_CFG_FD
;
4750 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
4751 if (flowctrl
& ADVERTISE_1000XPAUSE
)
4752 ap
->txconfig
|= ANEG_CFG_PS1
;
4753 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
4754 ap
->txconfig
|= ANEG_CFG_PS2
;
4755 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
4756 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
4757 tw32_f(MAC_MODE
, tp
->mac_mode
);
4760 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
4763 case ANEG_STATE_ABILITY_DETECT
:
4764 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0)
4765 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
4768 case ANEG_STATE_ACK_DETECT_INIT
:
4769 ap
->txconfig
|= ANEG_CFG_ACK
;
4770 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
4771 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
4772 tw32_f(MAC_MODE
, tp
->mac_mode
);
4775 ap
->state
= ANEG_STATE_ACK_DETECT
;
4778 case ANEG_STATE_ACK_DETECT
:
4779 if (ap
->ack_match
!= 0) {
4780 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
4781 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
4782 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
4784 ap
->state
= ANEG_STATE_AN_ENABLE
;
4786 } else if (ap
->ability_match
!= 0 &&
4787 ap
->rxconfig
== 0) {
4788 ap
->state
= ANEG_STATE_AN_ENABLE
;
4792 case ANEG_STATE_COMPLETE_ACK_INIT
:
4793 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
4797 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
4798 MR_LP_ADV_HALF_DUPLEX
|
4799 MR_LP_ADV_SYM_PAUSE
|
4800 MR_LP_ADV_ASYM_PAUSE
|
4801 MR_LP_ADV_REMOTE_FAULT1
|
4802 MR_LP_ADV_REMOTE_FAULT2
|
4803 MR_LP_ADV_NEXT_PAGE
|
4806 if (ap
->rxconfig
& ANEG_CFG_FD
)
4807 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
4808 if (ap
->rxconfig
& ANEG_CFG_HD
)
4809 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
4810 if (ap
->rxconfig
& ANEG_CFG_PS1
)
4811 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
4812 if (ap
->rxconfig
& ANEG_CFG_PS2
)
4813 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
4814 if (ap
->rxconfig
& ANEG_CFG_RF1
)
4815 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
4816 if (ap
->rxconfig
& ANEG_CFG_RF2
)
4817 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
4818 if (ap
->rxconfig
& ANEG_CFG_NP
)
4819 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
4821 ap
->link_time
= ap
->cur_time
;
4823 ap
->flags
^= (MR_TOGGLE_TX
);
4824 if (ap
->rxconfig
& 0x0008)
4825 ap
->flags
|= MR_TOGGLE_RX
;
4826 if (ap
->rxconfig
& ANEG_CFG_NP
)
4827 ap
->flags
|= MR_NP_RX
;
4828 ap
->flags
|= MR_PAGE_RX
;
4830 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
4831 ret
= ANEG_TIMER_ENAB
;
4834 case ANEG_STATE_COMPLETE_ACK
:
4835 if (ap
->ability_match
!= 0 &&
4836 ap
->rxconfig
== 0) {
4837 ap
->state
= ANEG_STATE_AN_ENABLE
;
4840 delta
= ap
->cur_time
- ap
->link_time
;
4841 if (delta
> ANEG_STATE_SETTLE_TIME
) {
4842 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
4843 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
4845 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
4846 !(ap
->flags
& MR_NP_RX
)) {
4847 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
4855 case ANEG_STATE_IDLE_DETECT_INIT
:
4856 ap
->link_time
= ap
->cur_time
;
4857 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
4858 tw32_f(MAC_MODE
, tp
->mac_mode
);
4861 ap
->state
= ANEG_STATE_IDLE_DETECT
;
4862 ret
= ANEG_TIMER_ENAB
;
4865 case ANEG_STATE_IDLE_DETECT
:
4866 if (ap
->ability_match
!= 0 &&
4867 ap
->rxconfig
== 0) {
4868 ap
->state
= ANEG_STATE_AN_ENABLE
;
4871 delta
= ap
->cur_time
- ap
->link_time
;
4872 if (delta
> ANEG_STATE_SETTLE_TIME
) {
4873 /* XXX another gem from the Broadcom driver :( */
4874 ap
->state
= ANEG_STATE_LINK_OK
;
4878 case ANEG_STATE_LINK_OK
:
4879 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
4883 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
4884 /* ??? unimplemented */
4887 case ANEG_STATE_NEXT_PAGE_WAIT
:
4888 /* ??? unimplemented */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4994 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
4997 u32 sg_dig_ctrl
, sg_dig_status
;
4998 u32 serdes_cfg
, expected_sg_dig_ctrl
;
4999 int workaround
, port_a
;
5000 int current_link_up
;
5003 expected_sg_dig_ctrl
= 0;
5006 current_link_up
= 0;
5008 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A0
&&
5009 tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A1
) {
5011 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
5014 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5015 /* preserve bits 20-23 for voltage regulator */
5016 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
5019 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
5021 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
5022 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
5024 u32 val
= serdes_cfg
;
5030 tw32_f(MAC_SERDES_CFG
, val
);
5033 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5035 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
5036 tg3_setup_flow_control(tp
, 0, 0);
5037 current_link_up
= 1;
5042 /* Want auto-negotiation. */
5043 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
5045 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5046 if (flowctrl
& ADVERTISE_1000XPAUSE
)
5047 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
5048 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
5049 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
5051 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
5052 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
5053 tp
->serdes_counter
&&
5054 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
5055 MAC_STATUS_RCVD_CFG
)) ==
5056 MAC_STATUS_PCS_SYNCED
)) {
5057 tp
->serdes_counter
--;
5058 current_link_up
= 1;
5063 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
5064 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
5066 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
5068 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5069 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5070 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
5071 MAC_STATUS_SIGNAL_DET
)) {
5072 sg_dig_status
= tr32(SG_DIG_STATUS
);
5073 mac_status
= tr32(MAC_STATUS
);
5075 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
5076 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
5077 u32 local_adv
= 0, remote_adv
= 0;
5079 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
5080 local_adv
|= ADVERTISE_1000XPAUSE
;
5081 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
5082 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5084 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
5085 remote_adv
|= LPA_1000XPAUSE
;
5086 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
5087 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5089 tp
->link_config
.rmt_adv
=
5090 mii_adv_to_ethtool_adv_x(remote_adv
);
5092 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5093 current_link_up
= 1;
5094 tp
->serdes_counter
= 0;
5095 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5096 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
5097 if (tp
->serdes_counter
)
5098 tp
->serdes_counter
--;
5101 u32 val
= serdes_cfg
;
5108 tw32_f(MAC_SERDES_CFG
, val
);
5111 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5114 /* Link parallel detection - link is up */
5115 /* only if we have PCS_SYNC and not */
5116 /* receiving config code words */
5117 mac_status
= tr32(MAC_STATUS
);
5118 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5119 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
5120 tg3_setup_flow_control(tp
, 0, 0);
5121 current_link_up
= 1;
5123 TG3_PHYFLG_PARALLEL_DETECT
;
5124 tp
->serdes_counter
=
5125 SERDES_PARALLEL_DET_TIMEOUT
;
5127 goto restart_autoneg
;
5131 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5132 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5136 return current_link_up
;
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5204 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
5207 u16 orig_active_speed
;
5208 u8 orig_active_duplex
;
5210 int current_link_up
;
5213 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
5214 orig_active_speed
= tp
->link_config
.active_speed
;
5215 orig_active_duplex
= tp
->link_config
.active_duplex
;
5217 if (!tg3_flag(tp
, HW_AUTONEG
) &&
5219 tg3_flag(tp
, INIT_COMPLETE
)) {
5220 mac_status
= tr32(MAC_STATUS
);
5221 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
5222 MAC_STATUS_SIGNAL_DET
|
5223 MAC_STATUS_CFG_CHANGED
|
5224 MAC_STATUS_RCVD_CFG
);
5225 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
5226 MAC_STATUS_SIGNAL_DET
)) {
5227 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5228 MAC_STATUS_CFG_CHANGED
));
5233 tw32_f(MAC_TX_AUTO_NEG
, 0);
5235 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
5236 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
5237 tw32_f(MAC_MODE
, tp
->mac_mode
);
5240 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
5241 tg3_init_bcm8002(tp
);
5243 /* Enable link change event even when serdes polling. */
5244 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5247 current_link_up
= 0;
5248 tp
->link_config
.rmt_adv
= 0;
5249 mac_status
= tr32(MAC_STATUS
);
5251 if (tg3_flag(tp
, HW_AUTONEG
))
5252 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
5254 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
5256 tp
->napi
[0].hw_status
->status
=
5257 (SD_STATUS_UPDATED
|
5258 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
5260 for (i
= 0; i
< 100; i
++) {
5261 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5262 MAC_STATUS_CFG_CHANGED
));
5264 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
5265 MAC_STATUS_CFG_CHANGED
|
5266 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
5270 mac_status
= tr32(MAC_STATUS
);
5271 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
5272 current_link_up
= 0;
5273 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
5274 tp
->serdes_counter
== 0) {
5275 tw32_f(MAC_MODE
, (tp
->mac_mode
|
5276 MAC_MODE_SEND_CONFIGS
));
5278 tw32_f(MAC_MODE
, tp
->mac_mode
);
5282 if (current_link_up
== 1) {
5283 tp
->link_config
.active_speed
= SPEED_1000
;
5284 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
5285 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5286 LED_CTRL_LNKLED_OVERRIDE
|
5287 LED_CTRL_1000MBPS_ON
));
5289 tp
->link_config
.active_speed
= SPEED_UNKNOWN
;
5290 tp
->link_config
.active_duplex
= DUPLEX_UNKNOWN
;
5291 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5292 LED_CTRL_LNKLED_OVERRIDE
|
5293 LED_CTRL_TRAFFIC_OVERRIDE
));
5296 if (!tg3_test_and_report_link_chg(tp
, current_link_up
)) {
5297 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
5298 if (orig_pause_cfg
!= now_pause_cfg
||
5299 orig_active_speed
!= tp
->link_config
.active_speed
||
5300 orig_active_duplex
!= tp
->link_config
.active_duplex
)
5301 tg3_link_report(tp
);
5307 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
5309 int current_link_up
, err
= 0;
5313 u32 local_adv
, remote_adv
;
5315 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5316 tw32_f(MAC_MODE
, tp
->mac_mode
);
5322 (MAC_STATUS_SYNC_CHANGED
|
5323 MAC_STATUS_CFG_CHANGED
|
5324 MAC_STATUS_MI_COMPLETION
|
5325 MAC_STATUS_LNKSTATE_CHANGED
));
5331 current_link_up
= 0;
5332 current_speed
= SPEED_UNKNOWN
;
5333 current_duplex
= DUPLEX_UNKNOWN
;
5334 tp
->link_config
.rmt_adv
= 0;
5336 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5337 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5338 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5339 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5340 bmsr
|= BMSR_LSTATUS
;
5342 bmsr
&= ~BMSR_LSTATUS
;
5345 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5347 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
5348 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5349 /* do nothing, just check for link up at the end */
5350 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5353 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5354 newadv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
5355 ADVERTISE_1000XPAUSE
|
5356 ADVERTISE_1000XPSE_ASYM
|
5359 newadv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5360 newadv
|= ethtool_adv_to_mii_adv_x(tp
->link_config
.advertising
);
5362 if ((newadv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
5363 tg3_writephy(tp
, MII_ADVERTISE
, newadv
);
5364 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
5365 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5367 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5368 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
5369 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5376 bmcr
&= ~BMCR_SPEED1000
;
5377 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
5379 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
5380 new_bmcr
|= BMCR_FULLDPLX
;
5382 if (new_bmcr
!= bmcr
) {
5383 /* BMCR_SPEED1000 is a reserved bit that needs
5384 * to be set on write.
5386 new_bmcr
|= BMCR_SPEED1000
;
5388 /* Force a linkdown */
5392 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5393 adv
&= ~(ADVERTISE_1000XFULL
|
5394 ADVERTISE_1000XHALF
|
5396 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
5397 tg3_writephy(tp
, MII_BMCR
, bmcr
|
5401 tg3_carrier_off(tp
);
5403 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
5405 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5406 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5407 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5408 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5409 bmsr
|= BMSR_LSTATUS
;
5411 bmsr
&= ~BMSR_LSTATUS
;
5413 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5417 if (bmsr
& BMSR_LSTATUS
) {
5418 current_speed
= SPEED_1000
;
5419 current_link_up
= 1;
5420 if (bmcr
& BMCR_FULLDPLX
)
5421 current_duplex
= DUPLEX_FULL
;
5423 current_duplex
= DUPLEX_HALF
;
5428 if (bmcr
& BMCR_ANENABLE
) {
5431 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
5432 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
5433 common
= local_adv
& remote_adv
;
5434 if (common
& (ADVERTISE_1000XHALF
|
5435 ADVERTISE_1000XFULL
)) {
5436 if (common
& ADVERTISE_1000XFULL
)
5437 current_duplex
= DUPLEX_FULL
;
5439 current_duplex
= DUPLEX_HALF
;
5441 tp
->link_config
.rmt_adv
=
5442 mii_adv_to_ethtool_adv_x(remote_adv
);
5443 } else if (!tg3_flag(tp
, 5780_CLASS
)) {
5444 /* Link is up via parallel detect */
5446 current_link_up
= 0;
5451 if (current_link_up
== 1 && current_duplex
== DUPLEX_FULL
)
5452 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5454 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
5455 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5456 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
5458 tw32_f(MAC_MODE
, tp
->mac_mode
);
5461 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5463 tp
->link_config
.active_speed
= current_speed
;
5464 tp
->link_config
.active_duplex
= current_duplex
;
5466 tg3_test_and_report_link_chg(tp
, current_link_up
);
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
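/* Note on the helper above: the 64-bit EAV reference clock is exposed as two
 * 32-bit registers, so the write path first freezes the counter (STOP), loads
 * both halves while it is stopped, and only then restarts it (RESUME), so the
 * hardware never runs with a half-updated value.
 */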
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
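/* Worked example of the ppb-to-correction conversion in tg3_ptp_adjfreq()
 * above (illustrative numbers only): a requested adjustment of
 * ppb = 100000 (100 ppm) gives
 * correction = 100000 * (1 << 24) / 1000000000 ~= 1677, i.e. roughly 0.01%
 * of the 24-bit accumulator range is added on every reference clock tick.
 */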
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}

static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};

static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}

/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
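/* tg3_rd32_loop() below reads a block of 'len' bytes of chip registers
 * starting at register offset 'off' into the dump buffer, placing them at
 * the same offset within 'dst' so the buffer mirrors the register layout.
 */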
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * (in tg3_reset_task).
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
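/* Example of the ring-space arithmetic above (illustrative numbers only):
 * with tx_pending = 511, tx_prod = 10 and tx_cons = 500 on a 512-entry ring,
 * (10 - 500) & (TG3_TX_RING_SIZE - 1) = 22 descriptors are still in flight,
 * so tg3_tx_avail() reports 511 - 22 = 489 free slots.
 */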
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
6207 static int tg3_rx(struct tg3_napi
*tnapi
, int budget
)
6209 struct tg3
*tp
= tnapi
->tp
;
6210 u32 work_mask
, rx_std_posted
= 0;
6211 u32 std_prod_idx
, jmb_prod_idx
;
6212 u32 sw_idx
= tnapi
->rx_rcb_ptr
;
6215 struct tg3_rx_prodring_set
*tpr
= &tnapi
->prodring
;
6217 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6219 * We need to order the read of hw_idx and the read of
6220 * the opaque cookie.
6225 std_prod_idx
= tpr
->rx_std_prod_idx
;
6226 jmb_prod_idx
= tpr
->rx_jmb_prod_idx
;
6227 while (sw_idx
!= hw_idx
&& budget
> 0) {
6228 struct ring_info
*ri
;
6229 struct tg3_rx_buffer_desc
*desc
= &tnapi
->rx_rcb
[sw_idx
];
6231 struct sk_buff
*skb
;
6232 dma_addr_t dma_addr
;
6233 u32 opaque_key
, desc_idx
, *post_ptr
;
6237 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
6238 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
6239 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
6240 ri
= &tp
->napi
[0].prodring
.rx_std_buffers
[desc_idx
];
6241 dma_addr
= dma_unmap_addr(ri
, mapping
);
6243 post_ptr
= &std_prod_idx
;
6245 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
6246 ri
= &tp
->napi
[0].prodring
.rx_jmb_buffers
[desc_idx
];
6247 dma_addr
= dma_unmap_addr(ri
, mapping
);
6249 post_ptr
= &jmb_prod_idx
;
6251 goto next_pkt_nopost
;
6253 work_mask
|= opaque_key
;
6255 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
6256 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
6258 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6259 desc_idx
, *post_ptr
);
6261 /* Other statistics kept track of by card. */
6266 prefetch(data
+ TG3_RX_OFFSET(tp
));
6267 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) -
6270 if ((desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6271 RXD_FLAG_PTPSTAT_PTPV1
||
6272 (desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6273 RXD_FLAG_PTPSTAT_PTPV2
) {
6274 tstamp
= tr32(TG3_RX_TSTAMP_LSB
);
6275 tstamp
|= (u64
)tr32(TG3_RX_TSTAMP_MSB
) << 32;
6278 if (len
> TG3_RX_COPY_THRESH(tp
)) {
6280 unsigned int frag_size
;
6282 skb_size
= tg3_alloc_rx_data(tp
, tpr
, opaque_key
,
6283 *post_ptr
, &frag_size
);
6287 pci_unmap_single(tp
->pdev
, dma_addr
, skb_size
,
6288 PCI_DMA_FROMDEVICE
);
6290 skb
= build_skb(data
, frag_size
);
6292 tg3_frag_free(frag_size
!= 0, data
);
6293 goto drop_it_no_recycle
;
6295 skb_reserve(skb
, TG3_RX_OFFSET(tp
));
6296 /* Ensure that the update to the data happens
6297 * after the usage of the old DMA mapping.
6304 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6305 desc_idx
, *post_ptr
);
6307 skb
= netdev_alloc_skb(tp
->dev
,
6308 len
+ TG3_RAW_IP_ALIGN
);
6310 goto drop_it_no_recycle
;
6312 skb_reserve(skb
, TG3_RAW_IP_ALIGN
);
6313 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6315 data
+ TG3_RX_OFFSET(tp
),
6317 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6322 tg3_hwclock_to_timestamp(tp
, tstamp
,
6323 skb_hwtstamps(skb
));
6325 if ((tp
->dev
->features
& NETIF_F_RXCSUM
) &&
6326 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
6327 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
6328 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
6329 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
6331 skb_checksum_none_assert(skb
);
6333 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
6335 if (len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
6336 skb
->protocol
!= htons(ETH_P_8021Q
)) {
6338 goto drop_it_no_recycle
;
6341 if (desc
->type_flags
& RXD_FLAG_VLAN
&&
6342 !(tp
->rx_mode
& RX_MODE_KEEP_VLAN_TAG
))
6343 __vlan_hwaccel_put_tag(skb
,
6344 desc
->err_vlan
& RXD_VLAN_MASK
);
6346 napi_gro_receive(&tnapi
->napi
, skb
);
6354 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
6355 tpr
->rx_std_prod_idx
= std_prod_idx
&
6356 tp
->rx_std_ring_mask
;
6357 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6358 tpr
->rx_std_prod_idx
);
6359 work_mask
&= ~RXD_OPAQUE_RING_STD
;
6364 sw_idx
&= tp
->rx_ret_ring_mask
;
6366 /* Refresh hw_idx to see if there is new work */
6367 if (sw_idx
== hw_idx
) {
6368 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6373 /* ACK the status ring. */
6374 tnapi
->rx_rcb_ptr
= sw_idx
;
6375 tw32_rx_mbox(tnapi
->consmbox
, sw_idx
);
6377 /* Refill RX ring(s). */
6378 if (!tg3_flag(tp
, ENABLE_RSS
)) {
6379 /* Sync BD data before updating mailbox */
6382 if (work_mask
& RXD_OPAQUE_RING_STD
) {
6383 tpr
->rx_std_prod_idx
= std_prod_idx
&
6384 tp
->rx_std_ring_mask
;
6385 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6386 tpr
->rx_std_prod_idx
);
6388 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
6389 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
&
6390 tp
->rx_jmb_ring_mask
;
6391 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6392 tpr
->rx_jmb_prod_idx
);
6395 } else if (work_mask
) {
6396 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6397 * updated before the producer indices can be updated.
6401 tpr
->rx_std_prod_idx
= std_prod_idx
& tp
->rx_std_ring_mask
;
6402 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
& tp
->rx_jmb_ring_mask
;
6404 if (tnapi
!= &tp
->napi
[1]) {
6405 tp
->rx_refill
= true;
6406 napi_schedule(&tp
->napi
[1].napi
);
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
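/* tg3_rx_prodring_xfer() below moves freshly recycled rx buffers from a
 * per-vector source producer ring back to the destination ring owned by
 * tp->napi[0], copying both the ring_info entries and the buffer descriptor
 * addresses; it returns -ENOSPC if a destination slot is still occupied.
 */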
6437 static int tg3_rx_prodring_xfer(struct tg3
*tp
,
6438 struct tg3_rx_prodring_set
*dpr
,
6439 struct tg3_rx_prodring_set
*spr
)
6441 u32 si
, di
, cpycnt
, src_prod_idx
;
6445 src_prod_idx
= spr
->rx_std_prod_idx
;
6447 /* Make sure updates to the rx_std_buffers[] entries and the
6448 * standard producer index are seen in the correct order.
6452 if (spr
->rx_std_cons_idx
== src_prod_idx
)
6455 if (spr
->rx_std_cons_idx
< src_prod_idx
)
6456 cpycnt
= src_prod_idx
- spr
->rx_std_cons_idx
;
6458 cpycnt
= tp
->rx_std_ring_mask
+ 1 -
6459 spr
->rx_std_cons_idx
;
6461 cpycnt
= min(cpycnt
,
6462 tp
->rx_std_ring_mask
+ 1 - dpr
->rx_std_prod_idx
);
6464 si
= spr
->rx_std_cons_idx
;
6465 di
= dpr
->rx_std_prod_idx
;
6467 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6468 if (dpr
->rx_std_buffers
[i
].data
) {
6478 /* Ensure that updates to the rx_std_buffers ring and the
6479 * shadowed hardware producer ring from tg3_recycle_skb() are
6480 * ordered correctly WRT the skb check above.
6484 memcpy(&dpr
->rx_std_buffers
[di
],
6485 &spr
->rx_std_buffers
[si
],
6486 cpycnt
* sizeof(struct ring_info
));
6488 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6489 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6490 sbd
= &spr
->rx_std
[si
];
6491 dbd
= &dpr
->rx_std
[di
];
6492 dbd
->addr_hi
= sbd
->addr_hi
;
6493 dbd
->addr_lo
= sbd
->addr_lo
;
6496 spr
->rx_std_cons_idx
= (spr
->rx_std_cons_idx
+ cpycnt
) &
6497 tp
->rx_std_ring_mask
;
6498 dpr
->rx_std_prod_idx
= (dpr
->rx_std_prod_idx
+ cpycnt
) &
6499 tp
->rx_std_ring_mask
;
6503 src_prod_idx
= spr
->rx_jmb_prod_idx
;
6505 /* Make sure updates to the rx_jmb_buffers[] entries and
6506 * the jumbo producer index are seen in the correct order.
6510 if (spr
->rx_jmb_cons_idx
== src_prod_idx
)
6513 if (spr
->rx_jmb_cons_idx
< src_prod_idx
)
6514 cpycnt
= src_prod_idx
- spr
->rx_jmb_cons_idx
;
6516 cpycnt
= tp
->rx_jmb_ring_mask
+ 1 -
6517 spr
->rx_jmb_cons_idx
;
6519 cpycnt
= min(cpycnt
,
6520 tp
->rx_jmb_ring_mask
+ 1 - dpr
->rx_jmb_prod_idx
);
6522 si
= spr
->rx_jmb_cons_idx
;
6523 di
= dpr
->rx_jmb_prod_idx
;
6525 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6526 if (dpr
->rx_jmb_buffers
[i
].data
) {
6536 /* Ensure that updates to the rx_jmb_buffers ring and the
6537 * shadowed hardware producer ring from tg3_recycle_skb() are
6538 * ordered correctly WRT the skb check above.
6542 memcpy(&dpr
->rx_jmb_buffers
[di
],
6543 &spr
->rx_jmb_buffers
[si
],
6544 cpycnt
* sizeof(struct ring_info
));
6546 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6547 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6548 sbd
= &spr
->rx_jmb
[si
].std
;
6549 dbd
= &dpr
->rx_jmb
[di
].std
;
6550 dbd
->addr_hi
= sbd
->addr_hi
;
6551 dbd
->addr_lo
= sbd
->addr_lo
;
6554 spr
->rx_jmb_cons_idx
= (spr
->rx_jmb_cons_idx
+ cpycnt
) &
6555 tp
->rx_jmb_ring_mask
;
6556 dpr
->rx_jmb_prod_idx
= (dpr
->rx_jmb_prod_idx
+ cpycnt
) &
6557 tp
->rx_jmb_ring_mask
;
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream. We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled. Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
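/* Illustrative worked example (not part of the original driver): with
 * mapping = 0xfffff000 and len = 0x2000, base is 0xfffff000, which is above
 * 0xffffdcc0, and base + len + 8 wraps past 4GB back below base, so
 * tg3_4g_overflow_test() returns true and the buffer must be copied or
 * split because the DMA engine cannot cross a 4GB boundary.
 */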
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
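/* Illustrative worked example (not part of the original driver): with
 * tp->dma_limit = 4096 and a 4100 byte fragment, the loop above would
 * normally leave a 4 byte remainder.  Because of the 8 byte DMA problem
 * noted in the loop, the last full-size chunk is shrunk to 2048 bytes so
 * the tail grows to 2052 bytes; both descriptors then stay safely above
 * the 8 byte minimum while still covering all 4100 bytes.
 */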
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	if (!skb)
		return;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(*pskb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
7272 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7273 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7275 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
7277 struct tg3
*tp
= netdev_priv(dev
);
7278 u32 len
, entry
, base_flags
, mss
, vlan
= 0;
7280 int i
= -1, would_hit_hwbug
;
7282 struct tg3_napi
*tnapi
;
7283 struct netdev_queue
*txq
;
7286 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
7287 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
7288 if (tg3_flag(tp
, ENABLE_TSS
))
7291 budget
= tg3_tx_avail(tnapi
);
7293 /* We are running in BH disabled context with netif_tx_lock
7294 * and TX reclaim runs via tp->napi.poll inside of a software
7295 * interrupt. Furthermore, IRQ processing runs lockless so we have
7296 * no IRQ context deadlocks to worry about either. Rejoice!
7298 if (unlikely(budget
<= (skb_shinfo(skb
)->nr_frags
+ 1))) {
7299 if (!netif_tx_queue_stopped(txq
)) {
7300 netif_tx_stop_queue(txq
);
7302 /* This is a hard error, log it. */
7304 "BUG! Tx Ring full when queue awake!\n");
7306 return NETDEV_TX_BUSY
;
7309 entry
= tnapi
->tx_prod
;
7311 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
7312 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
7314 mss
= skb_shinfo(skb
)->gso_size
;
7317 u32 tcp_opt_len
, hdr_len
;
7319 if (skb_header_cloned(skb
) &&
7320 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
))
7324 tcp_opt_len
= tcp_optlen(skb
);
7326 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
) - ETH_HLEN
;
7328 if (!skb_is_gso_v6(skb
)) {
7330 iph
->tot_len
= htons(mss
+ hdr_len
);
7333 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
7334 tg3_flag(tp
, TSO_BUG
))
7335 return tg3_tso_bug(tp
, skb
);
7337 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
7338 TXD_FLAG_CPU_POST_DMA
);
7340 if (tg3_flag(tp
, HW_TSO_1
) ||
7341 tg3_flag(tp
, HW_TSO_2
) ||
7342 tg3_flag(tp
, HW_TSO_3
)) {
7343 tcp_hdr(skb
)->check
= 0;
7344 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
7346 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
7351 if (tg3_flag(tp
, HW_TSO_3
)) {
7352 mss
|= (hdr_len
& 0xc) << 12;
7354 base_flags
|= 0x00000010;
7355 base_flags
|= (hdr_len
& 0x3e0) << 5;
7356 } else if (tg3_flag(tp
, HW_TSO_2
))
7357 mss
|= hdr_len
<< 9;
7358 else if (tg3_flag(tp
, HW_TSO_1
) ||
7359 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
7360 if (tcp_opt_len
|| iph
->ihl
> 5) {
7363 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
7364 mss
|= (tsflags
<< 11);
7367 if (tcp_opt_len
|| iph
->ihl
> 5) {
7370 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
7371 base_flags
|= tsflags
<< 12;
7376 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
7377 !mss
&& skb
->len
> VLAN_ETH_FRAME_LEN
)
7378 base_flags
|= TXD_FLAG_JMB_PKT
;
7380 if (vlan_tx_tag_present(skb
)) {
7381 base_flags
|= TXD_FLAG_VLAN
;
7382 vlan
= vlan_tx_tag_get(skb
);
7385 if ((unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)) &&
7386 tg3_flag(tp
, TX_TSTAMP_EN
)) {
7387 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
7388 base_flags
|= TXD_FLAG_HWTSTAMP
;
7391 len
= skb_headlen(skb
);
7393 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
7394 if (pci_dma_mapping_error(tp
->pdev
, mapping
))
7398 tnapi
->tx_buffers
[entry
].skb
= skb
;
7399 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
, mapping
);
7401 would_hit_hwbug
= 0;
7403 if (tg3_flag(tp
, 5701_DMA_BUG
))
7404 would_hit_hwbug
= 1;
7406 if (tg3_tx_frag_set(tnapi
, &entry
, &budget
, mapping
, len
, base_flags
|
7407 ((skb_shinfo(skb
)->nr_frags
== 0) ? TXD_FLAG_END
: 0),
7409 would_hit_hwbug
= 1;
7410 } else if (skb_shinfo(skb
)->nr_frags
> 0) {
7413 if (!tg3_flag(tp
, HW_TSO_1
) &&
7414 !tg3_flag(tp
, HW_TSO_2
) &&
7415 !tg3_flag(tp
, HW_TSO_3
))
7418 /* Now loop through additional data
7419 * fragments, and queue them.
7421 last
= skb_shinfo(skb
)->nr_frags
- 1;
7422 for (i
= 0; i
<= last
; i
++) {
7423 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
7425 len
= skb_frag_size(frag
);
7426 mapping
= skb_frag_dma_map(&tp
->pdev
->dev
, frag
, 0,
7427 len
, DMA_TO_DEVICE
);
7429 tnapi
->tx_buffers
[entry
].skb
= NULL
;
7430 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
,
7432 if (dma_mapping_error(&tp
->pdev
->dev
, mapping
))
7436 tg3_tx_frag_set(tnapi
, &entry
, &budget
, mapping
,
7438 ((i
== last
) ? TXD_FLAG_END
: 0),
7440 would_hit_hwbug
= 1;
7446 if (would_hit_hwbug
) {
7447 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
, i
);
7449 /* If the workaround fails due to memory/mapping
7450 * failure, silently drop this packet.
7452 entry
= tnapi
->tx_prod
;
7453 budget
= tg3_tx_avail(tnapi
);
7454 if (tigon3_dma_hwbug_workaround(tnapi
, &skb
, &entry
, &budget
,
7455 base_flags
, mss
, vlan
))
7459 skb_tx_timestamp(skb
);
7460 netdev_tx_sent_queue(txq
, skb
->len
);
7462 /* Sync BD data before updating mailbox */
7465 /* Packets are ready, update Tx producer idx local and on card. */
7466 tw32_tx_mbox(tnapi
->prodmbox
, entry
);
7468 tnapi
->tx_prod
= entry
;
7469 if (unlikely(tg3_tx_avail(tnapi
) <= (MAX_SKB_FRAGS
+ 1))) {
7470 netif_tx_stop_queue(txq
);
7472 /* netif_tx_stop_queue() must be done before checking
7473 * checking tx index in tg3_tx_avail() below, because in
7474 * tg3_tx(), we update tx index before checking for
7475 * netif_tx_queue_stopped().
7478 if (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
))
7479 netif_tx_wake_queue(txq
);
7483 return NETDEV_TX_OK
;
7486 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
, --i
);
7487 tnapi
->tx_buffers
[tnapi
->tx_prod
].skb
= NULL
;
7492 return NETDEV_TX_OK
;
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
7523 static int tg3_phy_lpbk_set(struct tg3
*tp
, u32 speed
, bool extlpbk
)
7525 u32 val
, bmcr
, mac_mode
, ptest
= 0;
7527 tg3_phy_toggle_apd(tp
, false);
7528 tg3_phy_toggle_automdix(tp
, 0);
7530 if (extlpbk
&& tg3_phy_set_extloopbk(tp
))
7533 bmcr
= BMCR_FULLDPLX
;
7538 bmcr
|= BMCR_SPEED100
;
7542 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
7544 bmcr
|= BMCR_SPEED100
;
7547 bmcr
|= BMCR_SPEED1000
;
7552 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
7553 tg3_readphy(tp
, MII_CTRL1000
, &val
);
7554 val
|= CTL1000_AS_MASTER
|
7555 CTL1000_ENABLE_MASTER
;
7556 tg3_writephy(tp
, MII_CTRL1000
, val
);
7558 ptest
= MII_TG3_FET_PTEST_TRIM_SEL
|
7559 MII_TG3_FET_PTEST_TRIM_2
;
7560 tg3_writephy(tp
, MII_TG3_FET_PTEST
, ptest
);
7563 bmcr
|= BMCR_LOOPBACK
;
7565 tg3_writephy(tp
, MII_BMCR
, bmcr
);
7567 /* The write needs to be flushed for the FETs */
7568 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
7569 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
7573 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
7574 tg3_asic_rev(tp
) == ASIC_REV_5785
) {
7575 tg3_writephy(tp
, MII_TG3_FET_PTEST
, ptest
|
7576 MII_TG3_FET_PTEST_FRC_TX_LINK
|
7577 MII_TG3_FET_PTEST_FRC_TX_LOCK
);
7579 /* The write needs to be flushed for the AC131 */
7580 tg3_readphy(tp
, MII_TG3_FET_PTEST
, &val
);
7583 /* Reset to prevent losing 1st rx packet intermittently */
7584 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
7585 tg3_flag(tp
, 5780_CLASS
)) {
7586 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
7588 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
7591 mac_mode
= tp
->mac_mode
&
7592 ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
7593 if (speed
== SPEED_1000
)
7594 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
7596 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
7598 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
7599 u32 masked_phy_id
= tp
->phy_id
& TG3_PHY_ID_MASK
;
7601 if (masked_phy_id
== TG3_PHY_ID_BCM5401
)
7602 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
7603 else if (masked_phy_id
== TG3_PHY_ID_BCM5411
)
7604 mac_mode
|= MAC_MODE_LINK_POLARITY
;
7606 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
7607 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
7610 tw32(MAC_MODE
, mac_mode
);
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
7697 /* Initialize rx rings for packet processing.
7699 * The chip has been shut down and the driver detached from
7700 * the networking, so no interrupts or new tx packets will
7701 * end up in the driver. tp->{tx,}lock are held and thus
7704 static int tg3_rx_prodring_alloc(struct tg3
*tp
,
7705 struct tg3_rx_prodring_set
*tpr
)
7707 u32 i
, rx_pkt_dma_sz
;
7709 tpr
->rx_std_cons_idx
= 0;
7710 tpr
->rx_std_prod_idx
= 0;
7711 tpr
->rx_jmb_cons_idx
= 0;
7712 tpr
->rx_jmb_prod_idx
= 0;
7714 if (tpr
!= &tp
->napi
[0].prodring
) {
7715 memset(&tpr
->rx_std_buffers
[0], 0,
7716 TG3_RX_STD_BUFF_RING_SIZE(tp
));
7717 if (tpr
->rx_jmb_buffers
)
7718 memset(&tpr
->rx_jmb_buffers
[0], 0,
7719 TG3_RX_JMB_BUFF_RING_SIZE(tp
));
7723 /* Zero out all descriptors. */
7724 memset(tpr
->rx_std
, 0, TG3_RX_STD_RING_BYTES(tp
));
7726 rx_pkt_dma_sz
= TG3_RX_STD_DMA_SZ
;
7727 if (tg3_flag(tp
, 5780_CLASS
) &&
7728 tp
->dev
->mtu
> ETH_DATA_LEN
)
7729 rx_pkt_dma_sz
= TG3_RX_JMB_DMA_SZ
;
7730 tp
->rx_pkt_map_sz
= TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz
);
7732 /* Initialize invariants of the rings, we only set this
7733 * stuff once. This works because the card does not
7734 * write into the rx buffer posting rings.
7736 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++) {
7737 struct tg3_rx_buffer_desc
*rxd
;
7739 rxd
= &tpr
->rx_std
[i
];
7740 rxd
->idx_len
= rx_pkt_dma_sz
<< RXD_LEN_SHIFT
;
7741 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
7742 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
7743 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
7746 /* Now allocate fresh SKBs for each rx ring. */
7747 for (i
= 0; i
< tp
->rx_pending
; i
++) {
7748 unsigned int frag_size
;
7750 if (tg3_alloc_rx_data(tp
, tpr
, RXD_OPAQUE_RING_STD
, i
,
7752 netdev_warn(tp
->dev
,
7753 "Using a smaller RX standard ring. Only "
7754 "%d out of %d buffers were allocated "
7755 "successfully\n", i
, tp
->rx_pending
);
7763 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
7766 memset(tpr
->rx_jmb
, 0, TG3_RX_JMB_RING_BYTES(tp
));
7768 if (!tg3_flag(tp
, JUMBO_RING_ENABLE
))
7771 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++) {
7772 struct tg3_rx_buffer_desc
*rxd
;
7774 rxd
= &tpr
->rx_jmb
[i
].std
;
7775 rxd
->idx_len
= TG3_RX_JMB_DMA_SZ
<< RXD_LEN_SHIFT
;
7776 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
7778 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
7779 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
7782 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
7783 unsigned int frag_size
;
7785 if (tg3_alloc_rx_data(tp
, tpr
, RXD_OPAQUE_RING_JUMBO
, i
,
7787 netdev_warn(tp
->dev
,
7788 "Using a smaller RX jumbo ring. Only %d "
7789 "out of %d buffers were allocated "
7790 "successfully\n", i
, tp
->rx_jumbo_pending
);
7793 tp
->rx_jumbo_pending
= i
;
7802 tg3_rx_prodring_free(tp
, tpr
);
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts. Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero. This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
8078 * Must not be invoked with interrupt sources disabled and
8079 * the hardware shutdown down. Can sleep.
8081 static int tg3_alloc_consistent(struct tg3
*tp
)
8085 tp
->hw_stats
= dma_alloc_coherent(&tp
->pdev
->dev
,
8086 sizeof(struct tg3_hw_stats
),
8092 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
8094 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8095 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8096 struct tg3_hw_status
*sblk
;
8098 tnapi
->hw_status
= dma_alloc_coherent(&tp
->pdev
->dev
,
8100 &tnapi
->status_mapping
,
8102 if (!tnapi
->hw_status
)
8105 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8106 sblk
= tnapi
->hw_status
;
8108 if (tg3_flag(tp
, ENABLE_RSS
)) {
8109 u16
*prodptr
= NULL
;
8112 * When RSS is enabled, the status block format changes
8113 * slightly. The "rx_jumbo_consumer", "reserved",
8114 * and "rx_mini_consumer" members get mapped to the
8115 * other three rx return ring producer indexes.
8119 prodptr
= &sblk
->idx
[0].rx_producer
;
8122 prodptr
= &sblk
->rx_jumbo_consumer
;
8125 prodptr
= &sblk
->reserved
;
8128 prodptr
= &sblk
->rx_mini_consumer
;
8131 tnapi
->rx_rcb_prod_idx
= prodptr
;
8133 tnapi
->rx_rcb_prod_idx
= &sblk
->idx
[0].rx_producer
;
8137 if (tg3_mem_tx_acquire(tp
) || tg3_mem_rx_acquire(tp
))
8143 tg3_free_consistent(tp
);
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears. tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
8195 /* tp->lock is held. */
8196 static int tg3_abort_hw(struct tg3
*tp
, int silent
)
8200 tg3_disable_ints(tp
);
8202 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
8203 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8206 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
8207 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
8208 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
8209 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
8210 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
8211 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
8213 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
8214 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
8215 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
8216 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
8217 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
8218 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
8219 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
8221 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
8222 tw32_f(MAC_MODE
, tp
->mac_mode
);
8225 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
8226 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
8228 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
8230 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
8233 if (i
>= MAX_WAIT_CNT
) {
8234 dev_err(&tp
->pdev
->dev
,
8235 "%s timed out, TX_MODE_ENABLE will not clear "
8236 "MAC_TX_MODE=%08x\n", __func__
, tr32(MAC_TX_MODE
));
8240 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
8241 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
8242 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
8244 tw32(FTQ_RESET
, 0xffffffff);
8245 tw32(FTQ_RESET
, 0x00000000);
8247 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
8248 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
8250 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8251 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8252 if (tnapi
->hw_status
)
8253 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8259 /* Save PCI command register before chip reset */
8260 static void tg3_save_pci_state(struct tg3
*tp
)
8262 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &tp
->pci_cmd
);
8265 /* Restore PCI state after chip reset */
8266 static void tg3_restore_pci_state(struct tg3
*tp
)
8270 /* Re-enable indirect register accesses. */
8271 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
8272 tp
->misc_host_ctrl
);
8274 /* Set MAX PCI retry to zero. */
8275 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
8276 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
&&
8277 tg3_flag(tp
, PCIX_MODE
))
8278 val
|= PCISTATE_RETRY_SAME_DMA
;
8279 /* Allow reads and writes to the APE register and memory space. */
8280 if (tg3_flag(tp
, ENABLE_APE
))
8281 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
8282 PCISTATE_ALLOW_APE_SHMEM_WR
|
8283 PCISTATE_ALLOW_APE_PSPACE_WR
;
8284 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
8286 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, tp
->pci_cmd
);
8288 if (!tg3_flag(tp
, PCI_EXPRESS
)) {
8289 pci_write_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
8290 tp
->pci_cacheline_sz
);
8291 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
8295 /* Make sure PCI-X relaxed ordering bit is clear. */
8296 if (tg3_flag(tp
, PCIX_MODE
)) {
8299 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8301 pcix_cmd
&= ~PCI_X_CMD_ERO
;
8302 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8306 if (tg3_flag(tp
, 5780_CLASS
)) {
8308 /* Chip reset on 5780 will reset MSI enable bit,
8309 * so need to restore it.
8311 if (tg3_flag(tp
, USING_MSI
)) {
8314 pci_read_config_word(tp
->pdev
,
8315 tp
->msi_cap
+ PCI_MSI_FLAGS
,
8317 pci_write_config_word(tp
->pdev
,
8318 tp
->msi_cap
+ PCI_MSI_FLAGS
,
8319 ctrl
| PCI_MSI_FLAGS_ENABLE
);
8320 val
= tr32(MSGINT_MODE
);
8321 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
8326 /* tp->lock is held. */
8327 static int tg3_chip_reset(struct tg3
*tp
)
8330 void (*write_op
)(struct tg3
*, u32
, u32
);
8335 tg3_ape_lock(tp
, TG3_APE_LOCK_GRC
);
8337 /* No matching tg3_nvram_unlock() after this because
8338 * chip reset below will undo the nvram lock.
8340 tp
->nvram_lock_cnt
= 0;
8342 /* GRC_MISC_CFG core clock reset will clear the memory
8343 * enable bit in PCI register 4 and the MSI enable bit
8344 * on some chips, so we save relevant registers here.
8346 tg3_save_pci_state(tp
);
8348 if (tg3_asic_rev(tp
) == ASIC_REV_5752
||
8349 tg3_flag(tp
, 5755_PLUS
))
8350 tw32(GRC_FASTBOOT_PC
, 0);
8353 * We must avoid the readl() that normally takes place.
8354 * It locks machines, causes machine checks, and other
8355 * fun things. So, temporarily disable the 5701
8356 * hardware workaround, while we do the reset.
8358 write_op
= tp
->write32
;
8359 if (write_op
== tg3_write_flush_reg32
)
8360 tp
->write32
= tg3_write32
;
8362 /* Prevent the irq handler from reading or writing PCI registers
8363 * during chip reset when the memory enable bit in the PCI command
8364 * register may be cleared. The chip does not generate interrupt
8365 * at this time, but the irq handler may still be called due to irq
8366 * sharing or irqpoll.
8368 tg3_flag_set(tp
, CHIP_RESETTING
);
8369 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8370 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8371 if (tnapi
->hw_status
) {
8372 tnapi
->hw_status
->status
= 0;
8373 tnapi
->hw_status
->status_tag
= 0;
8375 tnapi
->last_tag
= 0;
8376 tnapi
->last_irq_tag
= 0;
8380 for (i
= 0; i
< tp
->irq_cnt
; i
++)
8381 synchronize_irq(tp
->napi
[i
].irq_vec
);
8383 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
8384 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
8385 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
8389 val
= GRC_MISC_CFG_CORECLK_RESET
;
8391 if (tg3_flag(tp
, PCI_EXPRESS
)) {
8392 /* Force PCIe 1.0a mode */
8393 if (tg3_asic_rev(tp
) != ASIC_REV_5785
&&
8394 !tg3_flag(tp
, 57765_PLUS
) &&
8395 tr32(TG3_PCIE_PHY_TSTCTL
) ==
8396 (TG3_PCIE_PHY_TSTCTL_PCIE10
| TG3_PCIE_PHY_TSTCTL_PSCRAM
))
8397 tw32(TG3_PCIE_PHY_TSTCTL
, TG3_PCIE_PHY_TSTCTL_PSCRAM
);
8399 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
) {
8400 tw32(GRC_MISC_CFG
, (1 << 29));
8405 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
8406 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
8407 tw32(GRC_VCPU_EXT_CTRL
,
8408 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
8411 /* Manage gphy power for all CPMU absent PCIe devices. */
8412 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, CPMU_PRESENT
))
8413 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
8415 tw32(GRC_MISC_CFG
, val
);
8417 /* restore 5701 hardware bug workaround write method */
8418 tp
->write32
= write_op
;
8420 /* Unfortunately, we have to delay before the PCI read back.
8421 * Some 575X chips even will not respond to a PCI cfg access
8422 * when the reset command is given to the chip.
8424 * How do these hardware designers expect things to work
8425 * properly if the PCI write is posted for a long period
8426 * of time? It is always necessary to have some method by
8427 * which a register read back can occur to push the write
8428 * out which does the reset.
8430 * For most tg3 variants the trick below was working.
8435 /* Flush PCI posted writes. The normal MMIO registers
8436 * are inaccessible at this time so this is the only
8437 * way to make this reliably (actually, this is no longer
8438 * the case, see above). I tried to use indirect
8439 * register read/write but this upset some 5701 variants.
8441 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
8445 if (tg3_flag(tp
, PCI_EXPRESS
) && pci_is_pcie(tp
->pdev
)) {
8448 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A0
) {
8452 /* Wait for link training to complete. */
8453 for (j
= 0; j
< 5000; j
++)
8456 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
8457 pci_write_config_dword(tp
->pdev
, 0xc4,
8458 cfg_val
| (1 << 15));
8461 /* Clear the "no snoop" and "relaxed ordering" bits. */
8462 val16
= PCI_EXP_DEVCTL_RELAX_EN
| PCI_EXP_DEVCTL_NOSNOOP_EN
;
8464 * Older PCIe devices only support the 128 byte
8465 * MPS setting. Enforce the restriction.
8467 if (!tg3_flag(tp
, CPMU_PRESENT
))
8468 val16
|= PCI_EXP_DEVCTL_PAYLOAD
;
8469 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_DEVCTL
, val16
);
8471 /* Clear error status */
8472 pcie_capability_write_word(tp
->pdev
, PCI_EXP_DEVSTA
,
8473 PCI_EXP_DEVSTA_CED
|
8474 PCI_EXP_DEVSTA_NFED
|
8475 PCI_EXP_DEVSTA_FED
|
8476 PCI_EXP_DEVSTA_URD
);
8479 tg3_restore_pci_state(tp
);
8481 tg3_flag_clear(tp
, CHIP_RESETTING
);
8482 tg3_flag_clear(tp
, ERROR_PROCESSED
);
8485 if (tg3_flag(tp
, 5780_CLASS
))
8486 val
= tr32(MEMARB_MODE
);
8487 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
8489 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A3
) {
8491 tw32(0x5000, 0x400);
8494 if (tg3_flag(tp
, IS_SSB_CORE
)) {
8496 * BCM4785: In order to avoid repercussions from using
8497 * potentially defective internal ROM, stop the Rx RISC CPU,
8498 * which is not required.
8501 tg3_halt_cpu(tp
, RX_CPU_BASE
);
8504 tw32(GRC_MODE
, tp
->grc_mode
);
8506 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A0
) {
8509 tw32(0xc4, val
| (1 << 15));
8512 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
8513 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
8514 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
8515 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A0
)
8516 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
8517 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
8520 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8521 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
8523 } else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
8524 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
8529 tw32_f(MAC_MODE
, val
);
8532 tg3_ape_unlock(tp
, TG3_APE_LOCK_GRC
);
8534 err
= tg3_poll_fw(tp
);
8540 if (tg3_flag(tp
, PCI_EXPRESS
) &&
8541 tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
&&
8542 tg3_asic_rev(tp
) != ASIC_REV_5785
&&
8543 !tg3_flag(tp
, 57765_PLUS
)) {
8546 tw32(0x7c00, val
| (1 << 25));
8549 if (tg3_asic_rev(tp
) == ASIC_REV_5720
) {
8550 val
= tr32(TG3_CPMU_CLCK_ORIDE
);
8551 tw32(TG3_CPMU_CLCK_ORIDE
, val
& ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN
);
8554 /* Reprobe ASF enable state. */
8555 tg3_flag_clear(tp
, ENABLE_ASF
);
8556 tg3_flag_clear(tp
, ASF_NEW_HANDSHAKE
);
8557 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
8558 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
8561 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
8562 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
8563 tg3_flag_set(tp
, ENABLE_ASF
);
8564 tp
->last_event_jiffies
= jiffies
;
8565 if (tg3_flag(tp
, 5750_PLUS
))
8566 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
8664 static void tg3_coal_tx_init(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
8668 if (!tg3_flag(tp
, ENABLE_TSS
)) {
8669 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
8670 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
8671 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
8673 tw32(HOSTCC_TXCOL_TICKS
, 0);
8674 tw32(HOSTCC_TXMAX_FRAMES
, 0);
8675 tw32(HOSTCC_TXCOAL_MAXF_INT
, 0);
8677 for (; i
< tp
->txq_cnt
; i
++) {
8680 reg
= HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18;
8681 tw32(reg
, ec
->tx_coalesce_usecs
);
8682 reg
= HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18;
8683 tw32(reg
, ec
->tx_max_coalesced_frames
);
8684 reg
= HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
8685 tw32(reg
, ec
->tx_max_coalesced_frames_irq
);
8689 for (; i
< tp
->irq_max
- 1; i
++) {
8690 tw32(HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18, 0);
8691 tw32(HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
8692 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
8696 static void tg3_coal_rx_init(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
8699 u32 limit
= tp
->rxq_cnt
;
8701 if (!tg3_flag(tp
, ENABLE_RSS
)) {
8702 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
8703 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
8704 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
8707 tw32(HOSTCC_RXCOL_TICKS
, 0);
8708 tw32(HOSTCC_RXMAX_FRAMES
, 0);
8709 tw32(HOSTCC_RXCOAL_MAXF_INT
, 0);
8712 for (; i
< limit
; i
++) {
8715 reg
= HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18;
8716 tw32(reg
, ec
->rx_coalesce_usecs
);
8717 reg
= HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18;
8718 tw32(reg
, ec
->rx_max_coalesced_frames
);
8719 reg
= HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
8720 tw32(reg
, ec
->rx_max_coalesced_frames_irq
);
8723 for (; i
< tp
->irq_max
- 1; i
++) {
8724 tw32(HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18, 0);
8725 tw32(HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
8726 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
8730 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
8732 tg3_coal_tx_init(tp
, ec
);
8733 tg3_coal_rx_init(tp
, ec
);
8735 if (!tg3_flag(tp
, 5705_PLUS
)) {
8736 u32 val
= ec
->stats_block_coalesce_usecs
;
8738 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
8739 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
8744 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
8748 /* tp->lock is held. */
8749 static void tg3_rings_reset(struct tg3
*tp
)
8752 u32 stblk
, txrcb
, rxrcb
, limit
;
8753 struct tg3_napi
*tnapi
= &tp
->napi
[0];
8755 /* Disable all transmit rings but the first. */
8756 if (!tg3_flag(tp
, 5705_PLUS
))
8757 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 16;
8758 else if (tg3_flag(tp
, 5717_PLUS
))
8759 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 4;
8760 else if (tg3_flag(tp
, 57765_CLASS
) ||
8761 tg3_asic_rev(tp
) == ASIC_REV_5762
)
8762 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 2;
8764 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
8766 for (txrcb
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
8767 txrcb
< limit
; txrcb
+= TG3_BDINFO_SIZE
)
8768 tg3_write_mem(tp
, txrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
8769 BDINFO_FLAGS_DISABLED
);
8772 /* Disable all receive return rings but the first. */
8773 if (tg3_flag(tp
, 5717_PLUS
))
8774 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 17;
8775 else if (!tg3_flag(tp
, 5705_PLUS
))
8776 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 16;
8777 else if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
8778 tg3_asic_rev(tp
) == ASIC_REV_5762
||
8779 tg3_flag(tp
, 57765_CLASS
))
8780 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 4;
8782 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
8784 for (rxrcb
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
8785 rxrcb
< limit
; rxrcb
+= TG3_BDINFO_SIZE
)
8786 tg3_write_mem(tp
, rxrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
8787 BDINFO_FLAGS_DISABLED
);
8789 /* Disable interrupts */
8790 tw32_mailbox_f(tp
->napi
[0].int_mbox
, 1);
8791 tp
->napi
[0].chk_msi_cnt
= 0;
8792 tp
->napi
[0].last_rx_cons
= 0;
8793 tp
->napi
[0].last_tx_cons
= 0;
8795 /* Zero mailbox registers. */
8796 if (tg3_flag(tp
, SUPPORT_MSIX
)) {
8797 for (i
= 1; i
< tp
->irq_max
; i
++) {
8798 tp
->napi
[i
].tx_prod
= 0;
8799 tp
->napi
[i
].tx_cons
= 0;
8800 if (tg3_flag(tp
, ENABLE_TSS
))
8801 tw32_mailbox(tp
->napi
[i
].prodmbox
, 0);
8802 tw32_rx_mbox(tp
->napi
[i
].consmbox
, 0);
8803 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 1);
8804 tp
->napi
[i
].chk_msi_cnt
= 0;
8805 tp
->napi
[i
].last_rx_cons
= 0;
8806 tp
->napi
[i
].last_tx_cons
= 0;
8808 if (!tg3_flag(tp
, ENABLE_TSS
))
8809 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
8811 tp
->napi
[0].tx_prod
= 0;
8812 tp
->napi
[0].tx_cons
= 0;
8813 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
8814 tw32_rx_mbox(tp
->napi
[0].consmbox
, 0);
8817 /* Make sure the NIC-based send BD rings are disabled. */
8818 if (!tg3_flag(tp
, 5705_PLUS
)) {
8819 u32 mbox
= MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
8820 for (i
= 0; i
< 16; i
++)
8821 tw32_tx_mbox(mbox
+ i
* 8, 0);
8824 txrcb
= NIC_SRAM_SEND_RCB
;
8825 rxrcb
= NIC_SRAM_RCV_RET_RCB
;
8827 /* Clear status block in ram. */
8828 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8830 /* Set status block DMA address */
8831 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8832 ((u64
) tnapi
->status_mapping
>> 32));
8833 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8834 ((u64
) tnapi
->status_mapping
& 0xffffffff));
8836 if (tnapi
->tx_ring
) {
8837 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
8838 (TG3_TX_RING_SIZE
<<
8839 BDINFO_FLAGS_MAXLEN_SHIFT
),
8840 NIC_SRAM_TX_BUFFER_DESC
);
8841 txrcb
+= TG3_BDINFO_SIZE
;
8844 if (tnapi
->rx_rcb
) {
8845 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
8846 (tp
->rx_ret_ring_mask
+ 1) <<
8847 BDINFO_FLAGS_MAXLEN_SHIFT
, 0);
8848 rxrcb
+= TG3_BDINFO_SIZE
;
8851 stblk
= HOSTCC_STATBLCK_RING1
;
8853 for (i
= 1, tnapi
++; i
< tp
->irq_cnt
; i
++, tnapi
++) {
8854 u64 mapping
= (u64
)tnapi
->status_mapping
;
8855 tw32(stblk
+ TG3_64BIT_REG_HIGH
, mapping
>> 32);
8856 tw32(stblk
+ TG3_64BIT_REG_LOW
, mapping
& 0xffffffff);
8858 /* Clear status block in ram. */
8859 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8861 if (tnapi
->tx_ring
) {
8862 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
8863 (TG3_TX_RING_SIZE
<<
8864 BDINFO_FLAGS_MAXLEN_SHIFT
),
8865 NIC_SRAM_TX_BUFFER_DESC
);
8866 txrcb
+= TG3_BDINFO_SIZE
;
8869 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
8870 ((tp
->rx_ret_ring_mask
+ 1) <<
8871 BDINFO_FLAGS_MAXLEN_SHIFT
), 0);
8874 rxrcb
+= TG3_BDINFO_SIZE
;
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
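/* Illustrative note (not part of the original driver): the multicast filter
 * below hashes each address with the standard Ethernet CRC-32 computed by
 * calc_crc() above.  A 7-bit hash value selects one of 128 filter bits:
 * bits 5-6 of the hash pick which MAC_HASH_REG_x register holds the bit
 * (regidx = (bit & 0x60) >> 5), and the low five bits select the position
 * within that 32-bit register.
 */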
8950 static void __tg3_set_rx_mode(struct net_device
*dev
)
8952 struct tg3
*tp
= netdev_priv(dev
);
8955 rx_mode
= tp
->rx_mode
& ~(RX_MODE_PROMISC
|
8956 RX_MODE_KEEP_VLAN_TAG
);
8958 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8959 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8962 if (!tg3_flag(tp
, ENABLE_ASF
))
8963 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
8966 if (dev
->flags
& IFF_PROMISC
) {
8967 /* Promiscuous mode. */
8968 rx_mode
|= RX_MODE_PROMISC
;
8969 } else if (dev
->flags
& IFF_ALLMULTI
) {
8970 /* Accept all multicast. */
8971 tg3_set_multi(tp
, 1);
8972 } else if (netdev_mc_empty(dev
)) {
8973 /* Reject all multicast. */
8974 tg3_set_multi(tp
, 0);
8976 /* Accept one or more multicast(s). */
8977 struct netdev_hw_addr
*ha
;
8978 u32 mc_filter
[4] = { 0, };
8983 netdev_for_each_mc_addr(ha
, dev
) {
8984 crc
= calc_crc(ha
->addr
, ETH_ALEN
);
8986 regidx
= (bit
& 0x60) >> 5;
8988 mc_filter
[regidx
] |= (1 << bit
);
8991 tw32(MAC_HASH_REG_0
, mc_filter
[0]);
8992 tw32(MAC_HASH_REG_1
, mc_filter
[1]);
8993 tw32(MAC_HASH_REG_2
, mc_filter
[2]);
8994 tw32(MAC_HASH_REG_3
, mc_filter
[3]);
8997 if (rx_mode
!= tp
->rx_mode
) {
8998 tp
->rx_mode
= rx_mode
;
8999 tw32_f(MAC_RX_MODE
, rx_mode
);
9004 static void tg3_rss_init_dflt_indir_tbl(struct tg3
*tp
, u32 qcnt
)
9008 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
9009 tp
->rss_ind_tbl
[i
] = ethtool_rxfh_indir_default(i
, qcnt
);
9012 static void tg3_rss_check_indir_tbl(struct tg3
*tp
)
9016 if (!tg3_flag(tp
, SUPPORT_MSIX
))
9019 if (tp
->rxq_cnt
== 1) {
9020 memset(&tp
->rss_ind_tbl
[0], 0, sizeof(tp
->rss_ind_tbl
));
9024 /* Validate table against current IRQ count */
9025 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++) {
9026 if (tp
->rss_ind_tbl
[i
] >= tp
->rxq_cnt
)
9030 if (i
!= TG3_RSS_INDIR_TBL_SIZE
)
9031 tg3_rss_init_dflt_indir_tbl(tp
, tp
->rxq_cnt
);
9034 static void tg3_rss_write_indir_tbl(struct tg3
*tp
)
9037 u32 reg
= MAC_RSS_INDIR_TBL_0
;
9039 while (i
< TG3_RSS_INDIR_TBL_SIZE
) {
9040 u32 val
= tp
->rss_ind_tbl
[i
];
9042 for (; i
% 8; i
++) {
9044 val
|= tp
->rss_ind_tbl
[i
];
9051 /* tp->lock is held. */
9052 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
9054 u32 val
, rdmac_mode
;
9056 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
9058 tg3_disable_ints(tp
);
9062 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
9064 if (tg3_flag(tp
, INIT_COMPLETE
))
9065 tg3_abort_hw(tp
, 1);
9067 /* Enable MAC control of LPI */
9068 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
9069 val
= TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
9070 TG3_CPMU_EEE_LNKIDL_UART_IDL
;
9071 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
)
9072 val
|= TG3_CPMU_EEE_LNKIDL_APE_TX_MT
;
9074 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
, val
);
9076 tw32_f(TG3_CPMU_EEE_CTRL
,
9077 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
9079 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
9080 TG3_CPMU_EEEMD_LPI_IN_TX
|
9081 TG3_CPMU_EEEMD_LPI_IN_RX
|
9082 TG3_CPMU_EEEMD_EEE_ENABLE
;
9084 if (tg3_asic_rev(tp
) != ASIC_REV_5717
)
9085 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
9087 if (tg3_flag(tp
, ENABLE_APE
))
9088 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
9090 tw32_f(TG3_CPMU_EEE_MODE
, val
);
9092 tw32_f(TG3_CPMU_EEE_DBTMR1
,
9093 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
9094 TG3_CPMU_DBTMR1_LNKIDLE_2047US
);
9096 tw32_f(TG3_CPMU_EEE_DBTMR2
,
9097 TG3_CPMU_DBTMR2_APE_TX_2047US
|
9098 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
9104 err
= tg3_chip_reset(tp
);
9108 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
9110 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
) {
9111 val
= tr32(TG3_CPMU_CTRL
);
9112 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
9113 tw32(TG3_CPMU_CTRL
, val
);
9115 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
9116 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
9117 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
9118 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
9120 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
9121 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
9122 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
9123 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
9125 val
= tr32(TG3_CPMU_HST_ACC
);
9126 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
9127 val
|= CPMU_HST_ACC_MACCLK_6_25
;
9128 tw32(TG3_CPMU_HST_ACC
, val
);
9131 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
9132 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
9133 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
9134 PCIE_PWR_MGMT_L1_THRESH_4MS
;
9135 tw32(PCIE_PWR_MGMT_THRESH
, val
);
9137 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
9138 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
9140 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
9142 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
9143 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
9146 if (tg3_flag(tp
, L1PLLPD_EN
)) {
9147 u32 grc_mode
= tr32(GRC_MODE
);
9149 /* Access the lower 1K of PL PCIE block registers. */
9150 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9151 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
9153 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
9154 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
9155 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
9157 tw32(GRC_MODE
, grc_mode
);
	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}
	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}
	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* Setup the timer prescaler register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
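	/* Illustrative note (added for clarity, not from the original source):
	 * the prescaler divides the fixed 66 MHz core clock by (N + 1), so a
	 * value of 65 gives 66 MHz / 66 = 1 MHz, i.e. a 1 usec timer tick.
	 */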
9298 /* Initialize MBUF/DESC pool. */
9299 if (tg3_flag(tp
, 5750_PLUS
)) {
9301 } else if (tg3_asic_rev(tp
) != ASIC_REV_5705
) {
9302 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
9303 if (tg3_asic_rev(tp
) == ASIC_REV_5704
)
9304 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
9306 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
9307 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
9308 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
9309 } else if (tg3_flag(tp
, TSO_CAPABLE
)) {
9312 fw_len
= tp
->fw_len
;
9313 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
9314 tw32(BUFMGR_MB_POOL_ADDR
,
9315 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
9316 tw32(BUFMGR_MB_POOL_SIZE
,
9317 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
9320 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
9321 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9322 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
9323 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9324 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
9325 tw32(BUFMGR_MB_HIGH_WATER
,
9326 tp
->bufmgr_config
.mbuf_high_water
);
9328 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9329 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
9330 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9331 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
9332 tw32(BUFMGR_MB_HIGH_WATER
,
9333 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
9335 tw32(BUFMGR_DMA_LOW_WATER
,
9336 tp
->bufmgr_config
.dma_low_water
);
9337 tw32(BUFMGR_DMA_HIGH_WATER
,
9338 tp
->bufmgr_config
.dma_high_water
);
9340 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
9341 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
9342 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
9343 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
9344 tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9345 tg3_chip_rev_id(tp
) == CHIPREV_ID_5720_A0
)
9346 val
|= BUFMGR_MODE_MBLOW_ATTN_ENAB
;
9347 tw32(BUFMGR_MODE
, val
);
9348 for (i
= 0; i
< 2000; i
++) {
9349 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
9354 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
9358 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5906_A1
)
9359 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
9361 tg3_setup_rxbd_thresholds(tp
);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);
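	/* Illustrative sketch (added for clarity, not from the original
	 * source): every TG3_BDINFO control block is programmed with the same
	 * pattern.  Using placeholder names "base", "mapping", "maxlen",
	 * "flags" and "nic_addr" purely for illustration:
	 *
	 *	tw32(base + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, mapping >> 32);
	 *	tw32(base + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
	 *	tw32(base + TG3_BDINFO_MAXLEN_FLAGS, (maxlen << 16) | flags);
	 *	tw32(base + TG3_BDINFO_NIC_ADDR, nic_addr);
	 *
	 * The standard, mini and jumbo rings above and below all follow this
	 * layout.
	 */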
9393 /* Program the jumbo buffer descriptor ring control
9394 * blocks on those devices that have them.
9396 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9397 (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))) {
9399 if (tg3_flag(tp
, JUMBO_RING_ENABLE
)) {
9400 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9401 ((u64
) tpr
->rx_jmb_mapping
>> 32));
9402 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9403 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
9404 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
9405 BDINFO_FLAGS_MAXLEN_SHIFT
;
9406 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9407 val
| BDINFO_FLAGS_USE_EXT_RECV
);
9408 if (!tg3_flag(tp
, USE_JUMBO_BDFLAG
) ||
9409 tg3_flag(tp
, 57765_CLASS
) ||
9410 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9411 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
9412 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
9414 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9415 BDINFO_FLAGS_DISABLED
);
9418 if (tg3_flag(tp
, 57765_PLUS
)) {
9419 val
= TG3_RX_STD_RING_SIZE(tp
);
9420 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
9421 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
9423 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9425 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9427 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
9429 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
9430 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
9432 tpr
->rx_jmb_prod_idx
=
9433 tg3_flag(tp
, JUMBO_RING_ENABLE
) ? tp
->rx_jumbo_pending
: 0;
9434 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
9436 tg3_rings_reset(tp
);
9438 /* Initialize MAC address and backoff seed. */
9439 __tg3_set_mac_addr(tp
, 0);
9441 /* MTU + ethernet header + FCS + optional VLAN tag */
9442 tw32(MAC_RX_MTU_SIZE
,
9443 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
9445 /* The slot time is changed by tg3_setup_phy if we
9446 * run at gigabit with half duplex.
9448 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
9449 (6 << TX_LENGTHS_IPG_SHIFT
) |
9450 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
);
9452 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
9453 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9454 val
|= tr32(MAC_TX_LENGTHS
) &
9455 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
9456 TX_LENGTHS_CNT_DWN_VAL_MSK
);
9458 tw32(MAC_TX_LENGTHS
, val
);
9460 /* Receive rules. */
9461 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
9462 tw32(RCVLPC_CONFIG
, 0x0181);
9464 /* Calculate RDMAC_MODE setting early, we need it to determine
9465 * the RCVLPC_STATE_ENABLE mask.
9467 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
9468 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
9469 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
9470 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
9471 RDMAC_MODE_LNGREAD_ENAB
);
9473 if (tg3_asic_rev(tp
) == ASIC_REV_5717
)
9474 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
9476 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
9477 tg3_asic_rev(tp
) == ASIC_REV_5785
||
9478 tg3_asic_rev(tp
) == ASIC_REV_57780
)
9479 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
9480 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
9481 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
9483 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
9484 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
9485 if (tg3_flag(tp
, TSO_CAPABLE
) &&
9486 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
9487 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
9488 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
9489 !tg3_flag(tp
, IS_5788
)) {
9490 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9494 if (tg3_flag(tp
, PCI_EXPRESS
))
9495 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9497 if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
9499 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
9500 rdmac_mode
|= RDMAC_MODE_JMB_2K_MMRR
;
9501 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_2K
;
9505 if (tg3_flag(tp
, HW_TSO_1
) ||
9506 tg3_flag(tp
, HW_TSO_2
) ||
9507 tg3_flag(tp
, HW_TSO_3
))
9508 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
9510 if (tg3_flag(tp
, 57765_PLUS
) ||
9511 tg3_asic_rev(tp
) == ASIC_REV_5785
||
9512 tg3_asic_rev(tp
) == ASIC_REV_57780
)
9513 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
9515 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
9516 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9517 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
9519 if (tg3_asic_rev(tp
) == ASIC_REV_5761
||
9520 tg3_asic_rev(tp
) == ASIC_REV_5784
||
9521 tg3_asic_rev(tp
) == ASIC_REV_5785
||
9522 tg3_asic_rev(tp
) == ASIC_REV_57780
||
9523 tg3_flag(tp
, 57765_PLUS
)) {
9526 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
9527 tgtreg
= TG3_RDMA_RSRVCTRL_REG2
;
9529 tgtreg
= TG3_RDMA_RSRVCTRL_REG
;
9532 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9533 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
9534 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
9535 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
9536 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
9537 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
9538 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
9539 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
9541 tw32(tgtreg
, val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
9544 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
9545 tg3_asic_rev(tp
) == ASIC_REV_5720
||
9546 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
9549 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
9550 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL2
;
9552 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL
;
9556 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
9557 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
9560 /* Receive/send statistics. */
9561 if (tg3_flag(tp
, 5750_PLUS
)) {
9562 val
= tr32(RCVLPC_STATS_ENABLE
);
9563 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
9564 tw32(RCVLPC_STATS_ENABLE
, val
);
9565 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
9566 tg3_flag(tp
, TSO_CAPABLE
)) {
9567 val
= tr32(RCVLPC_STATS_ENABLE
);
9568 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
9569 tw32(RCVLPC_STATS_ENABLE
, val
);
9571 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
9573 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
9574 tw32(SNDDATAI_STATSENAB
, 0xffffff);
9575 tw32(SNDDATAI_STATSCTRL
,
9576 (SNDDATAI_SCTRL_ENABLE
|
9577 SNDDATAI_SCTRL_FASTUPD
));
9579 /* Setup host coalescing engine. */
9580 tw32(HOSTCC_MODE
, 0);
9581 for (i
= 0; i
< 2000; i
++) {
9582 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
9587 __tg3_set_coalesce(tp
, &tp
->coal
);
9589 if (!tg3_flag(tp
, 5705_PLUS
)) {
9590 /* Status/statistics block address. See tg3_timer,
9591 * the tg3_periodic_fetch_stats call there, and
9592 * tg3_get_stats to see how this works for 5705/5750 chips.
9594 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9595 ((u64
) tp
->stats_mapping
>> 32));
9596 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9597 ((u64
) tp
->stats_mapping
& 0xffffffff));
9598 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
9600 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
9602 /* Clear statistics and status block memory areas */
9603 for (i
= NIC_SRAM_STATS_BLK
;
9604 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
9606 tg3_write_mem(tp
, i
, 0);
9611 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
9613 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
9614 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
9615 if (!tg3_flag(tp
, 5705_PLUS
))
9616 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
9618 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
9619 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
9620 /* reset to prevent losing 1st rx packet intermittently */
9621 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
9625 tp
->mac_mode
|= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
9626 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
|
9627 MAC_MODE_FHDE_ENABLE
;
9628 if (tg3_flag(tp
, ENABLE_APE
))
9629 tp
->mac_mode
|= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
9630 if (!tg3_flag(tp
, 5705_PLUS
) &&
9631 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9632 tg3_asic_rev(tp
) != ASIC_REV_5700
)
9633 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
9634 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
9637 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9638 * If TG3_FLAG_IS_NIC is zero, we should read the
9639 * register to preserve the GPIO settings for LOMs. The GPIOs,
9640 * whether used as inputs or outputs, are set by boot code after
9643 if (!tg3_flag(tp
, IS_NIC
)) {
9646 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
9647 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
9648 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
9650 if (tg3_asic_rev(tp
) == ASIC_REV_5752
)
9651 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
9652 GRC_LCLCTRL_GPIO_OUTPUT3
;
9654 if (tg3_asic_rev(tp
) == ASIC_REV_5755
)
9655 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
9657 tp
->grc_local_ctrl
&= ~gpio_mask
;
9658 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
9660 /* GPIO1 must be driven high for eeprom write protect */
9661 if (tg3_flag(tp
, EEPROM_WRITE_PROT
))
9662 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
9663 GRC_LCLCTRL_GPIO_OUTPUT1
);
9665 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9668 if (tg3_flag(tp
, USING_MSIX
)) {
9669 val
= tr32(MSGINT_MODE
);
9670 val
|= MSGINT_MODE_ENABLE
;
9671 if (tp
->irq_cnt
> 1)
9672 val
|= MSGINT_MODE_MULTIVEC_EN
;
9673 if (!tg3_flag(tp
, 1SHOT_MSI
))
9674 val
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
9675 tw32(MSGINT_MODE
, val
);
9678 if (!tg3_flag(tp
, 5705_PLUS
)) {
9679 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
9683 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
9684 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
9685 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
9686 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
9687 WDMAC_MODE_LNGREAD_ENAB
);
9689 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
9690 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
9691 if (tg3_flag(tp
, TSO_CAPABLE
) &&
9692 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A1
||
9693 tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A2
)) {
9695 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
9696 !tg3_flag(tp
, IS_5788
)) {
9697 val
|= WDMAC_MODE_RX_ACCEL
;
9701 /* Enable host coalescing bug fix */
9702 if (tg3_flag(tp
, 5755_PLUS
))
9703 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
9705 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
9706 val
|= WDMAC_MODE_BURST_ALL_DATA
;
9708 tw32_f(WDMAC_MODE
, val
);
9711 if (tg3_flag(tp
, PCIX_MODE
)) {
9714 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
9716 if (tg3_asic_rev(tp
) == ASIC_REV_5703
) {
9717 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
9718 pcix_cmd
|= PCI_X_CMD_READ_2K
;
9719 } else if (tg3_asic_rev(tp
) == ASIC_REV_5704
) {
9720 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
9721 pcix_cmd
|= PCI_X_CMD_READ_2K
;
9723 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
9727 tw32_f(RDMAC_MODE
, rdmac_mode
);
9730 if (tg3_asic_rev(tp
) == ASIC_REV_5719
) {
9731 for (i
= 0; i
< TG3_NUM_RDMA_CHANNELS
; i
++) {
9732 if (tr32(TG3_RDMA_LENGTH
+ (i
<< 2)) > TG3_MAX_MTU(tp
))
9735 if (i
< TG3_NUM_RDMA_CHANNELS
) {
9736 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
9737 val
|= TG3_LSO_RD_DMA_TX_LENGTH_WA
;
9738 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
9739 tg3_flag_set(tp
, 5719_RDMA_BUG
);
9743 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
9744 if (!tg3_flag(tp
, 5705_PLUS
))
9745 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
9747 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
9749 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
9751 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
9753 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
9754 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
9755 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
9756 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
9757 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
9758 tw32(RCVDBDI_MODE
, val
);
9759 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
9760 if (tg3_flag(tp
, HW_TSO_1
) ||
9761 tg3_flag(tp
, HW_TSO_2
) ||
9762 tg3_flag(tp
, HW_TSO_3
))
9763 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
9764 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
9765 if (tg3_flag(tp
, ENABLE_TSS
))
9766 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
9767 tw32(SNDBDI_MODE
, val
);
9768 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
9770 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
) {
9771 err
= tg3_load_5701_a0_firmware_fix(tp
);
9776 if (tg3_flag(tp
, TSO_CAPABLE
)) {
9777 err
= tg3_load_tso_firmware(tp
);
9782 tp
->tx_mode
= TX_MODE_ENABLE
;
9784 if (tg3_flag(tp
, 5755_PLUS
) ||
9785 tg3_asic_rev(tp
) == ASIC_REV_5906
)
9786 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
9788 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
9789 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
9790 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
9791 tp
->tx_mode
&= ~val
;
9792 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
9795 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
9798 if (tg3_flag(tp
, ENABLE_RSS
)) {
9799 tg3_rss_write_indir_tbl(tp
);
9801 /* Setup the "secret" hash key. */
9802 tw32(MAC_RSS_HASH_KEY_0
, 0x5f865437);
9803 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
9804 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
9805 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
9806 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
9807 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
9808 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
9809 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
9810 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
9811 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
9814 tp
->rx_mode
= RX_MODE_ENABLE
;
9815 if (tg3_flag(tp
, 5755_PLUS
))
9816 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
9818 if (tg3_flag(tp
, ENABLE_RSS
))
9819 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
9820 RX_MODE_RSS_ITBL_HASH_BITS_7
|
9821 RX_MODE_RSS_IPV6_HASH_EN
|
9822 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
9823 RX_MODE_RSS_IPV4_HASH_EN
|
9824 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
9826 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9829 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
9831 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
9832 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9833 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
9836 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9839 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9840 if ((tg3_asic_rev(tp
) == ASIC_REV_5704
) &&
9841 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
9842 /* Set drive transmission level to 1.2V */
9843 /* only if the signal pre-emphasis bit is not set */
9844 val
= tr32(MAC_SERDES_CFG
);
9847 tw32(MAC_SERDES_CFG
, val
);
9849 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
)
9850 tw32(MAC_SERDES_CFG
, 0x616000);
9853 /* Prevent chip from dropping frames when flow control
9856 if (tg3_flag(tp
, 57765_CLASS
))
9860 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
9862 if (tg3_asic_rev(tp
) == ASIC_REV_5704
&&
9863 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
9864 /* Use hardware link auto-negotiation */
9865 tg3_flag_set(tp
, HW_AUTONEG
);
9868 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
9869 tg3_asic_rev(tp
) == ASIC_REV_5714
) {
9872 tmp
= tr32(SERDES_RX_CTRL
);
9873 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
9874 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
9875 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
9876 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9879 if (!tg3_flag(tp
, USE_PHYLIB
)) {
9880 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9881 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
9883 err
= tg3_setup_phy(tp
, 0);
9887 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9888 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
9891 /* Clear CRC stats. */
9892 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
9893 tg3_writephy(tp
, MII_TG3_TEST1
,
9894 tmp
| MII_TG3_TEST1_CRC_EN
);
9895 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
9900 __tg3_set_rx_mode(tp
->dev
);
9902 /* Initialize receive rules. */
9903 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
9904 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9905 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
9906 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9908 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
9912 if (tg3_flag(tp
, ENABLE_ASF
))
9916 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
9918 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
9920 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
9922 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
9924 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
9926 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
9928 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
9930 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
9932 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
9934 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
9936 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
9938 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
9940 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9942 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}


static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
	}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
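/* Illustrative note (added for clarity, not from the original source): the
 * macro above accumulates a 32-bit hardware register into a 64-bit
 * {high, low} software counter.  Overflow of the 32-bit addition is detected
 * by the wrap: if the new ->low ends up smaller than the value just added,
 * the sum carried out of 32 bits, so ->high is incremented.  For example,
 * low = 0xffffff00 plus __val = 0x200 yields low = 0x100, which is smaller
 * than 0x200, so high gains 1.
 */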
10076 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
10078 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
10083 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
10084 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
10085 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
10086 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
10087 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
10088 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
10089 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
10090 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
10091 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
10092 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
10093 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
10094 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
10095 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
10096 if (unlikely(tg3_flag(tp
, 5719_RDMA_BUG
) &&
10097 (sp
->tx_ucast_packets
.low
+ sp
->tx_mcast_packets
.low
+
10098 sp
->tx_bcast_packets
.low
) > TG3_NUM_RDMA_CHANNELS
)) {
10101 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
10102 val
&= ~TG3_LSO_RD_DMA_TX_LENGTH_WA
;
10103 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
10104 tg3_flag_clear(tp
, 5719_RDMA_BUG
);
10107 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
10108 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
10109 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
10110 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
10111 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
10112 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
10113 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
10114 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
10115 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
10116 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
10117 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
10118 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
10119 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
10120 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
10122 TG3_STAT_ADD32(&sp
->rxbds_empty
, RCVLPC_NO_RCV_BD_CNT
);
10123 if (tg3_asic_rev(tp
) != ASIC_REV_5717
&&
10124 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
&&
10125 tg3_chip_rev_id(tp
) != CHIPREV_ID_5720_A0
) {
10126 TG3_STAT_ADD32(&sp
->rx_discards
, RCVLPC_IN_DISCARDS_CNT
);
10128 u32 val
= tr32(HOSTCC_FLOW_ATTN
);
10129 val
= (val
& HOSTCC_FLOW_ATTN_MBUF_LWM
) ? 1 : 0;
10131 tw32(HOSTCC_FLOW_ATTN
, HOSTCC_FLOW_ATTN_MBUF_LWM
);
10132 sp
->rx_discards
.low
+= val
;
10133 if (sp
->rx_discards
.low
< val
)
10134 sp
->rx_discards
.high
+= 1;
10136 sp
->mbuf_lwm_thresh_hit
= sp
->rx_discards
;
10138 TG3_STAT_ADD32(&sp
->rx_errors
, RCVLPC_IN_ERRORS_CNT
);
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
10164 static void tg3_timer(unsigned long __opaque
)
10166 struct tg3
*tp
= (struct tg3
*) __opaque
;
10168 if (tp
->irq_sync
|| tg3_flag(tp
, RESET_TASK_PENDING
))
10169 goto restart_timer
;
10171 spin_lock(&tp
->lock
);
10173 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
10174 tg3_flag(tp
, 57765_CLASS
))
10175 tg3_chk_missed_msi(tp
);
10177 if (tg3_flag(tp
, FLUSH_POSTED_WRITES
)) {
10178 /* BCM4785: Flush posted writes from GbE to host memory. */
10182 if (!tg3_flag(tp
, TAGGED_STATUS
)) {
10183 /* All of this garbage is because when using non-tagged
10184 * IRQ status the mailbox/status_block protocol the chip
10185 * uses with the cpu is race prone.
10187 if (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
) {
10188 tw32(GRC_LOCAL_CTRL
,
10189 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
10191 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
10192 HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
);
10195 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
10196 spin_unlock(&tp
->lock
);
10197 tg3_reset_task_schedule(tp
);
10198 goto restart_timer
;
10202 /* This part only runs once per second. */
10203 if (!--tp
->timer_counter
) {
10204 if (tg3_flag(tp
, 5705_PLUS
))
10205 tg3_periodic_fetch_stats(tp
);
10207 if (tp
->setlpicnt
&& !--tp
->setlpicnt
)
10208 tg3_phy_eee_enable(tp
);
10210 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
10214 mac_stat
= tr32(MAC_STATUS
);
10217 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) {
10218 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
10220 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
10224 tg3_setup_phy(tp
, 0);
10225 } else if (tg3_flag(tp
, POLL_SERDES
)) {
10226 u32 mac_stat
= tr32(MAC_STATUS
);
10227 int need_setup
= 0;
10230 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
10233 if (!tp
->link_up
&&
10234 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
10235 MAC_STATUS_SIGNAL_DET
))) {
10239 if (!tp
->serdes_counter
) {
10242 ~MAC_MODE_PORT_MODE_MASK
));
10244 tw32_f(MAC_MODE
, tp
->mac_mode
);
10247 tg3_setup_phy(tp
, 0);
10249 } else if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
10250 tg3_flag(tp
, 5780_CLASS
)) {
10251 tg3_serdes_parallel_detect(tp
);
10254 tp
->timer_counter
= tp
->timer_multiplier
;
10257 /* Heartbeat is only sent once every 2 seconds.
10259 * The heartbeat is to tell the ASF firmware that the host
10260 * driver is still alive. In the event that the OS crashes,
10261 * ASF needs to reset the hardware to free up the FIFO space
10262 * that may be filled with rx packets destined for the host.
10263 * If the FIFO is full, ASF will no longer function properly.
10265 * Unintended resets have been reported on real time kernels
10266 * where the timer doesn't run on time. Netpoll will also have
10269 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10270 * to check the ring condition when the heartbeat is expiring
10271 * before doing the reset. This will prevent most unintended
10274 if (!--tp
->asf_counter
) {
10275 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
10276 tg3_wait_for_event_ack(tp
);
10278 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
,
10279 FWCMD_NICDRV_ALIVE3
);
10280 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
10281 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
,
10282 TG3_FW_UPDATE_TIMEOUT_SEC
);
10284 tg3_generate_fw_event(tp
);
10286 tp
->asf_counter
= tp
->asf_multiplier
;
10289 spin_unlock(&tp
->lock
);
10292 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
10293 add_timer(&tp
->timer
);
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
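/* Illustrative note (added for clarity, not from the original source): with
 * the usual HZ = 1000 and a chip that does not take the tagged-status path,
 * timer_offset = HZ / 10 = 100 jiffies, so the timer fires ten times per
 * second and timer_multiplier is 10; the "once per second" section of
 * tg3_timer therefore runs only every tenth firing, and
 * asf_multiplier = 10 * TG3_FW_UPDATE_FREQ_SEC spaces the ASF heartbeat
 * TG3_FW_UPDATE_FREQ_SEC seconds apart.
 */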
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10354 static void tg3_reset_task(struct work_struct
*work
)
10356 struct tg3
*tp
= container_of(work
, struct tg3
, reset_task
);
10359 tg3_full_lock(tp
, 0);
10361 if (!netif_running(tp
->dev
)) {
10362 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
10363 tg3_full_unlock(tp
);
10367 tg3_full_unlock(tp
);
10371 tg3_netif_stop(tp
);
10373 tg3_full_lock(tp
, 1);
10375 if (tg3_flag(tp
, TX_RECOVERY_PENDING
)) {
10376 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
10377 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
10378 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
10379 tg3_flag_clear(tp
, TX_RECOVERY_PENDING
);
10382 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
10383 err
= tg3_init_hw(tp
, 1);
10387 tg3_netif_start(tp
);
10390 tg3_full_unlock(tp
);
10395 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
10428 static int tg3_test_interrupt(struct tg3
*tp
)
10430 struct tg3_napi
*tnapi
= &tp
->napi
[0];
10431 struct net_device
*dev
= tp
->dev
;
10432 int err
, i
, intr_ok
= 0;
10435 if (!netif_running(dev
))
10438 tg3_disable_ints(tp
);
10440 free_irq(tnapi
->irq_vec
, tnapi
);
10443 * Turn off MSI one shot mode. Otherwise this test has no
10444 * observable way to know whether the interrupt was delivered.
10446 if (tg3_flag(tp
, 57765_PLUS
)) {
10447 val
= tr32(MSGINT_MODE
) | MSGINT_MODE_ONE_SHOT_DISABLE
;
10448 tw32(MSGINT_MODE
, val
);
10451 err
= request_irq(tnapi
->irq_vec
, tg3_test_isr
,
10452 IRQF_SHARED
, dev
->name
, tnapi
);
10456 tnapi
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
10457 tg3_enable_ints(tp
);
10459 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
10462 for (i
= 0; i
< 5; i
++) {
10463 u32 int_mbox
, misc_host_ctrl
;
10465 int_mbox
= tr32_mailbox(tnapi
->int_mbox
);
10466 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
10468 if ((int_mbox
!= 0) ||
10469 (misc_host_ctrl
& MISC_HOST_CTRL_MASK_PCI_INT
)) {
10474 if (tg3_flag(tp
, 57765_PLUS
) &&
10475 tnapi
->hw_status
->status_tag
!= tnapi
->last_tag
)
10476 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
10481 tg3_disable_ints(tp
);
10483 free_irq(tnapi
->irq_vec
, tnapi
);
10485 err
= tg3_request_irq(tp
, 0);
10491 /* Reenable MSI one shot mode. */
10492 if (tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, 1SHOT_MSI
)) {
10493 val
= tr32(MSGINT_MODE
) & ~MSGINT_MODE_ONE_SHOT_DISABLE
;
10494 tw32(MSGINT_MODE
, val
);
10502 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10503 * successfully restored
10505 static int tg3_test_msi(struct tg3
*tp
)
10510 if (!tg3_flag(tp
, USING_MSI
))
10513 /* Turn off SERR reporting in case MSI terminates with Master
10516 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
10517 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
10518 pci_cmd
& ~PCI_COMMAND_SERR
);
10520 err
= tg3_test_interrupt(tp
);
10522 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
10527 /* other failures */
10531 /* MSI test failed, go back to INTx mode */
10532 netdev_warn(tp
->dev
, "No interrupt was generated using MSI. Switching "
10533 "to INTx mode. Please report this failure to the PCI "
10534 "maintainer and include system chipset information\n");
10536 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
10538 pci_disable_msi(tp
->pdev
);
10540 tg3_flag_clear(tp
, USING_MSI
);
10541 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
10543 err
= tg3_request_irq(tp
, 0);
10547 /* Need to reset the chip because the MSI cycle may have terminated
10548 * with Master Abort.
10550 tg3_full_lock(tp
, 1);
10552 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10553 err
= tg3_init_hw(tp
, 1);
10555 tg3_full_unlock(tp
);
10558 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
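/* Illustrative note (added for clarity, not from the original source): the
 * header words consumed above can be pictured as
 *
 *	fw_data[0]  version
 *	fw_data[1]  load/start address
 *	fw_data[2]  full image length including BSS   (-> tp->fw_len)
 *
 * Since the BSS portion is not stored in the file, tp->fw_len is expected to
 * be at least the file payload size (tp->fw->size minus the 12 header bytes);
 * a smaller value indicates a corrupt blob, which is what the check above
 * rejects.
 */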
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
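/* Illustrative note (added for clarity, not from the original source): on a
 * 4-CPU system with rxq_cnt = 4 and txq_cnt = 1, irq_cnt starts at 4 and the
 * extra vector reserved for link and error interrupts raises the request to
 * min(5, tp->irq_max) MSI-X vectors.
 */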
10610 static bool tg3_enable_msix(struct tg3
*tp
)
10613 struct msix_entry msix_ent
[TG3_IRQ_MAX_VECS
];
10615 tp
->txq_cnt
= tp
->txq_req
;
10616 tp
->rxq_cnt
= tp
->rxq_req
;
10618 tp
->rxq_cnt
= netif_get_num_default_rss_queues();
10619 if (tp
->rxq_cnt
> tp
->rxq_max
)
10620 tp
->rxq_cnt
= tp
->rxq_max
;
10622 /* Disable multiple TX rings by default. Simple round-robin hardware
10623 * scheduling of the TX rings can cause starvation of rings with
10624 * small packets when other rings have TSO or jumbo packets.
10629 tp
->irq_cnt
= tg3_irq_count(tp
);
10631 for (i
= 0; i
< tp
->irq_max
; i
++) {
10632 msix_ent
[i
].entry
= i
;
10633 msix_ent
[i
].vector
= 0;
10636 rc
= pci_enable_msix(tp
->pdev
, msix_ent
, tp
->irq_cnt
);
10639 } else if (rc
!= 0) {
10640 if (pci_enable_msix(tp
->pdev
, msix_ent
, rc
))
10642 netdev_notice(tp
->dev
, "Requested %d MSI-X vectors, received %d\n",
10645 tp
->rxq_cnt
= max(rc
- 1, 1);
10647 tp
->txq_cnt
= min(tp
->rxq_cnt
, tp
->txq_max
);
10650 for (i
= 0; i
< tp
->irq_max
; i
++)
10651 tp
->napi
[i
].irq_vec
= msix_ent
[i
].vector
;
10653 if (netif_set_real_num_rx_queues(tp
->dev
, tp
->rxq_cnt
)) {
10654 pci_disable_msix(tp
->pdev
);
10658 if (tp
->irq_cnt
== 1)
10661 tg3_flag_set(tp
, ENABLE_RSS
);
10663 if (tp
->txq_cnt
> 1)
10664 tg3_flag_set(tp
, ENABLE_TSS
);
10666 netif_set_real_num_tx_queues(tp
->dev
, tp
->txq_cnt
);
10671 static void tg3_ints_init(struct tg3
*tp
)
10673 if ((tg3_flag(tp
, SUPPORT_MSI
) || tg3_flag(tp
, SUPPORT_MSIX
)) &&
10674 !tg3_flag(tp
, TAGGED_STATUS
)) {
10675 /* All MSI supporting chips should support tagged
10676 * status. Assert that this is the case.
10678 netdev_warn(tp
->dev
,
10679 "MSI without TAGGED_STATUS? Not using MSI\n");
10683 if (tg3_flag(tp
, SUPPORT_MSIX
) && tg3_enable_msix(tp
))
10684 tg3_flag_set(tp
, USING_MSIX
);
10685 else if (tg3_flag(tp
, SUPPORT_MSI
) && pci_enable_msi(tp
->pdev
) == 0)
10686 tg3_flag_set(tp
, USING_MSI
);
10688 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
10689 u32 msi_mode
= tr32(MSGINT_MODE
);
10690 if (tg3_flag(tp
, USING_MSIX
) && tp
->irq_cnt
> 1)
10691 msi_mode
|= MSGINT_MODE_MULTIVEC_EN
;
10692 if (!tg3_flag(tp
, 1SHOT_MSI
))
10693 msi_mode
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
10694 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
10697 if (!tg3_flag(tp
, USING_MSIX
)) {
10699 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
10702 if (tp
->irq_cnt
== 1) {
10705 netif_set_real_num_tx_queues(tp
->dev
, 1);
10706 netif_set_real_num_rx_queues(tp
->dev
, 1);
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
10722 static int tg3_start(struct tg3
*tp
, bool reset_phy
, bool test_irq
,
10725 struct net_device
*dev
= tp
->dev
;
10729 * Setup interrupts first so we know how
10730 * many NAPI resources to allocate
10734 tg3_rss_check_indir_tbl(tp
);
10736 /* The placement of this call is tied
10737 * to the setup and use of Host TX descriptors.
10739 err
= tg3_alloc_consistent(tp
);
10745 tg3_napi_enable(tp
);
10747 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
10748 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10749 err
= tg3_request_irq(tp
, i
);
10751 for (i
--; i
>= 0; i
--) {
10752 tnapi
= &tp
->napi
[i
];
10753 free_irq(tnapi
->irq_vec
, tnapi
);
10759 tg3_full_lock(tp
, 0);
10761 err
= tg3_init_hw(tp
, reset_phy
);
10763 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10764 tg3_free_rings(tp
);
10767 tg3_full_unlock(tp
);
10772 if (test_irq
&& tg3_flag(tp
, USING_MSI
)) {
10773 err
= tg3_test_msi(tp
);
10776 tg3_full_lock(tp
, 0);
10777 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10778 tg3_free_rings(tp
);
10779 tg3_full_unlock(tp
);
10784 if (!tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, USING_MSI
)) {
10785 u32 val
= tr32(PCIE_TRANSACTION_CFG
);
10787 tw32(PCIE_TRANSACTION_CFG
,
10788 val
| PCIE_TRANS_CFG_1SHOT_MSI
);
10794 tg3_hwmon_open(tp
);
10796 tg3_full_lock(tp
, 0);
10798 tg3_timer_start(tp
);
10799 tg3_flag_set(tp
, INIT_COMPLETE
);
10800 tg3_enable_ints(tp
);
10805 tg3_ptp_resume(tp
);
10808 tg3_full_unlock(tp
);
10810 netif_tx_start_all_queues(dev
);
10813 * Reset loopback feature if it was turned on while the device was down
10814 * make sure that it's installed properly now.
10816 if (dev
->features
& NETIF_F_LOOPBACK
)
10817 tg3_set_loopback(dev
, dev
->features
);
10822 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
10823 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10824 free_irq(tnapi
->irq_vec
, tnapi
);
10828 tg3_napi_disable(tp
);
10830 tg3_free_consistent(tp
);
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_free_consistent(tp);
}
10873 static int tg3_open(struct net_device
*dev
)
10875 struct tg3
*tp
= netdev_priv(dev
);
10878 if (tp
->fw_needed
) {
10879 err
= tg3_request_firmware(tp
);
10880 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
) {
10884 netdev_warn(tp
->dev
, "TSO capability disabled\n");
10885 tg3_flag_clear(tp
, TSO_CAPABLE
);
10886 } else if (!tg3_flag(tp
, TSO_CAPABLE
)) {
10887 netdev_notice(tp
->dev
, "TSO capability restored\n");
10888 tg3_flag_set(tp
, TSO_CAPABLE
);
10892 tg3_carrier_off(tp
);
10894 err
= tg3_power_up(tp
);
10898 tg3_full_lock(tp
, 0);
10900 tg3_disable_ints(tp
);
10901 tg3_flag_clear(tp
, INIT_COMPLETE
);
10903 tg3_full_unlock(tp
);
10905 err
= tg3_start(tp
, true, true, true);
10907 tg3_frob_aux_power(tp
, false);
10908 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
10911 if (tg3_flag(tp
, PTP_CAPABLE
)) {
10912 tp
->ptp_clock
= ptp_clock_register(&tp
->ptp_info
,
10914 if (IS_ERR(tp
->ptp_clock
))
10915 tp
->ptp_clock
= NULL
;
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
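/* Example (added for clarity, not from the original source): this folds the
 * {high, low} pair maintained by TG3_STAT_ADD32 back into one u64, e.g.
 * high = 0x1, low = 0x00000005 yields 0x100000005.
 */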
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
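/* Illustrative note (added for clarity, not from the original source): each
 * ethtool statistic is reported as the previously saved snapshot plus the
 * value currently in the hardware statistics block, presumably so totals
 * keep growing across events that restart the hardware counters (compare the
 * net_stats_prev / estats_prev memsets in tg3_close above).
 */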
10973 static void tg3_get_estats(struct tg3
*tp
, struct tg3_ethtool_stats
*estats
)
10975 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
10976 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
10978 ESTAT_ADD(rx_octets
);
10979 ESTAT_ADD(rx_fragments
);
10980 ESTAT_ADD(rx_ucast_packets
);
10981 ESTAT_ADD(rx_mcast_packets
);
10982 ESTAT_ADD(rx_bcast_packets
);
10983 ESTAT_ADD(rx_fcs_errors
);
10984 ESTAT_ADD(rx_align_errors
);
10985 ESTAT_ADD(rx_xon_pause_rcvd
);
10986 ESTAT_ADD(rx_xoff_pause_rcvd
);
10987 ESTAT_ADD(rx_mac_ctrl_rcvd
);
10988 ESTAT_ADD(rx_xoff_entered
);
10989 ESTAT_ADD(rx_frame_too_long_errors
);
10990 ESTAT_ADD(rx_jabbers
);
10991 ESTAT_ADD(rx_undersize_packets
);
10992 ESTAT_ADD(rx_in_length_errors
);
10993 ESTAT_ADD(rx_out_length_errors
);
10994 ESTAT_ADD(rx_64_or_less_octet_packets
);
10995 ESTAT_ADD(rx_65_to_127_octet_packets
);
10996 ESTAT_ADD(rx_128_to_255_octet_packets
);
10997 ESTAT_ADD(rx_256_to_511_octet_packets
);
10998 ESTAT_ADD(rx_512_to_1023_octet_packets
);
10999 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
11000 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
11001 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
11002 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
11003 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
11005 ESTAT_ADD(tx_octets
);
11006 ESTAT_ADD(tx_collisions
);
11007 ESTAT_ADD(tx_xon_sent
);
11008 ESTAT_ADD(tx_xoff_sent
);
11009 ESTAT_ADD(tx_flow_control
);
11010 ESTAT_ADD(tx_mac_errors
);
11011 ESTAT_ADD(tx_single_collisions
);
11012 ESTAT_ADD(tx_mult_collisions
);
11013 ESTAT_ADD(tx_deferred
);
11014 ESTAT_ADD(tx_excessive_collisions
);
11015 ESTAT_ADD(tx_late_collisions
);
11016 ESTAT_ADD(tx_collide_2times
);
11017 ESTAT_ADD(tx_collide_3times
);
11018 ESTAT_ADD(tx_collide_4times
);
11019 ESTAT_ADD(tx_collide_5times
);
11020 ESTAT_ADD(tx_collide_6times
);
11021 ESTAT_ADD(tx_collide_7times
);
11022 ESTAT_ADD(tx_collide_8times
);
11023 ESTAT_ADD(tx_collide_9times
);
11024 ESTAT_ADD(tx_collide_10times
);
11025 ESTAT_ADD(tx_collide_11times
);
11026 ESTAT_ADD(tx_collide_12times
);
11027 ESTAT_ADD(tx_collide_13times
);
11028 ESTAT_ADD(tx_collide_14times
);
11029 ESTAT_ADD(tx_collide_15times
);
11030 ESTAT_ADD(tx_ucast_packets
);
11031 ESTAT_ADD(tx_mcast_packets
);
11032 ESTAT_ADD(tx_bcast_packets
);
11033 ESTAT_ADD(tx_carrier_sense_errors
);
11034 ESTAT_ADD(tx_discards
);
11035 ESTAT_ADD(tx_errors
);
11037 ESTAT_ADD(dma_writeq_full
);
11038 ESTAT_ADD(dma_write_prioq_full
);
11039 ESTAT_ADD(rxbds_empty
);
11040 ESTAT_ADD(rx_discards
);
11041 ESTAT_ADD(rx_errors
);
11042 ESTAT_ADD(rx_threshold_hit
);
11044 ESTAT_ADD(dma_readq_full
);
11045 ESTAT_ADD(dma_read_prioq_full
);
11046 ESTAT_ADD(tx_comp_queue_full
);
11048 ESTAT_ADD(ring_set_send_prod_index
);
11049 ESTAT_ADD(ring_status_update
);
11050 ESTAT_ADD(nic_irqs
);
11051 ESTAT_ADD(nic_avoided_irqs
);
11052 ESTAT_ADD(nic_tx_threshold_hit
);
11054 ESTAT_ADD(mbuf_lwm_thresh_hit
);
11057 static void tg3_get_nstats(struct tg3
*tp
, struct rtnl_link_stats64
*stats
)
11059 struct rtnl_link_stats64
*old_stats
= &tp
->net_stats_prev
;
11060 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11062 stats
->rx_packets
= old_stats
->rx_packets
+
11063 get_stat64(&hw_stats
->rx_ucast_packets
) +
11064 get_stat64(&hw_stats
->rx_mcast_packets
) +
11065 get_stat64(&hw_stats
->rx_bcast_packets
);
11067 stats
->tx_packets
= old_stats
->tx_packets
+
11068 get_stat64(&hw_stats
->tx_ucast_packets
) +
11069 get_stat64(&hw_stats
->tx_mcast_packets
) +
11070 get_stat64(&hw_stats
->tx_bcast_packets
);
11072 stats
->rx_bytes
= old_stats
->rx_bytes
+
11073 get_stat64(&hw_stats
->rx_octets
);
11074 stats
->tx_bytes
= old_stats
->tx_bytes
+
11075 get_stat64(&hw_stats
->tx_octets
);
11077 stats
->rx_errors
= old_stats
->rx_errors
+
11078 get_stat64(&hw_stats
->rx_errors
);
11079 stats
->tx_errors
= old_stats
->tx_errors
+
11080 get_stat64(&hw_stats
->tx_errors
) +
11081 get_stat64(&hw_stats
->tx_mac_errors
) +
11082 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
11083 get_stat64(&hw_stats
->tx_discards
);
11085 stats
->multicast
= old_stats
->multicast
+
11086 get_stat64(&hw_stats
->rx_mcast_packets
);
11087 stats
->collisions
= old_stats
->collisions
+
11088 get_stat64(&hw_stats
->tx_collisions
);
11090 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
11091 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
11092 get_stat64(&hw_stats
->rx_undersize_packets
);
11094 stats
->rx_over_errors
= old_stats
->rx_over_errors
+
11095 get_stat64(&hw_stats
->rxbds_empty
);
11096 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
11097 get_stat64(&hw_stats
->rx_align_errors
);
11098 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
11099 get_stat64(&hw_stats
->tx_discards
);
11100 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
11101 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
11103 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
11104 tg3_calc_crc_errors(tp
);
11106 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
11107 get_stat64(&hw_stats
->rx_discards
);
11109 stats
->rx_dropped
= tp
->rx_dropped
;
11110 stats
->tx_dropped
= tp
->tx_dropped
;
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11144 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11146 struct tg3
*tp
= netdev_priv(dev
);
11149 u32 i
, offset
, len
, b_offset
, b_count
;
11152 if (tg3_flag(tp
, NO_NVRAM
))
11155 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11158 offset
= eeprom
->offset
;
11162 eeprom
->magic
= TG3_EEPROM_MAGIC
;
11165 /* adjustments to start on required 4 byte boundary */
11166 b_offset
= offset
& 3;
11167 b_count
= 4 - b_offset
;
11168 if (b_count
> len
) {
11169 /* i.e. offset=1 len=2 */
11172 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &val
);
11175 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
11178 eeprom
->len
+= b_count
;
11181 /* read bytes up to the last 4 byte boundary */
11182 pd
= &data
[eeprom
->len
];
11183 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
11184 ret
= tg3_nvram_read_be32(tp
, offset
+ i
, &val
);
11189 memcpy(pd
+ i
, &val
, 4);
11194 /* read last bytes not ending on 4 byte boundary */
11195 pd
= &data
[eeprom
->len
];
11197 b_offset
= offset
+ len
- b_count
;
11198 ret
= tg3_nvram_read_be32(tp
, b_offset
, &val
);
11201 memcpy(pd
, &val
, b_count
);
11202 eeprom
->len
+= b_count
;
11207 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11209 struct tg3
*tp
= netdev_priv(dev
);
11211 u32 offset
, len
, b_offset
, odd_len
;
11215 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11218 if (tg3_flag(tp
, NO_NVRAM
) ||
11219 eeprom
->magic
!= TG3_EEPROM_MAGIC
)
11222 offset
= eeprom
->offset
;
11225 if ((b_offset
= (offset
& 3))) {
11226 /* adjustments to start on required 4 byte boundary */
11227 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &start
);
11238 /* adjustments to end on required 4 byte boundary */
11240 len
= (len
+ 3) & ~3;
11241 ret
= tg3_nvram_read_be32(tp
, offset
+len
-4, &end
);
11247 if (b_offset
|| odd_len
) {
11248 buf
= kmalloc(len
, GFP_KERNEL
);
11252 memcpy(buf
, &start
, 4);
11254 memcpy(buf
+len
-4, &end
, 4);
11255 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
11258 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
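	/* Map the configured flow control onto the standard 802.3x pause
	 * advertisement encoding: rx+tx -> Pause, rx only ->
	 * Pause|Asym_Pause, tx only -> Asym_Pause.
	 */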
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}

	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;

	return 0;
}
11332 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
11334 struct tg3
*tp
= netdev_priv(dev
);
11335 u32 speed
= ethtool_cmd_speed(cmd
);
11337 if (tg3_flag(tp
, USE_PHYLIB
)) {
11338 struct phy_device
*phydev
;
11339 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11341 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11342 return phy_ethtool_sset(phydev
, cmd
);
11345 if (cmd
->autoneg
!= AUTONEG_ENABLE
&&
11346 cmd
->autoneg
!= AUTONEG_DISABLE
)
11349 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
11350 cmd
->duplex
!= DUPLEX_FULL
&&
11351 cmd
->duplex
!= DUPLEX_HALF
)
11354 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11355 u32 mask
= ADVERTISED_Autoneg
|
11357 ADVERTISED_Asym_Pause
;
11359 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
11360 mask
|= ADVERTISED_1000baseT_Half
|
11361 ADVERTISED_1000baseT_Full
;
11363 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
11364 mask
|= ADVERTISED_100baseT_Half
|
11365 ADVERTISED_100baseT_Full
|
11366 ADVERTISED_10baseT_Half
|
11367 ADVERTISED_10baseT_Full
|
11370 mask
|= ADVERTISED_FIBRE
;
11372 if (cmd
->advertising
& ~mask
)
11375 mask
&= (ADVERTISED_1000baseT_Half
|
11376 ADVERTISED_1000baseT_Full
|
11377 ADVERTISED_100baseT_Half
|
11378 ADVERTISED_100baseT_Full
|
11379 ADVERTISED_10baseT_Half
|
11380 ADVERTISED_10baseT_Full
);
11382 cmd
->advertising
&= mask
;
11384 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
11385 if (speed
!= SPEED_1000
)
11388 if (cmd
->duplex
!= DUPLEX_FULL
)
11391 if (speed
!= SPEED_100
&&
11397 tg3_full_lock(tp
, 0);
11399 tp
->link_config
.autoneg
= cmd
->autoneg
;
11400 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11401 tp
->link_config
.advertising
= (cmd
->advertising
|
11402 ADVERTISED_Autoneg
);
11403 tp
->link_config
.speed
= SPEED_UNKNOWN
;
11404 tp
->link_config
.duplex
= DUPLEX_UNKNOWN
;
11406 tp
->link_config
.advertising
= 0;
11407 tp
->link_config
.speed
= speed
;
11408 tp
->link_config
.duplex
= cmd
->duplex
;
11411 if (netif_running(dev
))
11412 tg3_setup_phy(tp
, 1);
11414 tg3_full_unlock(tp
);
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->msg_enable;
}
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
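		/* The result of the first BMCR read above is discarded,
		 * apparently to flush a stale or latched value before the
		 * autoneg-enable bit is tested below.
		 */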
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
11533 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
11535 struct tg3
*tp
= netdev_priv(dev
);
11536 int i
, irq_sync
= 0, err
= 0;
11538 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
11539 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
11540 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
11541 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
11542 (tg3_flag(tp
, TSO_BUG
) &&
11543 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
11546 if (netif_running(dev
)) {
11548 tg3_netif_stop(tp
);
11552 tg3_full_lock(tp
, irq_sync
);
11554 tp
->rx_pending
= ering
->rx_pending
;
11556 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
11557 tp
->rx_pending
> 63)
11558 tp
->rx_pending
= 63;
11559 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
11561 for (i
= 0; i
< tp
->irq_max
; i
++)
11562 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
11564 if (netif_running(dev
)) {
11565 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11566 err
= tg3_restart_hw(tp
, 1);
11568 tg3_netif_start(tp
);
11571 tg3_full_unlock(tp
);
11573 if (irq_sync
&& !err
)
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
11596 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
11598 struct tg3
*tp
= netdev_priv(dev
);
11601 if (tg3_flag(tp
, USE_PHYLIB
)) {
11603 struct phy_device
*phydev
;
11605 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11607 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
11608 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
11609 (epause
->rx_pause
!= epause
->tx_pause
)))
11612 tp
->link_config
.flowctrl
= 0;
11613 if (epause
->rx_pause
) {
11614 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
11616 if (epause
->tx_pause
) {
11617 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11618 newadv
= ADVERTISED_Pause
;
11620 newadv
= ADVERTISED_Pause
|
11621 ADVERTISED_Asym_Pause
;
11622 } else if (epause
->tx_pause
) {
11623 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11624 newadv
= ADVERTISED_Asym_Pause
;
11628 if (epause
->autoneg
)
11629 tg3_flag_set(tp
, PAUSE_AUTONEG
);
11631 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
11633 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
11634 u32 oldadv
= phydev
->advertising
&
11635 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
11636 if (oldadv
!= newadv
) {
11637 phydev
->advertising
&=
11638 ~(ADVERTISED_Pause
|
11639 ADVERTISED_Asym_Pause
);
11640 phydev
->advertising
|= newadv
;
11641 if (phydev
->autoneg
) {
11643 * Always renegotiate the link to
11644 * inform our link partner of our
11645 * flow control settings, even if the
11646 * flow control is forced. Let
11647 * tg3_adjust_link() do the final
11648 * flow control setup.
11650 return phy_start_aneg(phydev
);
11654 if (!epause
->autoneg
)
11655 tg3_setup_flow_control(tp
, 0, 0);
11657 tp
->link_config
.advertising
&=
11658 ~(ADVERTISED_Pause
|
11659 ADVERTISED_Asym_Pause
);
11660 tp
->link_config
.advertising
|= newadv
;
11665 if (netif_running(dev
)) {
11666 tg3_netif_stop(tp
);
11670 tg3_full_lock(tp
, irq_sync
);
11672 if (epause
->autoneg
)
11673 tg3_flag_set(tp
, PAUSE_AUTONEG
);
11675 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
11676 if (epause
->rx_pause
)
11677 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
11679 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
11680 if (epause
->tx_pause
)
11681 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11683 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
11685 if (netif_running(dev
)) {
11686 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11687 err
= tg3_restart_hw(tp
, 1);
11689 tg3_netif_start(tp
);
11692 tg3_full_unlock(tp
);
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
11710 static int tg3_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
,
11711 u32
*rules __always_unused
)
11713 struct tg3
*tp
= netdev_priv(dev
);
11715 if (!tg3_flag(tp
, SUPPORT_MSIX
))
11716 return -EOPNOTSUPP
;
11718 switch (info
->cmd
) {
11719 case ETHTOOL_GRXRINGS
:
11720 if (netif_running(tp
->dev
))
11721 info
->data
= tp
->rxq_cnt
;
11723 info
->data
= num_online_cpus();
11724 if (info
->data
> TG3_RSS_MAX_NUM_QS
)
11725 info
->data
= TG3_RSS_MAX_NUM_QS
;
11728 /* The first interrupt vector only
11729 * handles link interrupts.
11735 return -EOPNOTSUPP
;
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
11782 static void tg3_get_channels(struct net_device
*dev
,
11783 struct ethtool_channels
*channel
)
11785 struct tg3
*tp
= netdev_priv(dev
);
11786 u32 deflt_qs
= netif_get_num_default_rss_queues();
11788 channel
->max_rx
= tp
->rxq_max
;
11789 channel
->max_tx
= tp
->txq_max
;
11791 if (netif_running(dev
)) {
11792 channel
->rx_count
= tp
->rxq_cnt
;
11793 channel
->tx_count
= tp
->txq_cnt
;
11796 channel
->rx_count
= tp
->rxq_req
;
11798 channel
->rx_count
= min(deflt_qs
, tp
->rxq_max
);
11801 channel
->tx_count
= tp
->txq_req
;
11803 channel
->tx_count
= min(deflt_qs
, tp
->txq_max
);
11807 static int tg3_set_channels(struct net_device
*dev
,
11808 struct ethtool_channels
*channel
)
11810 struct tg3
*tp
= netdev_priv(dev
);
11812 if (!tg3_flag(tp
, SUPPORT_MSIX
))
11813 return -EOPNOTSUPP
;
11815 if (channel
->rx_count
> tp
->rxq_max
||
11816 channel
->tx_count
> tp
->txq_max
)
11819 tp
->rxq_req
= channel
->rx_count
;
11820 tp
->txq_req
= channel
->tx_count
;
11822 if (!netif_running(dev
))
11827 tg3_carrier_off(tp
);
11829 tg3_start(tp
, true, false, false);
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
11849 static int tg3_set_phys_id(struct net_device
*dev
,
11850 enum ethtool_phys_id_state state
)
11852 struct tg3
*tp
= netdev_priv(dev
);
11854 if (!netif_running(tp
->dev
))
11858 case ETHTOOL_ID_ACTIVE
:
11859 return 1; /* cycle on/off once per second */
11861 case ETHTOOL_ID_ON
:
11862 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
11863 LED_CTRL_1000MBPS_ON
|
11864 LED_CTRL_100MBPS_ON
|
11865 LED_CTRL_10MBPS_ON
|
11866 LED_CTRL_TRAFFIC_OVERRIDE
|
11867 LED_CTRL_TRAFFIC_BLINK
|
11868 LED_CTRL_TRAFFIC_LED
);
11871 case ETHTOOL_ID_OFF
:
11872 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
11873 LED_CTRL_TRAFFIC_OVERRIDE
);
11876 case ETHTOOL_ID_INACTIVE
:
11877 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
11895 static __be32
*tg3_vpd_readblock(struct tg3
*tp
, u32
*vpdlen
)
11899 u32 offset
= 0, len
= 0;
11902 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
11905 if (magic
== TG3_EEPROM_MAGIC
) {
11906 for (offset
= TG3_NVM_DIR_START
;
11907 offset
< TG3_NVM_DIR_END
;
11908 offset
+= TG3_NVM_DIRENT_SIZE
) {
11909 if (tg3_nvram_read(tp
, offset
, &val
))
11912 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
11913 TG3_NVM_DIRTYPE_EXTVPD
)
11917 if (offset
!= TG3_NVM_DIR_END
) {
11918 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
11919 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
11922 offset
= tg3_nvram_logical_addr(tp
, offset
);
11926 if (!offset
|| !len
) {
11927 offset
= TG3_NVM_VPD_OFF
;
11928 len
= TG3_NVM_VPD_LEN
;
11931 buf
= kmalloc(len
, GFP_KERNEL
);
11935 if (magic
== TG3_EEPROM_MAGIC
) {
11936 for (i
= 0; i
< len
; i
+= 4) {
11937 /* The data is in little-endian format in NVRAM.
11938 * Use the big-endian read routines to preserve
11939 * the byte order as it exists in NVRAM.
11941 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
11947 unsigned int pos
= 0;
11949 ptr
= (u8
*)&buf
[0];
11950 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
11951 cnt
= pci_read_vpd(tp
->pdev
, pos
,
11953 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
11971 #define NVRAM_TEST_SIZE 0x100
11972 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11973 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11974 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11975 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11976 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11977 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11978 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11979 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11981 static int tg3_test_nvram(struct tg3
*tp
)
11983 u32 csum
, magic
, len
;
11985 int i
, j
, k
, err
= 0, size
;
11987 if (tg3_flag(tp
, NO_NVRAM
))
11990 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
11993 if (magic
== TG3_EEPROM_MAGIC
)
11994 size
= NVRAM_TEST_SIZE
;
11995 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
11996 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
11997 TG3_EEPROM_SB_FORMAT_1
) {
11998 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
11999 case TG3_EEPROM_SB_REVISION_0
:
12000 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
12002 case TG3_EEPROM_SB_REVISION_2
:
12003 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
12005 case TG3_EEPROM_SB_REVISION_3
:
12006 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
12008 case TG3_EEPROM_SB_REVISION_4
:
12009 size
= NVRAM_SELFBOOT_FORMAT1_4_SIZE
;
12011 case TG3_EEPROM_SB_REVISION_5
:
12012 size
= NVRAM_SELFBOOT_FORMAT1_5_SIZE
;
12014 case TG3_EEPROM_SB_REVISION_6
:
12015 size
= NVRAM_SELFBOOT_FORMAT1_6_SIZE
;
12022 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
12023 size
= NVRAM_SELFBOOT_HW_SIZE
;
12027 buf
= kmalloc(size
, GFP_KERNEL
);
12032 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
12033 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
12040 /* Selfboot format */
12041 magic
= be32_to_cpu(buf
[0]);
12042 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
12043 TG3_EEPROM_MAGIC_FW
) {
12044 u8
*buf8
= (u8
*) buf
, csum8
= 0;
12046 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
12047 TG3_EEPROM_SB_REVISION_2
) {
12048 /* For rev 2, the csum doesn't include the MBA. */
12049 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
12051 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
12054 for (i
= 0; i
< size
; i
++)
12067 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
12068 TG3_EEPROM_MAGIC_HW
) {
12069 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
12070 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
12071 u8
*buf8
= (u8
*) buf
;
12073 /* Separate the parity bits and the data bytes. */
12074 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
12075 if ((i
== 0) || (i
== 8)) {
12079 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
12080 parity
[k
++] = buf8
[i
] & msk
;
12082 } else if (i
== 16) {
12086 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
12087 parity
[k
++] = buf8
[i
] & msk
;
12090 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
12091 parity
[k
++] = buf8
[i
] & msk
;
12094 data
[j
++] = buf8
[i
];
12098 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
12099 u8 hw8
= hweight8(data
[i
]);
12101 if ((hw8
& 0x1) && parity
[i
])
12103 else if (!(hw8
& 0x1) && !parity
[i
])
12112 /* Bootstrap checksum at offset 0x10 */
12113 csum
= calc_crc((unsigned char *) buf
, 0x10);
12114 if (csum
!= le32_to_cpu(buf
[0x10/4]))
12117 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12118 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
12119 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
12124 buf
= tg3_vpd_readblock(tp
, &len
);
12128 i
= pci_vpd_find_tag((u8
*)buf
, 0, len
, PCI_VPD_LRDT_RO_DATA
);
12130 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
12134 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> len
)
12137 i
+= PCI_VPD_LRDT_TAG_SIZE
;
12138 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
12139 PCI_VPD_RO_KEYWORD_CHKSUM
);
12143 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
12145 for (i
= 0; i
<= j
; i
++)
12146 csum8
+= ((u8
*)buf
)[i
];
12160 #define TG3_SERDES_TIMEOUT_SEC 2
12161 #define TG3_COPPER_TIMEOUT_SEC 6
12163 static int tg3_test_link(struct tg3
*tp
)
12167 if (!netif_running(tp
->dev
))
12170 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
12171 max
= TG3_SERDES_TIMEOUT_SEC
;
12173 max
= TG3_COPPER_TIMEOUT_SEC
;
12175 for (i
= 0; i
< max
; i
++) {
12179 if (msleep_interruptible(1000))
12186 /* Only test the commonly used registers */
12187 static int tg3_test_registers(struct tg3
*tp
)
12189 int i
, is_5705
, is_5750
;
12190 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
12194 #define TG3_FL_5705 0x1
12195 #define TG3_FL_NOT_5705 0x2
12196 #define TG3_FL_NOT_5788 0x4
12197 #define TG3_FL_NOT_5750 0x8
12201 /* MAC Control Registers */
12202 { MAC_MODE
, TG3_FL_NOT_5705
,
12203 0x00000000, 0x00ef6f8c },
12204 { MAC_MODE
, TG3_FL_5705
,
12205 0x00000000, 0x01ef6b8c },
12206 { MAC_STATUS
, TG3_FL_NOT_5705
,
12207 0x03800107, 0x00000000 },
12208 { MAC_STATUS
, TG3_FL_5705
,
12209 0x03800100, 0x00000000 },
12210 { MAC_ADDR_0_HIGH
, 0x0000,
12211 0x00000000, 0x0000ffff },
12212 { MAC_ADDR_0_LOW
, 0x0000,
12213 0x00000000, 0xffffffff },
12214 { MAC_RX_MTU_SIZE
, 0x0000,
12215 0x00000000, 0x0000ffff },
12216 { MAC_TX_MODE
, 0x0000,
12217 0x00000000, 0x00000070 },
12218 { MAC_TX_LENGTHS
, 0x0000,
12219 0x00000000, 0x00003fff },
12220 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
12221 0x00000000, 0x000007fc },
12222 { MAC_RX_MODE
, TG3_FL_5705
,
12223 0x00000000, 0x000007dc },
12224 { MAC_HASH_REG_0
, 0x0000,
12225 0x00000000, 0xffffffff },
12226 { MAC_HASH_REG_1
, 0x0000,
12227 0x00000000, 0xffffffff },
12228 { MAC_HASH_REG_2
, 0x0000,
12229 0x00000000, 0xffffffff },
12230 { MAC_HASH_REG_3
, 0x0000,
12231 0x00000000, 0xffffffff },
12233 /* Receive Data and Receive BD Initiator Control Registers. */
12234 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
12235 0x00000000, 0xffffffff },
12236 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
12237 0x00000000, 0xffffffff },
12238 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
12239 0x00000000, 0x00000003 },
12240 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
12241 0x00000000, 0xffffffff },
12242 { RCVDBDI_STD_BD
+0, 0x0000,
12243 0x00000000, 0xffffffff },
12244 { RCVDBDI_STD_BD
+4, 0x0000,
12245 0x00000000, 0xffffffff },
12246 { RCVDBDI_STD_BD
+8, 0x0000,
12247 0x00000000, 0xffff0002 },
12248 { RCVDBDI_STD_BD
+0xc, 0x0000,
12249 0x00000000, 0xffffffff },
12251 /* Receive BD Initiator Control Registers. */
12252 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
12253 0x00000000, 0xffffffff },
12254 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
12255 0x00000000, 0x000003ff },
12256 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
12257 0x00000000, 0xffffffff },
12259 /* Host Coalescing Control Registers. */
12260 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
12261 0x00000000, 0x00000004 },
12262 { HOSTCC_MODE
, TG3_FL_5705
,
12263 0x00000000, 0x000000f6 },
12264 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
12265 0x00000000, 0xffffffff },
12266 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
12267 0x00000000, 0x000003ff },
12268 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
12269 0x00000000, 0xffffffff },
12270 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
12271 0x00000000, 0x000003ff },
12272 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
12273 0x00000000, 0xffffffff },
12274 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12275 0x00000000, 0x000000ff },
12276 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
12277 0x00000000, 0xffffffff },
12278 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12279 0x00000000, 0x000000ff },
12280 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12281 0x00000000, 0xffffffff },
12282 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12283 0x00000000, 0xffffffff },
12284 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12285 0x00000000, 0xffffffff },
12286 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12287 0x00000000, 0x000000ff },
12288 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12289 0x00000000, 0xffffffff },
12290 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12291 0x00000000, 0x000000ff },
12292 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
12293 0x00000000, 0xffffffff },
12294 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
12295 0x00000000, 0xffffffff },
12296 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
12297 0x00000000, 0xffffffff },
12298 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
12299 0x00000000, 0xffffffff },
12300 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
12301 0x00000000, 0xffffffff },
12302 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
12303 0xffffffff, 0x00000000 },
12304 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
12305 0xffffffff, 0x00000000 },
12307 /* Buffer Manager Control Registers. */
12308 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
12309 0x00000000, 0x007fff80 },
12310 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
12311 0x00000000, 0x007fffff },
12312 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
12313 0x00000000, 0x0000003f },
12314 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
12315 0x00000000, 0x000001ff },
12316 { BUFMGR_MB_HIGH_WATER
, 0x0000,
12317 0x00000000, 0x000001ff },
12318 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
12319 0xffffffff, 0x00000000 },
12320 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
12321 0xffffffff, 0x00000000 },
12323 /* Mailbox Registers */
12324 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
12325 0x00000000, 0x000001ff },
12326 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
12327 0x00000000, 0x000001ff },
12328 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
12329 0x00000000, 0x000007ff },
12330 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
12331 0x00000000, 0x000001ff },
12333 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12336 is_5705
= is_5750
= 0;
12337 if (tg3_flag(tp
, 5705_PLUS
)) {
12339 if (tg3_flag(tp
, 5750_PLUS
))
12343 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
12344 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
12347 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
12350 if (tg3_flag(tp
, IS_5788
) &&
12351 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
12354 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
12357 offset
= (u32
) reg_tbl
[i
].offset
;
12358 read_mask
= reg_tbl
[i
].read_mask
;
12359 write_mask
= reg_tbl
[i
].write_mask
;
12361 /* Save the original register content */
12362 save_val
= tr32(offset
);
12364 /* Determine the read-only value. */
12365 read_val
= save_val
& read_mask
;
12367 /* Write zero to the register, then make sure the read-only bits
12368 * are not changed and the read/write bits are all zeros.
12372 val
= tr32(offset
);
12374 /* Test the read-only and read/write bits. */
12375 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
12378 /* Write ones to all the bits defined by RdMask and WrMask, then
12379 * make sure the read-only bits are not changed and the
12380 * read/write bits are all ones.
12382 tw32(offset
, read_mask
| write_mask
);
12384 val
= tr32(offset
);
12386 /* Test the read-only bits. */
12387 if ((val
& read_mask
) != read_val
)
12390 /* Test the read/write bits. */
12391 if ((val
& write_mask
) != write_mask
)
12394 tw32(offset
, save_val
);
12400 if (netif_msg_hw(tp
))
12401 netdev_err(tp
->dev
,
12402 "Register test failed at offset %x\n", offset
);
12403 tw32(offset
, save_val
);
12407 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
12409 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12413 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
12414 for (j
= 0; j
< len
; j
+= 4) {
12417 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
12418 tg3_read_mem(tp
, offset
+ j
, &val
);
12419 if (val
!= test_pattern
[i
])
12426 static int tg3_test_memory(struct tg3
*tp
)
12428 static struct mem_entry
{
12431 } mem_tbl_570x
[] = {
12432 { 0x00000000, 0x00b50},
12433 { 0x00002000, 0x1c000},
12434 { 0xffffffff, 0x00000}
12435 }, mem_tbl_5705
[] = {
12436 { 0x00000100, 0x0000c},
12437 { 0x00000200, 0x00008},
12438 { 0x00004000, 0x00800},
12439 { 0x00006000, 0x01000},
12440 { 0x00008000, 0x02000},
12441 { 0x00010000, 0x0e000},
12442 { 0xffffffff, 0x00000}
12443 }, mem_tbl_5755
[] = {
12444 { 0x00000200, 0x00008},
12445 { 0x00004000, 0x00800},
12446 { 0x00006000, 0x00800},
12447 { 0x00008000, 0x02000},
12448 { 0x00010000, 0x0c000},
12449 { 0xffffffff, 0x00000}
12450 }, mem_tbl_5906
[] = {
12451 { 0x00000200, 0x00008},
12452 { 0x00004000, 0x00400},
12453 { 0x00006000, 0x00400},
12454 { 0x00008000, 0x01000},
12455 { 0x00010000, 0x01000},
12456 { 0xffffffff, 0x00000}
12457 }, mem_tbl_5717
[] = {
12458 { 0x00000200, 0x00008},
12459 { 0x00010000, 0x0a000},
12460 { 0x00020000, 0x13c00},
12461 { 0xffffffff, 0x00000}
12462 }, mem_tbl_57765
[] = {
12463 { 0x00000200, 0x00008},
12464 { 0x00004000, 0x00800},
12465 { 0x00006000, 0x09800},
12466 { 0x00010000, 0x0a000},
12467 { 0xffffffff, 0x00000}
12469 struct mem_entry
*mem_tbl
;
12473 if (tg3_flag(tp
, 5717_PLUS
))
12474 mem_tbl
= mem_tbl_5717
;
12475 else if (tg3_flag(tp
, 57765_CLASS
) ||
12476 tg3_asic_rev(tp
) == ASIC_REV_5762
)
12477 mem_tbl
= mem_tbl_57765
;
12478 else if (tg3_flag(tp
, 5755_PLUS
))
12479 mem_tbl
= mem_tbl_5755
;
12480 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
12481 mem_tbl
= mem_tbl_5906
;
12482 else if (tg3_flag(tp
, 5705_PLUS
))
12483 mem_tbl
= mem_tbl_5705
;
12485 mem_tbl
= mem_tbl_570x
;
12487 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
12488 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
12496 #define TG3_TSO_MSS 500
12498 #define TG3_TSO_IP_HDR_LEN 20
12499 #define TG3_TSO_TCP_HDR_LEN 20
12500 #define TG3_TSO_TCP_OPT_LEN 12
12502 static const u8 tg3_tso_header
[] = {
12504 0x45, 0x00, 0x00, 0x00,
12505 0x00, 0x00, 0x40, 0x00,
12506 0x40, 0x06, 0x00, 0x00,
12507 0x0a, 0x00, 0x00, 0x01,
12508 0x0a, 0x00, 0x00, 0x02,
12509 0x0d, 0x00, 0xe0, 0x00,
12510 0x00, 0x00, 0x01, 0x00,
12511 0x00, 0x00, 0x02, 0x00,
12512 0x80, 0x10, 0x10, 0x00,
12513 0x14, 0x09, 0x00, 0x00,
12514 0x01, 0x01, 0x08, 0x0a,
12515 0x11, 0x11, 0x11, 0x11,
12516 0x11, 0x11, 0x11, 0x11,
12519 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, bool tso_loopback
)
12521 u32 rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
12522 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
12524 struct sk_buff
*skb
;
12525 u8
*tx_data
, *rx_data
;
12527 int num_pkts
, tx_len
, rx_len
, i
, err
;
12528 struct tg3_rx_buffer_desc
*desc
;
12529 struct tg3_napi
*tnapi
, *rnapi
;
12530 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
12532 tnapi
= &tp
->napi
[0];
12533 rnapi
= &tp
->napi
[0];
12534 if (tp
->irq_cnt
> 1) {
12535 if (tg3_flag(tp
, ENABLE_RSS
))
12536 rnapi
= &tp
->napi
[1];
12537 if (tg3_flag(tp
, ENABLE_TSS
))
12538 tnapi
= &tp
->napi
[1];
12540 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
12545 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
12549 tx_data
= skb_put(skb
, tx_len
);
12550 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
12551 memset(tx_data
+ 6, 0x0, 8);
12553 tw32(MAC_RX_MTU_SIZE
, tx_len
+ ETH_FCS_LEN
);
12555 if (tso_loopback
) {
12556 struct iphdr
*iph
= (struct iphdr
*)&tx_data
[ETH_HLEN
];
12558 u32 hdr_len
= TG3_TSO_IP_HDR_LEN
+ TG3_TSO_TCP_HDR_LEN
+
12559 TG3_TSO_TCP_OPT_LEN
;
12561 memcpy(tx_data
+ ETH_ALEN
* 2, tg3_tso_header
,
12562 sizeof(tg3_tso_header
));
12565 val
= tx_len
- ETH_ALEN
* 2 - sizeof(tg3_tso_header
);
12566 num_pkts
= DIV_ROUND_UP(val
, TG3_TSO_MSS
);
12568 /* Set the total length field in the IP header */
12569 iph
->tot_len
= htons((u16
)(mss
+ hdr_len
));
12571 base_flags
= (TXD_FLAG_CPU_PRE_DMA
|
12572 TXD_FLAG_CPU_POST_DMA
);
12574 if (tg3_flag(tp
, HW_TSO_1
) ||
12575 tg3_flag(tp
, HW_TSO_2
) ||
12576 tg3_flag(tp
, HW_TSO_3
)) {
12578 val
= ETH_HLEN
+ TG3_TSO_IP_HDR_LEN
;
12579 th
= (struct tcphdr
*)&tx_data
[val
];
12582 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
12584 if (tg3_flag(tp
, HW_TSO_3
)) {
12585 mss
|= (hdr_len
& 0xc) << 12;
12586 if (hdr_len
& 0x10)
12587 base_flags
|= 0x00000010;
12588 base_flags
|= (hdr_len
& 0x3e0) << 5;
12589 } else if (tg3_flag(tp
, HW_TSO_2
))
12590 mss
|= hdr_len
<< 9;
12591 else if (tg3_flag(tp
, HW_TSO_1
) ||
12592 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
12593 mss
|= (TG3_TSO_TCP_OPT_LEN
<< 9);
12595 base_flags
|= (TG3_TSO_TCP_OPT_LEN
<< 10);
12598 data_off
= ETH_ALEN
* 2 + sizeof(tg3_tso_header
);
12601 data_off
= ETH_HLEN
;
12603 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
12604 tx_len
> VLAN_ETH_FRAME_LEN
)
12605 base_flags
|= TXD_FLAG_JMB_PKT
;
12608 for (i
= data_off
; i
< tx_len
; i
++)
12609 tx_data
[i
] = (u8
) (i
& 0xff);
12611 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
12612 if (pci_dma_mapping_error(tp
->pdev
, map
)) {
12613 dev_kfree_skb(skb
);
12617 val
= tnapi
->tx_prod
;
12618 tnapi
->tx_buffers
[val
].skb
= skb
;
12619 dma_unmap_addr_set(&tnapi
->tx_buffers
[val
], mapping
, map
);
12621 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
12626 rx_start_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
12628 budget
= tg3_tx_avail(tnapi
);
12629 if (tg3_tx_frag_set(tnapi
, &val
, &budget
, map
, tx_len
,
12630 base_flags
| TXD_FLAG_END
, mss
, 0)) {
12631 tnapi
->tx_buffers
[val
].skb
= NULL
;
12632 dev_kfree_skb(skb
);
12638 /* Sync BD data before updating mailbox */
12641 tw32_tx_mbox(tnapi
->prodmbox
, tnapi
->tx_prod
);
12642 tr32_mailbox(tnapi
->prodmbox
);
12646 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12647 for (i
= 0; i
< 35; i
++) {
12648 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
12653 tx_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
12654 rx_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
12655 if ((tx_idx
== tnapi
->tx_prod
) &&
12656 (rx_idx
== (rx_start_idx
+ num_pkts
)))
12660 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
- 1, -1);
12661 dev_kfree_skb(skb
);
12663 if (tx_idx
!= tnapi
->tx_prod
)
12666 if (rx_idx
!= rx_start_idx
+ num_pkts
)
12670 while (rx_idx
!= rx_start_idx
) {
12671 desc
= &rnapi
->rx_rcb
[rx_start_idx
++];
12672 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
12673 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
12675 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
12676 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
12679 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
)
12682 if (!tso_loopback
) {
12683 if (rx_len
!= tx_len
)
12686 if (pktsz
<= TG3_RX_STD_DMA_SZ
- ETH_FCS_LEN
) {
12687 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
12690 if (opaque_key
!= RXD_OPAQUE_RING_JUMBO
)
12693 } else if ((desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
12694 (desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
12695 >> RXD_TCPCSUM_SHIFT
!= 0xffff) {
12699 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
12700 rx_data
= tpr
->rx_std_buffers
[desc_idx
].data
;
12701 map
= dma_unmap_addr(&tpr
->rx_std_buffers
[desc_idx
],
12703 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
12704 rx_data
= tpr
->rx_jmb_buffers
[desc_idx
].data
;
12705 map
= dma_unmap_addr(&tpr
->rx_jmb_buffers
[desc_idx
],
12710 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
,
12711 PCI_DMA_FROMDEVICE
);
12713 rx_data
+= TG3_RX_OFFSET(tp
);
12714 for (i
= data_off
; i
< rx_len
; i
++, val
++) {
12715 if (*(rx_data
+ i
) != (u8
) (val
& 0xff))
12722 /* tg3_free_rings will unmap and free the rx_data */
12727 #define TG3_STD_LOOPBACK_FAILED 1
12728 #define TG3_JMB_LOOPBACK_FAILED 2
12729 #define TG3_TSO_LOOPBACK_FAILED 4
12730 #define TG3_LOOPBACK_FAILED \
12731 (TG3_STD_LOOPBACK_FAILED | \
12732 TG3_JMB_LOOPBACK_FAILED | \
12733 TG3_TSO_LOOPBACK_FAILED)
12735 static int tg3_test_loopback(struct tg3
*tp
, u64
*data
, bool do_extlpbk
)
12739 u32 jmb_pkt_sz
= 9000;
12742 jmb_pkt_sz
= tp
->dma_limit
- ETH_HLEN
;
12744 eee_cap
= tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
;
12745 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
12747 if (!netif_running(tp
->dev
)) {
12748 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12749 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12751 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12755 err
= tg3_reset_hw(tp
, 1);
12757 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12758 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12760 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12764 if (tg3_flag(tp
, ENABLE_RSS
)) {
12767 /* Reroute all rx packets to the 1st queue */
12768 for (i
= MAC_RSS_INDIR_TBL_0
;
12769 i
< MAC_RSS_INDIR_TBL_0
+ TG3_RSS_INDIR_TBL_SIZE
; i
+= 4)
12773 /* HW errata - mac loopback fails in some cases on 5780.
12774 * Normal traffic and PHY loopback are not affected by
12775 * errata. Also, the MAC loopback test is deprecated for
12776 * all newer ASIC revisions.
12778 if (tg3_asic_rev(tp
) != ASIC_REV_5780
&&
12779 !tg3_flag(tp
, CPMU_PRESENT
)) {
12780 tg3_mac_loopback(tp
, true);
12782 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12783 data
[TG3_MAC_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
12785 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12786 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12787 data
[TG3_MAC_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
12789 tg3_mac_loopback(tp
, false);
12792 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
12793 !tg3_flag(tp
, USE_PHYLIB
)) {
12796 tg3_phy_lpbk_set(tp
, 0, false);
12798 /* Wait for link */
12799 for (i
= 0; i
< 100; i
++) {
12800 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
12805 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12806 data
[TG3_PHY_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
12807 if (tg3_flag(tp
, TSO_CAPABLE
) &&
12808 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
12809 data
[TG3_PHY_LOOPB_TEST
] |= TG3_TSO_LOOPBACK_FAILED
;
12810 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12811 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12812 data
[TG3_PHY_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
12815 tg3_phy_lpbk_set(tp
, 0, true);
12817 /* All link indications report up, but the hardware
12818 * isn't really ready for about 20 msec. Double it
12823 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12824 data
[TG3_EXT_LOOPB_TEST
] |=
12825 TG3_STD_LOOPBACK_FAILED
;
12826 if (tg3_flag(tp
, TSO_CAPABLE
) &&
12827 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
12828 data
[TG3_EXT_LOOPB_TEST
] |=
12829 TG3_TSO_LOOPBACK_FAILED
;
12830 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12831 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12832 data
[TG3_EXT_LOOPB_TEST
] |=
12833 TG3_JMB_LOOPBACK_FAILED
;
12836 /* Re-enable gphy autopowerdown. */
12837 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
12838 tg3_phy_toggle_apd(tp
, true);
12841 err
= (data
[TG3_MAC_LOOPB_TEST
] | data
[TG3_PHY_LOOPB_TEST
] |
12842 data
[TG3_EXT_LOOPB_TEST
]) ? -EIO
: 0;
12845 tp
->phy_flags
|= eee_cap
;
12850 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
12853 struct tg3
*tp
= netdev_priv(dev
);
12854 bool doextlpbk
= etest
->flags
& ETH_TEST_FL_EXTERNAL_LB
;
12856 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
12857 tg3_power_up(tp
)) {
12858 etest
->flags
|= ETH_TEST_FL_FAILED
;
12859 memset(data
, 1, sizeof(u64
) * TG3_NUM_TEST
);
12863 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
12865 if (tg3_test_nvram(tp
) != 0) {
12866 etest
->flags
|= ETH_TEST_FL_FAILED
;
12867 data
[TG3_NVRAM_TEST
] = 1;
12869 if (!doextlpbk
&& tg3_test_link(tp
)) {
12870 etest
->flags
|= ETH_TEST_FL_FAILED
;
12871 data
[TG3_LINK_TEST
] = 1;
12873 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
12874 int err
, err2
= 0, irq_sync
= 0;
12876 if (netif_running(dev
)) {
12878 tg3_netif_stop(tp
);
12882 tg3_full_lock(tp
, irq_sync
);
12883 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
12884 err
= tg3_nvram_lock(tp
);
12885 tg3_halt_cpu(tp
, RX_CPU_BASE
);
12886 if (!tg3_flag(tp
, 5705_PLUS
))
12887 tg3_halt_cpu(tp
, TX_CPU_BASE
);
12889 tg3_nvram_unlock(tp
);
12891 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
12894 if (tg3_test_registers(tp
) != 0) {
12895 etest
->flags
|= ETH_TEST_FL_FAILED
;
12896 data
[TG3_REGISTER_TEST
] = 1;
12899 if (tg3_test_memory(tp
) != 0) {
12900 etest
->flags
|= ETH_TEST_FL_FAILED
;
12901 data
[TG3_MEMORY_TEST
] = 1;
12905 etest
->flags
|= ETH_TEST_FL_EXTERNAL_LB_DONE
;
12907 if (tg3_test_loopback(tp
, data
, doextlpbk
))
12908 etest
->flags
|= ETH_TEST_FL_FAILED
;
12910 tg3_full_unlock(tp
);
12912 if (tg3_test_interrupt(tp
) != 0) {
12913 etest
->flags
|= ETH_TEST_FL_FAILED
;
12914 data
[TG3_INTERRUPT_TEST
] = 1;
12917 tg3_full_lock(tp
, 0);
12919 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12920 if (netif_running(dev
)) {
12921 tg3_flag_set(tp
, INIT_COMPLETE
);
12922 err2
= tg3_restart_hw(tp
, 1);
12924 tg3_netif_start(tp
);
12927 tg3_full_unlock(tp
);
12929 if (irq_sync
&& !err2
)
12932 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
12933 tg3_power_down(tp
);
12937 static int tg3_hwtstamp_ioctl(struct net_device
*dev
,
12938 struct ifreq
*ifr
, int cmd
)
12940 struct tg3
*tp
= netdev_priv(dev
);
12941 struct hwtstamp_config stmpconf
;
12943 if (!tg3_flag(tp
, PTP_CAPABLE
))
12946 if (copy_from_user(&stmpconf
, ifr
->ifr_data
, sizeof(stmpconf
)))
12949 if (stmpconf
.flags
)
12952 switch (stmpconf
.tx_type
) {
12953 case HWTSTAMP_TX_ON
:
12954 tg3_flag_set(tp
, TX_TSTAMP_EN
);
12956 case HWTSTAMP_TX_OFF
:
12957 tg3_flag_clear(tp
, TX_TSTAMP_EN
);
12963 switch (stmpconf
.rx_filter
) {
12964 case HWTSTAMP_FILTER_NONE
:
12967 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
12968 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
12969 TG3_RX_PTP_CTL_ALL_V1_EVENTS
;
12971 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
12972 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
12973 TG3_RX_PTP_CTL_SYNC_EVNT
;
12975 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
12976 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
12977 TG3_RX_PTP_CTL_DELAY_REQ
;
12979 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
12980 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
12981 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
12983 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
12984 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
12985 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
12987 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
12988 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
12989 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
12991 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
12992 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
12993 TG3_RX_PTP_CTL_SYNC_EVNT
;
12995 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
12996 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
12997 TG3_RX_PTP_CTL_SYNC_EVNT
;
12999 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
13000 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13001 TG3_RX_PTP_CTL_SYNC_EVNT
;
13003 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
13004 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13005 TG3_RX_PTP_CTL_DELAY_REQ
;
13007 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
13008 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13009 TG3_RX_PTP_CTL_DELAY_REQ
;
13011 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
13012 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13013 TG3_RX_PTP_CTL_DELAY_REQ
;
13019 if (netif_running(dev
) && tp
->rxptpctl
)
13020 tw32(TG3_RX_PTP_CTL
,
13021 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
13023 return copy_to_user(ifr
->ifr_data
, &stmpconf
, sizeof(stmpconf
)) ?
13027 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
13029 struct mii_ioctl_data
*data
= if_mii(ifr
);
13030 struct tg3
*tp
= netdev_priv(dev
);
13033 if (tg3_flag(tp
, USE_PHYLIB
)) {
13034 struct phy_device
*phydev
;
13035 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
13037 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
13038 return phy_mii_ioctl(phydev
, ifr
, cmd
);
13043 data
->phy_id
= tp
->phy_addr
;
13046 case SIOCGMIIREG
: {
13049 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
13050 break; /* We have no PHY */
13052 if (!netif_running(dev
))
13055 spin_lock_bh(&tp
->lock
);
13056 err
= __tg3_readphy(tp
, data
->phy_id
& 0x1f,
13057 data
->reg_num
& 0x1f, &mii_regval
);
13058 spin_unlock_bh(&tp
->lock
);
13060 data
->val_out
= mii_regval
;
13066 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
13067 break; /* We have no PHY */
13069 if (!netif_running(dev
))
13072 spin_lock_bh(&tp
->lock
);
13073 err
= __tg3_writephy(tp
, data
->phy_id
& 0x1f,
13074 data
->reg_num
& 0x1f, data
->val_in
);
13075 spin_unlock_bh(&tp
->lock
);
13079 case SIOCSHWTSTAMP
:
13080 return tg3_hwtstamp_ioctl(dev
, ifr
, cmd
);
13086 return -EOPNOTSUPP
;
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
13097 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
13099 struct tg3
*tp
= netdev_priv(dev
);
13100 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
13101 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
13103 if (!tg3_flag(tp
, 5705_PLUS
)) {
13104 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
13105 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
13106 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
13107 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
13110 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
13111 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
13112 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
13113 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
13114 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
13115 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
13116 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
13117 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
13118 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
13119 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
13122 /* No rx interrupts will be generated if both are zero */
13123 if ((ec
->rx_coalesce_usecs
== 0) &&
13124 (ec
->rx_max_coalesced_frames
== 0))
13127 /* No tx interrupts will be generated if both are zero */
13128 if ((ec
->tx_coalesce_usecs
== 0) &&
13129 (ec
->tx_max_coalesced_frames
== 0))
13132 /* Only copy relevant parameters, ignore all others. */
13133 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
13134 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
13135 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
13136 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
13137 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
13138 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
13139 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
13140 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
13141 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
13143 if (netif_running(dev
)) {
13144 tg3_full_lock(tp
, 0);
13145 __tg3_set_coalesce(tp
, &tp
->coal
);
13146 tg3_full_unlock(tp
);
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;
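	/* On 5780-class devices jumbo frames and TSO cannot be used at the
	 * same time, so TSO capability is dropped while the MTU is above
	 * ETH_DATA_LEN and restored once it falls back below it (this is
	 * inferred from the flag handling below).
	 */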
	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
13236 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
13238 struct tg3
*tp
= netdev_priv(dev
);
13239 int err
, reset_phy
= 0;
13241 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
13244 if (!netif_running(dev
)) {
13245 /* We'll just catch it later when the
13248 tg3_set_mtu(dev
, tp
, new_mtu
);
13254 tg3_netif_stop(tp
);
13256 tg3_full_lock(tp
, 1);
13258 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
13260 tg3_set_mtu(dev
, tp
, new_mtu
);
13262 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13263 * breaks all requests to 256 bytes.
13265 if (tg3_asic_rev(tp
) == ASIC_REV_57766
)
13268 err
= tg3_restart_hw(tp
, reset_phy
);
13271 tg3_netif_start(tp
);
13273 tg3_full_unlock(tp
);
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;
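	/* For example, on a 512-byte part a read at offset 512 wraps back to
	 * offset 0 and returns the validation signature again, so the probe
	 * below stops with cursize == 512.
	 */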
	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
13333 static void tg3_get_nvram_size(struct tg3
*tp
)
13337 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &val
) != 0)
13340 /* Selfboot format */
13341 if (val
!= TG3_EEPROM_MAGIC
) {
13342 tg3_get_eeprom_size(tp
);
13346 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
13348 /* This is confusing. We want to operate on the
13349 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13350 * call will read from NVRAM and byteswap the data
13351 * according to the byteswapping settings for all
13352 * other register accesses. This ensures the data we
13353 * want will always reside in the lower 16-bits.
13354 * However, the data in NVRAM is in LE format, which
13355 * means the data from the NVRAM read will always be
13356 * opposite the endianness of the CPU. The 16-bit
13357 * byteswap then brings the data to CPU endianness.
13359 tp
->nvram_size
= swab16((u16
)(val
& 0x0000ffff)) * 1024;
13363 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13366 static void tg3_get_nvram_info(struct tg3
*tp
)
13370 nvcfg1
= tr32(NVRAM_CFG1
);
13371 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
13372 tg3_flag_set(tp
, FLASH
);
13374 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13375 tw32(NVRAM_CFG1
, nvcfg1
);
13378 if (tg3_asic_rev(tp
) == ASIC_REV_5750
||
13379 tg3_flag(tp
, 5780_CLASS
)) {
13380 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
13381 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
13382 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13383 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
13384 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13386 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
13387 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13388 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
13390 case FLASH_VENDOR_ATMEL_EEPROM
:
13391 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13392 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13393 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13395 case FLASH_VENDOR_ST
:
13396 tp
->nvram_jedecnum
= JEDEC_ST
;
13397 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
13398 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13400 case FLASH_VENDOR_SAIFUN
:
13401 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
13402 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
13404 case FLASH_VENDOR_SST_SMALL
:
13405 case FLASH_VENDOR_SST_LARGE
:
13406 tp
->nvram_jedecnum
= JEDEC_SST
;
13407 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
13411 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13412 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
13413 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13417 static void tg3_nvram_get_pagesize(struct tg3
*tp
, u32 nvmcfg1
)
13419 switch (nvmcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
13420 case FLASH_5752PAGE_SIZE_256
:
13421 tp
->nvram_pagesize
= 256;
13423 case FLASH_5752PAGE_SIZE_512
:
13424 tp
->nvram_pagesize
= 512;
13426 case FLASH_5752PAGE_SIZE_1K
:
13427 tp
->nvram_pagesize
= 1024;
13429 case FLASH_5752PAGE_SIZE_2K
:
13430 tp
->nvram_pagesize
= 2048;
13432 case FLASH_5752PAGE_SIZE_4K
:
13433 tp
->nvram_pagesize
= 4096;
13435 case FLASH_5752PAGE_SIZE_264
:
13436 tp
->nvram_pagesize
= 264;
13438 case FLASH_5752PAGE_SIZE_528
:
13439 tp
->nvram_pagesize
= 528;
13444 static void tg3_get_5752_nvram_info(struct tg3
*tp
)
13448 nvcfg1
= tr32(NVRAM_CFG1
);
13450 /* NVRAM protection for TPM */
13451 if (nvcfg1
& (1 << 27))
13452 tg3_flag_set(tp
, PROTECTED_NVRAM
);
13454 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13455 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
13456 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
13457 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13458 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13460 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
13461 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13462 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13463 tg3_flag_set(tp
, FLASH
);
13465 case FLASH_5752VENDOR_ST_M45PE10
:
13466 case FLASH_5752VENDOR_ST_M45PE20
:
13467 case FLASH_5752VENDOR_ST_M45PE40
:
13468 tp
->nvram_jedecnum
= JEDEC_ST
;
13469 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13470 tg3_flag_set(tp
, FLASH
);
13474 if (tg3_flag(tp
, FLASH
)) {
13475 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13477 /* For eeprom, set pagesize to maximum eeprom size */
13478 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13480 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13481 tw32(NVRAM_CFG1
, nvcfg1
);
13485 static void tg3_get_5755_nvram_info(struct tg3
*tp
)
13487 u32 nvcfg1
, protect
= 0;
13489 nvcfg1
= tr32(NVRAM_CFG1
);
13491 /* NVRAM protection for TPM */
13492 if (nvcfg1
& (1 << 27)) {
13493 tg3_flag_set(tp
, PROTECTED_NVRAM
);
13497 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
13499 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
13500 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
13501 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
13502 case FLASH_5755VENDOR_ATMEL_FLASH_5
:
13503 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13504 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13505 tg3_flag_set(tp
, FLASH
);
13506 tp
->nvram_pagesize
= 264;
13507 if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_1
||
13508 nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_5
)
13509 tp
->nvram_size
= (protect
? 0x3e200 :
13510 TG3_NVRAM_SIZE_512KB
);
13511 else if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_2
)
13512 tp
->nvram_size
= (protect
? 0x1f200 :
13513 TG3_NVRAM_SIZE_256KB
);
13515 tp
->nvram_size
= (protect
? 0x1f200 :
13516 TG3_NVRAM_SIZE_128KB
);
13518 case FLASH_5752VENDOR_ST_M45PE10
:
13519 case FLASH_5752VENDOR_ST_M45PE20
:
13520 case FLASH_5752VENDOR_ST_M45PE40
:
13521 tp
->nvram_jedecnum
= JEDEC_ST
;
13522 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13523 tg3_flag_set(tp
, FLASH
);
13524 tp
->nvram_pagesize
= 256;
13525 if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE10
)
13526 tp
->nvram_size
= (protect
?
13527 TG3_NVRAM_SIZE_64KB
:
13528 TG3_NVRAM_SIZE_128KB
);
13529 else if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE20
)
13530 tp
->nvram_size
= (protect
?
13531 TG3_NVRAM_SIZE_64KB
:
13532 TG3_NVRAM_SIZE_256KB
);
13534 tp
->nvram_size
= (protect
?
13535 TG3_NVRAM_SIZE_128KB
:
13536 TG3_NVRAM_SIZE_512KB
);
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
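
/* Illustrative note (not part of the original source): this lookup is a
 * fallback for boards whose NVRAM carries no usable PHY ID.  The PHY probe
 * path later in this file consults it roughly as:
 *
 *	struct subsys_tbl_ent *p = tg3_lookup_by_subsys(tp);
 *	if (p)
 *		tp->phy_id = p->phy_id;
 *
 * The NULL return must always be checked, since most boards are not listed
 * in the table above.
 */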
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
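
/* Worked example (illustration only, not part of the original source):
 * assuming NIC_SRAM_DATA_PHY_ID1_MASK/ID2_MASK select the upper and lower
 * 16 bits of the word, a NIC_SRAM_DATA_PHY_ID value of 0x002060b0 gives
 * id1 = 0x00200000 and id2 = 0x000060b0, so the packing above yields
 *	(0x00200000 >> 16) << 10 = 0x00008000
 *	(0x60b0 & 0xfc00) << 16  = 0x60000000
 *	(0x60b0 & 0x03ff)        = 0x000000b0
 * and therefore eeprom_phy_id = 0x600080b0.
 */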
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
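
/* Illustration only (not part of the driver): if the two OTP words read back
 * as thalf_otp = 0xaaaabbbb and bhalf_otp = 0xccccdddd, the merge above gives
 *	((0xaaaabbbb & 0x0000ffff) << 16) | (0xccccdddd >> 16) = 0xbbbbcccc,
 * i.e. the low half of the first word concatenated with the high half of the
 * second -- the 32-bit gphy config that straddles the alignment boundary.
 */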
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
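
/* Illustration only: assuming TG3_NVM_BCVER_MAJMSK/MAJSFT select an 8-bit
 * major field directly above an 8-bit minor field (an assumption for this
 * example, not taken from the source), a pointer-table word of 0x0000030a
 * decodes as major = 3, minor = 10 and is formatted as "v3.10" by the
 * snprintf() above.
 */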
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
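
/* Illustration only: a decoded (major, minor, build) of (1, 2, 3) is emitted
 * as " v1.02" followed by the build letter 'a' + 3 - 1 = 'c', i.e. " v1.02c";
 * the minor > 99 and build > 26 checks above keep both fields printable.
 */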
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
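
/* Illustration only: PCI devfn packs the slot number in bits 7:3 and the
 * function number in bits 2:0, so for devfn 0x0a (slot 1, function 2) the
 * expression devfn & ~7 yields 0x08, and the loop above probes functions
 * 0..7 of that same slot looking for the other port of a dual-port device.
 */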
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
15120 static int tg3_get_invariants(struct tg3
*tp
, const struct pci_device_id
*ent
)
15123 u32 pci_state_reg
, grc_misc_cfg
;
15128 /* Force memory write invalidate off. If we leave it on,
15129 * then on 5700_BX chips we have to enable a workaround.
15130 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15131 * to match the cacheline size. The Broadcom driver have this
15132 * workaround but turns MWI off all the times so never uses
15133 * it. This seems to suggest that the workaround is insufficient.
15135 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15136 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
15137 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15139 /* Important! -- Make sure register accesses are byteswapped
15140 * correctly. Also, for those chips that require it, make
15141 * sure that indirect register accesses are enabled before
15142 * the first operation.
15144 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15146 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
15147 MISC_HOST_CTRL_CHIPREV
);
15148 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15149 tp
->misc_host_ctrl
);
15151 tg3_detect_asic_rev(tp
, misc_ctrl_reg
);
15153 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15154 * we need to disable memory and use config. cycles
15155 * only to access all registers. The 5702/03 chips
15156 * can mistakenly decode the special cycles from the
15157 * ICH chipsets as memory write cycles, causing corruption
15158 * of register and memory space. Only certain ICH bridges
15159 * will drive special cycles with non-zero data during the
15160 * address phase which can fall within the 5703's address
15161 * range. This is not an ICH bug as the PCI spec allows
15162 * non-zero address during special cycles. However, only
15163 * these ICH bridges are known to drive non-zero addresses
15164 * during special cycles.
15166 * Since special cycles do not cross PCI bridges, we only
15167 * enable this workaround if the 5703 is on the secondary
15168 * bus of these ICH bridges.
15170 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
) ||
15171 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A2
)) {
15172 static struct tg3_dev_id
{
15176 } ich_chipsets
[] = {
15177 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
15179 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
15181 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
15183 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
15187 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
15188 struct pci_dev
*bridge
= NULL
;
15190 while (pci_id
->vendor
!= 0) {
15191 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
15197 if (pci_id
->rev
!= PCI_ANY_ID
) {
15198 if (bridge
->revision
> pci_id
->rev
)
15201 if (bridge
->subordinate
&&
15202 (bridge
->subordinate
->number
==
15203 tp
->pdev
->bus
->number
)) {
15204 tg3_flag_set(tp
, ICH_WORKAROUND
);
15205 pci_dev_put(bridge
);
15211 if (tg3_asic_rev(tp
) == ASIC_REV_5701
) {
15212 static struct tg3_dev_id
{
15215 } bridge_chipsets
[] = {
15216 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
15217 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
15220 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
15221 struct pci_dev
*bridge
= NULL
;
15223 while (pci_id
->vendor
!= 0) {
15224 bridge
= pci_get_device(pci_id
->vendor
,
15231 if (bridge
->subordinate
&&
15232 (bridge
->subordinate
->number
<=
15233 tp
->pdev
->bus
->number
) &&
15234 (bridge
->subordinate
->busn_res
.end
>=
15235 tp
->pdev
->bus
->number
)) {
15236 tg3_flag_set(tp
, 5701_DMA_BUG
);
15237 pci_dev_put(bridge
);
15243 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15244 * DMA addresses > 40-bit. This bridge may have other additional
15245 * 57xx devices behind it in some 4-port NIC designs for example.
15246 * Any tg3 device found behind the bridge will also need the 40-bit
15249 if (tg3_flag(tp
, 5780_CLASS
)) {
15250 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15251 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
15253 struct pci_dev
*bridge
= NULL
;
15256 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
15257 PCI_DEVICE_ID_SERVERWORKS_EPB
,
15259 if (bridge
&& bridge
->subordinate
&&
15260 (bridge
->subordinate
->number
<=
15261 tp
->pdev
->bus
->number
) &&
15262 (bridge
->subordinate
->busn_res
.end
>=
15263 tp
->pdev
->bus
->number
)) {
15264 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15265 pci_dev_put(bridge
);
15271 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
15272 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15273 tp
->pdev_peer
= tg3_find_peer(tp
);
15275 /* Determine TSO capabilities */
15276 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
)
15277 ; /* Do nothing. HW bug. */
15278 else if (tg3_flag(tp
, 57765_PLUS
))
15279 tg3_flag_set(tp
, HW_TSO_3
);
15280 else if (tg3_flag(tp
, 5755_PLUS
) ||
15281 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15282 tg3_flag_set(tp
, HW_TSO_2
);
15283 else if (tg3_flag(tp
, 5750_PLUS
)) {
15284 tg3_flag_set(tp
, HW_TSO_1
);
15285 tg3_flag_set(tp
, TSO_BUG
);
15286 if (tg3_asic_rev(tp
) == ASIC_REV_5750
&&
15287 tg3_chip_rev_id(tp
) >= CHIPREV_ID_5750_C2
)
15288 tg3_flag_clear(tp
, TSO_BUG
);
15289 } else if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
15290 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
15291 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
15292 tg3_flag_set(tp
, TSO_BUG
);
15293 if (tg3_asic_rev(tp
) == ASIC_REV_5705
)
15294 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
15296 tp
->fw_needed
= FIRMWARE_TG3TSO
;
15299 /* Selectively allow TSO based on operating conditions */
15300 if (tg3_flag(tp
, HW_TSO_1
) ||
15301 tg3_flag(tp
, HW_TSO_2
) ||
15302 tg3_flag(tp
, HW_TSO_3
) ||
15304 /* For firmware TSO, assume ASF is disabled.
15305 * We'll disable TSO later if we discover ASF
15306 * is enabled in tg3_get_eeprom_hw_cfg().
15308 tg3_flag_set(tp
, TSO_CAPABLE
);
15310 tg3_flag_clear(tp
, TSO_CAPABLE
);
15311 tg3_flag_clear(tp
, TSO_BUG
);
15312 tp
->fw_needed
= NULL
;
15315 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
)
15316 tp
->fw_needed
= FIRMWARE_TG3
;
15320 if (tg3_flag(tp
, 5750_PLUS
)) {
15321 tg3_flag_set(tp
, SUPPORT_MSI
);
15322 if (tg3_chip_rev(tp
) == CHIPREV_5750_AX
||
15323 tg3_chip_rev(tp
) == CHIPREV_5750_BX
||
15324 (tg3_asic_rev(tp
) == ASIC_REV_5714
&&
15325 tg3_chip_rev_id(tp
) <= CHIPREV_ID_5714_A2
&&
15326 tp
->pdev_peer
== tp
->pdev
))
15327 tg3_flag_clear(tp
, SUPPORT_MSI
);
15329 if (tg3_flag(tp
, 5755_PLUS
) ||
15330 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15331 tg3_flag_set(tp
, 1SHOT_MSI
);
15334 if (tg3_flag(tp
, 57765_PLUS
)) {
15335 tg3_flag_set(tp
, SUPPORT_MSIX
);
15336 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
15342 if (tp
->irq_max
> 1) {
15343 tp
->rxq_max
= TG3_RSS_MAX_NUM_QS
;
15344 tg3_rss_init_dflt_indir_tbl(tp
, TG3_RSS_MAX_NUM_QS
);
15346 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
15347 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15348 tp
->txq_max
= tp
->irq_max
- 1;
15351 if (tg3_flag(tp
, 5755_PLUS
) ||
15352 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15353 tg3_flag_set(tp
, SHORT_DMA_BUG
);
15355 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
15356 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_4K
;
15358 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
15359 tg3_asic_rev(tp
) == ASIC_REV_5719
||
15360 tg3_asic_rev(tp
) == ASIC_REV_5720
||
15361 tg3_asic_rev(tp
) == ASIC_REV_5762
)
15362 tg3_flag_set(tp
, LRG_PROD_RING_CAP
);
15364 if (tg3_flag(tp
, 57765_PLUS
) &&
15365 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
)
15366 tg3_flag_set(tp
, USE_JUMBO_BDFLAG
);
15368 if (!tg3_flag(tp
, 5705_PLUS
) ||
15369 tg3_flag(tp
, 5780_CLASS
) ||
15370 tg3_flag(tp
, USE_JUMBO_BDFLAG
))
15371 tg3_flag_set(tp
, JUMBO_CAPABLE
);
15373 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
15376 if (pci_is_pcie(tp
->pdev
)) {
15379 tg3_flag_set(tp
, PCI_EXPRESS
);
15381 pcie_capability_read_word(tp
->pdev
, PCI_EXP_LNKCTL
, &lnkctl
);
15382 if (lnkctl
& PCI_EXP_LNKCTL_CLKREQ_EN
) {
15383 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15384 tg3_flag_clear(tp
, HW_TSO_2
);
15385 tg3_flag_clear(tp
, TSO_CAPABLE
);
15387 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
15388 tg3_asic_rev(tp
) == ASIC_REV_5761
||
15389 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A0
||
15390 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A1
)
15391 tg3_flag_set(tp
, CLKREQ_BUG
);
15392 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5717_A0
) {
15393 tg3_flag_set(tp
, L1PLLPD_EN
);
15395 } else if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
15396 /* BCM5785 devices are effectively PCIe devices, and should
15397 * follow PCIe codepaths, but do not have a PCIe capabilities
15400 tg3_flag_set(tp
, PCI_EXPRESS
);
15401 } else if (!tg3_flag(tp
, 5705_PLUS
) ||
15402 tg3_flag(tp
, 5780_CLASS
)) {
15403 tp
->pcix_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_PCIX
);
15404 if (!tp
->pcix_cap
) {
15405 dev_err(&tp
->pdev
->dev
,
15406 "Cannot find PCI-X capability, aborting\n");
15410 if (!(pci_state_reg
& PCISTATE_CONV_PCI_MODE
))
15411 tg3_flag_set(tp
, PCIX_MODE
);
15414 /* If we have an AMD 762 or VIA K8T800 chipset, write
15415 * reordering to the mailbox registers done by the host
15416 * controller can cause major troubles. We read back from
15417 * every mailbox register write to force the writes to be
15418 * posted to the chip in order.
15420 if (pci_dev_present(tg3_write_reorder_chipsets
) &&
15421 !tg3_flag(tp
, PCI_EXPRESS
))
15422 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
15424 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
15425 &tp
->pci_cacheline_sz
);
15426 pci_read_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15427 &tp
->pci_lat_timer
);
15428 if (tg3_asic_rev(tp
) == ASIC_REV_5703
&&
15429 tp
->pci_lat_timer
< 64) {
15430 tp
->pci_lat_timer
= 64;
15431 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15432 tp
->pci_lat_timer
);
15435 /* Important! -- It is critical that the PCI-X hw workaround
15436 * situation is decided before the first MMIO register access.
15438 if (tg3_chip_rev(tp
) == CHIPREV_5700_BX
) {
15439 /* 5700 BX chips need to have their TX producer index
15440 * mailboxes written twice to workaround a bug.
15442 tg3_flag_set(tp
, TXD_MBOX_HWBUG
);
15444 /* If we are in PCI-X mode, enable register write workaround.
15446 * The workaround is to use indirect register accesses
15447 * for all chip writes not to mailbox registers.
15449 if (tg3_flag(tp
, PCIX_MODE
)) {
15452 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
15454 /* The chip can have it's power management PCI config
15455 * space registers clobbered due to this bug.
15456 * So explicitly force the chip into D0 here.
15458 pci_read_config_dword(tp
->pdev
,
15459 tp
->pm_cap
+ PCI_PM_CTRL
,
15461 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
15462 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
15463 pci_write_config_dword(tp
->pdev
,
15464 tp
->pm_cap
+ PCI_PM_CTRL
,
15467 /* Also, force SERR#/PERR# in PCI command. */
15468 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15469 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
15470 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15474 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
15475 tg3_flag_set(tp
, PCI_HIGH_SPEED
);
15476 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
15477 tg3_flag_set(tp
, PCI_32BIT
);
	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
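
	/* Editorial note (not in the original source): the tr32()/tw32() style
	 * accessors used throughout this file are thin wrappers (see tg3.h)
	 * that dispatch through the tp->read32/tp->write32 and *_mbox function
	 * pointers assigned above, which is how the indirect, read-back-flushed
	 * and 5906 mailbox workarounds are substituted without changing any of
	 * the fast-path callers.
	 */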
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
	}

	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
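
	/* Editorial note (not in the original source): the ring sizes above are
	 * powers of two, so "size - 1" yields a wrap mask.  For example, a
	 * 512-entry standard ring gives a mask of 0x1ff, which lets producer
	 * and consumer indices wrap with a cheap bitwise AND instead of a
	 * modulo operation.
	 */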
	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >>  8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >>  8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
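
/* Editorial sketch, not part of the original driver: the MAC address mailbox
 * read above packs the station address into two 32-bit words, the high word
 * carrying a 0x484b ("HK") signature plus address bytes 0-1 and the low word
 * bytes 2-5.  The helper below, with an invented name, merely restates that
 * unpacking in isolation.
 */
static inline void tg3_example_unpack_macaddr(u32 hi, u32 lo, u8 *addr)
{
	addr[0] = (hi >>  8) & 0xff;	/* high word: signature | byte 0 | byte 1 */
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;	/* low word: bytes 2..5, most significant first */
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
}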
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
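
/* Editorial sketch, not part of the original driver: PCI config space reports
 * PCI_CACHE_LINE_SIZE in 32-bit words, which is why the code above multiplies
 * the raw byte by 4 (a raw value of 16 means a 64-byte cache line) and falls
 * back to 1024 when the register reads zero.  A hypothetical restatement:
 */
static inline int tg3_example_cacheline_bytes(u8 cls_words)
{
	/* 0 means the firmware/BIOS never programmed it; assume the worst case. */
	return cls_words ? (int)cls_words * 4 : 1024;
}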
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
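
	/* Editorial note (not in the original source): the loop above copies
	 * the test descriptor word by word into NIC-internal SRAM at
	 * sram_dma_descs, going through the TG3PCI_MEM_WIN_* config-space
	 * window rather than MMIO, and the window base is cleared again once
	 * the copy is done.
	 */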
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
		}
#endif

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
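
/* Editorial sketch, not part of the original driver: the self test above seeds
 * the coherent buffer with its own word index, DMAs it to the chip and back,
 * and compares word by word.  A hypothetical stand-alone restatement of the
 * verify step (names invented for illustration):
 */
static inline int tg3_example_verify_test_pattern(const u32 *buf, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		if (buf[i] != (u32)i)	/* buffer was seeded with p[i] = i */
			return -ENODEV;

	return 0;
}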
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
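
/* Editorial note (not in the original source): tp->coal holds the defaults
 * that the driver's ethtool coalesce handlers report and update, so these are
 * typically the values a user would see from "ethtool -c <iface>" before any
 * tuning.
 */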
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
	}

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err, err2;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (!err2) {
			tg3_timer_start(tp);

			netif_device_attach(dev);
			tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (!err) {
		tg3_timer_start(tp);

		tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	if (!netif_running(netdev))
		goto done;

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

done:
	return;
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);