2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
18 #include <linux/config.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
43 #include <net/checksum.h>
45 #include <asm/system.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
59 #define TG3_VLAN_TAG_USED 0
63 #define TG3_TSO_SUPPORT 1
65 #define TG3_TSO_SUPPORT 0
70 #define DRV_MODULE_NAME "tg3"
71 #define PFX DRV_MODULE_NAME ": "
72 #define DRV_MODULE_VERSION "3.51"
73 #define DRV_MODULE_RELDATE "Feb 21, 2006"
75 #define TG3_DEF_MAC_MODE 0
76 #define TG3_DEF_RX_MODE 0
77 #define TG3_DEF_TX_MODE 0
78 #define TG3_DEF_MSG_ENABLE \
88 /* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
91 #define TG3_TX_TIMEOUT (5 * HZ)
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU 60
95 #define TG3_MAX_MTU(tp) \
96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
102 #define TG3_RX_RING_SIZE 512
103 #define TG3_DEF_RX_RING_PENDING 200
104 #define TG3_RX_JUMBO_RING_SIZE 256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
107 /* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
113 #define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
116 #define TG3_TX_RING_SIZE 512
117 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
119 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
127 #define TX_BUFFS_AVAIL(TP) \
128 ((TP)->tx_pending - \
129 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
132 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
141 #define TG3_NUM_TEST 6
143 static char version
[] __devinitdata
=
144 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION
);
151 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug
, int, 0);
153 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
155 static struct pci_device_id tg3_pci_tbl
[] = {
156 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
,
157 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
,
159 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
,
161 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
,
163 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
,
165 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
,
167 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
,
169 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
,
171 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
,
173 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
,
175 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
,
177 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
,
179 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
,
181 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
,
183 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
,
185 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
,
187 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
,
189 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
,
191 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
,
193 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
,
195 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
,
197 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
,
199 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5720
,
201 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
,
203 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750
,
205 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
,
207 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750M
,
209 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
,
211 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
,
213 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
214 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
,
215 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
216 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
,
217 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
218 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
,
219 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
,
221 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
,
223 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
,
225 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
,
227 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
,
229 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
,
231 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
232 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
,
233 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
234 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
,
235 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
236 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
,
237 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
238 { PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
,
239 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
240 { PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
,
241 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
242 { PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
,
243 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
244 { PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
,
245 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
246 { PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
,
247 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
248 { PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
,
249 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
250 { PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
,
251 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
255 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
258 const char string
[ETH_GSTRING_LEN
];
259 } ethtool_stats_keys
[TG3_NUM_STATS
] = {
262 { "rx_ucast_packets" },
263 { "rx_mcast_packets" },
264 { "rx_bcast_packets" },
266 { "rx_align_errors" },
267 { "rx_xon_pause_rcvd" },
268 { "rx_xoff_pause_rcvd" },
269 { "rx_mac_ctrl_rcvd" },
270 { "rx_xoff_entered" },
271 { "rx_frame_too_long_errors" },
273 { "rx_undersize_packets" },
274 { "rx_in_length_errors" },
275 { "rx_out_length_errors" },
276 { "rx_64_or_less_octet_packets" },
277 { "rx_65_to_127_octet_packets" },
278 { "rx_128_to_255_octet_packets" },
279 { "rx_256_to_511_octet_packets" },
280 { "rx_512_to_1023_octet_packets" },
281 { "rx_1024_to_1522_octet_packets" },
282 { "rx_1523_to_2047_octet_packets" },
283 { "rx_2048_to_4095_octet_packets" },
284 { "rx_4096_to_8191_octet_packets" },
285 { "rx_8192_to_9022_octet_packets" },
292 { "tx_flow_control" },
294 { "tx_single_collisions" },
295 { "tx_mult_collisions" },
297 { "tx_excessive_collisions" },
298 { "tx_late_collisions" },
299 { "tx_collide_2times" },
300 { "tx_collide_3times" },
301 { "tx_collide_4times" },
302 { "tx_collide_5times" },
303 { "tx_collide_6times" },
304 { "tx_collide_7times" },
305 { "tx_collide_8times" },
306 { "tx_collide_9times" },
307 { "tx_collide_10times" },
308 { "tx_collide_11times" },
309 { "tx_collide_12times" },
310 { "tx_collide_13times" },
311 { "tx_collide_14times" },
312 { "tx_collide_15times" },
313 { "tx_ucast_packets" },
314 { "tx_mcast_packets" },
315 { "tx_bcast_packets" },
316 { "tx_carrier_sense_errors" },
320 { "dma_writeq_full" },
321 { "dma_write_prioq_full" },
325 { "rx_threshold_hit" },
327 { "dma_readq_full" },
328 { "dma_read_prioq_full" },
329 { "tx_comp_queue_full" },
331 { "ring_set_send_prod_index" },
332 { "ring_status_update" },
334 { "nic_avoided_irqs" },
335 { "nic_tx_threshold_hit" }
339 const char string
[ETH_GSTRING_LEN
];
340 } ethtool_test_keys
[TG3_NUM_TEST
] = {
341 { "nvram test (online) " },
342 { "link test (online) " },
343 { "register test (offline)" },
344 { "memory test (offline)" },
345 { "loopback test (offline)" },
346 { "interrupt test (offline)" },
349 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
351 writel(val
, tp
->regs
+ off
);
354 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
356 return (readl(tp
->regs
+ off
));
359 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
363 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
364 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
365 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
366 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
369 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
371 writel(val
, tp
->regs
+ off
);
372 readl(tp
->regs
+ off
);
375 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
380 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
381 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
382 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
383 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
387 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
391 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
392 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
393 TG3_64BIT_REG_LOW
, val
);
396 if (off
== (MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
)) {
397 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
398 TG3_64BIT_REG_LOW
, val
);
402 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
403 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
404 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
405 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
407 /* In indirect mode when disabling interrupts, we also need
408 * to clear the interrupt bit in the GRC local ctrl register.
410 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
412 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
413 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
417 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
422 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
423 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
424 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
425 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
429 /* usec_wait specifies the wait time in usec when writing to certain registers
430 * where it is unsafe to read back the register without some delay.
431 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
432 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
434 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
436 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) ||
437 (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
438 /* Non-posted methods */
439 tp
->write32(tp
, off
, val
);
442 tg3_write32(tp
, off
, val
);
447 /* Wait again after the read for the posted method to guarantee that
448 * the wait time is met.
454 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
456 tp
->write32_mbox(tp
, off
, val
);
457 if (!(tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
) &&
458 !(tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
459 tp
->read32_mbox(tp
, off
);
462 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
464 void __iomem
*mbox
= tp
->regs
+ off
;
466 if (tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
)
468 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
472 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
473 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
474 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
475 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
476 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
478 #define tw32(reg,val) tp->write32(tp, reg, val)
479 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
480 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
481 #define tr32(reg) tp->read32(tp, reg)
483 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
487 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
488 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
489 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
491 /* Always leave this as zero. */
492 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
493 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
496 static void tg3_write_mem_fast(struct tg3
*tp
, u32 off
, u32 val
)
498 /* If no workaround is needed, write to mem space directly */
499 if (tp
->write32
!= tg3_write_indirect_reg32
)
500 tw32(NIC_SRAM_WIN_BASE
+ off
, val
);
502 tg3_write_mem(tp
, off
, val
);
505 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
509 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
510 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
511 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
513 /* Always leave this as zero. */
514 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
515 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
518 static void tg3_disable_ints(struct tg3
*tp
)
520 tw32(TG3PCI_MISC_HOST_CTRL
,
521 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
522 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
525 static inline void tg3_cond_int(struct tg3
*tp
)
527 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
528 (tp
->hw_status
->status
& SD_STATUS_UPDATED
))
529 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
532 static void tg3_enable_ints(struct tg3
*tp
)
537 tw32(TG3PCI_MISC_HOST_CTRL
,
538 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
539 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
540 (tp
->last_tag
<< 24));
544 static inline unsigned int tg3_has_work(struct tg3
*tp
)
546 struct tg3_hw_status
*sblk
= tp
->hw_status
;
547 unsigned int work_exists
= 0;
549 /* check for phy events */
550 if (!(tp
->tg3_flags
&
551 (TG3_FLAG_USE_LINKCHG_REG
|
552 TG3_FLAG_POLL_SERDES
))) {
553 if (sblk
->status
& SD_STATUS_LINK_CHG
)
556 /* check for RX/TX work to do */
557 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
||
558 sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
)
565 * similar to tg3_enable_ints, but it accurately determines whether there
566 * is new work pending and can return without flushing the PIO write
567 * which reenables interrupts
569 static void tg3_restart_ints(struct tg3
*tp
)
571 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
575 /* When doing tagged status, this work check is unnecessary.
576 * The last_tag we write above tells the chip which piece of
577 * work we've completed.
579 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
581 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
582 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
585 static inline void tg3_netif_stop(struct tg3
*tp
)
587 tp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
588 netif_poll_disable(tp
->dev
);
589 netif_tx_disable(tp
->dev
);
592 static inline void tg3_netif_start(struct tg3
*tp
)
594 netif_wake_queue(tp
->dev
);
595 /* NOTE: unconditional netif_wake_queue is only appropriate
596 * so long as all callers are assured to have free tx slots
597 * (such as after tg3_init_hw)
599 netif_poll_enable(tp
->dev
);
600 tp
->hw_status
->status
|= SD_STATUS_UPDATED
;
604 static void tg3_switch_clocks(struct tg3
*tp
)
606 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
609 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
612 orig_clock_ctrl
= clock_ctrl
;
613 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
614 CLOCK_CTRL_CLKRUN_OENABLE
|
616 tp
->pci_clock_ctrl
= clock_ctrl
;
618 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
619 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
620 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
621 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
623 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
624 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
626 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
628 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
629 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
632 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
635 #define PHY_BUSY_LOOPS 5000
637 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
643 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
645 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
651 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
652 MI_COM_PHY_ADDR_MASK
);
653 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
654 MI_COM_REG_ADDR_MASK
);
655 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
657 tw32_f(MAC_MI_COM
, frame_val
);
659 loops
= PHY_BUSY_LOOPS
;
662 frame_val
= tr32(MAC_MI_COM
);
664 if ((frame_val
& MI_COM_BUSY
) == 0) {
666 frame_val
= tr32(MAC_MI_COM
);
674 *val
= frame_val
& MI_COM_DATA_MASK
;
678 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
679 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
686 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
692 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
694 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
698 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
699 MI_COM_PHY_ADDR_MASK
);
700 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
701 MI_COM_REG_ADDR_MASK
);
702 frame_val
|= (val
& MI_COM_DATA_MASK
);
703 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
705 tw32_f(MAC_MI_COM
, frame_val
);
707 loops
= PHY_BUSY_LOOPS
;
710 frame_val
= tr32(MAC_MI_COM
);
711 if ((frame_val
& MI_COM_BUSY
) == 0) {
713 frame_val
= tr32(MAC_MI_COM
);
723 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
724 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
731 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
735 if (tp
->tg3_flags2
& TG3_FLG2_NO_ETH_WIRE_SPEED
)
738 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x7007) &&
739 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
))
740 tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
741 (val
| (1 << 15) | (1 << 4)));
744 static int tg3_bmcr_reset(struct tg3
*tp
)
749 /* OK, reset it, and poll the BMCR_RESET bit until it
750 * clears or we time out.
752 phy_control
= BMCR_RESET
;
753 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
759 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
763 if ((phy_control
& BMCR_RESET
) == 0) {
775 static int tg3_wait_macro_done(struct tg3
*tp
)
782 if (!tg3_readphy(tp
, 0x16, &tmp32
)) {
783 if ((tmp32
& 0x1000) == 0)
793 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
795 static const u32 test_pat
[4][6] = {
796 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
797 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
798 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
799 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
803 for (chan
= 0; chan
< 4; chan
++) {
806 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
807 (chan
* 0x2000) | 0x0200);
808 tg3_writephy(tp
, 0x16, 0x0002);
810 for (i
= 0; i
< 6; i
++)
811 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
814 tg3_writephy(tp
, 0x16, 0x0202);
815 if (tg3_wait_macro_done(tp
)) {
820 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
821 (chan
* 0x2000) | 0x0200);
822 tg3_writephy(tp
, 0x16, 0x0082);
823 if (tg3_wait_macro_done(tp
)) {
828 tg3_writephy(tp
, 0x16, 0x0802);
829 if (tg3_wait_macro_done(tp
)) {
834 for (i
= 0; i
< 6; i
+= 2) {
837 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
838 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
839 tg3_wait_macro_done(tp
)) {
845 if (low
!= test_pat
[chan
][i
] ||
846 high
!= test_pat
[chan
][i
+1]) {
847 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
848 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
849 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
859 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
863 for (chan
= 0; chan
< 4; chan
++) {
866 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
867 (chan
* 0x2000) | 0x0200);
868 tg3_writephy(tp
, 0x16, 0x0002);
869 for (i
= 0; i
< 6; i
++)
870 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
871 tg3_writephy(tp
, 0x16, 0x0202);
872 if (tg3_wait_macro_done(tp
))
879 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
881 u32 reg32
, phy9_orig
;
882 int retries
, do_phy_reset
, err
;
888 err
= tg3_bmcr_reset(tp
);
894 /* Disable transmitter and interrupt. */
895 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
899 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
901 /* Set full-duplex, 1000 mbps. */
902 tg3_writephy(tp
, MII_BMCR
,
903 BMCR_FULLDPLX
| TG3_BMCR_SPEED1000
);
905 /* Set to master mode. */
906 if (tg3_readphy(tp
, MII_TG3_CTRL
, &phy9_orig
))
909 tg3_writephy(tp
, MII_TG3_CTRL
,
910 (MII_TG3_CTRL_AS_MASTER
|
911 MII_TG3_CTRL_ENABLE_AS_MASTER
));
913 /* Enable SM_DSP_CLOCK and 6dB. */
914 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
916 /* Block the PHY control access. */
917 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
918 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0800);
920 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
925 err
= tg3_phy_reset_chanpat(tp
);
929 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
930 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0000);
932 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
933 tg3_writephy(tp
, 0x16, 0x0000);
935 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
936 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
937 /* Set Extended packet length bit for jumbo frames */
938 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4400);
941 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
944 tg3_writephy(tp
, MII_TG3_CTRL
, phy9_orig
);
946 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
948 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
955 /* This will reset the tigon3 PHY if there is no valid
956 * link unless the FORCE argument is non-zero.
958 static int tg3_phy_reset(struct tg3
*tp
)
963 err
= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
964 err
|= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
968 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
969 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
970 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
971 err
= tg3_phy_reset_5703_4_5(tp
);
977 err
= tg3_bmcr_reset(tp
);
982 if (tp
->tg3_flags2
& TG3_FLG2_PHY_ADC_BUG
) {
983 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
984 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
985 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x2aaa);
986 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
987 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0323);
988 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
990 if (tp
->tg3_flags2
& TG3_FLG2_PHY_5704_A0_BUG
) {
991 tg3_writephy(tp
, 0x1c, 0x8d68);
992 tg3_writephy(tp
, 0x1c, 0x8d68);
994 if (tp
->tg3_flags2
& TG3_FLG2_PHY_BER_BUG
) {
995 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
996 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
997 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x310b);
998 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
999 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x9506);
1000 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x401f);
1001 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x14e2);
1002 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
1004 /* Set Extended packet length bit (bit 14) on all chips that */
1005 /* support jumbo frames */
1006 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
1007 /* Cannot do read-modify-write on 5401 */
1008 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
1009 } else if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
1012 /* Set bit 14 with read-modify-write to preserve other bits */
1013 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0007) &&
1014 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &phy_reg
))
1015 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy_reg
| 0x4000);
1018 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1019 * jumbo frames transmission.
1021 if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
1024 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &phy_reg
))
1025 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1026 phy_reg
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
1029 tg3_phy_set_wirespeed(tp
);
1033 static void tg3_frob_aux_power(struct tg3
*tp
)
1035 struct tg3
*tp_peer
= tp
;
1037 if ((tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) != 0)
1040 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
1041 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
1042 struct net_device
*dev_peer
;
1044 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
1045 /* remove_one() may have been run on the peer. */
1049 tp_peer
= netdev_priv(dev_peer
);
1052 if ((tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1053 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0 ||
1054 (tp_peer
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1055 (tp_peer
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0) {
1056 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1057 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1058 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1059 (GRC_LCLCTRL_GPIO_OE0
|
1060 GRC_LCLCTRL_GPIO_OE1
|
1061 GRC_LCLCTRL_GPIO_OE2
|
1062 GRC_LCLCTRL_GPIO_OUTPUT0
|
1063 GRC_LCLCTRL_GPIO_OUTPUT1
),
1067 u32 grc_local_ctrl
= 0;
1069 if (tp_peer
!= tp
&&
1070 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1073 /* Workaround to prevent overdrawing Amps. */
1074 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
1076 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
1077 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1078 grc_local_ctrl
, 100);
1081 /* On 5753 and variants, GPIO2 cannot be used. */
1082 no_gpio2
= tp
->nic_sram_data_cfg
&
1083 NIC_SRAM_DATA_CFG_NO_GPIO2
;
1085 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
1086 GRC_LCLCTRL_GPIO_OE1
|
1087 GRC_LCLCTRL_GPIO_OE2
|
1088 GRC_LCLCTRL_GPIO_OUTPUT1
|
1089 GRC_LCLCTRL_GPIO_OUTPUT2
;
1091 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
1092 GRC_LCLCTRL_GPIO_OUTPUT2
);
1094 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1095 grc_local_ctrl
, 100);
1097 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
1099 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1100 grc_local_ctrl
, 100);
1103 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
1104 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1105 grc_local_ctrl
, 100);
1109 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
1110 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
1111 if (tp_peer
!= tp
&&
1112 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1115 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1116 (GRC_LCLCTRL_GPIO_OE1
|
1117 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1119 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1120 GRC_LCLCTRL_GPIO_OE1
, 100);
1122 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1123 (GRC_LCLCTRL_GPIO_OE1
|
1124 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1129 static int tg3_setup_phy(struct tg3
*, int);
1131 #define RESET_KIND_SHUTDOWN 0
1132 #define RESET_KIND_INIT 1
1133 #define RESET_KIND_SUSPEND 2
1135 static void tg3_write_sig_post_reset(struct tg3
*, int);
1136 static int tg3_halt_cpu(struct tg3
*, u32
);
1137 static int tg3_nvram_lock(struct tg3
*);
1138 static void tg3_nvram_unlock(struct tg3
*);
1140 static int tg3_set_power_state(struct tg3
*tp
, pci_power_t state
)
1143 u16 power_control
, power_caps
;
1144 int pm
= tp
->pm_cap
;
1146 /* Make sure register accesses (indirect or otherwise)
1147 * will function correctly.
1149 pci_write_config_dword(tp
->pdev
,
1150 TG3PCI_MISC_HOST_CTRL
,
1151 tp
->misc_host_ctrl
);
1153 pci_read_config_word(tp
->pdev
,
1156 power_control
|= PCI_PM_CTRL_PME_STATUS
;
1157 power_control
&= ~(PCI_PM_CTRL_STATE_MASK
);
1161 pci_write_config_word(tp
->pdev
,
1164 udelay(100); /* Delay after power state change */
1166 /* Switch out of Vaux if it is not a LOM */
1167 if (!(tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
))
1168 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
, 100);
1185 printk(KERN_WARNING PFX
"%s: Invalid power state (%d) "
1187 tp
->dev
->name
, state
);
1191 power_control
|= PCI_PM_CTRL_PME_ENABLE
;
1193 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
1194 tw32(TG3PCI_MISC_HOST_CTRL
,
1195 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
1197 if (tp
->link_config
.phy_is_low_power
== 0) {
1198 tp
->link_config
.phy_is_low_power
= 1;
1199 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
1200 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
1201 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
1204 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)) {
1205 tp
->link_config
.speed
= SPEED_10
;
1206 tp
->link_config
.duplex
= DUPLEX_HALF
;
1207 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
1208 tg3_setup_phy(tp
, 0);
1211 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1215 for (i
= 0; i
< 200; i
++) {
1216 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
1217 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1222 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
1223 WOL_DRV_STATE_SHUTDOWN
|
1224 WOL_DRV_WOL
| WOL_SET_MAGIC_PKT
);
1226 pci_read_config_word(tp
->pdev
, pm
+ PCI_PM_PMC
, &power_caps
);
1228 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) {
1231 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
1232 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x5a);
1235 mac_mode
= MAC_MODE_PORT_MODE_MII
;
1237 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
||
1238 !(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
))
1239 mac_mode
|= MAC_MODE_LINK_POLARITY
;
1241 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
1244 if (!(tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
1245 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
1247 if (((power_caps
& PCI_PM_CAP_PME_D3cold
) &&
1248 (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)))
1249 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
1251 tw32_f(MAC_MODE
, mac_mode
);
1254 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
1258 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
) &&
1259 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1260 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
1263 base_val
= tp
->pci_clock_ctrl
;
1264 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
1265 CLOCK_CTRL_TXCLK_DISABLE
);
1267 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
1268 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
1269 } else if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
1271 } else if (!((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
1272 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))) {
1273 u32 newbits1
, newbits2
;
1275 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1276 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1277 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
1278 CLOCK_CTRL_TXCLK_DISABLE
|
1280 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1281 } else if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
1282 newbits1
= CLOCK_CTRL_625_CORE
;
1283 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
1285 newbits1
= CLOCK_CTRL_ALTCLK
;
1286 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1289 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
1292 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
1295 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
1298 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1299 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1300 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
1301 CLOCK_CTRL_TXCLK_DISABLE
|
1302 CLOCK_CTRL_44MHZ_CORE
);
1304 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
1307 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1308 tp
->pci_clock_ctrl
| newbits3
, 40);
1312 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) &&
1313 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1314 /* Turn off the PHY */
1315 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
1316 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1317 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
1318 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x01b2);
1319 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
1320 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
1324 tg3_frob_aux_power(tp
);
1326 /* Workaround for unstable PLL clock */
1327 if ((GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
) ||
1328 (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
)) {
1329 u32 val
= tr32(0x7d00);
1331 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1333 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1336 err
= tg3_nvram_lock(tp
);
1337 tg3_halt_cpu(tp
, RX_CPU_BASE
);
1339 tg3_nvram_unlock(tp
);
1343 /* Finally, set the new power state. */
1344 pci_write_config_word(tp
->pdev
, pm
+ PCI_PM_CTRL
, power_control
);
1345 udelay(100); /* Delay after power state change */
1347 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
1352 static void tg3_link_report(struct tg3
*tp
)
1354 if (!netif_carrier_ok(tp
->dev
)) {
1355 printk(KERN_INFO PFX
"%s: Link is down.\n", tp
->dev
->name
);
1357 printk(KERN_INFO PFX
"%s: Link is up at %d Mbps, %s duplex.\n",
1359 (tp
->link_config
.active_speed
== SPEED_1000
?
1361 (tp
->link_config
.active_speed
== SPEED_100
?
1363 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1366 printk(KERN_INFO PFX
"%s: Flow control is %s for TX and "
1369 (tp
->tg3_flags
& TG3_FLAG_TX_PAUSE
) ? "on" : "off",
1370 (tp
->tg3_flags
& TG3_FLAG_RX_PAUSE
) ? "on" : "off");
1374 static void tg3_setup_flow_control(struct tg3
*tp
, u32 local_adv
, u32 remote_adv
)
1376 u32 new_tg3_flags
= 0;
1377 u32 old_rx_mode
= tp
->rx_mode
;
1378 u32 old_tx_mode
= tp
->tx_mode
;
1380 if (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) {
1382 /* Convert 1000BaseX flow control bits to 1000BaseT
1383 * bits before resolving flow control.
1385 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
1386 local_adv
&= ~(ADVERTISE_PAUSE_CAP
|
1387 ADVERTISE_PAUSE_ASYM
);
1388 remote_adv
&= ~(LPA_PAUSE_CAP
| LPA_PAUSE_ASYM
);
1390 if (local_adv
& ADVERTISE_1000XPAUSE
)
1391 local_adv
|= ADVERTISE_PAUSE_CAP
;
1392 if (local_adv
& ADVERTISE_1000XPSE_ASYM
)
1393 local_adv
|= ADVERTISE_PAUSE_ASYM
;
1394 if (remote_adv
& LPA_1000XPAUSE
)
1395 remote_adv
|= LPA_PAUSE_CAP
;
1396 if (remote_adv
& LPA_1000XPAUSE_ASYM
)
1397 remote_adv
|= LPA_PAUSE_ASYM
;
1400 if (local_adv
& ADVERTISE_PAUSE_CAP
) {
1401 if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
1402 if (remote_adv
& LPA_PAUSE_CAP
)
1404 (TG3_FLAG_RX_PAUSE
|
1406 else if (remote_adv
& LPA_PAUSE_ASYM
)
1408 (TG3_FLAG_RX_PAUSE
);
1410 if (remote_adv
& LPA_PAUSE_CAP
)
1412 (TG3_FLAG_RX_PAUSE
|
1415 } else if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
1416 if ((remote_adv
& LPA_PAUSE_CAP
) &&
1417 (remote_adv
& LPA_PAUSE_ASYM
))
1418 new_tg3_flags
|= TG3_FLAG_TX_PAUSE
;
1421 tp
->tg3_flags
&= ~(TG3_FLAG_RX_PAUSE
| TG3_FLAG_TX_PAUSE
);
1422 tp
->tg3_flags
|= new_tg3_flags
;
1424 new_tg3_flags
= tp
->tg3_flags
;
1427 if (new_tg3_flags
& TG3_FLAG_RX_PAUSE
)
1428 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1430 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1432 if (old_rx_mode
!= tp
->rx_mode
) {
1433 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1436 if (new_tg3_flags
& TG3_FLAG_TX_PAUSE
)
1437 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1439 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1441 if (old_tx_mode
!= tp
->tx_mode
) {
1442 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1446 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
1448 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
1449 case MII_TG3_AUX_STAT_10HALF
:
1451 *duplex
= DUPLEX_HALF
;
1454 case MII_TG3_AUX_STAT_10FULL
:
1456 *duplex
= DUPLEX_FULL
;
1459 case MII_TG3_AUX_STAT_100HALF
:
1461 *duplex
= DUPLEX_HALF
;
1464 case MII_TG3_AUX_STAT_100FULL
:
1466 *duplex
= DUPLEX_FULL
;
1469 case MII_TG3_AUX_STAT_1000HALF
:
1470 *speed
= SPEED_1000
;
1471 *duplex
= DUPLEX_HALF
;
1474 case MII_TG3_AUX_STAT_1000FULL
:
1475 *speed
= SPEED_1000
;
1476 *duplex
= DUPLEX_FULL
;
1480 *speed
= SPEED_INVALID
;
1481 *duplex
= DUPLEX_INVALID
;
1486 static void tg3_phy_copper_begin(struct tg3
*tp
)
1491 if (tp
->link_config
.phy_is_low_power
) {
1492 /* Entering low power mode. Disable gigabit and
1493 * 100baseT advertisements.
1495 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1497 new_adv
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1498 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
1499 if (tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
)
1500 new_adv
|= (ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1502 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1503 } else if (tp
->link_config
.speed
== SPEED_INVALID
) {
1504 tp
->link_config
.advertising
=
1505 (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
1506 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
1507 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
|
1508 ADVERTISED_Autoneg
| ADVERTISED_MII
);
1510 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
1511 tp
->link_config
.advertising
&=
1512 ~(ADVERTISED_1000baseT_Half
|
1513 ADVERTISED_1000baseT_Full
);
1515 new_adv
= (ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
1516 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Half
)
1517 new_adv
|= ADVERTISE_10HALF
;
1518 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Full
)
1519 new_adv
|= ADVERTISE_10FULL
;
1520 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Half
)
1521 new_adv
|= ADVERTISE_100HALF
;
1522 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Full
)
1523 new_adv
|= ADVERTISE_100FULL
;
1524 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1526 if (tp
->link_config
.advertising
&
1527 (ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
)) {
1529 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
1530 new_adv
|= MII_TG3_CTRL_ADV_1000_HALF
;
1531 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
1532 new_adv
|= MII_TG3_CTRL_ADV_1000_FULL
;
1533 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
) &&
1534 (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1535 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
))
1536 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1537 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1538 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1540 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1543 /* Asking for a specific link mode. */
1544 if (tp
->link_config
.speed
== SPEED_1000
) {
1545 new_adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1546 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1548 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1549 new_adv
= MII_TG3_CTRL_ADV_1000_FULL
;
1551 new_adv
= MII_TG3_CTRL_ADV_1000_HALF
;
1552 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1553 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
1554 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1555 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1556 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1558 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1560 new_adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1561 if (tp
->link_config
.speed
== SPEED_100
) {
1562 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1563 new_adv
|= ADVERTISE_100FULL
;
1565 new_adv
|= ADVERTISE_100HALF
;
1567 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1568 new_adv
|= ADVERTISE_10FULL
;
1570 new_adv
|= ADVERTISE_10HALF
;
1572 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1576 if (tp
->link_config
.autoneg
== AUTONEG_DISABLE
&&
1577 tp
->link_config
.speed
!= SPEED_INVALID
) {
1578 u32 bmcr
, orig_bmcr
;
1580 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
1581 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
1584 switch (tp
->link_config
.speed
) {
1590 bmcr
|= BMCR_SPEED100
;
1594 bmcr
|= TG3_BMCR_SPEED1000
;
1598 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1599 bmcr
|= BMCR_FULLDPLX
;
1601 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
1602 (bmcr
!= orig_bmcr
)) {
1603 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
1604 for (i
= 0; i
< 1500; i
++) {
1608 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
1609 tg3_readphy(tp
, MII_BMSR
, &tmp
))
1611 if (!(tmp
& BMSR_LSTATUS
)) {
1616 tg3_writephy(tp
, MII_BMCR
, bmcr
);
1620 tg3_writephy(tp
, MII_BMCR
,
1621 BMCR_ANENABLE
| BMCR_ANRESTART
);
1625 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
1629 /* Turn off tap power management. */
1630 /* Set Extended packet length bit */
1631 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
1633 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0012);
1634 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1804);
1636 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0013);
1637 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1204);
1639 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
1640 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0132);
1642 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
1643 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0232);
1645 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
1646 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0a20);
1653 static int tg3_copper_is_advertising_all(struct tg3
*tp
)
1655 u32 adv_reg
, all_mask
;
1657 if (tg3_readphy(tp
, MII_ADVERTISE
, &adv_reg
))
1660 all_mask
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1661 ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1662 if ((adv_reg
& all_mask
) != all_mask
)
1664 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
1667 if (tg3_readphy(tp
, MII_TG3_CTRL
, &tg3_ctrl
))
1670 all_mask
= (MII_TG3_CTRL_ADV_1000_HALF
|
1671 MII_TG3_CTRL_ADV_1000_FULL
);
1672 if ((tg3_ctrl
& all_mask
) != all_mask
)
1678 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
1680 int current_link_up
;
1689 (MAC_STATUS_SYNC_CHANGED
|
1690 MAC_STATUS_CFG_CHANGED
|
1691 MAC_STATUS_MI_COMPLETION
|
1692 MAC_STATUS_LNKSTATE_CHANGED
));
1695 tp
->mi_mode
= MAC_MI_MODE_BASE
;
1696 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1699 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x02);
1701 /* Some third-party PHYs need to be reset on link going
1704 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
1705 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
1706 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
1707 netif_carrier_ok(tp
->dev
)) {
1708 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1709 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1710 !(bmsr
& BMSR_LSTATUS
))
1716 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
1717 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1718 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
1719 !(tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
))
1722 if (!(bmsr
& BMSR_LSTATUS
)) {
1723 err
= tg3_init_5401phy_dsp(tp
);
1727 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1728 for (i
= 0; i
< 1000; i
++) {
1730 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1731 (bmsr
& BMSR_LSTATUS
)) {
1737 if ((tp
->phy_id
& PHY_ID_REV_MASK
) == PHY_REV_BCM5401_B0
&&
1738 !(bmsr
& BMSR_LSTATUS
) &&
1739 tp
->link_config
.active_speed
== SPEED_1000
) {
1740 err
= tg3_phy_reset(tp
);
1742 err
= tg3_init_5401phy_dsp(tp
);
1747 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1748 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
) {
1749 /* 5701 {A0,B0} CRC bug workaround */
1750 tg3_writephy(tp
, 0x15, 0x0a75);
1751 tg3_writephy(tp
, 0x1c, 0x8c68);
1752 tg3_writephy(tp
, 0x1c, 0x8d68);
1753 tg3_writephy(tp
, 0x1c, 0x8c68);
1756 /* Clear pending interrupts... */
1757 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
1758 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
1760 if (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
)
1761 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
1763 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
1765 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1766 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1767 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
1768 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1769 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
1771 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
1774 current_link_up
= 0;
1775 current_speed
= SPEED_INVALID
;
1776 current_duplex
= DUPLEX_INVALID
;
1778 if (tp
->tg3_flags2
& TG3_FLG2_CAPACITIVE_COUPLING
) {
1781 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4007);
1782 tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
);
1783 if (!(val
& (1 << 10))) {
1785 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
1791 for (i
= 0; i
< 100; i
++) {
1792 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1793 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1794 (bmsr
& BMSR_LSTATUS
))
1799 if (bmsr
& BMSR_LSTATUS
) {
1802 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
1803 for (i
= 0; i
< 2000; i
++) {
1805 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
1810 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
1815 for (i
= 0; i
< 200; i
++) {
1816 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
1817 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
1819 if (bmcr
&& bmcr
!= 0x7fff)
1824 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
1825 if (bmcr
& BMCR_ANENABLE
) {
1826 current_link_up
= 1;
1828 /* Force autoneg restart if we are exiting
1831 if (!tg3_copper_is_advertising_all(tp
))
1832 current_link_up
= 0;
1834 current_link_up
= 0;
1837 if (!(bmcr
& BMCR_ANENABLE
) &&
1838 tp
->link_config
.speed
== current_speed
&&
1839 tp
->link_config
.duplex
== current_duplex
) {
1840 current_link_up
= 1;
1842 current_link_up
= 0;
1846 tp
->link_config
.active_speed
= current_speed
;
1847 tp
->link_config
.active_duplex
= current_duplex
;
1850 if (current_link_up
== 1 &&
1851 (tp
->link_config
.active_duplex
== DUPLEX_FULL
) &&
1852 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
1853 u32 local_adv
, remote_adv
;
1855 if (tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
))
1857 local_adv
&= (ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
);
1859 if (tg3_readphy(tp
, MII_LPA
, &remote_adv
))
1862 remote_adv
&= (LPA_PAUSE_CAP
| LPA_PAUSE_ASYM
);
1864 /* If we are not advertising full pause capability,
1865 * something is wrong. Bring the link down and reconfigure.
1867 if (local_adv
!= ADVERTISE_PAUSE_CAP
) {
1868 current_link_up
= 0;
1870 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
1874 if (current_link_up
== 0 || tp
->link_config
.phy_is_low_power
) {
1877 tg3_phy_copper_begin(tp
);
1879 tg3_readphy(tp
, MII_BMSR
, &tmp
);
1880 if (!tg3_readphy(tp
, MII_BMSR
, &tmp
) &&
1881 (tmp
& BMSR_LSTATUS
))
1882 current_link_up
= 1;
1885 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
1886 if (current_link_up
== 1) {
1887 if (tp
->link_config
.active_speed
== SPEED_100
||
1888 tp
->link_config
.active_speed
== SPEED_10
)
1889 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1891 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1893 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1895 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
1896 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
1897 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1899 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
1900 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
1901 if ((tp
->led_ctrl
== LED_CTRL_MODE_PHY_2
) ||
1902 (current_link_up
== 1 &&
1903 tp
->link_config
.active_speed
== SPEED_10
))
1904 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
1906 if (current_link_up
== 1)
1907 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
1910 /* ??? Without this setting Netgear GA302T PHY does not
1911 * ??? send/receive packets...
1913 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5411
&&
1914 tp
->pci_chip_rev_id
== CHIPREV_ID_5700_ALTIMA
) {
1915 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
1916 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1920 tw32_f(MAC_MODE
, tp
->mac_mode
);
1923 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
1924 /* Polled via timer. */
1925 tw32_f(MAC_EVENT
, 0);
1927 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
1931 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
&&
1932 current_link_up
== 1 &&
1933 tp
->link_config
.active_speed
== SPEED_1000
&&
1934 ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) ||
1935 (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
))) {
1938 (MAC_STATUS_SYNC_CHANGED
|
1939 MAC_STATUS_CFG_CHANGED
));
1942 NIC_SRAM_FIRMWARE_MBOX
,
1943 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
1946 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
1947 if (current_link_up
)
1948 netif_carrier_on(tp
->dev
);
1950 netif_carrier_off(tp
->dev
);
1951 tg3_link_report(tp
);
1957 struct tg3_fiber_aneginfo
{
1959 #define ANEG_STATE_UNKNOWN 0
1960 #define ANEG_STATE_AN_ENABLE 1
1961 #define ANEG_STATE_RESTART_INIT 2
1962 #define ANEG_STATE_RESTART 3
1963 #define ANEG_STATE_DISABLE_LINK_OK 4
1964 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1965 #define ANEG_STATE_ABILITY_DETECT 6
1966 #define ANEG_STATE_ACK_DETECT_INIT 7
1967 #define ANEG_STATE_ACK_DETECT 8
1968 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1969 #define ANEG_STATE_COMPLETE_ACK 10
1970 #define ANEG_STATE_IDLE_DETECT_INIT 11
1971 #define ANEG_STATE_IDLE_DETECT 12
1972 #define ANEG_STATE_LINK_OK 13
1973 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1974 #define ANEG_STATE_NEXT_PAGE_WAIT 15
1977 #define MR_AN_ENABLE 0x00000001
1978 #define MR_RESTART_AN 0x00000002
1979 #define MR_AN_COMPLETE 0x00000004
1980 #define MR_PAGE_RX 0x00000008
1981 #define MR_NP_LOADED 0x00000010
1982 #define MR_TOGGLE_TX 0x00000020
1983 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1984 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1985 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1986 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1987 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1988 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1989 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1990 #define MR_TOGGLE_RX 0x00002000
1991 #define MR_NP_RX 0x00004000
1993 #define MR_LINK_OK 0x80000000
1995 unsigned long link_time
, cur_time
;
1997 u32 ability_match_cfg
;
1998 int ability_match_count
;
2000 char ability_match
, idle_match
, ack_match
;
2002 u32 txconfig
, rxconfig
;
2003 #define ANEG_CFG_NP 0x00000080
2004 #define ANEG_CFG_ACK 0x00000040
2005 #define ANEG_CFG_RF2 0x00000020
2006 #define ANEG_CFG_RF1 0x00000010
2007 #define ANEG_CFG_PS2 0x00000001
2008 #define ANEG_CFG_PS1 0x00008000
2009 #define ANEG_CFG_HD 0x00004000
2010 #define ANEG_CFG_FD 0x00002000
2011 #define ANEG_CFG_INVAL 0x00001f06
2016 #define ANEG_TIMER_ENAB 2
2017 #define ANEG_FAILED -1
2019 #define ANEG_STATE_SETTLE_TIME 10000
2021 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
2022 struct tg3_fiber_aneginfo
*ap
)
2024 unsigned long delta
;
2028 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
2032 ap
->ability_match_cfg
= 0;
2033 ap
->ability_match_count
= 0;
2034 ap
->ability_match
= 0;
2040 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
2041 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
2043 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
2044 ap
->ability_match_cfg
= rx_cfg_reg
;
2045 ap
->ability_match
= 0;
2046 ap
->ability_match_count
= 0;
2048 if (++ap
->ability_match_count
> 1) {
2049 ap
->ability_match
= 1;
2050 ap
->ability_match_cfg
= rx_cfg_reg
;
2053 if (rx_cfg_reg
& ANEG_CFG_ACK
)
2061 ap
->ability_match_cfg
= 0;
2062 ap
->ability_match_count
= 0;
2063 ap
->ability_match
= 0;
2069 ap
->rxconfig
= rx_cfg_reg
;
2073 case ANEG_STATE_UNKNOWN
:
2074 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
2075 ap
->state
= ANEG_STATE_AN_ENABLE
;
2078 case ANEG_STATE_AN_ENABLE
:
2079 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
2080 if (ap
->flags
& MR_AN_ENABLE
) {
2083 ap
->ability_match_cfg
= 0;
2084 ap
->ability_match_count
= 0;
2085 ap
->ability_match
= 0;
2089 ap
->state
= ANEG_STATE_RESTART_INIT
;
2091 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
2095 case ANEG_STATE_RESTART_INIT
:
2096 ap
->link_time
= ap
->cur_time
;
2097 ap
->flags
&= ~(MR_NP_LOADED
);
2099 tw32(MAC_TX_AUTO_NEG
, 0);
2100 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2101 tw32_f(MAC_MODE
, tp
->mac_mode
);
2104 ret
= ANEG_TIMER_ENAB
;
2105 ap
->state
= ANEG_STATE_RESTART
;
2108 case ANEG_STATE_RESTART
:
2109 delta
= ap
->cur_time
- ap
->link_time
;
2110 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2111 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
2113 ret
= ANEG_TIMER_ENAB
;
2117 case ANEG_STATE_DISABLE_LINK_OK
:
2121 case ANEG_STATE_ABILITY_DETECT_INIT
:
2122 ap
->flags
&= ~(MR_TOGGLE_TX
);
2123 ap
->txconfig
= (ANEG_CFG_FD
| ANEG_CFG_PS1
);
2124 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2125 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2126 tw32_f(MAC_MODE
, tp
->mac_mode
);
2129 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
2132 case ANEG_STATE_ABILITY_DETECT
:
2133 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0) {
2134 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
2138 case ANEG_STATE_ACK_DETECT_INIT
:
2139 ap
->txconfig
|= ANEG_CFG_ACK
;
2140 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2141 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2142 tw32_f(MAC_MODE
, tp
->mac_mode
);
2145 ap
->state
= ANEG_STATE_ACK_DETECT
;
2148 case ANEG_STATE_ACK_DETECT
:
2149 if (ap
->ack_match
!= 0) {
2150 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
2151 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
2152 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
2154 ap
->state
= ANEG_STATE_AN_ENABLE
;
2156 } else if (ap
->ability_match
!= 0 &&
2157 ap
->rxconfig
== 0) {
2158 ap
->state
= ANEG_STATE_AN_ENABLE
;
2162 case ANEG_STATE_COMPLETE_ACK_INIT
:
2163 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
2167 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
2168 MR_LP_ADV_HALF_DUPLEX
|
2169 MR_LP_ADV_SYM_PAUSE
|
2170 MR_LP_ADV_ASYM_PAUSE
|
2171 MR_LP_ADV_REMOTE_FAULT1
|
2172 MR_LP_ADV_REMOTE_FAULT2
|
2173 MR_LP_ADV_NEXT_PAGE
|
2176 if (ap
->rxconfig
& ANEG_CFG_FD
)
2177 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
2178 if (ap
->rxconfig
& ANEG_CFG_HD
)
2179 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
2180 if (ap
->rxconfig
& ANEG_CFG_PS1
)
2181 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
2182 if (ap
->rxconfig
& ANEG_CFG_PS2
)
2183 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
2184 if (ap
->rxconfig
& ANEG_CFG_RF1
)
2185 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
2186 if (ap
->rxconfig
& ANEG_CFG_RF2
)
2187 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
2188 if (ap
->rxconfig
& ANEG_CFG_NP
)
2189 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
2191 ap
->link_time
= ap
->cur_time
;
2193 ap
->flags
^= (MR_TOGGLE_TX
);
2194 if (ap
->rxconfig
& 0x0008)
2195 ap
->flags
|= MR_TOGGLE_RX
;
2196 if (ap
->rxconfig
& ANEG_CFG_NP
)
2197 ap
->flags
|= MR_NP_RX
;
2198 ap
->flags
|= MR_PAGE_RX
;
2200 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
2201 ret
= ANEG_TIMER_ENAB
;
2204 case ANEG_STATE_COMPLETE_ACK
:
2205 if (ap
->ability_match
!= 0 &&
2206 ap
->rxconfig
== 0) {
2207 ap
->state
= ANEG_STATE_AN_ENABLE
;
2210 delta
= ap
->cur_time
- ap
->link_time
;
2211 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2212 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
2213 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2215 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
2216 !(ap
->flags
& MR_NP_RX
)) {
2217 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2225 case ANEG_STATE_IDLE_DETECT_INIT
:
2226 ap
->link_time
= ap
->cur_time
;
2227 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2228 tw32_f(MAC_MODE
, tp
->mac_mode
);
2231 ap
->state
= ANEG_STATE_IDLE_DETECT
;
2232 ret
= ANEG_TIMER_ENAB
;
2235 case ANEG_STATE_IDLE_DETECT
:
2236 if (ap
->ability_match
!= 0 &&
2237 ap
->rxconfig
== 0) {
2238 ap
->state
= ANEG_STATE_AN_ENABLE
;
2241 delta
= ap
->cur_time
- ap
->link_time
;
2242 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2243 /* XXX another gem from the Broadcom driver :( */
2244 ap
->state
= ANEG_STATE_LINK_OK
;
2248 case ANEG_STATE_LINK_OK
:
2249 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
2253 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
2254 /* ??? unimplemented */
2257 case ANEG_STATE_NEXT_PAGE_WAIT
:
2258 /* ??? unimplemented */
2269 static int fiber_autoneg(struct tg3
*tp
, u32
*flags
)
2272 struct tg3_fiber_aneginfo aninfo
;
2273 int status
= ANEG_FAILED
;
2277 tw32_f(MAC_TX_AUTO_NEG
, 0);
2279 tmp
= tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
;
2280 tw32_f(MAC_MODE
, tmp
| MAC_MODE_PORT_MODE_GMII
);
2283 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
);
2286 memset(&aninfo
, 0, sizeof(aninfo
));
2287 aninfo
.flags
|= MR_AN_ENABLE
;
2288 aninfo
.state
= ANEG_STATE_UNKNOWN
;
2289 aninfo
.cur_time
= 0;
2291 while (++tick
< 195000) {
2292 status
= tg3_fiber_aneg_smachine(tp
, &aninfo
);
2293 if (status
== ANEG_DONE
|| status
== ANEG_FAILED
)
2299 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2300 tw32_f(MAC_MODE
, tp
->mac_mode
);
2303 *flags
= aninfo
.flags
;
2305 if (status
== ANEG_DONE
&&
2306 (aninfo
.flags
& (MR_AN_COMPLETE
| MR_LINK_OK
|
2307 MR_LP_ADV_FULL_DUPLEX
)))
2313 static void tg3_init_bcm8002(struct tg3
*tp
)
2315 u32 mac_status
= tr32(MAC_STATUS
);
2318 /* Reset when initting first time or we have a link. */
2319 if ((tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) &&
2320 !(mac_status
& MAC_STATUS_PCS_SYNCED
))
2323 /* Set PLL lock range. */
2324 tg3_writephy(tp
, 0x16, 0x8007);
2327 tg3_writephy(tp
, MII_BMCR
, BMCR_RESET
);
2329 /* Wait for reset to complete. */
2330 /* XXX schedule_timeout() ... */
2331 for (i
= 0; i
< 500; i
++)
2334 /* Config mode; select PMA/Ch 1 regs. */
2335 tg3_writephy(tp
, 0x10, 0x8411);
2337 /* Enable auto-lock and comdet, select txclk for tx. */
2338 tg3_writephy(tp
, 0x11, 0x0a10);
2340 tg3_writephy(tp
, 0x18, 0x00a0);
2341 tg3_writephy(tp
, 0x16, 0x41ff);
2343 /* Assert and deassert POR. */
2344 tg3_writephy(tp
, 0x13, 0x0400);
2346 tg3_writephy(tp
, 0x13, 0x0000);
2348 tg3_writephy(tp
, 0x11, 0x0a50);
2350 tg3_writephy(tp
, 0x11, 0x0a10);
2352 /* Wait for signal to stabilize */
2353 /* XXX schedule_timeout() ... */
2354 for (i
= 0; i
< 15000; i
++)
2357 /* Deselect the channel register so we can read the PHYID
2360 tg3_writephy(tp
, 0x10, 0x8011);
2363 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
2365 u32 sg_dig_ctrl
, sg_dig_status
;
2366 u32 serdes_cfg
, expected_sg_dig_ctrl
;
2367 int workaround
, port_a
;
2368 int current_link_up
;
2371 expected_sg_dig_ctrl
= 0;
2374 current_link_up
= 0;
2376 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
2377 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
2379 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
2382 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2383 /* preserve bits 20-23 for voltage regulator */
2384 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
2387 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2389 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
2390 if (sg_dig_ctrl
& (1 << 31)) {
2392 u32 val
= serdes_cfg
;
2398 tw32_f(MAC_SERDES_CFG
, val
);
2400 tw32_f(SG_DIG_CTRL
, 0x01388400);
2402 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
2403 tg3_setup_flow_control(tp
, 0, 0);
2404 current_link_up
= 1;
2409 /* Want auto-negotiation. */
2410 expected_sg_dig_ctrl
= 0x81388400;
2412 /* Pause capability */
2413 expected_sg_dig_ctrl
|= (1 << 11);
2415 /* Asymettric pause */
2416 expected_sg_dig_ctrl
|= (1 << 12);
2418 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
2420 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
2421 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| (1 << 30));
2423 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
2425 tp
->tg3_flags2
|= TG3_FLG2_PHY_JUST_INITTED
;
2426 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
2427 MAC_STATUS_SIGNAL_DET
)) {
2430 /* Giver time to negotiate (~200ms) */
2431 for (i
= 0; i
< 40000; i
++) {
2432 sg_dig_status
= tr32(SG_DIG_STATUS
);
2433 if (sg_dig_status
& (0x3))
2437 mac_status
= tr32(MAC_STATUS
);
2439 if ((sg_dig_status
& (1 << 1)) &&
2440 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
2441 u32 local_adv
, remote_adv
;
2443 local_adv
= ADVERTISE_PAUSE_CAP
;
2445 if (sg_dig_status
& (1 << 19))
2446 remote_adv
|= LPA_PAUSE_CAP
;
2447 if (sg_dig_status
& (1 << 20))
2448 remote_adv
|= LPA_PAUSE_ASYM
;
2450 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2451 current_link_up
= 1;
2452 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_JUST_INITTED
;
2453 } else if (!(sg_dig_status
& (1 << 1))) {
2454 if (tp
->tg3_flags2
& TG3_FLG2_PHY_JUST_INITTED
)
2455 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_JUST_INITTED
;
2458 u32 val
= serdes_cfg
;
2465 tw32_f(MAC_SERDES_CFG
, val
);
2468 tw32_f(SG_DIG_CTRL
, 0x01388400);
2471 /* Link parallel detection - link is up */
2472 /* only if we have PCS_SYNC and not */
2473 /* receiving config code words */
2474 mac_status
= tr32(MAC_STATUS
);
2475 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2476 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
2477 tg3_setup_flow_control(tp
, 0, 0);
2478 current_link_up
= 1;
2485 return current_link_up
;
2488 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
2490 int current_link_up
= 0;
2492 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
)) {
2493 tp
->tg3_flags
&= ~TG3_FLAG_GOT_SERDES_FLOWCTL
;
2497 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2501 if (fiber_autoneg(tp
, &flags
)) {
2502 u32 local_adv
, remote_adv
;
2504 local_adv
= ADVERTISE_PAUSE_CAP
;
2506 if (flags
& MR_LP_ADV_SYM_PAUSE
)
2507 remote_adv
|= LPA_PAUSE_CAP
;
2508 if (flags
& MR_LP_ADV_ASYM_PAUSE
)
2509 remote_adv
|= LPA_PAUSE_ASYM
;
2511 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2513 tp
->tg3_flags
|= TG3_FLAG_GOT_SERDES_FLOWCTL
;
2514 current_link_up
= 1;
2516 for (i
= 0; i
< 30; i
++) {
2519 (MAC_STATUS_SYNC_CHANGED
|
2520 MAC_STATUS_CFG_CHANGED
));
2522 if ((tr32(MAC_STATUS
) &
2523 (MAC_STATUS_SYNC_CHANGED
|
2524 MAC_STATUS_CFG_CHANGED
)) == 0)
2528 mac_status
= tr32(MAC_STATUS
);
2529 if (current_link_up
== 0 &&
2530 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2531 !(mac_status
& MAC_STATUS_RCVD_CFG
))
2532 current_link_up
= 1;
2534 /* Forcing 1000FD link up. */
2535 current_link_up
= 1;
2536 tp
->tg3_flags
|= TG3_FLAG_GOT_SERDES_FLOWCTL
;
2538 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
2543 return current_link_up
;
2546 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
2549 u16 orig_active_speed
;
2550 u8 orig_active_duplex
;
2552 int current_link_up
;
2556 (tp
->tg3_flags
& (TG3_FLAG_RX_PAUSE
|
2557 TG3_FLAG_TX_PAUSE
));
2558 orig_active_speed
= tp
->link_config
.active_speed
;
2559 orig_active_duplex
= tp
->link_config
.active_duplex
;
2561 if (!(tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
) &&
2562 netif_carrier_ok(tp
->dev
) &&
2563 (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
)) {
2564 mac_status
= tr32(MAC_STATUS
);
2565 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
2566 MAC_STATUS_SIGNAL_DET
|
2567 MAC_STATUS_CFG_CHANGED
|
2568 MAC_STATUS_RCVD_CFG
);
2569 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
2570 MAC_STATUS_SIGNAL_DET
)) {
2571 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
2572 MAC_STATUS_CFG_CHANGED
));
2577 tw32_f(MAC_TX_AUTO_NEG
, 0);
2579 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
2580 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
2581 tw32_f(MAC_MODE
, tp
->mac_mode
);
2584 if (tp
->phy_id
== PHY_ID_BCM8002
)
2585 tg3_init_bcm8002(tp
);
2587 /* Enable link change event even when serdes polling. */
2588 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2591 current_link_up
= 0;
2592 mac_status
= tr32(MAC_STATUS
);
2594 if (tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
)
2595 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
2597 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
2599 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
2600 tw32_f(MAC_MODE
, tp
->mac_mode
);
2603 tp
->hw_status
->status
=
2604 (SD_STATUS_UPDATED
|
2605 (tp
->hw_status
->status
& ~SD_STATUS_LINK_CHG
));
2607 for (i
= 0; i
< 100; i
++) {
2608 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
2609 MAC_STATUS_CFG_CHANGED
));
2611 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
2612 MAC_STATUS_CFG_CHANGED
)) == 0)
2616 mac_status
= tr32(MAC_STATUS
);
2617 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
2618 current_link_up
= 0;
2619 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2620 tw32_f(MAC_MODE
, (tp
->mac_mode
|
2621 MAC_MODE_SEND_CONFIGS
));
2623 tw32_f(MAC_MODE
, tp
->mac_mode
);
2627 if (current_link_up
== 1) {
2628 tp
->link_config
.active_speed
= SPEED_1000
;
2629 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
2630 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
2631 LED_CTRL_LNKLED_OVERRIDE
|
2632 LED_CTRL_1000MBPS_ON
));
2634 tp
->link_config
.active_speed
= SPEED_INVALID
;
2635 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
2636 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
2637 LED_CTRL_LNKLED_OVERRIDE
|
2638 LED_CTRL_TRAFFIC_OVERRIDE
));
2641 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
2642 if (current_link_up
)
2643 netif_carrier_on(tp
->dev
);
2645 netif_carrier_off(tp
->dev
);
2646 tg3_link_report(tp
);
2649 tp
->tg3_flags
& (TG3_FLAG_RX_PAUSE
|
2651 if (orig_pause_cfg
!= now_pause_cfg
||
2652 orig_active_speed
!= tp
->link_config
.active_speed
||
2653 orig_active_duplex
!= tp
->link_config
.active_duplex
)
2654 tg3_link_report(tp
);
2660 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
2662 int current_link_up
, err
= 0;
2667 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2668 tw32_f(MAC_MODE
, tp
->mac_mode
);
2674 (MAC_STATUS_SYNC_CHANGED
|
2675 MAC_STATUS_CFG_CHANGED
|
2676 MAC_STATUS_MI_COMPLETION
|
2677 MAC_STATUS_LNKSTATE_CHANGED
));
2683 current_link_up
= 0;
2684 current_speed
= SPEED_INVALID
;
2685 current_duplex
= DUPLEX_INVALID
;
2687 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2688 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2689 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
2690 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
2691 bmsr
|= BMSR_LSTATUS
;
2693 bmsr
&= ~BMSR_LSTATUS
;
2696 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2698 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
2699 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
)) {
2700 /* do nothing, just check for link up at the end */
2701 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2704 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
2705 new_adv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
2706 ADVERTISE_1000XPAUSE
|
2707 ADVERTISE_1000XPSE_ASYM
|
2710 /* Always advertise symmetric PAUSE just like copper */
2711 new_adv
|= ADVERTISE_1000XPAUSE
;
2713 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
2714 new_adv
|= ADVERTISE_1000XHALF
;
2715 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
2716 new_adv
|= ADVERTISE_1000XFULL
;
2718 if ((new_adv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
2719 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2720 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
2721 tg3_writephy(tp
, MII_BMCR
, bmcr
);
2723 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2724 tp
->tg3_flags2
|= TG3_FLG2_PHY_JUST_INITTED
;
2725 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2732 bmcr
&= ~BMCR_SPEED1000
;
2733 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
2735 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
2736 new_bmcr
|= BMCR_FULLDPLX
;
2738 if (new_bmcr
!= bmcr
) {
2739 /* BMCR_SPEED1000 is a reserved bit that needs
2740 * to be set on write.
2742 new_bmcr
|= BMCR_SPEED1000
;
2744 /* Force a linkdown */
2745 if (netif_carrier_ok(tp
->dev
)) {
2748 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
2749 adv
&= ~(ADVERTISE_1000XFULL
|
2750 ADVERTISE_1000XHALF
|
2752 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
2753 tg3_writephy(tp
, MII_BMCR
, bmcr
|
2757 netif_carrier_off(tp
->dev
);
2759 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
2761 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2762 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2763 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
2765 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
2766 bmsr
|= BMSR_LSTATUS
;
2768 bmsr
&= ~BMSR_LSTATUS
;
2770 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2774 if (bmsr
& BMSR_LSTATUS
) {
2775 current_speed
= SPEED_1000
;
2776 current_link_up
= 1;
2777 if (bmcr
& BMCR_FULLDPLX
)
2778 current_duplex
= DUPLEX_FULL
;
2780 current_duplex
= DUPLEX_HALF
;
2782 if (bmcr
& BMCR_ANENABLE
) {
2783 u32 local_adv
, remote_adv
, common
;
2785 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
2786 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
2787 common
= local_adv
& remote_adv
;
2788 if (common
& (ADVERTISE_1000XHALF
|
2789 ADVERTISE_1000XFULL
)) {
2790 if (common
& ADVERTISE_1000XFULL
)
2791 current_duplex
= DUPLEX_FULL
;
2793 current_duplex
= DUPLEX_HALF
;
2795 tg3_setup_flow_control(tp
, local_adv
,
2799 current_link_up
= 0;
2803 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
2804 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
2805 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
2807 tw32_f(MAC_MODE
, tp
->mac_mode
);
2810 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2812 tp
->link_config
.active_speed
= current_speed
;
2813 tp
->link_config
.active_duplex
= current_duplex
;
2815 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
2816 if (current_link_up
)
2817 netif_carrier_on(tp
->dev
);
2819 netif_carrier_off(tp
->dev
);
2820 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2822 tg3_link_report(tp
);
2827 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
2829 if (tp
->tg3_flags2
& TG3_FLG2_PHY_JUST_INITTED
) {
2830 /* Give autoneg time to complete. */
2831 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_JUST_INITTED
;
2834 if (!netif_carrier_ok(tp
->dev
) &&
2835 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
2838 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2839 if (bmcr
& BMCR_ANENABLE
) {
2842 /* Select shadow register 0x1f */
2843 tg3_writephy(tp
, 0x1c, 0x7c00);
2844 tg3_readphy(tp
, 0x1c, &phy1
);
2846 /* Select expansion interrupt status register */
2847 tg3_writephy(tp
, 0x17, 0x0f01);
2848 tg3_readphy(tp
, 0x15, &phy2
);
2849 tg3_readphy(tp
, 0x15, &phy2
);
2851 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
2852 /* We have signal detect and not receiving
2853 * config code words, link is up by parallel
2857 bmcr
&= ~BMCR_ANENABLE
;
2858 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
2859 tg3_writephy(tp
, MII_BMCR
, bmcr
);
2860 tp
->tg3_flags2
|= TG3_FLG2_PARALLEL_DETECT
;
2864 else if (netif_carrier_ok(tp
->dev
) &&
2865 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
2866 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
)) {
2869 /* Select expansion interrupt status register */
2870 tg3_writephy(tp
, 0x17, 0x0f01);
2871 tg3_readphy(tp
, 0x15, &phy2
);
2875 /* Config code words received, turn on autoneg. */
2876 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2877 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
2879 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2885 static int tg3_setup_phy(struct tg3
*tp
, int force_reset
)
2889 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
2890 err
= tg3_setup_fiber_phy(tp
, force_reset
);
2891 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
2892 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
2894 err
= tg3_setup_copper_phy(tp
, force_reset
);
2897 if (tp
->link_config
.active_speed
== SPEED_1000
&&
2898 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
2899 tw32(MAC_TX_LENGTHS
,
2900 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2901 (6 << TX_LENGTHS_IPG_SHIFT
) |
2902 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2904 tw32(MAC_TX_LENGTHS
,
2905 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2906 (6 << TX_LENGTHS_IPG_SHIFT
) |
2907 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2909 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
2910 if (netif_carrier_ok(tp
->dev
)) {
2911 tw32(HOSTCC_STAT_COAL_TICKS
,
2912 tp
->coal
.stats_block_coalesce_usecs
);
2914 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
2921 /* Tigon3 never reports partial packet sends. So we do not
2922 * need special logic to handle SKBs that have not had all
2923 * of their frags sent yet, like SunGEM does.
2925 static void tg3_tx(struct tg3
*tp
)
2927 u32 hw_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
2928 u32 sw_idx
= tp
->tx_cons
;
2930 while (sw_idx
!= hw_idx
) {
2931 struct tx_ring_info
*ri
= &tp
->tx_buffers
[sw_idx
];
2932 struct sk_buff
*skb
= ri
->skb
;
2935 if (unlikely(skb
== NULL
))
2938 pci_unmap_single(tp
->pdev
,
2939 pci_unmap_addr(ri
, mapping
),
2945 sw_idx
= NEXT_TX(sw_idx
);
2947 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2948 if (unlikely(sw_idx
== hw_idx
))
2951 ri
= &tp
->tx_buffers
[sw_idx
];
2952 if (unlikely(ri
->skb
!= NULL
))
2955 pci_unmap_page(tp
->pdev
,
2956 pci_unmap_addr(ri
, mapping
),
2957 skb_shinfo(skb
)->frags
[i
].size
,
2960 sw_idx
= NEXT_TX(sw_idx
);
2966 tp
->tx_cons
= sw_idx
;
2968 if (unlikely(netif_queue_stopped(tp
->dev
))) {
2969 spin_lock(&tp
->tx_lock
);
2970 if (netif_queue_stopped(tp
->dev
) &&
2971 (TX_BUFFS_AVAIL(tp
) > TG3_TX_WAKEUP_THRESH
))
2972 netif_wake_queue(tp
->dev
);
2973 spin_unlock(&tp
->tx_lock
);
2977 /* Returns size of skb allocated or < 0 on error.
2979 * We only need to fill in the address because the other members
2980 * of the RX descriptor are invariant, see tg3_init_rings.
2982 * Note the purposeful assymetry of cpu vs. chip accesses. For
2983 * posting buffers we only dirty the first cache line of the RX
2984 * descriptor (containing the address). Whereas for the RX status
2985 * buffers the cpu only reads the last cacheline of the RX descriptor
2986 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2988 static int tg3_alloc_rx_skb(struct tg3
*tp
, u32 opaque_key
,
2989 int src_idx
, u32 dest_idx_unmasked
)
2991 struct tg3_rx_buffer_desc
*desc
;
2992 struct ring_info
*map
, *src_map
;
2993 struct sk_buff
*skb
;
2995 int skb_size
, dest_idx
;
2998 switch (opaque_key
) {
2999 case RXD_OPAQUE_RING_STD
:
3000 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
3001 desc
= &tp
->rx_std
[dest_idx
];
3002 map
= &tp
->rx_std_buffers
[dest_idx
];
3004 src_map
= &tp
->rx_std_buffers
[src_idx
];
3005 skb_size
= tp
->rx_pkt_buf_sz
;
3008 case RXD_OPAQUE_RING_JUMBO
:
3009 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3010 desc
= &tp
->rx_jumbo
[dest_idx
];
3011 map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3013 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3014 skb_size
= RX_JUMBO_PKT_BUF_SZ
;
3021 /* Do not overwrite any of the map or rp information
3022 * until we are sure we can commit to a new buffer.
3024 * Callers depend upon this behavior and assume that
3025 * we leave everything unchanged if we fail.
3027 skb
= dev_alloc_skb(skb_size
);
3032 skb_reserve(skb
, tp
->rx_offset
);
3034 mapping
= pci_map_single(tp
->pdev
, skb
->data
,
3035 skb_size
- tp
->rx_offset
,
3036 PCI_DMA_FROMDEVICE
);
3039 pci_unmap_addr_set(map
, mapping
, mapping
);
3041 if (src_map
!= NULL
)
3042 src_map
->skb
= NULL
;
3044 desc
->addr_hi
= ((u64
)mapping
>> 32);
3045 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
3050 /* We only need to move over in the address because the other
3051 * members of the RX descriptor are invariant. See notes above
3052 * tg3_alloc_rx_skb for full details.
3054 static void tg3_recycle_rx(struct tg3
*tp
, u32 opaque_key
,
3055 int src_idx
, u32 dest_idx_unmasked
)
3057 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
3058 struct ring_info
*src_map
, *dest_map
;
3061 switch (opaque_key
) {
3062 case RXD_OPAQUE_RING_STD
:
3063 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
3064 dest_desc
= &tp
->rx_std
[dest_idx
];
3065 dest_map
= &tp
->rx_std_buffers
[dest_idx
];
3066 src_desc
= &tp
->rx_std
[src_idx
];
3067 src_map
= &tp
->rx_std_buffers
[src_idx
];
3070 case RXD_OPAQUE_RING_JUMBO
:
3071 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3072 dest_desc
= &tp
->rx_jumbo
[dest_idx
];
3073 dest_map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3074 src_desc
= &tp
->rx_jumbo
[src_idx
];
3075 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3082 dest_map
->skb
= src_map
->skb
;
3083 pci_unmap_addr_set(dest_map
, mapping
,
3084 pci_unmap_addr(src_map
, mapping
));
3085 dest_desc
->addr_hi
= src_desc
->addr_hi
;
3086 dest_desc
->addr_lo
= src_desc
->addr_lo
;
3088 src_map
->skb
= NULL
;
3091 #if TG3_VLAN_TAG_USED
3092 static int tg3_vlan_rx(struct tg3
*tp
, struct sk_buff
*skb
, u16 vlan_tag
)
3094 return vlan_hwaccel_receive_skb(skb
, tp
->vlgrp
, vlan_tag
);
3098 /* The RX ring scheme is composed of multiple rings which post fresh
3099 * buffers to the chip, and one special ring the chip uses to report
3100 * status back to the host.
3102 * The special ring reports the status of received packets to the
3103 * host. The chip does not write into the original descriptor the
3104 * RX buffer was obtained from. The chip simply takes the original
3105 * descriptor as provided by the host, updates the status and length
3106 * field, then writes this into the next status ring entry.
3108 * Each ring the host uses to post buffers to the chip is described
3109 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3110 * it is first placed into the on-chip ram. When the packet's length
3111 * is known, it walks down the TG3_BDINFO entries to select the ring.
3112 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3113 * which is within the range of the new packet's length is chosen.
3115 * The "separate ring for rx status" scheme may sound queer, but it makes
3116 * sense from a cache coherency perspective. If only the host writes
3117 * to the buffer post rings, and only the chip writes to the rx status
3118 * rings, then cache lines never move beyond shared-modified state.
3119 * If both the host and chip were to write into the same ring, cache line
3120 * eviction could occur since both entities want it in an exclusive state.
3122 static int tg3_rx(struct tg3
*tp
, int budget
)
3125 u32 sw_idx
= tp
->rx_rcb_ptr
;
3129 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3131 * We need to order the read of hw_idx and the read of
3132 * the opaque cookie.
3137 while (sw_idx
!= hw_idx
&& budget
> 0) {
3138 struct tg3_rx_buffer_desc
*desc
= &tp
->rx_rcb
[sw_idx
];
3140 struct sk_buff
*skb
;
3141 dma_addr_t dma_addr
;
3142 u32 opaque_key
, desc_idx
, *post_ptr
;
3144 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
3145 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
3146 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
3147 dma_addr
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
],
3149 skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
3150 post_ptr
= &tp
->rx_std_ptr
;
3151 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
3152 dma_addr
= pci_unmap_addr(&tp
->rx_jumbo_buffers
[desc_idx
],
3154 skb
= tp
->rx_jumbo_buffers
[desc_idx
].skb
;
3155 post_ptr
= &tp
->rx_jumbo_ptr
;
3158 goto next_pkt_nopost
;
3161 work_mask
|= opaque_key
;
3163 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
3164 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
3166 tg3_recycle_rx(tp
, opaque_key
,
3167 desc_idx
, *post_ptr
);
3169 /* Other statistics kept track of by card. */
3170 tp
->net_stats
.rx_dropped
++;
3174 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4; /* omit crc */
3176 if (len
> RX_COPY_THRESHOLD
3177 && tp
->rx_offset
== 2
3178 /* rx_offset != 2 iff this is a 5701 card running
3179 * in PCI-X mode [see tg3_get_invariants()] */
3183 skb_size
= tg3_alloc_rx_skb(tp
, opaque_key
,
3184 desc_idx
, *post_ptr
);
3188 pci_unmap_single(tp
->pdev
, dma_addr
,
3189 skb_size
- tp
->rx_offset
,
3190 PCI_DMA_FROMDEVICE
);
3194 struct sk_buff
*copy_skb
;
3196 tg3_recycle_rx(tp
, opaque_key
,
3197 desc_idx
, *post_ptr
);
3199 copy_skb
= dev_alloc_skb(len
+ 2);
3200 if (copy_skb
== NULL
)
3201 goto drop_it_no_recycle
;
3203 copy_skb
->dev
= tp
->dev
;
3204 skb_reserve(copy_skb
, 2);
3205 skb_put(copy_skb
, len
);
3206 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3207 memcpy(copy_skb
->data
, skb
->data
, len
);
3208 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3210 /* We'll reuse the original ring buffer. */
3214 if ((tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) &&
3215 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
3216 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
3217 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
3218 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3220 skb
->ip_summed
= CHECKSUM_NONE
;
3222 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
3223 #if TG3_VLAN_TAG_USED
3224 if (tp
->vlgrp
!= NULL
&&
3225 desc
->type_flags
& RXD_FLAG_VLAN
) {
3226 tg3_vlan_rx(tp
, skb
,
3227 desc
->err_vlan
& RXD_VLAN_MASK
);
3230 netif_receive_skb(skb
);
3232 tp
->dev
->last_rx
= jiffies
;
3240 sw_idx
%= TG3_RX_RCB_RING_SIZE(tp
);
3242 /* Refresh hw_idx to see if there is new work */
3243 if (sw_idx
== hw_idx
) {
3244 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3249 /* ACK the status ring. */
3250 tp
->rx_rcb_ptr
= sw_idx
;
3251 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, sw_idx
);
3253 /* Refill RX ring(s). */
3254 if (work_mask
& RXD_OPAQUE_RING_STD
) {
3255 sw_idx
= tp
->rx_std_ptr
% TG3_RX_RING_SIZE
;
3256 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3259 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
3260 sw_idx
= tp
->rx_jumbo_ptr
% TG3_RX_JUMBO_RING_SIZE
;
3261 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3269 static int tg3_poll(struct net_device
*netdev
, int *budget
)
3271 struct tg3
*tp
= netdev_priv(netdev
);
3272 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3275 /* handle link change and other phy events */
3276 if (!(tp
->tg3_flags
&
3277 (TG3_FLAG_USE_LINKCHG_REG
|
3278 TG3_FLAG_POLL_SERDES
))) {
3279 if (sblk
->status
& SD_STATUS_LINK_CHG
) {
3280 sblk
->status
= SD_STATUS_UPDATED
|
3281 (sblk
->status
& ~SD_STATUS_LINK_CHG
);
3282 spin_lock(&tp
->lock
);
3283 tg3_setup_phy(tp
, 0);
3284 spin_unlock(&tp
->lock
);
3288 /* run TX completion thread */
3289 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
) {
3293 /* run RX thread, within the bounds set by NAPI.
3294 * All RX "locking" is done by ensuring outside
3295 * code synchronizes with dev->poll()
3297 if (sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
) {
3298 int orig_budget
= *budget
;
3301 if (orig_budget
> netdev
->quota
)
3302 orig_budget
= netdev
->quota
;
3304 work_done
= tg3_rx(tp
, orig_budget
);
3306 *budget
-= work_done
;
3307 netdev
->quota
-= work_done
;
3310 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
3311 tp
->last_tag
= sblk
->status_tag
;
3314 sblk
->status
&= ~SD_STATUS_UPDATED
;
3316 /* if no more work, tell net stack and NIC we're done */
3317 done
= !tg3_has_work(tp
);
3319 netif_rx_complete(netdev
);
3320 tg3_restart_ints(tp
);
3323 return (done
? 0 : 1);
3326 static void tg3_irq_quiesce(struct tg3
*tp
)
3328 BUG_ON(tp
->irq_sync
);
3333 synchronize_irq(tp
->pdev
->irq
);
3336 static inline int tg3_irq_sync(struct tg3
*tp
)
3338 return tp
->irq_sync
;
3341 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3342 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3343 * with as well. Most of the time, this is not necessary except when
3344 * shutting down the device.
3346 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
)
3349 tg3_irq_quiesce(tp
);
3350 spin_lock_bh(&tp
->lock
);
3351 spin_lock(&tp
->tx_lock
);
3354 static inline void tg3_full_unlock(struct tg3
*tp
)
3356 spin_unlock(&tp
->tx_lock
);
3357 spin_unlock_bh(&tp
->lock
);
3360 /* MSI ISR - No need to check for interrupt sharing and no need to
3361 * flush status block and interrupt mailbox. PCI ordering rules
3362 * guarantee that MSI will arrive after the status block.
3364 static irqreturn_t
tg3_msi(int irq
, void *dev_id
, struct pt_regs
*regs
)
3366 struct net_device
*dev
= dev_id
;
3367 struct tg3
*tp
= netdev_priv(dev
);
3369 prefetch(tp
->hw_status
);
3370 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3372 * Writing any value to intr-mbox-0 clears PCI INTA# and
3373 * chip-internal interrupt pending events.
3374 * Writing non-zero to intr-mbox-0 additional tells the
3375 * NIC to stop sending us irqs, engaging "in-intr-handler"
3378 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
3379 if (likely(!tg3_irq_sync(tp
)))
3380 netif_rx_schedule(dev
); /* schedule NAPI poll */
3382 return IRQ_RETVAL(1);
3385 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
3387 struct net_device
*dev
= dev_id
;
3388 struct tg3
*tp
= netdev_priv(dev
);
3389 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3390 unsigned int handled
= 1;
3392 /* In INTx mode, it is possible for the interrupt to arrive at
3393 * the CPU before the status block posted prior to the interrupt.
3394 * Reading the PCI State register will confirm whether the
3395 * interrupt is ours and will flush the status block.
3397 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
3398 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3400 * Writing any value to intr-mbox-0 clears PCI INTA# and
3401 * chip-internal interrupt pending events.
3402 * Writing non-zero to intr-mbox-0 additional tells the
3403 * NIC to stop sending us irqs, engaging "in-intr-handler"
3406 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3408 if (tg3_irq_sync(tp
))
3410 sblk
->status
&= ~SD_STATUS_UPDATED
;
3411 if (likely(tg3_has_work(tp
))) {
3412 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3413 netif_rx_schedule(dev
); /* schedule NAPI poll */
3415 /* No work, shared interrupt perhaps? re-enable
3416 * interrupts, and flush that PCI write
3418 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3421 } else { /* shared interrupt */
3425 return IRQ_RETVAL(handled
);
3428 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
, struct pt_regs
*regs
)
3430 struct net_device
*dev
= dev_id
;
3431 struct tg3
*tp
= netdev_priv(dev
);
3432 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3433 unsigned int handled
= 1;
3435 /* In INTx mode, it is possible for the interrupt to arrive at
3436 * the CPU before the status block posted prior to the interrupt.
3437 * Reading the PCI State register will confirm whether the
3438 * interrupt is ours and will flush the status block.
3440 if ((sblk
->status_tag
!= tp
->last_tag
) ||
3441 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3443 * writing any value to intr-mbox-0 clears PCI INTA# and
3444 * chip-internal interrupt pending events.
3445 * writing non-zero to intr-mbox-0 additional tells the
3446 * NIC to stop sending us irqs, engaging "in-intr-handler"
3449 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3451 if (tg3_irq_sync(tp
))
3453 if (netif_rx_schedule_prep(dev
)) {
3454 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3455 /* Update last_tag to mark that this status has been
3456 * seen. Because interrupt may be shared, we may be
3457 * racing with tg3_poll(), so only update last_tag
3458 * if tg3_poll() is not scheduled.
3460 tp
->last_tag
= sblk
->status_tag
;
3461 __netif_rx_schedule(dev
);
3463 } else { /* shared interrupt */
3467 return IRQ_RETVAL(handled
);
3470 /* ISR for interrupt test */
3471 static irqreturn_t
tg3_test_isr(int irq
, void *dev_id
,
3472 struct pt_regs
*regs
)
3474 struct net_device
*dev
= dev_id
;
3475 struct tg3
*tp
= netdev_priv(dev
);
3476 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3478 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
3479 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3480 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3482 return IRQ_RETVAL(1);
3484 return IRQ_RETVAL(0);
3487 static int tg3_init_hw(struct tg3
*);
3488 static int tg3_halt(struct tg3
*, int, int);
3490 #ifdef CONFIG_NET_POLL_CONTROLLER
3491 static void tg3_poll_controller(struct net_device
*dev
)
3493 struct tg3
*tp
= netdev_priv(dev
);
3495 tg3_interrupt(tp
->pdev
->irq
, dev
, NULL
);
3499 static void tg3_reset_task(void *_data
)
3501 struct tg3
*tp
= _data
;
3502 unsigned int restart_timer
;
3504 tg3_full_lock(tp
, 0);
3505 tp
->tg3_flags
|= TG3_FLAG_IN_RESET_TASK
;
3507 if (!netif_running(tp
->dev
)) {
3508 tp
->tg3_flags
&= ~TG3_FLAG_IN_RESET_TASK
;
3509 tg3_full_unlock(tp
);
3513 tg3_full_unlock(tp
);
3517 tg3_full_lock(tp
, 1);
3519 restart_timer
= tp
->tg3_flags2
& TG3_FLG2_RESTART_TIMER
;
3520 tp
->tg3_flags2
&= ~TG3_FLG2_RESTART_TIMER
;
3522 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
3525 tg3_netif_start(tp
);
3528 mod_timer(&tp
->timer
, jiffies
+ 1);
3530 tp
->tg3_flags
&= ~TG3_FLAG_IN_RESET_TASK
;
3532 tg3_full_unlock(tp
);
3535 static void tg3_tx_timeout(struct net_device
*dev
)
3537 struct tg3
*tp
= netdev_priv(dev
);
3539 printk(KERN_ERR PFX
"%s: transmit timed out, resetting\n",
3542 schedule_work(&tp
->reset_task
);
3545 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3546 static inline int tg3_4g_overflow_test(dma_addr_t mapping
, int len
)
3548 u32 base
= (u32
) mapping
& 0xffffffff;
3550 return ((base
> 0xffffdcc0) &&
3551 (base
+ len
+ 8 < base
));
3554 /* Test for DMA addresses > 40-bit */
3555 static inline int tg3_40bit_overflow_test(struct tg3
*tp
, dma_addr_t mapping
,
3558 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3559 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
3560 return (((u64
) mapping
+ len
) > DMA_40BIT_MASK
);
3567 static void tg3_set_txd(struct tg3
*, int, dma_addr_t
, int, u32
, u32
);
3569 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3570 static int tigon3_dma_hwbug_workaround(struct tg3
*tp
, struct sk_buff
*skb
,
3571 u32 last_plus_one
, u32
*start
,
3572 u32 base_flags
, u32 mss
)
3574 struct sk_buff
*new_skb
= skb_copy(skb
, GFP_ATOMIC
);
3575 dma_addr_t new_addr
= 0;
3582 /* New SKB is guaranteed to be linear. */
3584 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
3586 /* Make sure new skb does not cross any 4G boundaries.
3587 * Drop the packet if it does.
3589 if (tg3_4g_overflow_test(new_addr
, new_skb
->len
)) {
3591 dev_kfree_skb(new_skb
);
3594 tg3_set_txd(tp
, entry
, new_addr
, new_skb
->len
,
3595 base_flags
, 1 | (mss
<< 1));
3596 *start
= NEXT_TX(entry
);
3600 /* Now clean up the sw ring entries. */
3602 while (entry
!= last_plus_one
) {
3606 len
= skb_headlen(skb
);
3608 len
= skb_shinfo(skb
)->frags
[i
-1].size
;
3609 pci_unmap_single(tp
->pdev
,
3610 pci_unmap_addr(&tp
->tx_buffers
[entry
], mapping
),
3611 len
, PCI_DMA_TODEVICE
);
3613 tp
->tx_buffers
[entry
].skb
= new_skb
;
3614 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, new_addr
);
3616 tp
->tx_buffers
[entry
].skb
= NULL
;
3618 entry
= NEXT_TX(entry
);
3627 static void tg3_set_txd(struct tg3
*tp
, int entry
,
3628 dma_addr_t mapping
, int len
, u32 flags
,
3631 struct tg3_tx_buffer_desc
*txd
= &tp
->tx_ring
[entry
];
3632 int is_end
= (mss_and_is_end
& 0x1);
3633 u32 mss
= (mss_and_is_end
>> 1);
3637 flags
|= TXD_FLAG_END
;
3638 if (flags
& TXD_FLAG_VLAN
) {
3639 vlan_tag
= flags
>> 16;
3642 vlan_tag
|= (mss
<< TXD_MSS_SHIFT
);
3644 txd
->addr_hi
= ((u64
) mapping
>> 32);
3645 txd
->addr_lo
= ((u64
) mapping
& 0xffffffff);
3646 txd
->len_flags
= (len
<< TXD_LEN_SHIFT
) | flags
;
3647 txd
->vlan_tag
= vlan_tag
<< TXD_VLAN_TAG_SHIFT
;
3650 static int tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
3652 struct tg3
*tp
= netdev_priv(dev
);
3654 u32 len
, entry
, base_flags
, mss
;
3655 int would_hit_hwbug
;
3657 len
= skb_headlen(skb
);
3659 /* No BH disabling for tx_lock here. We are running in BH disabled
3660 * context and TX reclaim runs via tp->poll inside of a software
3661 * interrupt. Furthermore, IRQ processing runs lockless so we have
3662 * no IRQ context deadlocks to worry about either. Rejoice!
3664 if (!spin_trylock(&tp
->tx_lock
))
3665 return NETDEV_TX_LOCKED
;
3667 if (unlikely(TX_BUFFS_AVAIL(tp
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
3668 if (!netif_queue_stopped(dev
)) {
3669 netif_stop_queue(dev
);
3671 /* This is a hard error, log it. */
3672 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
3673 "queue awake!\n", dev
->name
);
3675 spin_unlock(&tp
->tx_lock
);
3676 return NETDEV_TX_BUSY
;
3679 entry
= tp
->tx_prod
;
3681 if (skb
->ip_summed
== CHECKSUM_HW
)
3682 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
3683 #if TG3_TSO_SUPPORT != 0
3685 if (skb
->len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
3686 (mss
= skb_shinfo(skb
)->tso_size
) != 0) {
3687 int tcp_opt_len
, ip_tcp_len
;
3689 if (skb_header_cloned(skb
) &&
3690 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
3695 tcp_opt_len
= ((skb
->h
.th
->doff
- 5) * 4);
3696 ip_tcp_len
= (skb
->nh
.iph
->ihl
* 4) + sizeof(struct tcphdr
);
3698 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
3699 TXD_FLAG_CPU_POST_DMA
);
3701 skb
->nh
.iph
->check
= 0;
3702 skb
->nh
.iph
->tot_len
= htons(mss
+ ip_tcp_len
+ tcp_opt_len
);
3703 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
3704 skb
->h
.th
->check
= 0;
3705 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
3709 ~csum_tcpudp_magic(skb
->nh
.iph
->saddr
,
3714 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) ||
3715 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
)) {
3716 if (tcp_opt_len
|| skb
->nh
.iph
->ihl
> 5) {
3719 tsflags
= ((skb
->nh
.iph
->ihl
- 5) +
3720 (tcp_opt_len
>> 2));
3721 mss
|= (tsflags
<< 11);
3724 if (tcp_opt_len
|| skb
->nh
.iph
->ihl
> 5) {
3727 tsflags
= ((skb
->nh
.iph
->ihl
- 5) +
3728 (tcp_opt_len
>> 2));
3729 base_flags
|= tsflags
<< 12;
3736 #if TG3_VLAN_TAG_USED
3737 if (tp
->vlgrp
!= NULL
&& vlan_tx_tag_present(skb
))
3738 base_flags
|= (TXD_FLAG_VLAN
|
3739 (vlan_tx_tag_get(skb
) << 16));
3742 /* Queue skb data, a.k.a. the main skb fragment. */
3743 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
3745 tp
->tx_buffers
[entry
].skb
= skb
;
3746 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
3748 would_hit_hwbug
= 0;
3750 if (tg3_4g_overflow_test(mapping
, len
))
3751 would_hit_hwbug
= 1;
3753 tg3_set_txd(tp
, entry
, mapping
, len
, base_flags
,
3754 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
3756 entry
= NEXT_TX(entry
);
3758 /* Now loop through additional data fragments, and queue them. */
3759 if (skb_shinfo(skb
)->nr_frags
> 0) {
3760 unsigned int i
, last
;
3762 last
= skb_shinfo(skb
)->nr_frags
- 1;
3763 for (i
= 0; i
<= last
; i
++) {
3764 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3767 mapping
= pci_map_page(tp
->pdev
,
3770 len
, PCI_DMA_TODEVICE
);
3772 tp
->tx_buffers
[entry
].skb
= NULL
;
3773 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
3775 if (tg3_4g_overflow_test(mapping
, len
))
3776 would_hit_hwbug
= 1;
3778 if (tg3_40bit_overflow_test(tp
, mapping
, len
))
3779 would_hit_hwbug
= 1;
3781 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
3782 tg3_set_txd(tp
, entry
, mapping
, len
,
3783 base_flags
, (i
== last
)|(mss
<< 1));
3785 tg3_set_txd(tp
, entry
, mapping
, len
,
3786 base_flags
, (i
== last
));
3788 entry
= NEXT_TX(entry
);
3792 if (would_hit_hwbug
) {
3793 u32 last_plus_one
= entry
;
3796 start
= entry
- 1 - skb_shinfo(skb
)->nr_frags
;
3797 start
&= (TG3_TX_RING_SIZE
- 1);
3799 /* If the workaround fails due to memory/mapping
3800 * failure, silently drop this packet.
3802 if (tigon3_dma_hwbug_workaround(tp
, skb
, last_plus_one
,
3803 &start
, base_flags
, mss
))
3809 /* Packets are ready, update Tx producer idx local and on card. */
3810 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
), entry
);
3812 tp
->tx_prod
= entry
;
3813 if (TX_BUFFS_AVAIL(tp
) <= (MAX_SKB_FRAGS
+ 1)) {
3814 netif_stop_queue(dev
);
3815 if (TX_BUFFS_AVAIL(tp
) > TG3_TX_WAKEUP_THRESH
)
3816 netif_wake_queue(tp
->dev
);
3821 spin_unlock(&tp
->tx_lock
);
3823 dev
->trans_start
= jiffies
;
3825 return NETDEV_TX_OK
;
3828 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
3833 if (new_mtu
> ETH_DATA_LEN
) {
3834 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
3835 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_CAPABLE
;
3836 ethtool_op_set_tso(dev
, 0);
3839 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
3841 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
3842 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
3843 tp
->tg3_flags
&= ~TG3_FLAG_JUMBO_RING_ENABLE
;
3847 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
3849 struct tg3
*tp
= netdev_priv(dev
);
3851 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
3854 if (!netif_running(dev
)) {
3855 /* We'll just catch it later when the
3858 tg3_set_mtu(dev
, tp
, new_mtu
);
3864 tg3_full_lock(tp
, 1);
3866 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
3868 tg3_set_mtu(dev
, tp
, new_mtu
);
3872 tg3_netif_start(tp
);
3874 tg3_full_unlock(tp
);
3879 /* Free up pending packets in all rx/tx rings.
3881 * The chip has been shut down and the driver detached from
3882 * the networking, so no interrupts or new tx packets will
3883 * end up in the driver. tp->{tx,}lock is not held and we are not
3884 * in an interrupt context and thus may sleep.
3886 static void tg3_free_rings(struct tg3
*tp
)
3888 struct ring_info
*rxp
;
3891 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
3892 rxp
= &tp
->rx_std_buffers
[i
];
3894 if (rxp
->skb
== NULL
)
3896 pci_unmap_single(tp
->pdev
,
3897 pci_unmap_addr(rxp
, mapping
),
3898 tp
->rx_pkt_buf_sz
- tp
->rx_offset
,
3899 PCI_DMA_FROMDEVICE
);
3900 dev_kfree_skb_any(rxp
->skb
);
3904 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
3905 rxp
= &tp
->rx_jumbo_buffers
[i
];
3907 if (rxp
->skb
== NULL
)
3909 pci_unmap_single(tp
->pdev
,
3910 pci_unmap_addr(rxp
, mapping
),
3911 RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
,
3912 PCI_DMA_FROMDEVICE
);
3913 dev_kfree_skb_any(rxp
->skb
);
3917 for (i
= 0; i
< TG3_TX_RING_SIZE
; ) {
3918 struct tx_ring_info
*txp
;
3919 struct sk_buff
*skb
;
3922 txp
= &tp
->tx_buffers
[i
];
3930 pci_unmap_single(tp
->pdev
,
3931 pci_unmap_addr(txp
, mapping
),
3938 for (j
= 0; j
< skb_shinfo(skb
)->nr_frags
; j
++) {
3939 txp
= &tp
->tx_buffers
[i
& (TG3_TX_RING_SIZE
- 1)];
3940 pci_unmap_page(tp
->pdev
,
3941 pci_unmap_addr(txp
, mapping
),
3942 skb_shinfo(skb
)->frags
[j
].size
,
3947 dev_kfree_skb_any(skb
);
3951 /* Initialize tx/rx rings for packet processing.
3953 * The chip has been shut down and the driver detached from
3954 * the networking, so no interrupts or new tx packets will
3955 * end up in the driver. tp->{tx,}lock are held and thus
3958 static void tg3_init_rings(struct tg3
*tp
)
3962 /* Free up all the SKBs. */
3965 /* Zero out all descriptors. */
3966 memset(tp
->rx_std
, 0, TG3_RX_RING_BYTES
);
3967 memset(tp
->rx_jumbo
, 0, TG3_RX_JUMBO_RING_BYTES
);
3968 memset(tp
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
3969 memset(tp
->tx_ring
, 0, TG3_TX_RING_BYTES
);
3971 tp
->rx_pkt_buf_sz
= RX_PKT_BUF_SZ
;
3972 if ((tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) &&
3973 (tp
->dev
->mtu
> ETH_DATA_LEN
))
3974 tp
->rx_pkt_buf_sz
= RX_JUMBO_PKT_BUF_SZ
;
3976 /* Initialize invariants of the rings, we only set this
3977 * stuff once. This works because the card does not
3978 * write into the rx buffer posting rings.
3980 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
3981 struct tg3_rx_buffer_desc
*rxd
;
3983 rxd
= &tp
->rx_std
[i
];
3984 rxd
->idx_len
= (tp
->rx_pkt_buf_sz
- tp
->rx_offset
- 64)
3986 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
3987 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
3988 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
3991 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
3992 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
3993 struct tg3_rx_buffer_desc
*rxd
;
3995 rxd
= &tp
->rx_jumbo
[i
];
3996 rxd
->idx_len
= (RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
- 64)
3998 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
4000 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
4001 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
4005 /* Now allocate fresh SKBs for each rx ring. */
4006 for (i
= 0; i
< tp
->rx_pending
; i
++) {
4007 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_STD
,
4012 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
4013 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
4014 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_JUMBO
,
4022 * Must not be invoked with interrupt sources disabled and
4023 * the hardware shutdown down.
4025 static void tg3_free_consistent(struct tg3
*tp
)
4027 kfree(tp
->rx_std_buffers
);
4028 tp
->rx_std_buffers
= NULL
;
4030 pci_free_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4031 tp
->rx_std
, tp
->rx_std_mapping
);
4035 pci_free_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4036 tp
->rx_jumbo
, tp
->rx_jumbo_mapping
);
4037 tp
->rx_jumbo
= NULL
;
4040 pci_free_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4041 tp
->rx_rcb
, tp
->rx_rcb_mapping
);
4045 pci_free_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4046 tp
->tx_ring
, tp
->tx_desc_mapping
);
4049 if (tp
->hw_status
) {
4050 pci_free_consistent(tp
->pdev
, TG3_HW_STATUS_SIZE
,
4051 tp
->hw_status
, tp
->status_mapping
);
4052 tp
->hw_status
= NULL
;
4055 pci_free_consistent(tp
->pdev
, sizeof(struct tg3_hw_stats
),
4056 tp
->hw_stats
, tp
->stats_mapping
);
4057 tp
->hw_stats
= NULL
;
4062 * Must not be invoked with interrupt sources disabled and
4063 * the hardware shutdown down. Can sleep.
4065 static int tg3_alloc_consistent(struct tg3
*tp
)
4067 tp
->rx_std_buffers
= kmalloc((sizeof(struct ring_info
) *
4069 TG3_RX_JUMBO_RING_SIZE
)) +
4070 (sizeof(struct tx_ring_info
) *
4073 if (!tp
->rx_std_buffers
)
4076 memset(tp
->rx_std_buffers
, 0,
4077 (sizeof(struct ring_info
) *
4079 TG3_RX_JUMBO_RING_SIZE
)) +
4080 (sizeof(struct tx_ring_info
) *
4083 tp
->rx_jumbo_buffers
= &tp
->rx_std_buffers
[TG3_RX_RING_SIZE
];
4084 tp
->tx_buffers
= (struct tx_ring_info
*)
4085 &tp
->rx_jumbo_buffers
[TG3_RX_JUMBO_RING_SIZE
];
4087 tp
->rx_std
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4088 &tp
->rx_std_mapping
);
4092 tp
->rx_jumbo
= pci_alloc_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4093 &tp
->rx_jumbo_mapping
);
4098 tp
->rx_rcb
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4099 &tp
->rx_rcb_mapping
);
4103 tp
->tx_ring
= pci_alloc_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4104 &tp
->tx_desc_mapping
);
4108 tp
->hw_status
= pci_alloc_consistent(tp
->pdev
,
4110 &tp
->status_mapping
);
4114 tp
->hw_stats
= pci_alloc_consistent(tp
->pdev
,
4115 sizeof(struct tg3_hw_stats
),
4116 &tp
->stats_mapping
);
4120 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
4121 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
4126 tg3_free_consistent(tp
);
4130 #define MAX_WAIT_CNT 1000
4132 /* To stop a block, clear the enable bit and poll till it
4133 * clears. tp->lock is held.
4135 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, int silent
)
4140 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
4147 /* We can't enable/disable these bits of the
4148 * 5705/5750, just say success.
4161 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
4164 if ((val
& enable_bit
) == 0)
4168 if (i
== MAX_WAIT_CNT
&& !silent
) {
4169 printk(KERN_ERR PFX
"tg3_stop_block timed out, "
4170 "ofs=%lx enable_bit=%x\n",
4178 /* tp->lock is held. */
4179 static int tg3_abort_hw(struct tg3
*tp
, int silent
)
4183 tg3_disable_ints(tp
);
4185 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
4186 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
4189 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
4190 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
4191 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
4192 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
4193 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
4194 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
4196 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
4197 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
4198 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
4199 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
4200 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
4201 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
4202 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
4204 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
4205 tw32_f(MAC_MODE
, tp
->mac_mode
);
4208 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
4209 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
4211 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
4213 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
4216 if (i
>= MAX_WAIT_CNT
) {
4217 printk(KERN_ERR PFX
"tg3_abort_hw timed out for %s, "
4218 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4219 tp
->dev
->name
, tr32(MAC_TX_MODE
));
4223 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
4224 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
4225 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
4227 tw32(FTQ_RESET
, 0xffffffff);
4228 tw32(FTQ_RESET
, 0x00000000);
4230 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
4231 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
4234 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
4236 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
4241 /* tp->lock is held. */
4242 static int tg3_nvram_lock(struct tg3
*tp
)
4244 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
4247 if (tp
->nvram_lock_cnt
== 0) {
4248 tw32(NVRAM_SWARB
, SWARB_REQ_SET1
);
4249 for (i
= 0; i
< 8000; i
++) {
4250 if (tr32(NVRAM_SWARB
) & SWARB_GNT1
)
4255 tw32(NVRAM_SWARB
, SWARB_REQ_CLR1
);
4259 tp
->nvram_lock_cnt
++;
4264 /* tp->lock is held. */
4265 static void tg3_nvram_unlock(struct tg3
*tp
)
4267 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
4268 if (tp
->nvram_lock_cnt
> 0)
4269 tp
->nvram_lock_cnt
--;
4270 if (tp
->nvram_lock_cnt
== 0)
4271 tw32_f(NVRAM_SWARB
, SWARB_REQ_CLR1
);
4275 /* tp->lock is held. */
4276 static void tg3_enable_nvram_access(struct tg3
*tp
)
4278 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
4279 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
4280 u32 nvaccess
= tr32(NVRAM_ACCESS
);
4282 tw32(NVRAM_ACCESS
, nvaccess
| ACCESS_ENABLE
);
4286 /* tp->lock is held. */
4287 static void tg3_disable_nvram_access(struct tg3
*tp
)
4289 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
4290 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
4291 u32 nvaccess
= tr32(NVRAM_ACCESS
);
4293 tw32(NVRAM_ACCESS
, nvaccess
& ~ACCESS_ENABLE
);
4297 /* tp->lock is held. */
4298 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
4300 if (!(tp
->tg3_flags2
& TG3_FLG2_SUN_570X
))
4301 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
4302 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
4304 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
4306 case RESET_KIND_INIT
:
4307 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4311 case RESET_KIND_SHUTDOWN
:
4312 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4316 case RESET_KIND_SUSPEND
:
4317 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4327 /* tp->lock is held. */
4328 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
4330 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
4332 case RESET_KIND_INIT
:
4333 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4334 DRV_STATE_START_DONE
);
4337 case RESET_KIND_SHUTDOWN
:
4338 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4339 DRV_STATE_UNLOAD_DONE
);
4348 /* tp->lock is held. */
4349 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
4351 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
4353 case RESET_KIND_INIT
:
4354 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4358 case RESET_KIND_SHUTDOWN
:
4359 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4363 case RESET_KIND_SUSPEND
:
4364 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4374 static void tg3_stop_fw(struct tg3
*);
4376 /* tp->lock is held. */
4377 static int tg3_chip_reset(struct tg3
*tp
)
4380 void (*write_op
)(struct tg3
*, u32
, u32
);
4383 if (!(tp
->tg3_flags2
& TG3_FLG2_SUN_570X
)) {
4385 /* No matching tg3_nvram_unlock() after this because
4386 * chip reset below will undo the nvram lock.
4388 tp
->nvram_lock_cnt
= 0;
4392 * We must avoid the readl() that normally takes place.
4393 * It locks machines, causes machine checks, and other
4394 * fun things. So, temporarily disable the 5701
4395 * hardware workaround, while we do the reset.
4397 write_op
= tp
->write32
;
4398 if (write_op
== tg3_write_flush_reg32
)
4399 tp
->write32
= tg3_write32
;
4402 val
= GRC_MISC_CFG_CORECLK_RESET
;
4404 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
4405 if (tr32(0x7e2c) == 0x60) {
4408 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
4409 tw32(GRC_MISC_CFG
, (1 << 29));
4414 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
4415 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
4416 tw32(GRC_MISC_CFG
, val
);
4418 /* restore 5701 hardware bug workaround write method */
4419 tp
->write32
= write_op
;
4421 /* Unfortunately, we have to delay before the PCI read back.
4422 * Some 575X chips even will not respond to a PCI cfg access
4423 * when the reset command is given to the chip.
4425 * How do these hardware designers expect things to work
4426 * properly if the PCI write is posted for a long period
4427 * of time? It is always necessary to have some method by
4428 * which a register read back can occur to push the write
4429 * out which does the reset.
4431 * For most tg3 variants the trick below was working.
4436 /* Flush PCI posted writes. The normal MMIO registers
4437 * are inaccessible at this time so this is the only
4438 * way to make this reliably (actually, this is no longer
4439 * the case, see above). I tried to use indirect
4440 * register read/write but this upset some 5701 variants.
4442 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
4446 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
4447 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
) {
4451 /* Wait for link training to complete. */
4452 for (i
= 0; i
< 5000; i
++)
4455 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
4456 pci_write_config_dword(tp
->pdev
, 0xc4,
4457 cfg_val
| (1 << 15));
4459 /* Set PCIE max payload size and clear error status. */
4460 pci_write_config_dword(tp
->pdev
, 0xd8, 0xf5000);
4463 /* Re-enable indirect register accesses. */
4464 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
4465 tp
->misc_host_ctrl
);
4467 /* Set MAX PCI retry to zero. */
4468 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
4469 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
4470 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
))
4471 val
|= PCISTATE_RETRY_SAME_DMA
;
4472 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
4474 pci_restore_state(tp
->pdev
);
4476 /* Make sure PCI-X relaxed ordering bit is clear. */
4477 pci_read_config_dword(tp
->pdev
, TG3PCI_X_CAPS
, &val
);
4478 val
&= ~PCIX_CAPS_RELAXED_ORDERING
;
4479 pci_write_config_dword(tp
->pdev
, TG3PCI_X_CAPS
, val
);
4481 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
4484 /* Chip reset on 5780 will reset MSI enable bit,
4485 * so need to restore it.
4487 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
4490 pci_read_config_word(tp
->pdev
,
4491 tp
->msi_cap
+ PCI_MSI_FLAGS
,
4493 pci_write_config_word(tp
->pdev
,
4494 tp
->msi_cap
+ PCI_MSI_FLAGS
,
4495 ctrl
| PCI_MSI_FLAGS_ENABLE
);
4496 val
= tr32(MSGINT_MODE
);
4497 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
4500 val
= tr32(MEMARB_MODE
);
4501 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
4504 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
4506 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A3
) {
4508 tw32(0x5000, 0x400);
4511 tw32(GRC_MODE
, tp
->grc_mode
);
4513 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
) {
4514 u32 val
= tr32(0xc4);
4516 tw32(0xc4, val
| (1 << 15));
4519 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
4520 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
4521 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
4522 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
)
4523 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
4524 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
4527 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
4528 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
4529 tw32_f(MAC_MODE
, tp
->mac_mode
);
4530 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
4531 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4532 tw32_f(MAC_MODE
, tp
->mac_mode
);
4534 tw32_f(MAC_MODE
, 0);
4537 if (!(tp
->tg3_flags2
& TG3_FLG2_SUN_570X
)) {
4538 /* Wait for firmware initialization to complete. */
4539 for (i
= 0; i
< 100000; i
++) {
4540 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
4541 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
4546 printk(KERN_ERR PFX
"tg3_reset_hw timed out for %s, "
4547 "firmware will not restart magic=%08x\n",
4548 tp
->dev
->name
, val
);
4553 if ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
4554 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
4555 u32 val
= tr32(0x7c00);
4557 tw32(0x7c00, val
| (1 << 25));
4560 /* Reprobe ASF enable state. */
4561 tp
->tg3_flags
&= ~TG3_FLAG_ENABLE_ASF
;
4562 tp
->tg3_flags2
&= ~TG3_FLG2_ASF_NEW_HANDSHAKE
;
4563 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
4564 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
4567 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
4568 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
4569 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
4570 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
4571 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
4578 /* tp->lock is held. */
4579 static void tg3_stop_fw(struct tg3
*tp
)
4581 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
4585 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
4586 val
= tr32(GRC_RX_CPU_EVENT
);
4588 tw32(GRC_RX_CPU_EVENT
, val
);
4590 /* Wait for RX cpu to ACK the event. */
4591 for (i
= 0; i
< 100; i
++) {
4592 if (!(tr32(GRC_RX_CPU_EVENT
) & (1 << 14)))
/* tp->lock is held. */
/* Full orderly shutdown of the chip: pause firmware, signal the reset
 * kind, quiesce the DMA/MAC blocks, reset the chip, then post the
 * completion signatures.  Returns the tg3_chip_reset() status.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
4620 #define TG3_FW_RELEASE_MAJOR 0x0
4621 #define TG3_FW_RELASE_MINOR 0x0
4622 #define TG3_FW_RELEASE_FIX 0x0
4623 #define TG3_FW_START_ADDR 0x08000000
4624 #define TG3_FW_TEXT_ADDR 0x08000000
4625 #define TG3_FW_TEXT_LEN 0x9c0
4626 #define TG3_FW_RODATA_ADDR 0x080009c0
4627 #define TG3_FW_RODATA_LEN 0x60
4628 #define TG3_FW_DATA_ADDR 0x08000a40
4629 #define TG3_FW_DATA_LEN 0x20
4630 #define TG3_FW_SBSS_ADDR 0x08000a60
4631 #define TG3_FW_SBSS_LEN 0xc
4632 #define TG3_FW_BSS_ADDR 0x08000a70
4633 #define TG3_FW_BSS_LEN 0x10
4635 static u32 tg3FwText
[(TG3_FW_TEXT_LEN
/ sizeof(u32
)) + 1] = {
4636 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4637 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4638 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4639 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4640 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4641 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4642 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4643 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4644 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4645 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4646 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4647 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4648 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4649 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4650 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4651 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4652 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4653 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4654 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4655 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4656 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4657 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4658 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4659 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4660 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4662 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4663 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4664 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4665 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4666 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4667 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4668 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4669 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4670 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4671 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4672 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4673 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4674 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4675 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4676 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4677 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4678 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4679 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4680 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4681 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4682 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4683 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4684 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4685 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4686 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4687 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4688 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4689 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4690 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4691 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4692 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4693 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4694 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4695 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4696 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4697 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4698 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4699 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4700 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4701 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4702 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4703 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4704 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4705 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4706 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4707 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4708 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4709 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4710 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4711 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4712 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4713 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4714 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4715 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4716 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4717 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4718 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4719 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4720 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4721 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4722 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4723 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4724 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4725 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4726 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4729 static u32 tg3FwRodata
[(TG3_FW_RODATA_LEN
/ sizeof(u32
)) + 1] = {
4730 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4731 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4732 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4733 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4737 #if 0 /* All zeros, don't eat up space with it. */
4738 u32 tg3FwData
[(TG3_FW_DATA_LEN
/ sizeof(u32
)) + 1] = {
4739 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4740 0x00000000, 0x00000000, 0x00000000, 0x00000000
4744 #define RX_CPU_SCRATCH_BASE 0x30000
4745 #define RX_CPU_SCRATCH_SIZE 0x04000
4746 #define TX_CPU_SCRATCH_BASE 0x34000
4747 #define TX_CPU_SCRATCH_SIZE 0x04000
4749 /* tp->lock is held. */
4750 static int tg3_halt_cpu(struct tg3
*tp
, u32 offset
)
4754 if (offset
== TX_CPU_BASE
&&
4755 (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
4758 if (offset
== RX_CPU_BASE
) {
4759 for (i
= 0; i
< 10000; i
++) {
4760 tw32(offset
+ CPU_STATE
, 0xffffffff);
4761 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
4762 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
4766 tw32(offset
+ CPU_STATE
, 0xffffffff);
4767 tw32_f(offset
+ CPU_MODE
, CPU_MODE_HALT
);
4770 for (i
= 0; i
< 10000; i
++) {
4771 tw32(offset
+ CPU_STATE
, 0xffffffff);
4772 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
4773 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
4779 printk(KERN_ERR PFX
"tg3_reset_cpu timed out for %s, "
4782 (offset
== RX_CPU_BASE
? "RX" : "TX"));
4786 /* Clear firmware's nvram arbitration. */
4787 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
4788 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
4793 unsigned int text_base
;
4794 unsigned int text_len
;
4796 unsigned int rodata_base
;
4797 unsigned int rodata_len
;
4799 unsigned int data_base
;
4800 unsigned int data_len
;
4804 /* tp->lock is held. */
4805 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
, u32 cpu_scratch_base
,
4806 int cpu_scratch_size
, struct fw_info
*info
)
4808 int err
, lock_err
, i
;
4809 void (*write_op
)(struct tg3
*, u32
, u32
);
4811 if (cpu_base
== TX_CPU_BASE
&&
4812 (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
4813 printk(KERN_ERR PFX
"tg3_load_firmware_cpu: Trying to load "
4814 "TX cpu firmware on %s which is 5705.\n",
4819 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
4820 write_op
= tg3_write_mem
;
4822 write_op
= tg3_write_indirect_reg32
;
4824 /* It is possible that bootcode is still loading at this point.
4825 * Get the nvram lock first before halting the cpu.
4827 lock_err
= tg3_nvram_lock(tp
);
4828 err
= tg3_halt_cpu(tp
, cpu_base
);
4830 tg3_nvram_unlock(tp
);
4834 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
4835 write_op(tp
, cpu_scratch_base
+ i
, 0);
4836 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
4837 tw32(cpu_base
+ CPU_MODE
, tr32(cpu_base
+CPU_MODE
)|CPU_MODE_HALT
);
4838 for (i
= 0; i
< (info
->text_len
/ sizeof(u32
)); i
++)
4839 write_op(tp
, (cpu_scratch_base
+
4840 (info
->text_base
& 0xffff) +
4843 info
->text_data
[i
] : 0));
4844 for (i
= 0; i
< (info
->rodata_len
/ sizeof(u32
)); i
++)
4845 write_op(tp
, (cpu_scratch_base
+
4846 (info
->rodata_base
& 0xffff) +
4848 (info
->rodata_data
?
4849 info
->rodata_data
[i
] : 0));
4850 for (i
= 0; i
< (info
->data_len
/ sizeof(u32
)); i
++)
4851 write_op(tp
, (cpu_scratch_base
+
4852 (info
->data_base
& 0xffff) +
4855 info
->data_data
[i
] : 0));
4863 /* tp->lock is held. */
4864 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
4866 struct fw_info info
;
4869 info
.text_base
= TG3_FW_TEXT_ADDR
;
4870 info
.text_len
= TG3_FW_TEXT_LEN
;
4871 info
.text_data
= &tg3FwText
[0];
4872 info
.rodata_base
= TG3_FW_RODATA_ADDR
;
4873 info
.rodata_len
= TG3_FW_RODATA_LEN
;
4874 info
.rodata_data
= &tg3FwRodata
[0];
4875 info
.data_base
= TG3_FW_DATA_ADDR
;
4876 info
.data_len
= TG3_FW_DATA_LEN
;
4877 info
.data_data
= NULL
;
4879 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
4880 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
4885 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
4886 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
4891 /* Now startup only the RX cpu. */
4892 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
4893 tw32_f(RX_CPU_BASE
+ CPU_PC
, TG3_FW_TEXT_ADDR
);
4895 for (i
= 0; i
< 5; i
++) {
4896 if (tr32(RX_CPU_BASE
+ CPU_PC
) == TG3_FW_TEXT_ADDR
)
4898 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
4899 tw32(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
4900 tw32_f(RX_CPU_BASE
+ CPU_PC
, TG3_FW_TEXT_ADDR
);
4904 printk(KERN_ERR PFX
"tg3_load_firmware fails for %s "
4905 "to set RX CPU PC, is %08x should be %08x\n",
4906 tp
->dev
->name
, tr32(RX_CPU_BASE
+ CPU_PC
),
4910 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
4911 tw32_f(RX_CPU_BASE
+ CPU_MODE
, 0x00000000);
4916 #if TG3_TSO_SUPPORT != 0
4918 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4919 #define TG3_TSO_FW_RELASE_MINOR 0x6
4920 #define TG3_TSO_FW_RELEASE_FIX 0x0
4921 #define TG3_TSO_FW_START_ADDR 0x08000000
4922 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4923 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4924 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4925 #define TG3_TSO_FW_RODATA_LEN 0x60
4926 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4927 #define TG3_TSO_FW_DATA_LEN 0x30
4928 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4929 #define TG3_TSO_FW_SBSS_LEN 0x2c
4930 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4931 #define TG3_TSO_FW_BSS_LEN 0x894
4933 static u32 tg3TsoFwText
[(TG3_TSO_FW_TEXT_LEN
/ 4) + 1] = {
4934 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4935 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4936 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4937 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4938 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4939 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4940 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4941 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4942 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4943 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4944 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4945 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4946 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4947 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4948 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4949 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4950 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4951 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4952 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4953 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4954 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4955 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4956 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4957 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4958 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4959 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4960 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4961 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4962 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4963 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4964 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4965 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4966 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4967 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4968 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4969 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4970 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4971 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4972 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4973 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4974 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4975 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4976 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4977 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4978 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4979 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4980 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4981 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4982 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4983 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4984 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4985 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4986 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4987 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4988 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4989 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4990 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4991 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4992 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4993 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4994 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4995 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4996 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4997 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4998 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4999 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5000 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5001 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5002 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5003 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5004 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5005 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5006 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5007 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5008 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5009 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5010 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5011 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5012 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5013 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5014 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5015 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5016 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5017 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5018 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5019 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5020 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5021 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5022 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5023 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5024 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5025 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5026 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5027 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5028 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5029 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5030 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5031 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5032 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5033 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5034 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5035 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5036 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5037 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5038 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5039 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5040 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5041 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5042 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5043 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5044 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5045 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5046 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5047 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5048 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5049 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5050 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5051 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5052 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5053 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5054 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5055 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5056 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5057 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5058 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5059 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5060 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5061 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5062 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5063 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5064 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5065 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5066 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5067 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5068 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5069 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5070 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5071 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5072 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5073 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5074 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5075 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5076 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5077 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5078 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5079 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5080 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5081 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5082 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5083 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5084 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5085 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5086 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5087 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5088 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5089 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5090 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5091 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5092 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5093 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5094 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5095 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5096 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5097 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5098 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5099 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5100 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5101 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5102 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5103 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5104 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5105 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5106 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5107 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5108 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5109 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5110 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5111 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5112 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5113 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5114 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5115 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5116 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5117 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5118 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5119 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5120 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5121 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5122 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5123 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5124 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5125 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5126 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5127 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5128 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5129 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5130 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5131 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5132 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5133 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5134 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5135 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5136 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5137 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5138 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5139 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5140 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5141 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5142 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5143 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5144 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5145 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5146 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5147 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5148 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5149 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5150 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5151 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5152 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5153 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5154 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5155 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5156 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5157 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5158 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5159 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5160 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5161 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5162 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5163 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5164 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5165 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5166 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5167 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5168 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5169 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5170 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5171 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5172 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5173 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5174 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5175 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5176 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5177 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5178 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5179 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5180 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5181 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5182 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5183 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5184 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5185 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5186 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5187 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5188 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5189 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5190 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5191 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5192 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5193 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5194 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5195 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5196 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5197 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5198 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5199 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5200 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5201 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5202 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5203 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5204 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5205 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5206 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5207 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5208 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5209 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5210 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5211 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5212 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5213 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5214 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5215 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5216 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5217 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5220 static u32 tg3TsoFwRodata
[] = {
5221 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5222 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5223 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5224 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5228 static u32 tg3TsoFwData
[] = {
5229 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5230 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5234 /* 5705 needs a special version of the TSO firmware. */
5235 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5236 #define TG3_TSO5_FW_RELASE_MINOR 0x2
5237 #define TG3_TSO5_FW_RELEASE_FIX 0x0
5238 #define TG3_TSO5_FW_START_ADDR 0x00010000
5239 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5240 #define TG3_TSO5_FW_TEXT_LEN 0xe90
5241 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5242 #define TG3_TSO5_FW_RODATA_LEN 0x50
5243 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5244 #define TG3_TSO5_FW_DATA_LEN 0x20
5245 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5246 #define TG3_TSO5_FW_SBSS_LEN 0x28
5247 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5248 #define TG3_TSO5_FW_BSS_LEN 0x88
5250 static u32 tg3Tso5FwText
[(TG3_TSO5_FW_TEXT_LEN
/ 4) + 1] = {
5251 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5252 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5253 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5254 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5255 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5256 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5257 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5258 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5259 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5260 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5261 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5262 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5263 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5264 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5265 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5266 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5267 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5268 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5269 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5270 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5271 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5272 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5273 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5274 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5275 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5276 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5277 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5278 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5279 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5280 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5281 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5282 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5283 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5284 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5285 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5286 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5287 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5288 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5289 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5290 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5291 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5292 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5293 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5294 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5295 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5296 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5297 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5298 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5299 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5300 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5301 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5302 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5303 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5304 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5305 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5306 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5307 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5308 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5309 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5310 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5311 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5312 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5313 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5314 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5315 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5316 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5317 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5318 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5319 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5320 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5321 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5322 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5323 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5324 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5325 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5326 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5327 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5328 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5329 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5330 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5331 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5332 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5333 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5334 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5335 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5336 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5337 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5338 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5339 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5340 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5341 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5342 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5343 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5344 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5345 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5346 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5347 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5348 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5349 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5350 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5351 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5352 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5353 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5354 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5355 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5356 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5357 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5358 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5359 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5360 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5361 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5362 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5363 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5364 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5365 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5366 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5367 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5368 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5369 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5370 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5371 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5372 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5373 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5374 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5375 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5376 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5377 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5378 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5379 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5380 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5381 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5382 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5383 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5384 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5385 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5386 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5387 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5388 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5389 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5390 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5391 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5392 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5393 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5394 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5395 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5396 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5397 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5398 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5399 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5400 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5401 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5402 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5403 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5404 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5405 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5406 0x00000000, 0x00000000, 0x00000000,
5409 static u32 tg3Tso5FwRodata
[(TG3_TSO5_FW_RODATA_LEN
/ 4) + 1] = {
5410 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5411 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5412 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5413 0x00000000, 0x00000000, 0x00000000,
5416 static u32 tg3Tso5FwData
[(TG3_TSO5_FW_DATA_LEN
/ 4) + 1] = {
5417 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5418 0x00000000, 0x00000000, 0x00000000,
5421 /* tp->lock is held. */
5422 static int tg3_load_tso_firmware(struct tg3
*tp
)
5424 struct fw_info info
;
5425 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
5428 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
5431 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
5432 info
.text_base
= TG3_TSO5_FW_TEXT_ADDR
;
5433 info
.text_len
= TG3_TSO5_FW_TEXT_LEN
;
5434 info
.text_data
= &tg3Tso5FwText
[0];
5435 info
.rodata_base
= TG3_TSO5_FW_RODATA_ADDR
;
5436 info
.rodata_len
= TG3_TSO5_FW_RODATA_LEN
;
5437 info
.rodata_data
= &tg3Tso5FwRodata
[0];
5438 info
.data_base
= TG3_TSO5_FW_DATA_ADDR
;
5439 info
.data_len
= TG3_TSO5_FW_DATA_LEN
;
5440 info
.data_data
= &tg3Tso5FwData
[0];
5441 cpu_base
= RX_CPU_BASE
;
5442 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
5443 cpu_scratch_size
= (info
.text_len
+
5446 TG3_TSO5_FW_SBSS_LEN
+
5447 TG3_TSO5_FW_BSS_LEN
);
5449 info
.text_base
= TG3_TSO_FW_TEXT_ADDR
;
5450 info
.text_len
= TG3_TSO_FW_TEXT_LEN
;
5451 info
.text_data
= &tg3TsoFwText
[0];
5452 info
.rodata_base
= TG3_TSO_FW_RODATA_ADDR
;
5453 info
.rodata_len
= TG3_TSO_FW_RODATA_LEN
;
5454 info
.rodata_data
= &tg3TsoFwRodata
[0];
5455 info
.data_base
= TG3_TSO_FW_DATA_ADDR
;
5456 info
.data_len
= TG3_TSO_FW_DATA_LEN
;
5457 info
.data_data
= &tg3TsoFwData
[0];
5458 cpu_base
= TX_CPU_BASE
;
5459 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
5460 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
5463 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
5464 cpu_scratch_base
, cpu_scratch_size
,
5469 /* Now startup the cpu. */
5470 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5471 tw32_f(cpu_base
+ CPU_PC
, info
.text_base
);
5473 for (i
= 0; i
< 5; i
++) {
5474 if (tr32(cpu_base
+ CPU_PC
) == info
.text_base
)
5476 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5477 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
5478 tw32_f(cpu_base
+ CPU_PC
, info
.text_base
);
5482 printk(KERN_ERR PFX
"tg3_load_tso_firmware fails for %s "
5483 "to set CPU PC, is %08x should be %08x\n",
5484 tp
->dev
->name
, tr32(cpu_base
+ CPU_PC
),
5488 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5489 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
5493 #endif /* TG3_TSO_SUPPORT != 0 */
5495 /* tp->lock is held. */
5496 static void __tg3_set_mac_addr(struct tg3
*tp
)
5498 u32 addr_high
, addr_low
;
5501 addr_high
= ((tp
->dev
->dev_addr
[0] << 8) |
5502 tp
->dev
->dev_addr
[1]);
5503 addr_low
= ((tp
->dev
->dev_addr
[2] << 24) |
5504 (tp
->dev
->dev_addr
[3] << 16) |
5505 (tp
->dev
->dev_addr
[4] << 8) |
5506 (tp
->dev
->dev_addr
[5] << 0));
5507 for (i
= 0; i
< 4; i
++) {
5508 tw32(MAC_ADDR_0_HIGH
+ (i
* 8), addr_high
);
5509 tw32(MAC_ADDR_0_LOW
+ (i
* 8), addr_low
);
5512 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
5513 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
5514 for (i
= 0; i
< 12; i
++) {
5515 tw32(MAC_EXTADDR_0_HIGH
+ (i
* 8), addr_high
);
5516 tw32(MAC_EXTADDR_0_LOW
+ (i
* 8), addr_low
);
5520 addr_high
= (tp
->dev
->dev_addr
[0] +
5521 tp
->dev
->dev_addr
[1] +
5522 tp
->dev
->dev_addr
[2] +
5523 tp
->dev
->dev_addr
[3] +
5524 tp
->dev
->dev_addr
[4] +
5525 tp
->dev
->dev_addr
[5]) &
5526 TX_BACKOFF_SEED_MASK
;
5527 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
5530 static int tg3_set_mac_addr(struct net_device
*dev
, void *p
)
5532 struct tg3
*tp
= netdev_priv(dev
);
5533 struct sockaddr
*addr
= p
;
5535 if (!is_valid_ether_addr(addr
->sa_data
))
5538 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
5540 if (!netif_running(dev
))
5543 spin_lock_bh(&tp
->lock
);
5544 __tg3_set_mac_addr(tp
);
5545 spin_unlock_bh(&tp
->lock
);
5550 /* tp->lock is held. */
5551 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
5552 dma_addr_t mapping
, u32 maxlen_flags
,
5556 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
5557 ((u64
) mapping
>> 32));
5559 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
5560 ((u64
) mapping
& 0xffffffff));
5562 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
5565 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
5567 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
5571 static void __tg3_set_rx_mode(struct net_device
*);
5572 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
5574 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
5575 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
5576 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
5577 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
5578 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5579 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
5580 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
5582 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
5583 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
5584 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5585 u32 val
= ec
->stats_block_coalesce_usecs
;
5587 if (!netif_carrier_ok(tp
->dev
))
5590 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
5594 /* tp->lock is held. */
5595 static int tg3_reset_hw(struct tg3
*tp
)
5597 u32 val
, rdmac_mode
;
5600 tg3_disable_ints(tp
);
5604 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
5606 if (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) {
5607 tg3_abort_hw(tp
, 1);
5610 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
5613 err
= tg3_chip_reset(tp
);
5617 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
5619 /* This works around an issue with Athlon chipsets on
5620 * B3 tigon3 silicon. This bit has no effect on any
5621 * other revision. But do not set this on PCI Express
5624 if (!(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
5625 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
5626 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
5628 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
5629 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
5630 val
= tr32(TG3PCI_PCISTATE
);
5631 val
|= PCISTATE_RETRY_SAME_DMA
;
5632 tw32(TG3PCI_PCISTATE
, val
);
5635 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
5636 /* Enable some hw fixes. */
5637 val
= tr32(TG3PCI_MSI_DATA
);
5638 val
|= (1 << 26) | (1 << 28) | (1 << 29);
5639 tw32(TG3PCI_MSI_DATA
, val
);
5642 /* Descriptor ring init may make accesses to the
5643 * NIC SRAM area to setup the TX descriptors, so we
5644 * can only do this after the hardware has been
5645 * successfully reset.
5649 /* This value is determined during the probe time DMA
5650 * engine test, tg3_test_dma.
5652 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
5654 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
5655 GRC_MODE_4X_NIC_SEND_RINGS
|
5656 GRC_MODE_NO_TX_PHDR_CSUM
|
5657 GRC_MODE_NO_RX_PHDR_CSUM
);
5658 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
5659 if (tp
->tg3_flags
& TG3_FLAG_NO_TX_PSEUDO_CSUM
)
5660 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
5661 if (tp
->tg3_flags
& TG3_FLAG_NO_RX_PSEUDO_CSUM
)
5662 tp
->grc_mode
|= GRC_MODE_NO_RX_PHDR_CSUM
;
5666 (GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
));
5668 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5669 val
= tr32(GRC_MISC_CFG
);
5671 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
5672 tw32(GRC_MISC_CFG
, val
);
5674 /* Initialize MBUF/DESC pool. */
5675 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
5677 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
5678 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
5679 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
5680 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
5682 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
5683 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
5684 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
5686 #if TG3_TSO_SUPPORT != 0
5687 else if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
5690 fw_len
= (TG3_TSO5_FW_TEXT_LEN
+
5691 TG3_TSO5_FW_RODATA_LEN
+
5692 TG3_TSO5_FW_DATA_LEN
+
5693 TG3_TSO5_FW_SBSS_LEN
+
5694 TG3_TSO5_FW_BSS_LEN
);
5695 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
5696 tw32(BUFMGR_MB_POOL_ADDR
,
5697 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
5698 tw32(BUFMGR_MB_POOL_SIZE
,
5699 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
5703 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
5704 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
5705 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
5706 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
5707 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
5708 tw32(BUFMGR_MB_HIGH_WATER
,
5709 tp
->bufmgr_config
.mbuf_high_water
);
5711 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
5712 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
5713 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
5714 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
5715 tw32(BUFMGR_MB_HIGH_WATER
,
5716 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
5718 tw32(BUFMGR_DMA_LOW_WATER
,
5719 tp
->bufmgr_config
.dma_low_water
);
5720 tw32(BUFMGR_DMA_HIGH_WATER
,
5721 tp
->bufmgr_config
.dma_high_water
);
5723 tw32(BUFMGR_MODE
, BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
);
5724 for (i
= 0; i
< 2000; i
++) {
5725 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
5730 printk(KERN_ERR PFX
"tg3_reset_hw cannot enable BUFMGR for %s.\n",
5735 /* Setup replenish threshold. */
5736 tw32(RCVBDI_STD_THRESH
, tp
->rx_pending
/ 8);
5738 /* Initialize TG3_BDINFO's at:
5739 * RCVDBDI_STD_BD: standard eth size rx ring
5740 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5741 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5744 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5745 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5746 * ring attribute flags
5747 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5749 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5750 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5752 * The size of each ring is fixed in the firmware, but the location is
5755 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
5756 ((u64
) tp
->rx_std_mapping
>> 32));
5757 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
5758 ((u64
) tp
->rx_std_mapping
& 0xffffffff));
5759 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
5760 NIC_SRAM_RX_BUFFER_DESC
);
5762 /* Don't even try to program the JUMBO/MINI buffer descriptor
5765 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
5766 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5767 RX_STD_MAX_SIZE_5705
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
5769 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5770 RX_STD_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
5772 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5773 BDINFO_FLAGS_DISABLED
);
5775 /* Setup replenish threshold. */
5776 tw32(RCVBDI_JUMBO_THRESH
, tp
->rx_jumbo_pending
/ 8);
5778 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
5779 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
5780 ((u64
) tp
->rx_jumbo_mapping
>> 32));
5781 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
5782 ((u64
) tp
->rx_jumbo_mapping
& 0xffffffff));
5783 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5784 RX_JUMBO_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
5785 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
5786 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
5788 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5789 BDINFO_FLAGS_DISABLED
);
5794 /* There is only one send ring on 5705/5750, no need to explicitly
5795 * disable the others.
5797 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5798 /* Clear out send RCB ring in SRAM. */
5799 for (i
= NIC_SRAM_SEND_RCB
; i
< NIC_SRAM_RCV_RET_RCB
; i
+= TG3_BDINFO_SIZE
)
5800 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
5801 BDINFO_FLAGS_DISABLED
);
5806 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
5807 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
5809 tg3_set_bdinfo(tp
, NIC_SRAM_SEND_RCB
,
5810 tp
->tx_desc_mapping
,
5811 (TG3_TX_RING_SIZE
<<
5812 BDINFO_FLAGS_MAXLEN_SHIFT
),
5813 NIC_SRAM_TX_BUFFER_DESC
);
5815 /* There is only one receive return ring on 5705/5750, no need
5816 * to explicitly disable the others.
5818 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5819 for (i
= NIC_SRAM_RCV_RET_RCB
; i
< NIC_SRAM_STATS_BLK
;
5820 i
+= TG3_BDINFO_SIZE
) {
5821 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
5822 BDINFO_FLAGS_DISABLED
);
5827 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
5829 tg3_set_bdinfo(tp
, NIC_SRAM_RCV_RET_RCB
,
5831 (TG3_RX_RCB_RING_SIZE(tp
) <<
5832 BDINFO_FLAGS_MAXLEN_SHIFT
),
5835 tp
->rx_std_ptr
= tp
->rx_pending
;
5836 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
5839 tp
->rx_jumbo_ptr
= (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) ?
5840 tp
->rx_jumbo_pending
: 0;
5841 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
5844 /* Initialize MAC address and backoff seed. */
5845 __tg3_set_mac_addr(tp
);
5847 /* MTU + ethernet header + FCS + optional VLAN tag */
5848 tw32(MAC_RX_MTU_SIZE
, tp
->dev
->mtu
+ ETH_HLEN
+ 8);
5850 /* The slot time is changed by tg3_setup_phy if we
5851 * run at gigabit with half duplex.
5853 tw32(MAC_TX_LENGTHS
,
5854 (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
5855 (6 << TX_LENGTHS_IPG_SHIFT
) |
5856 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
5858 /* Receive rules. */
5859 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
5860 tw32(RCVLPC_CONFIG
, 0x0181);
5862 /* Calculate RDMAC_MODE setting early, we need it to determine
5863 * the RCVLPC_STATE_ENABLE mask.
5865 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
5866 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
5867 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
5868 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
5869 RDMAC_MODE_LNGREAD_ENAB
);
5870 if (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
)
5871 rdmac_mode
|= RDMAC_MODE_SPLIT_ENABLE
;
5873 /* If statement applies to 5705 and 5750 PCI devices only */
5874 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
5875 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
5876 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)) {
5877 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
&&
5878 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
5879 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
5880 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
5881 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
5882 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
)) {
5883 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
5887 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)
5888 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
5890 #if TG3_TSO_SUPPORT != 0
5891 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
5892 rdmac_mode
|= (1 << 27);
5895 /* Receive/send statistics. */
5896 if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
5897 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
5898 val
= tr32(RCVLPC_STATS_ENABLE
);
5899 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
5900 tw32(RCVLPC_STATS_ENABLE
, val
);
5902 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
5904 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
5905 tw32(SNDDATAI_STATSENAB
, 0xffffff);
5906 tw32(SNDDATAI_STATSCTRL
,
5907 (SNDDATAI_SCTRL_ENABLE
|
5908 SNDDATAI_SCTRL_FASTUPD
));
5910 /* Setup host coalescing engine. */
5911 tw32(HOSTCC_MODE
, 0);
5912 for (i
= 0; i
< 2000; i
++) {
5913 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
5918 __tg3_set_coalesce(tp
, &tp
->coal
);
5920 /* set status block DMA address */
5921 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
5922 ((u64
) tp
->status_mapping
>> 32));
5923 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
5924 ((u64
) tp
->status_mapping
& 0xffffffff));
5926 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5927 /* Status/statistics block address. See tg3_timer,
5928 * the tg3_periodic_fetch_stats call there, and
5929 * tg3_get_stats to see how this works for 5705/5750 chips.
5931 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
5932 ((u64
) tp
->stats_mapping
>> 32));
5933 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
5934 ((u64
) tp
->stats_mapping
& 0xffffffff));
5935 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
5936 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
5939 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
5941 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
5942 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
5943 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
5944 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
5946 /* Clear statistics/status block in chip, and status block in ram. */
5947 for (i
= NIC_SRAM_STATS_BLK
;
5948 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
5950 tg3_write_mem(tp
, i
, 0);
5953 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
5955 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
5956 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
5957 /* reset to prevent losing 1st rx packet intermittently */
5958 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
5962 tp
->mac_mode
= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
5963 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
| MAC_MODE_FHDE_ENABLE
;
5964 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
5967 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5968 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5969 * register to preserve the GPIO settings for LOMs. The GPIOs,
5970 * whether used as inputs or outputs, are set by boot code after
5973 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
5976 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE2
|
5977 GRC_LCLCTRL_GPIO_OUTPUT0
| GRC_LCLCTRL_GPIO_OUTPUT2
;
5979 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
5980 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
5981 GRC_LCLCTRL_GPIO_OUTPUT3
;
5983 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
5985 /* GPIO1 must be driven high for eeprom write protect */
5986 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
5987 GRC_LCLCTRL_GPIO_OUTPUT1
);
5989 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
5992 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0);
5995 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5996 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
6000 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
6001 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
6002 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
6003 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
6004 WDMAC_MODE_LNGREAD_ENAB
);
6006 /* If statement applies to 5705 and 5750 PCI devices only */
6007 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
6008 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
6009 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) {
6010 if ((tp
->tg3_flags
& TG3_FLG2_TSO_CAPABLE
) &&
6011 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
6012 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
6014 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
6015 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
6016 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
6017 val
|= WDMAC_MODE_RX_ACCEL
;
6021 tw32_f(WDMAC_MODE
, val
);
6024 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) != 0) {
6025 val
= tr32(TG3PCI_X_CAPS
);
6026 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
6027 val
&= ~PCIX_CAPS_BURST_MASK
;
6028 val
|= (PCIX_CAPS_MAX_BURST_CPIOB
<< PCIX_CAPS_BURST_SHIFT
);
6029 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
6030 val
&= ~(PCIX_CAPS_SPLIT_MASK
| PCIX_CAPS_BURST_MASK
);
6031 val
|= (PCIX_CAPS_MAX_BURST_CPIOB
<< PCIX_CAPS_BURST_SHIFT
);
6032 if (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
)
6033 val
|= (tp
->split_mode_max_reqs
<<
6034 PCIX_CAPS_SPLIT_SHIFT
);
6036 tw32(TG3PCI_X_CAPS
, val
);
6039 tw32_f(RDMAC_MODE
, rdmac_mode
);
6042 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
6043 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
6044 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
6045 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
6046 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
6047 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
6048 tw32(RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
);
6049 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
6050 #if TG3_TSO_SUPPORT != 0
6051 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
6052 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
6054 tw32(SNDBDI_MODE
, SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
);
6055 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
6057 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
6058 err
= tg3_load_5701_a0_firmware_fix(tp
);
6063 #if TG3_TSO_SUPPORT != 0
6064 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
6065 err
= tg3_load_tso_firmware(tp
);
6071 tp
->tx_mode
= TX_MODE_ENABLE
;
6072 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
6075 tp
->rx_mode
= RX_MODE_ENABLE
;
6076 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6079 if (tp
->link_config
.phy_is_low_power
) {
6080 tp
->link_config
.phy_is_low_power
= 0;
6081 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
6082 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
6083 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
6086 tp
->mi_mode
= MAC_MI_MODE_BASE
;
6087 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
6090 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
6092 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
6093 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
6094 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
6097 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6100 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
6101 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
6102 !(tp
->tg3_flags2
& TG3_FLG2_SERDES_PREEMPHASIS
)) {
6103 /* Set drive transmission level to 1.2V */
6104 /* only if the signal pre-emphasis bit is not set */
6105 val
= tr32(MAC_SERDES_CFG
);
6108 tw32(MAC_SERDES_CFG
, val
);
6110 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
6111 tw32(MAC_SERDES_CFG
, 0x616000);
6114 /* Prevent chip from dropping frames when flow control
6117 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, 2);
6119 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
6120 (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
6121 /* Use hardware link auto-negotiation */
6122 tp
->tg3_flags2
|= TG3_FLG2_HW_AUTONEG
;
6125 if ((tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) &&
6126 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
6129 tmp
= tr32(SERDES_RX_CTRL
);
6130 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
6131 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
6132 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
6133 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
6136 err
= tg3_setup_phy(tp
, 1);
6140 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
6143 /* Clear CRC stats. */
6144 if (!tg3_readphy(tp
, 0x1e, &tmp
)) {
6145 tg3_writephy(tp
, 0x1e, tmp
| 0x8000);
6146 tg3_readphy(tp
, 0x14, &tmp
);
6150 __tg3_set_rx_mode(tp
->dev
);
6152 /* Initialize receive rules. */
6153 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
6154 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
6155 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
6156 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
6158 if ((tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
6159 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
6163 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)
6167 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
6169 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
6171 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
6173 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
6175 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
6177 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
6179 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
6181 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
6183 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
6185 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
6187 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
6189 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
6191 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6193 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6201 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
6206 /* Called at device open time to get the chip ready for
6207 * packet processing. Invoked with tp->lock held.
6209 static int tg3_init_hw(struct tg3
*tp
)
6213 /* Force the chip into D0. */
6214 err
= tg3_set_power_state(tp
, PCI_D0
);
6218 tg3_switch_clocks(tp
);
6220 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
6222 err
= tg3_reset_hw(tp
);
/* Accumulate a 32-bit hardware counter REG into the 64-bit (high/low)
 * software counter PSTAT, carrying into ->high when ->low wraps.
 * do/while(0) so the macro behaves as a single statement.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6235 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
6237 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
6239 if (!netif_carrier_ok(tp
->dev
))
6242 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
6243 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
6244 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
6245 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
6246 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
6247 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
6248 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
6249 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
6250 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
6251 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
6252 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
6253 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
6254 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
6256 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
6257 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
6258 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
6259 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
6260 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
6261 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
6262 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
6263 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
6264 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
6265 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
6266 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
6267 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
6268 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
6269 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
6272 static void tg3_timer(unsigned long __opaque
)
6274 struct tg3
*tp
= (struct tg3
*) __opaque
;
6276 spin_lock(&tp
->lock
);
6278 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)) {
6279 /* All of this garbage is because when using non-tagged
6280 * IRQ status the mailbox/status_block protocol the chip
6281 * uses with the cpu is race prone.
6283 if (tp
->hw_status
->status
& SD_STATUS_UPDATED
) {
6284 tw32(GRC_LOCAL_CTRL
,
6285 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
6287 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
6288 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
6291 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
6292 tp
->tg3_flags2
|= TG3_FLG2_RESTART_TIMER
;
6293 spin_unlock(&tp
->lock
);
6294 schedule_work(&tp
->reset_task
);
6299 /* This part only runs once per second. */
6300 if (!--tp
->timer_counter
) {
6301 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
6302 tg3_periodic_fetch_stats(tp
);
6304 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
6308 mac_stat
= tr32(MAC_STATUS
);
6311 if (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
) {
6312 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
6314 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
6318 tg3_setup_phy(tp
, 0);
6319 } else if (tp
->tg3_flags
& TG3_FLAG_POLL_SERDES
) {
6320 u32 mac_stat
= tr32(MAC_STATUS
);
6323 if (netif_carrier_ok(tp
->dev
) &&
6324 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
6327 if (! netif_carrier_ok(tp
->dev
) &&
6328 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
6329 MAC_STATUS_SIGNAL_DET
))) {
6335 ~MAC_MODE_PORT_MODE_MASK
));
6337 tw32_f(MAC_MODE
, tp
->mac_mode
);
6339 tg3_setup_phy(tp
, 0);
6341 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
6342 tg3_serdes_parallel_detect(tp
);
6344 tp
->timer_counter
= tp
->timer_multiplier
;
6347 /* Heartbeat is only sent once every 2 seconds. */
6348 if (!--tp
->asf_counter
) {
6349 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
6352 tg3_write_mem_fast(tp
, NIC_SRAM_FW_CMD_MBOX
,
6353 FWCMD_NICDRV_ALIVE2
);
6354 tg3_write_mem_fast(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
6355 /* 5 seconds timeout */
6356 tg3_write_mem_fast(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
, 5);
6357 val
= tr32(GRC_RX_CPU_EVENT
);
6359 tw32(GRC_RX_CPU_EVENT
, val
);
6361 tp
->asf_counter
= tp
->asf_multiplier
;
6364 spin_unlock(&tp
->lock
);
6366 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
6367 add_timer(&tp
->timer
);
6370 static int tg3_test_interrupt(struct tg3
*tp
)
6372 struct net_device
*dev
= tp
->dev
;
6376 if (!netif_running(dev
))
6379 tg3_disable_ints(tp
);
6381 free_irq(tp
->pdev
->irq
, dev
);
6383 err
= request_irq(tp
->pdev
->irq
, tg3_test_isr
,
6384 SA_SHIRQ
| SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6388 tp
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
6389 tg3_enable_ints(tp
);
6391 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
6394 for (i
= 0; i
< 5; i
++) {
6395 int_mbox
= tr32_mailbox(MAILBOX_INTERRUPT_0
+
6402 tg3_disable_ints(tp
);
6404 free_irq(tp
->pdev
->irq
, dev
);
6406 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
)
6407 err
= request_irq(tp
->pdev
->irq
, tg3_msi
,
6408 SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6410 irqreturn_t (*fn
)(int, void *, struct pt_regs
*)=tg3_interrupt
;
6411 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
6412 fn
= tg3_interrupt_tagged
;
6413 err
= request_irq(tp
->pdev
->irq
, fn
,
6414 SA_SHIRQ
| SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6426 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6427 * successfully restored
6429 static int tg3_test_msi(struct tg3
*tp
)
6431 struct net_device
*dev
= tp
->dev
;
6435 if (!(tp
->tg3_flags2
& TG3_FLG2_USING_MSI
))
6438 /* Turn off SERR reporting in case MSI terminates with Master
6441 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
6442 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
6443 pci_cmd
& ~PCI_COMMAND_SERR
);
6445 err
= tg3_test_interrupt(tp
);
6447 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
6452 /* other failures */
6456 /* MSI test failed, go back to INTx mode */
6457 printk(KERN_WARNING PFX
"%s: No interrupt was generated using MSI, "
6458 "switching to INTx mode. Please report this failure to "
6459 "the PCI maintainer and include system chipset information.\n",
6462 free_irq(tp
->pdev
->irq
, dev
);
6463 pci_disable_msi(tp
->pdev
);
6465 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6468 irqreturn_t (*fn
)(int, void *, struct pt_regs
*)=tg3_interrupt
;
6469 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
6470 fn
= tg3_interrupt_tagged
;
6472 err
= request_irq(tp
->pdev
->irq
, fn
,
6473 SA_SHIRQ
| SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6478 /* Need to reset the chip because the MSI cycle may have terminated
6479 * with Master Abort.
6481 tg3_full_lock(tp
, 1);
6483 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6484 err
= tg3_init_hw(tp
);
6486 tg3_full_unlock(tp
);
6489 free_irq(tp
->pdev
->irq
, dev
);
6494 static int tg3_open(struct net_device
*dev
)
6496 struct tg3
*tp
= netdev_priv(dev
);
6499 tg3_full_lock(tp
, 0);
6501 err
= tg3_set_power_state(tp
, PCI_D0
);
6505 tg3_disable_ints(tp
);
6506 tp
->tg3_flags
&= ~TG3_FLAG_INIT_COMPLETE
;
6508 tg3_full_unlock(tp
);
6510 /* The placement of this call is tied
6511 * to the setup and use of Host TX descriptors.
6513 err
= tg3_alloc_consistent(tp
);
6517 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
6518 (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5750_AX
) &&
6519 (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5750_BX
) &&
6520 !((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) &&
6521 (tp
->pdev_peer
== tp
->pdev
))) {
6522 /* All MSI supporting chips should support tagged
6523 * status. Assert that this is the case.
6525 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)) {
6526 printk(KERN_WARNING PFX
"%s: MSI without TAGGED? "
6527 "Not using MSI.\n", tp
->dev
->name
);
6528 } else if (pci_enable_msi(tp
->pdev
) == 0) {
6531 msi_mode
= tr32(MSGINT_MODE
);
6532 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
6533 tp
->tg3_flags2
|= TG3_FLG2_USING_MSI
;
6536 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
)
6537 err
= request_irq(tp
->pdev
->irq
, tg3_msi
,
6538 SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6540 irqreturn_t (*fn
)(int, void *, struct pt_regs
*)=tg3_interrupt
;
6541 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
6542 fn
= tg3_interrupt_tagged
;
6544 err
= request_irq(tp
->pdev
->irq
, fn
,
6545 SA_SHIRQ
| SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6549 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6550 pci_disable_msi(tp
->pdev
);
6551 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6553 tg3_free_consistent(tp
);
6557 tg3_full_lock(tp
, 0);
6559 err
= tg3_init_hw(tp
);
6561 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6564 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
6565 tp
->timer_offset
= HZ
;
6567 tp
->timer_offset
= HZ
/ 10;
6569 BUG_ON(tp
->timer_offset
> HZ
);
6570 tp
->timer_counter
= tp
->timer_multiplier
=
6571 (HZ
/ tp
->timer_offset
);
6572 tp
->asf_counter
= tp
->asf_multiplier
=
6573 ((HZ
/ tp
->timer_offset
) * 2);
6575 init_timer(&tp
->timer
);
6576 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
6577 tp
->timer
.data
= (unsigned long) tp
;
6578 tp
->timer
.function
= tg3_timer
;
6581 tg3_full_unlock(tp
);
6584 free_irq(tp
->pdev
->irq
, dev
);
6585 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6586 pci_disable_msi(tp
->pdev
);
6587 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6589 tg3_free_consistent(tp
);
6593 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6594 err
= tg3_test_msi(tp
);
6597 tg3_full_lock(tp
, 0);
6599 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6600 pci_disable_msi(tp
->pdev
);
6601 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6603 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6605 tg3_free_consistent(tp
);
6607 tg3_full_unlock(tp
);
6613 tg3_full_lock(tp
, 0);
6615 add_timer(&tp
->timer
);
6616 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
6617 tg3_enable_ints(tp
);
6619 tg3_full_unlock(tp
);
6621 netif_start_queue(dev
);
6627 /*static*/ void tg3_dump_state(struct tg3
*tp
)
6629 u32 val32
, val32_2
, val32_3
, val32_4
, val32_5
;
6633 pci_read_config_word(tp
->pdev
, PCI_STATUS
, &val16
);
6634 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, &val32
);
6635 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6639 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6640 tr32(MAC_MODE
), tr32(MAC_STATUS
));
6641 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6642 tr32(MAC_EVENT
), tr32(MAC_LED_CTRL
));
6643 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6644 tr32(MAC_TX_MODE
), tr32(MAC_TX_STATUS
));
6645 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6646 tr32(MAC_RX_MODE
), tr32(MAC_RX_STATUS
));
6648 /* Send data initiator control block */
6649 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6650 tr32(SNDDATAI_MODE
), tr32(SNDDATAI_STATUS
));
6651 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6652 tr32(SNDDATAI_STATSCTRL
));
6654 /* Send data completion control block */
6655 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE
));
6657 /* Send BD ring selector block */
6658 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6659 tr32(SNDBDS_MODE
), tr32(SNDBDS_STATUS
));
6661 /* Send BD initiator control block */
6662 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6663 tr32(SNDBDI_MODE
), tr32(SNDBDI_STATUS
));
6665 /* Send BD completion control block */
6666 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE
));
6668 /* Receive list placement control block */
6669 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6670 tr32(RCVLPC_MODE
), tr32(RCVLPC_STATUS
));
6671 printk(" RCVLPC_STATSCTRL[%08x]\n",
6672 tr32(RCVLPC_STATSCTRL
));
6674 /* Receive data and receive BD initiator control block */
6675 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6676 tr32(RCVDBDI_MODE
), tr32(RCVDBDI_STATUS
));
6678 /* Receive data completion control block */
6679 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6682 /* Receive BD initiator control block */
6683 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6684 tr32(RCVBDI_MODE
), tr32(RCVBDI_STATUS
));
6686 /* Receive BD completion control block */
6687 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6688 tr32(RCVCC_MODE
), tr32(RCVCC_STATUS
));
6690 /* Receive list selector control block */
6691 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6692 tr32(RCVLSC_MODE
), tr32(RCVLSC_STATUS
));
6694 /* Mbuf cluster free block */
6695 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6696 tr32(MBFREE_MODE
), tr32(MBFREE_STATUS
));
6698 /* Host coalescing control block */
6699 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6700 tr32(HOSTCC_MODE
), tr32(HOSTCC_STATUS
));
6701 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6702 tr32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
6703 tr32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
));
6704 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6705 tr32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
6706 tr32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
));
6707 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6708 tr32(HOSTCC_STATS_BLK_NIC_ADDR
));
6709 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6710 tr32(HOSTCC_STATUS_BLK_NIC_ADDR
));
6712 /* Memory arbiter control block */
6713 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6714 tr32(MEMARB_MODE
), tr32(MEMARB_STATUS
));
6716 /* Buffer manager control block */
6717 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6718 tr32(BUFMGR_MODE
), tr32(BUFMGR_STATUS
));
6719 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6720 tr32(BUFMGR_MB_POOL_ADDR
), tr32(BUFMGR_MB_POOL_SIZE
));
6721 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6722 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6723 tr32(BUFMGR_DMA_DESC_POOL_ADDR
),
6724 tr32(BUFMGR_DMA_DESC_POOL_SIZE
));
6726 /* Read DMA control block */
6727 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6728 tr32(RDMAC_MODE
), tr32(RDMAC_STATUS
));
6730 /* Write DMA control block */
6731 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6732 tr32(WDMAC_MODE
), tr32(WDMAC_STATUS
));
6734 /* DMA completion block */
6735 printk("DEBUG: DMAC_MODE[%08x]\n",
6739 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6740 tr32(GRC_MODE
), tr32(GRC_MISC_CFG
));
6741 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6742 tr32(GRC_LOCAL_CTRL
));
6745 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6746 tr32(RCVDBDI_JUMBO_BD
+ 0x0),
6747 tr32(RCVDBDI_JUMBO_BD
+ 0x4),
6748 tr32(RCVDBDI_JUMBO_BD
+ 0x8),
6749 tr32(RCVDBDI_JUMBO_BD
+ 0xc));
6750 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6751 tr32(RCVDBDI_STD_BD
+ 0x0),
6752 tr32(RCVDBDI_STD_BD
+ 0x4),
6753 tr32(RCVDBDI_STD_BD
+ 0x8),
6754 tr32(RCVDBDI_STD_BD
+ 0xc));
6755 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6756 tr32(RCVDBDI_MINI_BD
+ 0x0),
6757 tr32(RCVDBDI_MINI_BD
+ 0x4),
6758 tr32(RCVDBDI_MINI_BD
+ 0x8),
6759 tr32(RCVDBDI_MINI_BD
+ 0xc));
6761 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x0, &val32
);
6762 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x4, &val32_2
);
6763 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x8, &val32_3
);
6764 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0xc, &val32_4
);
6765 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6766 val32
, val32_2
, val32_3
, val32_4
);
6768 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x0, &val32
);
6769 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x4, &val32_2
);
6770 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x8, &val32_3
);
6771 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0xc, &val32_4
);
6772 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6773 val32
, val32_2
, val32_3
, val32_4
);
6775 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x0, &val32
);
6776 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x4, &val32_2
);
6777 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x8, &val32_3
);
6778 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0xc, &val32_4
);
6779 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x10, &val32_5
);
6780 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6781 val32
, val32_2
, val32_3
, val32_4
, val32_5
);
6783 /* SW status block */
6784 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6785 tp
->hw_status
->status
,
6786 tp
->hw_status
->status_tag
,
6787 tp
->hw_status
->rx_jumbo_consumer
,
6788 tp
->hw_status
->rx_consumer
,
6789 tp
->hw_status
->rx_mini_consumer
,
6790 tp
->hw_status
->idx
[0].rx_producer
,
6791 tp
->hw_status
->idx
[0].tx_consumer
);
6793 /* SW statistics block */
6794 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6795 ((u32
*)tp
->hw_stats
)[0],
6796 ((u32
*)tp
->hw_stats
)[1],
6797 ((u32
*)tp
->hw_stats
)[2],
6798 ((u32
*)tp
->hw_stats
)[3]);
6801 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6802 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ 0x0),
6803 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ 0x4),
6804 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0
+ 0x0),
6805 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0
+ 0x4));
6807 /* NIC side send descriptors. */
6808 for (i
= 0; i
< 6; i
++) {
6811 txd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_TX_BUFFER_DESC
6812 + (i
* sizeof(struct tg3_tx_buffer_desc
));
6813 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6815 readl(txd
+ 0x0), readl(txd
+ 0x4),
6816 readl(txd
+ 0x8), readl(txd
+ 0xc));
6819 /* NIC side RX descriptors. */
6820 for (i
= 0; i
< 6; i
++) {
6823 rxd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_RX_BUFFER_DESC
6824 + (i
* sizeof(struct tg3_rx_buffer_desc
));
6825 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6827 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
6828 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
6829 rxd
+= (4 * sizeof(u32
));
6830 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6832 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
6833 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
6836 for (i
= 0; i
< 6; i
++) {
6839 rxd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_RX_JUMBO_BUFFER_DESC
6840 + (i
* sizeof(struct tg3_rx_buffer_desc
));
6841 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6843 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
6844 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
6845 rxd
+= (4 * sizeof(u32
));
6846 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6848 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
6849 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
6854 static struct net_device_stats
*tg3_get_stats(struct net_device
*);
6855 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*);
6857 static int tg3_close(struct net_device
*dev
)
6859 struct tg3
*tp
= netdev_priv(dev
);
6861 /* Calling flush_scheduled_work() may deadlock because
6862 * linkwatch_event() may be on the workqueue and it will try to get
6863 * the rtnl_lock which we are holding.
6865 while (tp
->tg3_flags
& TG3_FLAG_IN_RESET_TASK
)
6868 netif_stop_queue(dev
);
6870 del_timer_sync(&tp
->timer
);
6872 tg3_full_lock(tp
, 1);
6877 tg3_disable_ints(tp
);
6879 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6882 ~(TG3_FLAG_INIT_COMPLETE
|
6883 TG3_FLAG_GOT_SERDES_FLOWCTL
);
6885 tg3_full_unlock(tp
);
6887 free_irq(tp
->pdev
->irq
, dev
);
6888 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6889 pci_disable_msi(tp
->pdev
);
6890 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6893 memcpy(&tp
->net_stats_prev
, tg3_get_stats(tp
->dev
),
6894 sizeof(tp
->net_stats_prev
));
6895 memcpy(&tp
->estats_prev
, tg3_get_estats(tp
),
6896 sizeof(tp
->estats_prev
));
6898 tg3_free_consistent(tp
);
6900 tg3_set_power_state(tp
, PCI_D3hot
);
6902 netif_carrier_off(tp
->dev
);
6907 static inline unsigned long get_stat64(tg3_stat64_t
*val
)
6911 #if (BITS_PER_LONG == 32)
6914 ret
= ((u64
)val
->high
<< 32) | ((u64
)val
->low
);
6919 static unsigned long calc_crc_errors(struct tg3
*tp
)
6921 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
6923 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) &&
6924 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
6925 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
6928 spin_lock_bh(&tp
->lock
);
6929 if (!tg3_readphy(tp
, 0x1e, &val
)) {
6930 tg3_writephy(tp
, 0x1e, val
| 0x8000);
6931 tg3_readphy(tp
, 0x14, &val
);
6934 spin_unlock_bh(&tp
->lock
);
6936 tp
->phy_crc_errors
+= val
;
6938 return tp
->phy_crc_errors
;
6941 return get_stat64(&hw_stats
->rx_fcs_errors
);
6944 #define ESTAT_ADD(member) \
6945 estats->member = old_estats->member + \
6946 get_stat64(&hw_stats->member)
6948 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*tp
)
6950 struct tg3_ethtool_stats
*estats
= &tp
->estats
;
6951 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
6952 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
6957 ESTAT_ADD(rx_octets
);
6958 ESTAT_ADD(rx_fragments
);
6959 ESTAT_ADD(rx_ucast_packets
);
6960 ESTAT_ADD(rx_mcast_packets
);
6961 ESTAT_ADD(rx_bcast_packets
);
6962 ESTAT_ADD(rx_fcs_errors
);
6963 ESTAT_ADD(rx_align_errors
);
6964 ESTAT_ADD(rx_xon_pause_rcvd
);
6965 ESTAT_ADD(rx_xoff_pause_rcvd
);
6966 ESTAT_ADD(rx_mac_ctrl_rcvd
);
6967 ESTAT_ADD(rx_xoff_entered
);
6968 ESTAT_ADD(rx_frame_too_long_errors
);
6969 ESTAT_ADD(rx_jabbers
);
6970 ESTAT_ADD(rx_undersize_packets
);
6971 ESTAT_ADD(rx_in_length_errors
);
6972 ESTAT_ADD(rx_out_length_errors
);
6973 ESTAT_ADD(rx_64_or_less_octet_packets
);
6974 ESTAT_ADD(rx_65_to_127_octet_packets
);
6975 ESTAT_ADD(rx_128_to_255_octet_packets
);
6976 ESTAT_ADD(rx_256_to_511_octet_packets
);
6977 ESTAT_ADD(rx_512_to_1023_octet_packets
);
6978 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
6979 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
6980 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
6981 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
6982 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
6984 ESTAT_ADD(tx_octets
);
6985 ESTAT_ADD(tx_collisions
);
6986 ESTAT_ADD(tx_xon_sent
);
6987 ESTAT_ADD(tx_xoff_sent
);
6988 ESTAT_ADD(tx_flow_control
);
6989 ESTAT_ADD(tx_mac_errors
);
6990 ESTAT_ADD(tx_single_collisions
);
6991 ESTAT_ADD(tx_mult_collisions
);
6992 ESTAT_ADD(tx_deferred
);
6993 ESTAT_ADD(tx_excessive_collisions
);
6994 ESTAT_ADD(tx_late_collisions
);
6995 ESTAT_ADD(tx_collide_2times
);
6996 ESTAT_ADD(tx_collide_3times
);
6997 ESTAT_ADD(tx_collide_4times
);
6998 ESTAT_ADD(tx_collide_5times
);
6999 ESTAT_ADD(tx_collide_6times
);
7000 ESTAT_ADD(tx_collide_7times
);
7001 ESTAT_ADD(tx_collide_8times
);
7002 ESTAT_ADD(tx_collide_9times
);
7003 ESTAT_ADD(tx_collide_10times
);
7004 ESTAT_ADD(tx_collide_11times
);
7005 ESTAT_ADD(tx_collide_12times
);
7006 ESTAT_ADD(tx_collide_13times
);
7007 ESTAT_ADD(tx_collide_14times
);
7008 ESTAT_ADD(tx_collide_15times
);
7009 ESTAT_ADD(tx_ucast_packets
);
7010 ESTAT_ADD(tx_mcast_packets
);
7011 ESTAT_ADD(tx_bcast_packets
);
7012 ESTAT_ADD(tx_carrier_sense_errors
);
7013 ESTAT_ADD(tx_discards
);
7014 ESTAT_ADD(tx_errors
);
7016 ESTAT_ADD(dma_writeq_full
);
7017 ESTAT_ADD(dma_write_prioq_full
);
7018 ESTAT_ADD(rxbds_empty
);
7019 ESTAT_ADD(rx_discards
);
7020 ESTAT_ADD(rx_errors
);
7021 ESTAT_ADD(rx_threshold_hit
);
7023 ESTAT_ADD(dma_readq_full
);
7024 ESTAT_ADD(dma_read_prioq_full
);
7025 ESTAT_ADD(tx_comp_queue_full
);
7027 ESTAT_ADD(ring_set_send_prod_index
);
7028 ESTAT_ADD(ring_status_update
);
7029 ESTAT_ADD(nic_irqs
);
7030 ESTAT_ADD(nic_avoided_irqs
);
7031 ESTAT_ADD(nic_tx_threshold_hit
);
7036 static struct net_device_stats
*tg3_get_stats(struct net_device
*dev
)
7038 struct tg3
*tp
= netdev_priv(dev
);
7039 struct net_device_stats
*stats
= &tp
->net_stats
;
7040 struct net_device_stats
*old_stats
= &tp
->net_stats_prev
;
7041 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
7046 stats
->rx_packets
= old_stats
->rx_packets
+
7047 get_stat64(&hw_stats
->rx_ucast_packets
) +
7048 get_stat64(&hw_stats
->rx_mcast_packets
) +
7049 get_stat64(&hw_stats
->rx_bcast_packets
);
7051 stats
->tx_packets
= old_stats
->tx_packets
+
7052 get_stat64(&hw_stats
->tx_ucast_packets
) +
7053 get_stat64(&hw_stats
->tx_mcast_packets
) +
7054 get_stat64(&hw_stats
->tx_bcast_packets
);
7056 stats
->rx_bytes
= old_stats
->rx_bytes
+
7057 get_stat64(&hw_stats
->rx_octets
);
7058 stats
->tx_bytes
= old_stats
->tx_bytes
+
7059 get_stat64(&hw_stats
->tx_octets
);
7061 stats
->rx_errors
= old_stats
->rx_errors
+
7062 get_stat64(&hw_stats
->rx_errors
);
7063 stats
->tx_errors
= old_stats
->tx_errors
+
7064 get_stat64(&hw_stats
->tx_errors
) +
7065 get_stat64(&hw_stats
->tx_mac_errors
) +
7066 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
7067 get_stat64(&hw_stats
->tx_discards
);
7069 stats
->multicast
= old_stats
->multicast
+
7070 get_stat64(&hw_stats
->rx_mcast_packets
);
7071 stats
->collisions
= old_stats
->collisions
+
7072 get_stat64(&hw_stats
->tx_collisions
);
7074 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
7075 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
7076 get_stat64(&hw_stats
->rx_undersize_packets
);
7078 stats
->rx_over_errors
= old_stats
->rx_over_errors
+
7079 get_stat64(&hw_stats
->rxbds_empty
);
7080 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
7081 get_stat64(&hw_stats
->rx_align_errors
);
7082 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
7083 get_stat64(&hw_stats
->tx_discards
);
7084 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
7085 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
7087 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
7088 calc_crc_errors(tp
);
7090 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
7091 get_stat64(&hw_stats
->rx_discards
);
7096 static inline u32
calc_crc(unsigned char *buf
, int len
)
7104 for (j
= 0; j
< len
; j
++) {
7107 for (k
= 0; k
< 8; k
++) {
7121 static void tg3_set_multi(struct tg3
*tp
, unsigned int accept_all
)
7123 /* accept or reject all multicast frames */
7124 tw32(MAC_HASH_REG_0
, accept_all
? 0xffffffff : 0);
7125 tw32(MAC_HASH_REG_1
, accept_all
? 0xffffffff : 0);
7126 tw32(MAC_HASH_REG_2
, accept_all
? 0xffffffff : 0);
7127 tw32(MAC_HASH_REG_3
, accept_all
? 0xffffffff : 0);
7130 static void __tg3_set_rx_mode(struct net_device
*dev
)
7132 struct tg3
*tp
= netdev_priv(dev
);
7135 rx_mode
= tp
->rx_mode
& ~(RX_MODE_PROMISC
|
7136 RX_MODE_KEEP_VLAN_TAG
);
7138 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7141 #if TG3_VLAN_TAG_USED
7143 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
7144 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
7146 /* By definition, VLAN is disabled always in this
7149 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
7150 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
7153 if (dev
->flags
& IFF_PROMISC
) {
7154 /* Promiscuous mode. */
7155 rx_mode
|= RX_MODE_PROMISC
;
7156 } else if (dev
->flags
& IFF_ALLMULTI
) {
7157 /* Accept all multicast. */
7158 tg3_set_multi (tp
, 1);
7159 } else if (dev
->mc_count
< 1) {
7160 /* Reject all multicast. */
7161 tg3_set_multi (tp
, 0);
7163 /* Accept one or more multicast(s). */
7164 struct dev_mc_list
*mclist
;
7166 u32 mc_filter
[4] = { 0, };
7171 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
7172 i
++, mclist
= mclist
->next
) {
7174 crc
= calc_crc (mclist
->dmi_addr
, ETH_ALEN
);
7176 regidx
= (bit
& 0x60) >> 5;
7178 mc_filter
[regidx
] |= (1 << bit
);
7181 tw32(MAC_HASH_REG_0
, mc_filter
[0]);
7182 tw32(MAC_HASH_REG_1
, mc_filter
[1]);
7183 tw32(MAC_HASH_REG_2
, mc_filter
[2]);
7184 tw32(MAC_HASH_REG_3
, mc_filter
[3]);
7187 if (rx_mode
!= tp
->rx_mode
) {
7188 tp
->rx_mode
= rx_mode
;
7189 tw32_f(MAC_RX_MODE
, rx_mode
);
/* net_device set_multicast_list hook: apply the current RX filter
 * configuration under the full driver lock.  A device that is not
 * running has no live RX machinery to reprogram, so bail out early.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
7206 #define TG3_REGDUMP_LEN (32 * 1024)
7208 static int tg3_get_regs_len(struct net_device
*dev
)
7210 return TG3_REGDUMP_LEN
;
7213 static void tg3_get_regs(struct net_device
*dev
,
7214 struct ethtool_regs
*regs
, void *_p
)
7217 struct tg3
*tp
= netdev_priv(dev
);
7223 memset(p
, 0, TG3_REGDUMP_LEN
);
7225 if (tp
->link_config
.phy_is_low_power
)
7228 tg3_full_lock(tp
, 0);
7230 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
7231 #define GET_REG32_LOOP(base,len) \
7232 do { p = (u32 *)(orig_p + (base)); \
7233 for (i = 0; i < len; i += 4) \
7234 __GET_REG32((base) + i); \
7236 #define GET_REG32_1(reg) \
7237 do { p = (u32 *)(orig_p + (reg)); \
7238 __GET_REG32((reg)); \
7241 GET_REG32_LOOP(TG3PCI_VENDOR
, 0xb0);
7242 GET_REG32_LOOP(MAILBOX_INTERRUPT_0
, 0x200);
7243 GET_REG32_LOOP(MAC_MODE
, 0x4f0);
7244 GET_REG32_LOOP(SNDDATAI_MODE
, 0xe0);
7245 GET_REG32_1(SNDDATAC_MODE
);
7246 GET_REG32_LOOP(SNDBDS_MODE
, 0x80);
7247 GET_REG32_LOOP(SNDBDI_MODE
, 0x48);
7248 GET_REG32_1(SNDBDC_MODE
);
7249 GET_REG32_LOOP(RCVLPC_MODE
, 0x20);
7250 GET_REG32_LOOP(RCVLPC_SELLST_BASE
, 0x15c);
7251 GET_REG32_LOOP(RCVDBDI_MODE
, 0x0c);
7252 GET_REG32_LOOP(RCVDBDI_JUMBO_BD
, 0x3c);
7253 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0
, 0x44);
7254 GET_REG32_1(RCVDCC_MODE
);
7255 GET_REG32_LOOP(RCVBDI_MODE
, 0x20);
7256 GET_REG32_LOOP(RCVCC_MODE
, 0x14);
7257 GET_REG32_LOOP(RCVLSC_MODE
, 0x08);
7258 GET_REG32_1(MBFREE_MODE
);
7259 GET_REG32_LOOP(HOSTCC_MODE
, 0x100);
7260 GET_REG32_LOOP(MEMARB_MODE
, 0x10);
7261 GET_REG32_LOOP(BUFMGR_MODE
, 0x58);
7262 GET_REG32_LOOP(RDMAC_MODE
, 0x08);
7263 GET_REG32_LOOP(WDMAC_MODE
, 0x08);
7264 GET_REG32_1(RX_CPU_MODE
);
7265 GET_REG32_1(RX_CPU_STATE
);
7266 GET_REG32_1(RX_CPU_PGMCTR
);
7267 GET_REG32_1(RX_CPU_HWBKPT
);
7268 GET_REG32_1(TX_CPU_MODE
);
7269 GET_REG32_1(TX_CPU_STATE
);
7270 GET_REG32_1(TX_CPU_PGMCTR
);
7271 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0
, 0x110);
7272 GET_REG32_LOOP(FTQ_RESET
, 0x120);
7273 GET_REG32_LOOP(MSGINT_MODE
, 0x0c);
7274 GET_REG32_1(DMAC_MODE
);
7275 GET_REG32_LOOP(GRC_MODE
, 0x4c);
7276 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
7277 GET_REG32_LOOP(NVRAM_CMD
, 0x24);
7280 #undef GET_REG32_LOOP
7283 tg3_full_unlock(tp
);
7286 static int tg3_get_eeprom_len(struct net_device
*dev
)
7288 struct tg3
*tp
= netdev_priv(dev
);
7290 return tp
->nvram_size
;
7293 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
);
7295 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
7297 struct tg3
*tp
= netdev_priv(dev
);
7300 u32 i
, offset
, len
, val
, b_offset
, b_count
;
7302 if (tp
->link_config
.phy_is_low_power
)
7305 offset
= eeprom
->offset
;
7309 eeprom
->magic
= TG3_EEPROM_MAGIC
;
7312 /* adjustments to start on required 4 byte boundary */
7313 b_offset
= offset
& 3;
7314 b_count
= 4 - b_offset
;
7315 if (b_count
> len
) {
7316 /* i.e. offset=1 len=2 */
7319 ret
= tg3_nvram_read(tp
, offset
-b_offset
, &val
);
7322 val
= cpu_to_le32(val
);
7323 memcpy(data
, ((char*)&val
) + b_offset
, b_count
);
7326 eeprom
->len
+= b_count
;
7329 /* read bytes upto the last 4 byte boundary */
7330 pd
= &data
[eeprom
->len
];
7331 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
7332 ret
= tg3_nvram_read(tp
, offset
+ i
, &val
);
7337 val
= cpu_to_le32(val
);
7338 memcpy(pd
+ i
, &val
, 4);
7343 /* read last bytes not ending on 4 byte boundary */
7344 pd
= &data
[eeprom
->len
];
7346 b_offset
= offset
+ len
- b_count
;
7347 ret
= tg3_nvram_read(tp
, b_offset
, &val
);
7350 val
= cpu_to_le32(val
);
7351 memcpy(pd
, ((char*)&val
), b_count
);
7352 eeprom
->len
+= b_count
;
7357 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
);
7359 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
7361 struct tg3
*tp
= netdev_priv(dev
);
7363 u32 offset
, len
, b_offset
, odd_len
, start
, end
;
7366 if (tp
->link_config
.phy_is_low_power
)
7369 if (eeprom
->magic
!= TG3_EEPROM_MAGIC
)
7372 offset
= eeprom
->offset
;
7375 if ((b_offset
= (offset
& 3))) {
7376 /* adjustments to start on required 4 byte boundary */
7377 ret
= tg3_nvram_read(tp
, offset
-b_offset
, &start
);
7380 start
= cpu_to_le32(start
);
7389 /* adjustments to end on required 4 byte boundary */
7391 len
= (len
+ 3) & ~3;
7392 ret
= tg3_nvram_read(tp
, offset
+len
-4, &end
);
7395 end
= cpu_to_le32(end
);
7399 if (b_offset
|| odd_len
) {
7400 buf
= kmalloc(len
, GFP_KERNEL
);
7404 memcpy(buf
, &start
, 4);
7406 memcpy(buf
+len
-4, &end
, 4);
7407 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
7410 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
7418 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7420 struct tg3
*tp
= netdev_priv(dev
);
7422 cmd
->supported
= (SUPPORTED_Autoneg
);
7424 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
7425 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
7426 SUPPORTED_1000baseT_Full
);
7428 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
))
7429 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
7430 SUPPORTED_100baseT_Full
|
7431 SUPPORTED_10baseT_Half
|
7432 SUPPORTED_10baseT_Full
|
7435 cmd
->supported
|= SUPPORTED_FIBRE
;
7437 cmd
->advertising
= tp
->link_config
.advertising
;
7438 if (netif_running(dev
)) {
7439 cmd
->speed
= tp
->link_config
.active_speed
;
7440 cmd
->duplex
= tp
->link_config
.active_duplex
;
7443 cmd
->phy_address
= PHY_ADDR
;
7444 cmd
->transceiver
= 0;
7445 cmd
->autoneg
= tp
->link_config
.autoneg
;
7451 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7453 struct tg3
*tp
= netdev_priv(dev
);
7455 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) {
7456 /* These are the only valid advertisement bits allowed. */
7457 if (cmd
->autoneg
== AUTONEG_ENABLE
&&
7458 (cmd
->advertising
& ~(ADVERTISED_1000baseT_Half
|
7459 ADVERTISED_1000baseT_Full
|
7460 ADVERTISED_Autoneg
|
7463 /* Fiber can only do SPEED_1000. */
7464 else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
7465 (cmd
->speed
!= SPEED_1000
))
7467 /* Copper cannot force SPEED_1000. */
7468 } else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
7469 (cmd
->speed
== SPEED_1000
))
7471 else if ((cmd
->speed
== SPEED_1000
) &&
7472 (tp
->tg3_flags2
& TG3_FLAG_10_100_ONLY
))
7475 tg3_full_lock(tp
, 0);
7477 tp
->link_config
.autoneg
= cmd
->autoneg
;
7478 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
7479 tp
->link_config
.advertising
= cmd
->advertising
;
7480 tp
->link_config
.speed
= SPEED_INVALID
;
7481 tp
->link_config
.duplex
= DUPLEX_INVALID
;
7483 tp
->link_config
.advertising
= 0;
7484 tp
->link_config
.speed
= cmd
->speed
;
7485 tp
->link_config
.duplex
= cmd
->duplex
;
7488 if (netif_running(dev
))
7489 tg3_setup_phy(tp
, 1);
7491 tg3_full_unlock(tp
);
7496 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
7498 struct tg3
*tp
= netdev_priv(dev
);
7500 strcpy(info
->driver
, DRV_MODULE_NAME
);
7501 strcpy(info
->version
, DRV_MODULE_VERSION
);
7502 strcpy(info
->bus_info
, pci_name(tp
->pdev
));
7505 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7507 struct tg3
*tp
= netdev_priv(dev
);
7509 wol
->supported
= WAKE_MAGIC
;
7511 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)
7512 wol
->wolopts
= WAKE_MAGIC
;
7513 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
7516 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7518 struct tg3
*tp
= netdev_priv(dev
);
7520 if (wol
->wolopts
& ~WAKE_MAGIC
)
7522 if ((wol
->wolopts
& WAKE_MAGIC
) &&
7523 tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
&&
7524 !(tp
->tg3_flags
& TG3_FLAG_SERDES_WOL_CAP
))
7527 spin_lock_bh(&tp
->lock
);
7528 if (wol
->wolopts
& WAKE_MAGIC
)
7529 tp
->tg3_flags
|= TG3_FLAG_WOL_ENABLE
;
7531 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
7532 spin_unlock_bh(&tp
->lock
);
7537 static u32
tg3_get_msglevel(struct net_device
*dev
)
7539 struct tg3
*tp
= netdev_priv(dev
);
7540 return tp
->msg_enable
;
7543 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
7545 struct tg3
*tp
= netdev_priv(dev
);
7546 tp
->msg_enable
= value
;
7549 #if TG3_TSO_SUPPORT != 0
7550 static int tg3_set_tso(struct net_device
*dev
, u32 value
)
7552 struct tg3
*tp
= netdev_priv(dev
);
7554 if (!(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
7559 return ethtool_op_set_tso(dev
, value
);
7563 static int tg3_nway_reset(struct net_device
*dev
)
7565 struct tg3
*tp
= netdev_priv(dev
);
7569 if (!netif_running(dev
))
7572 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
7575 spin_lock_bh(&tp
->lock
);
7577 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
7578 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
7579 ((bmcr
& BMCR_ANENABLE
) ||
7580 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
))) {
7581 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
7585 spin_unlock_bh(&tp
->lock
);
7590 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
7592 struct tg3
*tp
= netdev_priv(dev
);
7594 ering
->rx_max_pending
= TG3_RX_RING_SIZE
- 1;
7595 ering
->rx_mini_max_pending
= 0;
7596 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
7597 ering
->rx_jumbo_max_pending
= TG3_RX_JUMBO_RING_SIZE
- 1;
7599 ering
->rx_jumbo_max_pending
= 0;
7601 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
7603 ering
->rx_pending
= tp
->rx_pending
;
7604 ering
->rx_mini_pending
= 0;
7605 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
7606 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
7608 ering
->rx_jumbo_pending
= 0;
7610 ering
->tx_pending
= tp
->tx_pending
;
7613 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
7615 struct tg3
*tp
= netdev_priv(dev
);
7618 if ((ering
->rx_pending
> TG3_RX_RING_SIZE
- 1) ||
7619 (ering
->rx_jumbo_pending
> TG3_RX_JUMBO_RING_SIZE
- 1) ||
7620 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1))
7623 if (netif_running(dev
)) {
7628 tg3_full_lock(tp
, irq_sync
);
7630 tp
->rx_pending
= ering
->rx_pending
;
7632 if ((tp
->tg3_flags2
& TG3_FLG2_MAX_RXPEND_64
) &&
7633 tp
->rx_pending
> 63)
7634 tp
->rx_pending
= 63;
7635 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
7636 tp
->tx_pending
= ering
->tx_pending
;
7638 if (netif_running(dev
)) {
7639 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
7641 tg3_netif_start(tp
);
7644 tg3_full_unlock(tp
);
7649 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
7651 struct tg3
*tp
= netdev_priv(dev
);
7653 epause
->autoneg
= (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) != 0;
7654 epause
->rx_pause
= (tp
->tg3_flags
& TG3_FLAG_RX_PAUSE
) != 0;
7655 epause
->tx_pause
= (tp
->tg3_flags
& TG3_FLAG_TX_PAUSE
) != 0;
7658 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
7660 struct tg3
*tp
= netdev_priv(dev
);
7663 if (netif_running(dev
)) {
7668 tg3_full_lock(tp
, irq_sync
);
7670 if (epause
->autoneg
)
7671 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
7673 tp
->tg3_flags
&= ~TG3_FLAG_PAUSE_AUTONEG
;
7674 if (epause
->rx_pause
)
7675 tp
->tg3_flags
|= TG3_FLAG_RX_PAUSE
;
7677 tp
->tg3_flags
&= ~TG3_FLAG_RX_PAUSE
;
7678 if (epause
->tx_pause
)
7679 tp
->tg3_flags
|= TG3_FLAG_TX_PAUSE
;
7681 tp
->tg3_flags
&= ~TG3_FLAG_TX_PAUSE
;
7683 if (netif_running(dev
)) {
7684 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
7686 tg3_netif_start(tp
);
7689 tg3_full_unlock(tp
);
7694 static u32
tg3_get_rx_csum(struct net_device
*dev
)
7696 struct tg3
*tp
= netdev_priv(dev
);
7697 return (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0;
7700 static int tg3_set_rx_csum(struct net_device
*dev
, u32 data
)
7702 struct tg3
*tp
= netdev_priv(dev
);
7704 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
7710 spin_lock_bh(&tp
->lock
);
7712 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
7714 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
7715 spin_unlock_bh(&tp
->lock
);
7720 static int tg3_set_tx_csum(struct net_device
*dev
, u32 data
)
7722 struct tg3
*tp
= netdev_priv(dev
);
7724 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
7731 dev
->features
|= NETIF_F_IP_CSUM
;
7733 dev
->features
&= ~NETIF_F_IP_CSUM
;
7738 static int tg3_get_stats_count (struct net_device
*dev
)
7740 return TG3_NUM_STATS
;
7743 static int tg3_get_test_count (struct net_device
*dev
)
7745 return TG3_NUM_TEST
;
7748 static void tg3_get_strings (struct net_device
*dev
, u32 stringset
, u8
*buf
)
7750 switch (stringset
) {
7752 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
7755 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
7758 WARN_ON(1); /* we need a WARN() */
7763 static int tg3_phys_id(struct net_device
*dev
, u32 data
)
7765 struct tg3
*tp
= netdev_priv(dev
);
7768 if (!netif_running(tp
->dev
))
7774 for (i
= 0; i
< (data
* 2); i
++) {
7776 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
7777 LED_CTRL_1000MBPS_ON
|
7778 LED_CTRL_100MBPS_ON
|
7779 LED_CTRL_10MBPS_ON
|
7780 LED_CTRL_TRAFFIC_OVERRIDE
|
7781 LED_CTRL_TRAFFIC_BLINK
|
7782 LED_CTRL_TRAFFIC_LED
);
7785 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
7786 LED_CTRL_TRAFFIC_OVERRIDE
);
7788 if (msleep_interruptible(500))
7791 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
7795 static void tg3_get_ethtool_stats (struct net_device
*dev
,
7796 struct ethtool_stats
*estats
, u64
*tmp_stats
)
7798 struct tg3
*tp
= netdev_priv(dev
);
7799 memcpy(tmp_stats
, tg3_get_estats(tp
), sizeof(tp
->estats
));
7802 #define NVRAM_TEST_SIZE 0x100
7804 static int tg3_test_nvram(struct tg3
*tp
)
7809 buf
= kmalloc(NVRAM_TEST_SIZE
, GFP_KERNEL
);
7813 for (i
= 0, j
= 0; i
< NVRAM_TEST_SIZE
; i
+= 4, j
++) {
7816 if ((err
= tg3_nvram_read(tp
, i
, &val
)) != 0)
7818 buf
[j
] = cpu_to_le32(val
);
7820 if (i
< NVRAM_TEST_SIZE
)
7824 if (cpu_to_be32(buf
[0]) != TG3_EEPROM_MAGIC
)
7827 /* Bootstrap checksum at offset 0x10 */
7828 csum
= calc_crc((unsigned char *) buf
, 0x10);
7829 if(csum
!= cpu_to_le32(buf
[0x10/4]))
7832 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7833 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
7834 if (csum
!= cpu_to_le32(buf
[0xfc/4]))
7844 #define TG3_SERDES_TIMEOUT_SEC 2
7845 #define TG3_COPPER_TIMEOUT_SEC 6
7847 static int tg3_test_link(struct tg3
*tp
)
7851 if (!netif_running(tp
->dev
))
7854 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
7855 max
= TG3_SERDES_TIMEOUT_SEC
;
7857 max
= TG3_COPPER_TIMEOUT_SEC
;
7859 for (i
= 0; i
< max
; i
++) {
7860 if (netif_carrier_ok(tp
->dev
))
7863 if (msleep_interruptible(1000))
7870 /* Only test the commonly used registers */
7871 static const int tg3_test_registers(struct tg3
*tp
)
7874 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
7878 #define TG3_FL_5705 0x1
7879 #define TG3_FL_NOT_5705 0x2
7880 #define TG3_FL_NOT_5788 0x4
7884 /* MAC Control Registers */
7885 { MAC_MODE
, TG3_FL_NOT_5705
,
7886 0x00000000, 0x00ef6f8c },
7887 { MAC_MODE
, TG3_FL_5705
,
7888 0x00000000, 0x01ef6b8c },
7889 { MAC_STATUS
, TG3_FL_NOT_5705
,
7890 0x03800107, 0x00000000 },
7891 { MAC_STATUS
, TG3_FL_5705
,
7892 0x03800100, 0x00000000 },
7893 { MAC_ADDR_0_HIGH
, 0x0000,
7894 0x00000000, 0x0000ffff },
7895 { MAC_ADDR_0_LOW
, 0x0000,
7896 0x00000000, 0xffffffff },
7897 { MAC_RX_MTU_SIZE
, 0x0000,
7898 0x00000000, 0x0000ffff },
7899 { MAC_TX_MODE
, 0x0000,
7900 0x00000000, 0x00000070 },
7901 { MAC_TX_LENGTHS
, 0x0000,
7902 0x00000000, 0x00003fff },
7903 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
7904 0x00000000, 0x000007fc },
7905 { MAC_RX_MODE
, TG3_FL_5705
,
7906 0x00000000, 0x000007dc },
7907 { MAC_HASH_REG_0
, 0x0000,
7908 0x00000000, 0xffffffff },
7909 { MAC_HASH_REG_1
, 0x0000,
7910 0x00000000, 0xffffffff },
7911 { MAC_HASH_REG_2
, 0x0000,
7912 0x00000000, 0xffffffff },
7913 { MAC_HASH_REG_3
, 0x0000,
7914 0x00000000, 0xffffffff },
7916 /* Receive Data and Receive BD Initiator Control Registers. */
7917 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
7918 0x00000000, 0xffffffff },
7919 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
7920 0x00000000, 0xffffffff },
7921 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
7922 0x00000000, 0x00000003 },
7923 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
7924 0x00000000, 0xffffffff },
7925 { RCVDBDI_STD_BD
+0, 0x0000,
7926 0x00000000, 0xffffffff },
7927 { RCVDBDI_STD_BD
+4, 0x0000,
7928 0x00000000, 0xffffffff },
7929 { RCVDBDI_STD_BD
+8, 0x0000,
7930 0x00000000, 0xffff0002 },
7931 { RCVDBDI_STD_BD
+0xc, 0x0000,
7932 0x00000000, 0xffffffff },
7934 /* Receive BD Initiator Control Registers. */
7935 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
7936 0x00000000, 0xffffffff },
7937 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
7938 0x00000000, 0x000003ff },
7939 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
7940 0x00000000, 0xffffffff },
7942 /* Host Coalescing Control Registers. */
7943 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
7944 0x00000000, 0x00000004 },
7945 { HOSTCC_MODE
, TG3_FL_5705
,
7946 0x00000000, 0x000000f6 },
7947 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
7948 0x00000000, 0xffffffff },
7949 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
7950 0x00000000, 0x000003ff },
7951 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
7952 0x00000000, 0xffffffff },
7953 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
7954 0x00000000, 0x000003ff },
7955 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
7956 0x00000000, 0xffffffff },
7957 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
7958 0x00000000, 0x000000ff },
7959 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
7960 0x00000000, 0xffffffff },
7961 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
7962 0x00000000, 0x000000ff },
7963 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
7964 0x00000000, 0xffffffff },
7965 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
7966 0x00000000, 0xffffffff },
7967 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
7968 0x00000000, 0xffffffff },
7969 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
7970 0x00000000, 0x000000ff },
7971 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
7972 0x00000000, 0xffffffff },
7973 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
7974 0x00000000, 0x000000ff },
7975 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
7976 0x00000000, 0xffffffff },
7977 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
7978 0x00000000, 0xffffffff },
7979 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
7980 0x00000000, 0xffffffff },
7981 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
7982 0x00000000, 0xffffffff },
7983 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
7984 0x00000000, 0xffffffff },
7985 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
7986 0xffffffff, 0x00000000 },
7987 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
7988 0xffffffff, 0x00000000 },
7990 /* Buffer Manager Control Registers. */
7991 { BUFMGR_MB_POOL_ADDR
, 0x0000,
7992 0x00000000, 0x007fff80 },
7993 { BUFMGR_MB_POOL_SIZE
, 0x0000,
7994 0x00000000, 0x007fffff },
7995 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
7996 0x00000000, 0x0000003f },
7997 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
7998 0x00000000, 0x000001ff },
7999 { BUFMGR_MB_HIGH_WATER
, 0x0000,
8000 0x00000000, 0x000001ff },
8001 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
8002 0xffffffff, 0x00000000 },
8003 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
8004 0xffffffff, 0x00000000 },
8006 /* Mailbox Registers */
8007 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
8008 0x00000000, 0x000001ff },
8009 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
8010 0x00000000, 0x000001ff },
8011 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
8012 0x00000000, 0x000007ff },
8013 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
8014 0x00000000, 0x000001ff },
8016 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8019 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
8024 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
8025 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
8028 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
8031 if ((tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
8032 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
8035 offset
= (u32
) reg_tbl
[i
].offset
;
8036 read_mask
= reg_tbl
[i
].read_mask
;
8037 write_mask
= reg_tbl
[i
].write_mask
;
8039 /* Save the original register content */
8040 save_val
= tr32(offset
);
8042 /* Determine the read-only value. */
8043 read_val
= save_val
& read_mask
;
8045 /* Write zero to the register, then make sure the read-only bits
8046 * are not changed and the read/write bits are all zeros.
8052 /* Test the read-only and read/write bits. */
8053 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
8056 /* Write ones to all the bits defined by RdMask and WrMask, then
8057 * make sure the read-only bits are not changed and the
8058 * read/write bits are all ones.
8060 tw32(offset
, read_mask
| write_mask
);
8064 /* Test the read-only bits. */
8065 if ((val
& read_mask
) != read_val
)
8068 /* Test the read/write bits. */
8069 if ((val
& write_mask
) != write_mask
)
8072 tw32(offset
, save_val
);
8078 printk(KERN_ERR PFX
"Register test failed at offset %x\n", offset
);
8079 tw32(offset
, save_val
);
8083 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
8085 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8089 for (i
= 0; i
< sizeof(test_pattern
)/sizeof(u32
); i
++) {
8090 for (j
= 0; j
< len
; j
+= 4) {
8093 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
8094 tg3_read_mem(tp
, offset
+ j
, &val
);
8095 if (val
!= test_pattern
[i
])
8102 static int tg3_test_memory(struct tg3
*tp
)
8104 static struct mem_entry
{
8107 } mem_tbl_570x
[] = {
8108 { 0x00000000, 0x00b50},
8109 { 0x00002000, 0x1c000},
8110 { 0xffffffff, 0x00000}
8111 }, mem_tbl_5705
[] = {
8112 { 0x00000100, 0x0000c},
8113 { 0x00000200, 0x00008},
8114 { 0x00004000, 0x00800},
8115 { 0x00006000, 0x01000},
8116 { 0x00008000, 0x02000},
8117 { 0x00010000, 0x0e000},
8118 { 0xffffffff, 0x00000}
8120 struct mem_entry
*mem_tbl
;
8124 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
8125 mem_tbl
= mem_tbl_5705
;
8127 mem_tbl
= mem_tbl_570x
;
8129 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
8130 if ((err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
,
8131 mem_tbl
[i
].len
)) != 0)
8138 #define TG3_MAC_LOOPBACK 0
8139 #define TG3_PHY_LOOPBACK 1
8141 static int tg3_run_loopback(struct tg3
*tp
, int loopback_mode
)
8143 u32 mac_mode
, rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
8145 struct sk_buff
*skb
, *rx_skb
;
8148 int num_pkts
, tx_len
, rx_len
, i
, err
;
8149 struct tg3_rx_buffer_desc
*desc
;
8151 if (loopback_mode
== TG3_MAC_LOOPBACK
) {
8152 /* HW errata - mac loopback fails in some cases on 5780.
8153 * Normal traffic and PHY loopback are not affected by
8156 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
)
8159 mac_mode
= (tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
) |
8160 MAC_MODE_PORT_INT_LPBACK
| MAC_MODE_LINK_POLARITY
|
8161 MAC_MODE_PORT_MODE_GMII
;
8162 tw32(MAC_MODE
, mac_mode
);
8163 } else if (loopback_mode
== TG3_PHY_LOOPBACK
) {
8164 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
| BMCR_FULLDPLX
|
8167 /* reset to prevent losing 1st rx packet intermittently */
8168 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
8169 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8171 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8173 mac_mode
= (tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
) |
8174 MAC_MODE_LINK_POLARITY
| MAC_MODE_PORT_MODE_GMII
;
8175 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
)
8176 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
8177 tw32(MAC_MODE
, mac_mode
);
8185 skb
= dev_alloc_skb(tx_len
);
8186 tx_data
= skb_put(skb
, tx_len
);
8187 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
8188 memset(tx_data
+ 6, 0x0, 8);
8190 tw32(MAC_RX_MTU_SIZE
, tx_len
+ 4);
8192 for (i
= 14; i
< tx_len
; i
++)
8193 tx_data
[i
] = (u8
) (i
& 0xff);
8195 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
8197 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
8202 rx_start_idx
= tp
->hw_status
->idx
[0].rx_producer
;
8206 tg3_set_txd(tp
, tp
->tx_prod
, map
, tx_len
, 0, 1);
8211 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
,
8213 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
);
8217 for (i
= 0; i
< 10; i
++) {
8218 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
8223 tx_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
8224 rx_idx
= tp
->hw_status
->idx
[0].rx_producer
;
8225 if ((tx_idx
== tp
->tx_prod
) &&
8226 (rx_idx
== (rx_start_idx
+ num_pkts
)))
8230 pci_unmap_single(tp
->pdev
, map
, tx_len
, PCI_DMA_TODEVICE
);
8233 if (tx_idx
!= tp
->tx_prod
)
8236 if (rx_idx
!= rx_start_idx
+ num_pkts
)
8239 desc
= &tp
->rx_rcb
[rx_start_idx
];
8240 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
8241 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
8242 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
8245 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
8246 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
8249 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4;
8250 if (rx_len
!= tx_len
)
8253 rx_skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
8255 map
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
], mapping
);
8256 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
, PCI_DMA_FROMDEVICE
);
8258 for (i
= 14; i
< tx_len
; i
++) {
8259 if (*(rx_skb
->data
+ i
) != (u8
) (i
& 0xff))
8264 /* tg3_free_rings will unmap and free the rx_skb */
8269 #define TG3_MAC_LOOPBACK_FAILED 1
8270 #define TG3_PHY_LOOPBACK_FAILED 2
8271 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8272 TG3_PHY_LOOPBACK_FAILED)
8274 static int tg3_test_loopback(struct tg3
*tp
)
8278 if (!netif_running(tp
->dev
))
8279 return TG3_LOOPBACK_FAILED
;
8283 if (tg3_run_loopback(tp
, TG3_MAC_LOOPBACK
))
8284 err
|= TG3_MAC_LOOPBACK_FAILED
;
8285 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
8286 if (tg3_run_loopback(tp
, TG3_PHY_LOOPBACK
))
8287 err
|= TG3_PHY_LOOPBACK_FAILED
;
8293 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
8296 struct tg3
*tp
= netdev_priv(dev
);
8298 if (tp
->link_config
.phy_is_low_power
)
8299 tg3_set_power_state(tp
, PCI_D0
);
8301 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
8303 if (tg3_test_nvram(tp
) != 0) {
8304 etest
->flags
|= ETH_TEST_FL_FAILED
;
8307 if (tg3_test_link(tp
) != 0) {
8308 etest
->flags
|= ETH_TEST_FL_FAILED
;
8311 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
8312 int err
, irq_sync
= 0;
8314 if (netif_running(dev
)) {
8319 tg3_full_lock(tp
, irq_sync
);
8321 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
8322 err
= tg3_nvram_lock(tp
);
8323 tg3_halt_cpu(tp
, RX_CPU_BASE
);
8324 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
8325 tg3_halt_cpu(tp
, TX_CPU_BASE
);
8327 tg3_nvram_unlock(tp
);
8329 if (tg3_test_registers(tp
) != 0) {
8330 etest
->flags
|= ETH_TEST_FL_FAILED
;
8333 if (tg3_test_memory(tp
) != 0) {
8334 etest
->flags
|= ETH_TEST_FL_FAILED
;
8337 if ((data
[4] = tg3_test_loopback(tp
)) != 0)
8338 etest
->flags
|= ETH_TEST_FL_FAILED
;
8340 tg3_full_unlock(tp
);
8342 if (tg3_test_interrupt(tp
) != 0) {
8343 etest
->flags
|= ETH_TEST_FL_FAILED
;
8347 tg3_full_lock(tp
, 0);
8349 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8350 if (netif_running(dev
)) {
8351 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
8353 tg3_netif_start(tp
);
8356 tg3_full_unlock(tp
);
8358 if (tp
->link_config
.phy_is_low_power
)
8359 tg3_set_power_state(tp
, PCI_D3hot
);
8363 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
8365 struct mii_ioctl_data
*data
= if_mii(ifr
);
8366 struct tg3
*tp
= netdev_priv(dev
);
8371 data
->phy_id
= PHY_ADDR
;
8377 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8378 break; /* We have no PHY */
8380 if (tp
->link_config
.phy_is_low_power
)
8383 spin_lock_bh(&tp
->lock
);
8384 err
= tg3_readphy(tp
, data
->reg_num
& 0x1f, &mii_regval
);
8385 spin_unlock_bh(&tp
->lock
);
8387 data
->val_out
= mii_regval
;
8393 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8394 break; /* We have no PHY */
8396 if (!capable(CAP_NET_ADMIN
))
8399 if (tp
->link_config
.phy_is_low_power
)
8402 spin_lock_bh(&tp
->lock
);
8403 err
= tg3_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
8404 spin_unlock_bh(&tp
->lock
);
8415 #if TG3_VLAN_TAG_USED
8416 static void tg3_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*grp
)
8418 struct tg3
*tp
= netdev_priv(dev
);
8420 tg3_full_lock(tp
, 0);
8424 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8425 __tg3_set_rx_mode(dev
);
8427 tg3_full_unlock(tp
);
8430 static void tg3_vlan_rx_kill_vid(struct net_device
*dev
, unsigned short vid
)
8432 struct tg3
*tp
= netdev_priv(dev
);
8434 tg3_full_lock(tp
, 0);
8436 tp
->vlgrp
->vlan_devices
[vid
] = NULL
;
8437 tg3_full_unlock(tp
);
8441 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
8443 struct tg3
*tp
= netdev_priv(dev
);
8445 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
8449 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
8451 struct tg3
*tp
= netdev_priv(dev
);
8452 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
8453 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
8455 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
8456 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
8457 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
8458 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
8459 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
8462 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
8463 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
8464 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
8465 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
8466 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
8467 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
8468 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
8469 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
8470 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
8471 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
8474 /* No rx interrupts will be generated if both are zero */
8475 if ((ec
->rx_coalesce_usecs
== 0) &&
8476 (ec
->rx_max_coalesced_frames
== 0))
8479 /* No tx interrupts will be generated if both are zero */
8480 if ((ec
->tx_coalesce_usecs
== 0) &&
8481 (ec
->tx_max_coalesced_frames
== 0))
8484 /* Only copy relevant parameters, ignore all others. */
8485 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
8486 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
8487 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
8488 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
8489 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
8490 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
8491 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
8492 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
8493 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
8495 if (netif_running(dev
)) {
8496 tg3_full_lock(tp
, 0);
8497 __tg3_set_coalesce(tp
, &tp
->coal
);
8498 tg3_full_unlock(tp
);
8503 static struct ethtool_ops tg3_ethtool_ops
= {
8504 .get_settings
= tg3_get_settings
,
8505 .set_settings
= tg3_set_settings
,
8506 .get_drvinfo
= tg3_get_drvinfo
,
8507 .get_regs_len
= tg3_get_regs_len
,
8508 .get_regs
= tg3_get_regs
,
8509 .get_wol
= tg3_get_wol
,
8510 .set_wol
= tg3_set_wol
,
8511 .get_msglevel
= tg3_get_msglevel
,
8512 .set_msglevel
= tg3_set_msglevel
,
8513 .nway_reset
= tg3_nway_reset
,
8514 .get_link
= ethtool_op_get_link
,
8515 .get_eeprom_len
= tg3_get_eeprom_len
,
8516 .get_eeprom
= tg3_get_eeprom
,
8517 .set_eeprom
= tg3_set_eeprom
,
8518 .get_ringparam
= tg3_get_ringparam
,
8519 .set_ringparam
= tg3_set_ringparam
,
8520 .get_pauseparam
= tg3_get_pauseparam
,
8521 .set_pauseparam
= tg3_set_pauseparam
,
8522 .get_rx_csum
= tg3_get_rx_csum
,
8523 .set_rx_csum
= tg3_set_rx_csum
,
8524 .get_tx_csum
= ethtool_op_get_tx_csum
,
8525 .set_tx_csum
= tg3_set_tx_csum
,
8526 .get_sg
= ethtool_op_get_sg
,
8527 .set_sg
= ethtool_op_set_sg
,
8528 #if TG3_TSO_SUPPORT != 0
8529 .get_tso
= ethtool_op_get_tso
,
8530 .set_tso
= tg3_set_tso
,
8532 .self_test_count
= tg3_get_test_count
,
8533 .self_test
= tg3_self_test
,
8534 .get_strings
= tg3_get_strings
,
8535 .phys_id
= tg3_phys_id
,
8536 .get_stats_count
= tg3_get_stats_count
,
8537 .get_ethtool_stats
= tg3_get_ethtool_stats
,
8538 .get_coalesce
= tg3_get_coalesce
,
8539 .set_coalesce
= tg3_set_coalesce
,
8540 .get_perm_addr
= ethtool_op_get_perm_addr
,
8543 static void __devinit
tg3_get_eeprom_size(struct tg3
*tp
)
8547 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
8549 if (tg3_nvram_read(tp
, 0, &val
) != 0)
8552 if (swab32(val
) != TG3_EEPROM_MAGIC
)
8556 * Size the chip by reading offsets at increasing powers of two.
8557 * When we encounter our validation signature, we know the addressing
8558 * has wrapped around, and thus have our chip size.
8562 while (cursize
< tp
->nvram_size
) {
8563 if (tg3_nvram_read(tp
, cursize
, &val
) != 0)
8566 if (swab32(val
) == TG3_EEPROM_MAGIC
)
8572 tp
->nvram_size
= cursize
;
8575 static void __devinit
tg3_get_nvram_size(struct tg3
*tp
)
8579 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
8581 tp
->nvram_size
= (val
>> 16) * 1024;
8585 tp
->nvram_size
= 0x20000;
8588 static void __devinit
tg3_get_nvram_info(struct tg3
*tp
)
8592 nvcfg1
= tr32(NVRAM_CFG1
);
8593 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
8594 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
8597 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
8598 tw32(NVRAM_CFG1
, nvcfg1
);
8601 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) ||
8602 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
8603 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
8604 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
8605 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8606 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
8607 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8609 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
8610 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8611 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
8613 case FLASH_VENDOR_ATMEL_EEPROM
:
8614 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8615 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
8616 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8618 case FLASH_VENDOR_ST
:
8619 tp
->nvram_jedecnum
= JEDEC_ST
;
8620 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
8621 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8623 case FLASH_VENDOR_SAIFUN
:
8624 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
8625 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
8627 case FLASH_VENDOR_SST_SMALL
:
8628 case FLASH_VENDOR_SST_LARGE
:
8629 tp
->nvram_jedecnum
= JEDEC_SST
;
8630 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
8635 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8636 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
8637 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8641 static void __devinit
tg3_get_5752_nvram_info(struct tg3
*tp
)
8645 nvcfg1
= tr32(NVRAM_CFG1
);
8647 /* NVRAM protection for TPM */
8648 if (nvcfg1
& (1 << 27))
8649 tp
->tg3_flags2
|= TG3_FLG2_PROTECTED_NVRAM
;
8651 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
8652 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
8653 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
8654 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8655 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8657 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
8658 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8659 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8660 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
8662 case FLASH_5752VENDOR_ST_M45PE10
:
8663 case FLASH_5752VENDOR_ST_M45PE20
:
8664 case FLASH_5752VENDOR_ST_M45PE40
:
8665 tp
->nvram_jedecnum
= JEDEC_ST
;
8666 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8667 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
8671 if (tp
->tg3_flags2
& TG3_FLG2_FLASH
) {
8672 switch (nvcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
8673 case FLASH_5752PAGE_SIZE_256
:
8674 tp
->nvram_pagesize
= 256;
8676 case FLASH_5752PAGE_SIZE_512
:
8677 tp
->nvram_pagesize
= 512;
8679 case FLASH_5752PAGE_SIZE_1K
:
8680 tp
->nvram_pagesize
= 1024;
8682 case FLASH_5752PAGE_SIZE_2K
:
8683 tp
->nvram_pagesize
= 2048;
8685 case FLASH_5752PAGE_SIZE_4K
:
8686 tp
->nvram_pagesize
= 4096;
8688 case FLASH_5752PAGE_SIZE_264
:
8689 tp
->nvram_pagesize
= 264;
8694 /* For eeprom, set pagesize to maximum eeprom size */
8695 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
8697 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
8698 tw32(NVRAM_CFG1
, nvcfg1
);
8702 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8703 static void __devinit
tg3_nvram_init(struct tg3
*tp
)
8707 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
)
8710 tw32_f(GRC_EEPROM_ADDR
,
8711 (EEPROM_ADDR_FSM_RESET
|
8712 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
8713 EEPROM_ADDR_CLKPERD_SHIFT
)));
8715 /* XXX schedule_timeout() ... */
8716 for (j
= 0; j
< 100; j
++)
8719 /* Enable seeprom accesses. */
8720 tw32_f(GRC_LOCAL_CTRL
,
8721 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
8724 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
8725 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
8726 tp
->tg3_flags
|= TG3_FLAG_NVRAM
;
8728 if (tg3_nvram_lock(tp
)) {
8729 printk(KERN_WARNING PFX
"%s: Cannot get nvarm lock, "
8730 "tg3_nvram_init failed.\n", tp
->dev
->name
);
8733 tg3_enable_nvram_access(tp
);
8735 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
8736 tg3_get_5752_nvram_info(tp
);
8738 tg3_get_nvram_info(tp
);
8740 tg3_get_nvram_size(tp
);
8742 tg3_disable_nvram_access(tp
);
8743 tg3_nvram_unlock(tp
);
8746 tp
->tg3_flags
&= ~(TG3_FLAG_NVRAM
| TG3_FLAG_NVRAM_BUFFERED
);
8748 tg3_get_eeprom_size(tp
);
8752 static int tg3_nvram_read_using_eeprom(struct tg3
*tp
,
8753 u32 offset
, u32
*val
)
8758 if (offset
> EEPROM_ADDR_ADDR_MASK
||
8762 tmp
= tr32(GRC_EEPROM_ADDR
) & ~(EEPROM_ADDR_ADDR_MASK
|
8763 EEPROM_ADDR_DEVID_MASK
|
8765 tw32(GRC_EEPROM_ADDR
,
8767 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
8768 ((offset
<< EEPROM_ADDR_ADDR_SHIFT
) &
8769 EEPROM_ADDR_ADDR_MASK
) |
8770 EEPROM_ADDR_READ
| EEPROM_ADDR_START
);
8772 for (i
= 0; i
< 10000; i
++) {
8773 tmp
= tr32(GRC_EEPROM_ADDR
);
8775 if (tmp
& EEPROM_ADDR_COMPLETE
)
8779 if (!(tmp
& EEPROM_ADDR_COMPLETE
))
8782 *val
= tr32(GRC_EEPROM_DATA
);
8786 #define NVRAM_CMD_TIMEOUT 10000
8788 static int tg3_nvram_exec_cmd(struct tg3
*tp
, u32 nvram_cmd
)
8792 tw32(NVRAM_CMD
, nvram_cmd
);
8793 for (i
= 0; i
< NVRAM_CMD_TIMEOUT
; i
++) {
8795 if (tr32(NVRAM_CMD
) & NVRAM_CMD_DONE
) {
8800 if (i
== NVRAM_CMD_TIMEOUT
) {
8806 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
)
8810 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
) {
8811 printk(KERN_ERR PFX
"Attempt to do nvram_read on Sun 570X\n");
8815 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
))
8816 return tg3_nvram_read_using_eeprom(tp
, offset
, val
);
8818 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) &&
8819 (tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
8820 (tp
->nvram_jedecnum
== JEDEC_ATMEL
)) {
8822 offset
= ((offset
/ tp
->nvram_pagesize
) <<
8823 ATMEL_AT45DB0X1B_PAGE_POS
) +
8824 (offset
% tp
->nvram_pagesize
);
8827 if (offset
> NVRAM_ADDR_MSK
)
8830 ret
= tg3_nvram_lock(tp
);
8834 tg3_enable_nvram_access(tp
);
8836 tw32(NVRAM_ADDR
, offset
);
8837 ret
= tg3_nvram_exec_cmd(tp
, NVRAM_CMD_RD
| NVRAM_CMD_GO
|
8838 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_DONE
);
8841 *val
= swab32(tr32(NVRAM_RDDATA
));
8843 tg3_disable_nvram_access(tp
);
8845 tg3_nvram_unlock(tp
);
8850 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
8851 u32 offset
, u32 len
, u8
*buf
)
8856 for (i
= 0; i
< len
; i
+= 4) {
8861 memcpy(&data
, buf
+ i
, 4);
8863 tw32(GRC_EEPROM_DATA
, cpu_to_le32(data
));
8865 val
= tr32(GRC_EEPROM_ADDR
);
8866 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
8868 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
8870 tw32(GRC_EEPROM_ADDR
, val
|
8871 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
8872 (addr
& EEPROM_ADDR_ADDR_MASK
) |
8876 for (j
= 0; j
< 10000; j
++) {
8877 val
= tr32(GRC_EEPROM_ADDR
);
8879 if (val
& EEPROM_ADDR_COMPLETE
)
8883 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
8892 /* offset and length are dword aligned */
8893 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
8897 u32 pagesize
= tp
->nvram_pagesize
;
8898 u32 pagemask
= pagesize
- 1;
8902 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
8908 u32 phy_addr
, page_off
, size
;
8910 phy_addr
= offset
& ~pagemask
;
8912 for (j
= 0; j
< pagesize
; j
+= 4) {
8913 if ((ret
= tg3_nvram_read(tp
, phy_addr
+ j
,
8914 (u32
*) (tmp
+ j
))))
8920 page_off
= offset
& pagemask
;
8927 memcpy(tmp
+ page_off
, buf
, size
);
8929 offset
= offset
+ (pagesize
- page_off
);
8931 tg3_enable_nvram_access(tp
);
8934 * Before we can erase the flash page, we need
8935 * to issue a special "write enable" command.
8937 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
8939 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
8942 /* Erase the target page */
8943 tw32(NVRAM_ADDR
, phy_addr
);
8945 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
8946 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
8948 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
8951 /* Issue another write enable to start the write. */
8952 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
8954 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
8957 for (j
= 0; j
< pagesize
; j
+= 4) {
8960 data
= *((u32
*) (tmp
+ j
));
8961 tw32(NVRAM_WRDATA
, cpu_to_be32(data
));
8963 tw32(NVRAM_ADDR
, phy_addr
+ j
);
8965 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
8969 nvram_cmd
|= NVRAM_CMD_FIRST
;
8970 else if (j
== (pagesize
- 4))
8971 nvram_cmd
|= NVRAM_CMD_LAST
;
8973 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
8980 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
8981 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
8988 /* offset and length are dword aligned */
8989 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
8994 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
8995 u32 data
, page_off
, phy_addr
, nvram_cmd
;
8997 memcpy(&data
, buf
+ i
, 4);
8998 tw32(NVRAM_WRDATA
, cpu_to_be32(data
));
9000 page_off
= offset
% tp
->nvram_pagesize
;
9002 if ((tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
9003 (tp
->nvram_jedecnum
== JEDEC_ATMEL
)) {
9005 phy_addr
= ((offset
/ tp
->nvram_pagesize
) <<
9006 ATMEL_AT45DB0X1B_PAGE_POS
) + page_off
;
9012 tw32(NVRAM_ADDR
, phy_addr
);
9014 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
9016 if ((page_off
== 0) || (i
== 0))
9017 nvram_cmd
|= NVRAM_CMD_FIRST
;
9018 else if (page_off
== (tp
->nvram_pagesize
- 4))
9019 nvram_cmd
|= NVRAM_CMD_LAST
;
9022 nvram_cmd
|= NVRAM_CMD_LAST
;
9024 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
) &&
9025 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
9026 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
9028 if ((ret
= tg3_nvram_exec_cmd(tp
,
9029 NVRAM_CMD_WREN
| NVRAM_CMD_GO
|
9034 if (!(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
9035 /* We always do complete word writes to eeprom. */
9036 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
9039 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
9045 /* offset and length are dword aligned */
9046 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
9050 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
) {
9051 printk(KERN_ERR PFX
"Attempt to do nvram_write on Sun 570X\n");
9055 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
9056 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
9057 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
9061 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
)) {
9062 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
9067 ret
= tg3_nvram_lock(tp
);
9071 tg3_enable_nvram_access(tp
);
9072 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
9073 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
))
9074 tw32(NVRAM_WRITE1
, 0x406);
9076 grc_mode
= tr32(GRC_MODE
);
9077 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
9079 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) ||
9080 !(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
9082 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
9086 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
9090 grc_mode
= tr32(GRC_MODE
);
9091 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
9093 tg3_disable_nvram_access(tp
);
9094 tg3_nvram_unlock(tp
);
9097 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
9098 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9105 struct subsys_tbl_ent
{
9106 u16 subsys_vendor
, subsys_devid
;
9110 static struct subsys_tbl_ent subsys_id_to_phy_id
[] = {
9111 /* Broadcom boards. */
9112 { PCI_VENDOR_ID_BROADCOM
, 0x1644, PHY_ID_BCM5401
}, /* BCM95700A6 */
9113 { PCI_VENDOR_ID_BROADCOM
, 0x0001, PHY_ID_BCM5701
}, /* BCM95701A5 */
9114 { PCI_VENDOR_ID_BROADCOM
, 0x0002, PHY_ID_BCM8002
}, /* BCM95700T6 */
9115 { PCI_VENDOR_ID_BROADCOM
, 0x0003, 0 }, /* BCM95700A9 */
9116 { PCI_VENDOR_ID_BROADCOM
, 0x0005, PHY_ID_BCM5701
}, /* BCM95701T1 */
9117 { PCI_VENDOR_ID_BROADCOM
, 0x0006, PHY_ID_BCM5701
}, /* BCM95701T8 */
9118 { PCI_VENDOR_ID_BROADCOM
, 0x0007, 0 }, /* BCM95701A7 */
9119 { PCI_VENDOR_ID_BROADCOM
, 0x0008, PHY_ID_BCM5701
}, /* BCM95701A10 */
9120 { PCI_VENDOR_ID_BROADCOM
, 0x8008, PHY_ID_BCM5701
}, /* BCM95701A12 */
9121 { PCI_VENDOR_ID_BROADCOM
, 0x0009, PHY_ID_BCM5703
}, /* BCM95703Ax1 */
9122 { PCI_VENDOR_ID_BROADCOM
, 0x8009, PHY_ID_BCM5703
}, /* BCM95703Ax2 */
9125 { PCI_VENDOR_ID_3COM
, 0x1000, PHY_ID_BCM5401
}, /* 3C996T */
9126 { PCI_VENDOR_ID_3COM
, 0x1006, PHY_ID_BCM5701
}, /* 3C996BT */
9127 { PCI_VENDOR_ID_3COM
, 0x1004, 0 }, /* 3C996SX */
9128 { PCI_VENDOR_ID_3COM
, 0x1007, PHY_ID_BCM5701
}, /* 3C1000T */
9129 { PCI_VENDOR_ID_3COM
, 0x1008, PHY_ID_BCM5701
}, /* 3C940BR01 */
9132 { PCI_VENDOR_ID_DELL
, 0x00d1, PHY_ID_BCM5401
}, /* VIPER */
9133 { PCI_VENDOR_ID_DELL
, 0x0106, PHY_ID_BCM5401
}, /* JAGUAR */
9134 { PCI_VENDOR_ID_DELL
, 0x0109, PHY_ID_BCM5411
}, /* MERLOT */
9135 { PCI_VENDOR_ID_DELL
, 0x010a, PHY_ID_BCM5411
}, /* SLIM_MERLOT */
9137 /* Compaq boards. */
9138 { PCI_VENDOR_ID_COMPAQ
, 0x007c, PHY_ID_BCM5701
}, /* BANSHEE */
9139 { PCI_VENDOR_ID_COMPAQ
, 0x009a, PHY_ID_BCM5701
}, /* BANSHEE_2 */
9140 { PCI_VENDOR_ID_COMPAQ
, 0x007d, 0 }, /* CHANGELING */
9141 { PCI_VENDOR_ID_COMPAQ
, 0x0085, PHY_ID_BCM5701
}, /* NC7780 */
9142 { PCI_VENDOR_ID_COMPAQ
, 0x0099, PHY_ID_BCM5701
}, /* NC7780_2 */
9145 { PCI_VENDOR_ID_IBM
, 0x0281, 0 } /* IBM??? */
9148 static inline struct subsys_tbl_ent
*lookup_by_subsys(struct tg3
*tp
)
9152 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
9153 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
9154 tp
->pdev
->subsystem_vendor
) &&
9155 (subsys_id_to_phy_id
[i
].subsys_devid
==
9156 tp
->pdev
->subsystem_device
))
9157 return &subsys_id_to_phy_id
[i
];
9162 /* Since this function may be called in D3-hot power state during
9163 * tg3_init_one(), only config cycles are allowed.
9165 static void __devinit
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
9169 /* Make sure register accesses (indirect or otherwise)
9170 * will function correctly.
9172 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9173 tp
->misc_host_ctrl
);
9175 tp
->phy_id
= PHY_ID_INVALID
;
9176 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9178 /* Do not even try poking around in here on Sun parts. */
9179 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
)
9182 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
9183 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
9184 u32 nic_cfg
, led_cfg
;
9185 u32 nic_phy_id
, ver
, cfg2
= 0, eeprom_phy_id
;
9186 int eeprom_phy_serdes
= 0;
9188 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
9189 tp
->nic_sram_data_cfg
= nic_cfg
;
9191 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
9192 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
9193 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
) &&
9194 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) &&
9195 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
) &&
9196 (ver
> 0) && (ver
< 0x100))
9197 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
9199 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
9200 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
9201 eeprom_phy_serdes
= 1;
9203 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
9204 if (nic_phy_id
!= 0) {
9205 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
9206 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
9208 eeprom_phy_id
= (id1
>> 16) << 10;
9209 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
9210 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
9214 tp
->phy_id
= eeprom_phy_id
;
9215 if (eeprom_phy_serdes
) {
9216 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
9217 tp
->tg3_flags2
|= TG3_FLG2_MII_SERDES
;
9219 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9222 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9223 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
9224 SHASTA_EXT_LED_MODE_MASK
);
9226 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
9230 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
9231 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9234 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
9235 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
9238 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
9239 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
9241 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9242 * read on some older 5700/5701 bootcode.
9244 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
9246 GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
9248 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9252 case SHASTA_EXT_LED_SHARED
:
9253 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
9254 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
9255 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A1
)
9256 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
9257 LED_CTRL_MODE_PHY_2
);
9260 case SHASTA_EXT_LED_MAC
:
9261 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
9264 case SHASTA_EXT_LED_COMBO
:
9265 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
9266 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
)
9267 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
9268 LED_CTRL_MODE_PHY_2
);
9273 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
9274 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) &&
9275 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
9276 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
9278 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
) &&
9279 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) &&
9280 (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
))
9281 tp
->tg3_flags
|= TG3_FLAG_EEPROM_WRITE_PROT
;
9283 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
9284 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
9285 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9286 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
9288 if (nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
)
9289 tp
->tg3_flags
|= TG3_FLAG_SERDES_WOL_CAP
;
9291 if (cfg2
& (1 << 17))
9292 tp
->tg3_flags2
|= TG3_FLG2_CAPACITIVE_COUPLING
;
9294 /* serdes signal pre-emphasis in register 0x590 set by */
9295 /* bootcode if bit 18 is set */
9296 if (cfg2
& (1 << 18))
9297 tp
->tg3_flags2
|= TG3_FLG2_SERDES_PREEMPHASIS
;
9301 static int __devinit
tg3_phy_probe(struct tg3
*tp
)
9303 u32 hw_phy_id_1
, hw_phy_id_2
;
9304 u32 hw_phy_id
, hw_phy_id_masked
;
9307 /* Reading the PHY ID register can conflict with ASF
9308 * firwmare access to the PHY hardware.
9311 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
9312 hw_phy_id
= hw_phy_id_masked
= PHY_ID_INVALID
;
9314 /* Now read the physical PHY_ID from the chip and verify
9315 * that it is sane. If it doesn't look good, we fall back
9316 * to either the hard-coded table based PHY_ID and failing
9317 * that the value found in the eeprom area.
9319 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
9320 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
9322 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
9323 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
9324 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
9326 hw_phy_id_masked
= hw_phy_id
& PHY_ID_MASK
;
9329 if (!err
&& KNOWN_PHY_ID(hw_phy_id_masked
)) {
9330 tp
->phy_id
= hw_phy_id
;
9331 if (hw_phy_id_masked
== PHY_ID_BCM8002
)
9332 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9334 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_SERDES
;
9336 if (tp
->phy_id
!= PHY_ID_INVALID
) {
9337 /* Do nothing, phy ID already set up in
9338 * tg3_get_eeprom_hw_cfg().
9341 struct subsys_tbl_ent
*p
;
9343 /* No eeprom signature? Try the hardcoded
9344 * subsys device table.
9346 p
= lookup_by_subsys(tp
);
9350 tp
->phy_id
= p
->phy_id
;
9352 tp
->phy_id
== PHY_ID_BCM8002
)
9353 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9357 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) &&
9358 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
9359 u32 bmsr
, adv_reg
, tg3_ctrl
;
9361 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
9362 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
9363 (bmsr
& BMSR_LSTATUS
))
9364 goto skip_phy_reset
;
9366 err
= tg3_phy_reset(tp
);
9370 adv_reg
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
9371 ADVERTISE_100HALF
| ADVERTISE_100FULL
|
9372 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
9374 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
9375 tg3_ctrl
= (MII_TG3_CTRL_ADV_1000_HALF
|
9376 MII_TG3_CTRL_ADV_1000_FULL
);
9377 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
9378 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
9379 tg3_ctrl
|= (MII_TG3_CTRL_AS_MASTER
|
9380 MII_TG3_CTRL_ENABLE_AS_MASTER
);
9383 if (!tg3_copper_is_advertising_all(tp
)) {
9384 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
9386 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
9387 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
9389 tg3_writephy(tp
, MII_BMCR
,
9390 BMCR_ANENABLE
| BMCR_ANRESTART
);
9392 tg3_phy_set_wirespeed(tp
);
9394 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
9395 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
9396 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
9400 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
9401 err
= tg3_init_5401phy_dsp(tp
);
9406 if (!err
&& ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
)) {
9407 err
= tg3_init_5401phy_dsp(tp
);
9410 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
9411 tp
->link_config
.advertising
=
9412 (ADVERTISED_1000baseT_Half
|
9413 ADVERTISED_1000baseT_Full
|
9414 ADVERTISED_Autoneg
|
9416 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
9417 tp
->link_config
.advertising
&=
9418 ~(ADVERTISED_1000baseT_Half
|
9419 ADVERTISED_1000baseT_Full
);
9424 static void __devinit
tg3_read_partno(struct tg3
*tp
)
9426 unsigned char vpd_data
[256];
9429 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
) {
9430 /* Sun decided not to put the necessary bits in the
9431 * NVRAM of their onboard tg3 parts :(
9433 strcpy(tp
->board_part_number
, "Sun 570X");
9437 for (i
= 0; i
< 256; i
+= 4) {
9440 if (tg3_nvram_read(tp
, 0x100 + i
, &tmp
))
9443 vpd_data
[i
+ 0] = ((tmp
>> 0) & 0xff);
9444 vpd_data
[i
+ 1] = ((tmp
>> 8) & 0xff);
9445 vpd_data
[i
+ 2] = ((tmp
>> 16) & 0xff);
9446 vpd_data
[i
+ 3] = ((tmp
>> 24) & 0xff);
9449 /* Now parse and find the part number. */
9450 for (i
= 0; i
< 256; ) {
9451 unsigned char val
= vpd_data
[i
];
9454 if (val
== 0x82 || val
== 0x91) {
9457 (vpd_data
[i
+ 2] << 8)));
9464 block_end
= (i
+ 3 +
9466 (vpd_data
[i
+ 2] << 8)));
9468 while (i
< block_end
) {
9469 if (vpd_data
[i
+ 0] == 'P' &&
9470 vpd_data
[i
+ 1] == 'N') {
9471 int partno_len
= vpd_data
[i
+ 2];
9473 if (partno_len
> 24)
9476 memcpy(tp
->board_part_number
,
9485 /* Part number not found. */
9490 strcpy(tp
->board_part_number
, "none");
#ifdef CONFIG_SPARC64
/* Detect Sun onboard 570X parts via OpenFirmware properties:
 * either the subsystem-vendor-id property says Sun, or the device
 * node is named "network" (SunBlade-2500 case).  Returns nonzero
 * for a Sun part.
 */
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
{
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;
		u32 venid;
		int err;

		err = prom_getproperty(node, "subsystem-vendor-id",
				       (char *) &venid, sizeof(venid));
		if (err == 0 || err == -1)
			return 0;
		if (venid == PCI_VENDOR_ID_SUN)
			return 1;

		/* TG3 chips onboard the SunBlade-2500 don't have the
		 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
		 * are distinguishable from non-Sun variants by being
		 * named "network" by the firmware.  Non-Sun cards will
		 * show up as being named "ethernet".
		 */
		if (!strcmp(pcp->prom_name, "network"))
			return 1;
	}
	return 0;
}
#endif
9524 static int __devinit
tg3_get_invariants(struct tg3
*tp
)
9526 static struct pci_device_id write_reorder_chipsets
[] = {
9527 { PCI_DEVICE(PCI_VENDOR_ID_AMD
,
9528 PCI_DEVICE_ID_AMD_FE_GATE_700C
) },
9529 { PCI_DEVICE(PCI_VENDOR_ID_VIA
,
9530 PCI_DEVICE_ID_VIA_8385_0
) },
9534 u32 cacheline_sz_reg
;
9535 u32 pci_state_reg
, grc_misc_cfg
;
9540 #ifdef CONFIG_SPARC64
9541 if (tg3_is_sun_570X(tp
))
9542 tp
->tg3_flags2
|= TG3_FLG2_SUN_570X
;
9545 /* Force memory write invalidate off. If we leave it on,
9546 * then on 5700_BX chips we have to enable a workaround.
9547 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9548 * to match the cacheline size. The Broadcom driver have this
9549 * workaround but turns MWI off all the times so never uses
9550 * it. This seems to suggest that the workaround is insufficient.
9552 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
9553 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
9554 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
9556 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9557 * has the register indirect write enable bit set before
9558 * we try to access any of the MMIO registers. It is also
9559 * critical that the PCI-X hw workaround situation is decided
9560 * before that as well.
9562 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9565 tp
->pci_chip_rev_id
= (misc_ctrl_reg
>>
9566 MISC_HOST_CTRL_CHIPREV_SHIFT
);
9568 /* Wrong chip ID in 5752 A0. This code can be removed later
9569 * as A0 is not in production.
9571 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5752_A0_HW
)
9572 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
9574 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9575 * we need to disable memory and use config. cycles
9576 * only to access all registers. The 5702/03 chips
9577 * can mistakenly decode the special cycles from the
9578 * ICH chipsets as memory write cycles, causing corruption
9579 * of register and memory space. Only certain ICH bridges
9580 * will drive special cycles with non-zero data during the
9581 * address phase which can fall within the 5703's address
9582 * range. This is not an ICH bug as the PCI spec allows
9583 * non-zero address during special cycles. However, only
9584 * these ICH bridges are known to drive non-zero addresses
9585 * during special cycles.
9587 * Since special cycles do not cross PCI bridges, we only
9588 * enable this workaround if the 5703 is on the secondary
9589 * bus of these ICH bridges.
9591 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
) ||
9592 (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A2
)) {
9593 static struct tg3_dev_id
{
9597 } ich_chipsets
[] = {
9598 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
9600 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
9602 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
9604 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
9608 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
9609 struct pci_dev
*bridge
= NULL
;
9611 while (pci_id
->vendor
!= 0) {
9612 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
9618 if (pci_id
->rev
!= PCI_ANY_ID
) {
9621 pci_read_config_byte(bridge
, PCI_REVISION_ID
,
9623 if (rev
> pci_id
->rev
)
9626 if (bridge
->subordinate
&&
9627 (bridge
->subordinate
->number
==
9628 tp
->pdev
->bus
->number
)) {
9630 tp
->tg3_flags2
|= TG3_FLG2_ICH_WORKAROUND
;
9631 pci_dev_put(bridge
);
9637 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9638 * DMA addresses > 40-bit. This bridge may have other additional
9639 * 57xx devices behind it in some 4-port NIC designs for example.
9640 * Any tg3 device found behind the bridge will also need the 40-bit
9643 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
||
9644 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
9645 tp
->tg3_flags2
|= TG3_FLG2_5780_CLASS
;
9646 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
9647 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
9650 struct pci_dev
*bridge
= NULL
;
9653 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
9654 PCI_DEVICE_ID_SERVERWORKS_EPB
,
9656 if (bridge
&& bridge
->subordinate
&&
9657 (bridge
->subordinate
->number
<=
9658 tp
->pdev
->bus
->number
) &&
9659 (bridge
->subordinate
->subordinate
>=
9660 tp
->pdev
->bus
->number
)) {
9661 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
9662 pci_dev_put(bridge
);
9668 /* Initialize misc host control in PCI block. */
9669 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
9670 MISC_HOST_CTRL_CHIPREV
);
9671 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9672 tp
->misc_host_ctrl
);
9674 pci_read_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
9677 tp
->pci_cacheline_sz
= (cacheline_sz_reg
>> 0) & 0xff;
9678 tp
->pci_lat_timer
= (cacheline_sz_reg
>> 8) & 0xff;
9679 tp
->pci_hdr_type
= (cacheline_sz_reg
>> 16) & 0xff;
9680 tp
->pci_bist
= (cacheline_sz_reg
>> 24) & 0xff;
9682 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
9683 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
9684 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
9685 tp
->tg3_flags2
|= TG3_FLG2_5750_PLUS
;
9687 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) ||
9688 (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
9689 tp
->tg3_flags2
|= TG3_FLG2_5705_PLUS
;
9691 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9692 tp
->tg3_flags2
|= TG3_FLG2_HW_TSO
;
9694 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
&&
9695 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5750
&&
9696 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
)
9697 tp
->tg3_flags2
|= TG3_FLG2_JUMBO_CAPABLE
;
9699 if (pci_find_capability(tp
->pdev
, PCI_CAP_ID_EXP
) != 0)
9700 tp
->tg3_flags2
|= TG3_FLG2_PCI_EXPRESS
;
9702 /* If we have an AMD 762 or VIA K8T800 chipset, write
9703 * reordering to the mailbox registers done by the host
9704 * controller can cause major troubles. We read back from
9705 * every mailbox register write to force the writes to be
9706 * posted to the chip in order.
9708 if (pci_dev_present(write_reorder_chipsets
) &&
9709 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
9710 tp
->tg3_flags
|= TG3_FLAG_MBOX_WRITE_REORDER
;
9712 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
9713 tp
->pci_lat_timer
< 64) {
9714 tp
->pci_lat_timer
= 64;
9716 cacheline_sz_reg
= ((tp
->pci_cacheline_sz
& 0xff) << 0);
9717 cacheline_sz_reg
|= ((tp
->pci_lat_timer
& 0xff) << 8);
9718 cacheline_sz_reg
|= ((tp
->pci_hdr_type
& 0xff) << 16);
9719 cacheline_sz_reg
|= ((tp
->pci_bist
& 0xff) << 24);
9721 pci_write_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
9725 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
9728 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0) {
9729 tp
->tg3_flags
|= TG3_FLAG_PCIX_MODE
;
9731 /* If this is a 5700 BX chipset, and we are in PCI-X
9732 * mode, enable register write workaround.
9734 * The workaround is to use indirect register accesses
9735 * for all chip writes not to mailbox registers.
9737 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
) {
9741 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
9743 /* The chip can have it's power management PCI config
9744 * space registers clobbered due to this bug.
9745 * So explicitly force the chip into D0 here.
9747 pci_read_config_dword(tp
->pdev
, TG3PCI_PM_CTRL_STAT
,
9749 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
9750 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
9751 pci_write_config_dword(tp
->pdev
, TG3PCI_PM_CTRL_STAT
,
9754 /* Also, force SERR#/PERR# in PCI command. */
9755 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
9756 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
9757 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
9761 /* 5700 BX chips need to have their TX producer index mailboxes
9762 * written twice to workaround a bug.
9764 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
)
9765 tp
->tg3_flags
|= TG3_FLAG_TXD_MBOX_HWBUG
;
9767 /* Back to back register writes can cause problems on this chip,
9768 * the workaround is to read back all reg writes except those to
9769 * mailbox regs. See tg3_write_indirect_reg32().
9771 * PCI Express 5750_A0 rev chips need this workaround too.
9773 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
9774 ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
9775 tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
))
9776 tp
->tg3_flags
|= TG3_FLAG_5701_REG_WRITE_BUG
;
9778 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
9779 tp
->tg3_flags
|= TG3_FLAG_PCI_HIGH_SPEED
;
9780 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
9781 tp
->tg3_flags
|= TG3_FLAG_PCI_32BIT
;
9783 /* Chip-specific fixup from Broadcom driver */
9784 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
) &&
9785 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
9786 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
9787 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
9790 /* Default fast path register access methods */
9791 tp
->read32
= tg3_read32
;
9792 tp
->write32
= tg3_write32
;
9793 tp
->read32_mbox
= tg3_read32
;
9794 tp
->write32_mbox
= tg3_write32
;
9795 tp
->write32_tx_mbox
= tg3_write32
;
9796 tp
->write32_rx_mbox
= tg3_write32
;
9798 /* Various workaround register access methods */
9799 if (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
)
9800 tp
->write32
= tg3_write_indirect_reg32
;
9801 else if (tp
->tg3_flags
& TG3_FLAG_5701_REG_WRITE_BUG
)
9802 tp
->write32
= tg3_write_flush_reg32
;
9804 if ((tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
) ||
9805 (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)) {
9806 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
9807 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
9808 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
9811 if (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
) {
9812 tp
->read32
= tg3_read_indirect_reg32
;
9813 tp
->write32
= tg3_write_indirect_reg32
;
9814 tp
->read32_mbox
= tg3_read_indirect_mbox
;
9815 tp
->write32_mbox
= tg3_write_indirect_mbox
;
9816 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
9817 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
9822 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
9823 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
9824 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
9827 /* Get eeprom hw config before calling tg3_set_power_state().
9828 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9829 * determined before calling tg3_set_power_state() so that
9830 * we know whether or not to switch out of Vaux power.
9831 * When the flag is set, it means that GPIO1 is used for eeprom
9832 * write protect and also implies that it is a LOM where GPIOs
9833 * are not used to switch power.
9835 tg3_get_eeprom_hw_cfg(tp
);
9837 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9838 * GPIO1 driven high will bring 5700's external PHY out of reset.
9839 * It is also used as eeprom write protect on LOMs.
9841 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
9842 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
9843 (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
))
9844 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
9845 GRC_LCLCTRL_GPIO_OUTPUT1
);
9846 /* Unused GPIO3 must be driven as output on 5752 because there
9847 * are no pull-up resistors on unused GPIO pins.
9849 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
9850 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
9852 /* Force the chip into D0. */
9853 err
= tg3_set_power_state(tp
, PCI_D0
);
9855 printk(KERN_ERR PFX
"(%s) transition to D0 failed\n",
9856 pci_name(tp
->pdev
));
9860 /* 5700 B0 chips do not support checksumming correctly due
9863 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5700_B0
)
9864 tp
->tg3_flags
|= TG3_FLAG_BROKEN_CHECKSUMS
;
9866 /* Pseudo-header checksum is done by hardware logic and not
9867 * the offload processers, so make the chip do the pseudo-
9868 * header checksums on receive. For transmit it is more
9869 * convenient to do the pseudo-header checksum in software
9870 * as Linux does that on transmit for us in all cases.
9872 tp
->tg3_flags
|= TG3_FLAG_NO_TX_PSEUDO_CSUM
;
9873 tp
->tg3_flags
&= ~TG3_FLAG_NO_RX_PSEUDO_CSUM
;
9875 /* Derive initial jumbo mode from MTU assigned in
9876 * ether_setup() via the alloc_etherdev() call
9878 if (tp
->dev
->mtu
> ETH_DATA_LEN
&&
9879 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
9880 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
9882 /* Determine WakeOnLan speed to use. */
9883 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
9884 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
9885 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
||
9886 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B2
) {
9887 tp
->tg3_flags
&= ~(TG3_FLAG_WOL_SPEED_100MB
);
9889 tp
->tg3_flags
|= TG3_FLAG_WOL_SPEED_100MB
;
9892 /* A few boards don't want Ethernet@WireSpeed phy feature */
9893 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
9894 ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
9895 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) &&
9896 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A1
)) ||
9897 (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
))
9898 tp
->tg3_flags2
|= TG3_FLG2_NO_ETH_WIRE_SPEED
;
9900 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5703_AX
||
9901 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_AX
)
9902 tp
->tg3_flags2
|= TG3_FLG2_PHY_ADC_BUG
;
9903 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
)
9904 tp
->tg3_flags2
|= TG3_FLG2_PHY_5704_A0_BUG
;
9906 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
9907 tp
->tg3_flags2
|= TG3_FLG2_PHY_BER_BUG
;
9909 tp
->coalesce_mode
= 0;
9910 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_AX
&&
9911 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_BX
)
9912 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
9914 /* Initialize MAC MI mode, polling disabled. */
9915 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
9918 /* Initialize data/descriptor byte/word swapping. */
9919 val
= tr32(GRC_MODE
);
9920 val
&= GRC_MODE_HOST_STACKUP
;
9921 tw32(GRC_MODE
, val
| tp
->grc_mode
);
9923 tg3_switch_clocks(tp
);
9925 /* Clear this out for sanity. */
9926 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
9928 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
9930 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
9931 (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) == 0) {
9932 u32 chiprevid
= GET_CHIP_REV_ID(tp
->misc_host_ctrl
);
9934 if (chiprevid
== CHIPREV_ID_5701_A0
||
9935 chiprevid
== CHIPREV_ID_5701_B0
||
9936 chiprevid
== CHIPREV_ID_5701_B2
||
9937 chiprevid
== CHIPREV_ID_5701_B5
) {
9938 void __iomem
*sram_base
;
9940 /* Write some dummy words into the SRAM status block
9941 * area, see if it reads back correctly. If the return
9942 * value is bad, force enable the PCIX workaround.
9944 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
9946 writel(0x00000000, sram_base
);
9947 writel(0x00000000, sram_base
+ 4);
9948 writel(0xffffffff, sram_base
+ 4);
9949 if (readl(sram_base
) != 0x00000000)
9950 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
9957 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
9958 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
9960 /* Broadcom's driver says that CIOBE multisplit has a bug */
9962 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
9963 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5704CIOBE
) {
9964 tp
->tg3_flags
|= TG3_FLAG_SPLIT_MODE
;
9965 tp
->split_mode_max_reqs
= SPLIT_MODE_5704_MAX_REQ
;
9968 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
9969 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
9970 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
9971 tp
->tg3_flags2
|= TG3_FLG2_IS_5788
;
9973 if (!(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
9974 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
))
9975 tp
->tg3_flags
|= TG3_FLAG_TAGGED_STATUS
;
9976 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
9977 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
9978 HOSTCC_MODE_CLRTICK_TXBD
);
9980 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
9981 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9982 tp
->misc_host_ctrl
);
9985 /* these are limited to 10/100 only */
9986 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
9987 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
9988 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
9989 tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
9990 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901
||
9991 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901_2
||
9992 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5705F
)) ||
9993 (tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
9994 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5751F
||
9995 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5753F
)))
9996 tp
->tg3_flags
|= TG3_FLAG_10_100_ONLY
;
9998 err
= tg3_phy_probe(tp
);
10000 printk(KERN_ERR PFX
"(%s) phy probe failed, err %d\n",
10001 pci_name(tp
->pdev
), err
);
10002 /* ... but do not return immediately ... */
10005 tg3_read_partno(tp
);
10007 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
10008 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
10010 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
10011 tp
->tg3_flags
|= TG3_FLAG_USE_MI_INTERRUPT
;
10013 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
10016 /* 5700 {AX,BX} chips have a broken status block link
10017 * change bit implementation, so we must use the
10018 * status register in those cases.
10020 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
10021 tp
->tg3_flags
|= TG3_FLAG_USE_LINKCHG_REG
;
10023 tp
->tg3_flags
&= ~TG3_FLAG_USE_LINKCHG_REG
;
10025 /* The led_ctrl is set during tg3_phy_probe, here we might
10026 * have to force the link status polling mechanism based
10027 * upon subsystem IDs.
10029 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
10030 !(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
10031 tp
->tg3_flags
|= (TG3_FLAG_USE_MI_INTERRUPT
|
10032 TG3_FLAG_USE_LINKCHG_REG
);
10035 /* For all SERDES we poll the MAC status register. */
10036 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
10037 tp
->tg3_flags
|= TG3_FLAG_POLL_SERDES
;
10039 tp
->tg3_flags
&= ~TG3_FLAG_POLL_SERDES
;
10041 /* It seems all chips can get confused if TX buffers
10042 * straddle the 4GB address boundary in some cases.
10044 tp
->dev
->hard_start_xmit
= tg3_start_xmit
;
10047 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
10048 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) != 0)
10051 /* By default, disable wake-on-lan. User can change this
10052 * using ETHTOOL_SWOL.
10054 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
10059 #ifdef CONFIG_SPARC64
10060 static int __devinit
tg3_get_macaddr_sparc(struct tg3
*tp
)
10062 struct net_device
*dev
= tp
->dev
;
10063 struct pci_dev
*pdev
= tp
->pdev
;
10064 struct pcidev_cookie
*pcp
= pdev
->sysdata
;
10067 int node
= pcp
->prom_node
;
10069 if (prom_getproplen(node
, "local-mac-address") == 6) {
10070 prom_getproperty(node
, "local-mac-address",
10072 memcpy(dev
->perm_addr
, dev
->dev_addr
, 6);
10079 static int __devinit
tg3_get_default_macaddr_sparc(struct tg3
*tp
)
10081 struct net_device
*dev
= tp
->dev
;
10083 memcpy(dev
->dev_addr
, idprom
->id_ethaddr
, 6);
10084 memcpy(dev
->perm_addr
, idprom
->id_ethaddr
, 6);
10089 static int __devinit
tg3_get_device_address(struct tg3
*tp
)
10091 struct net_device
*dev
= tp
->dev
;
10092 u32 hi
, lo
, mac_offset
;
10094 #ifdef CONFIG_SPARC64
10095 if (!tg3_get_macaddr_sparc(tp
))
10100 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
10101 !(tp
->tg3_flags
& TG3_FLG2_SUN_570X
)) ||
10102 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
10103 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
10105 if (tg3_nvram_lock(tp
))
10106 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
10108 tg3_nvram_unlock(tp
);
10111 /* First try to get it from MAC address mailbox. */
10112 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
10113 if ((hi
>> 16) == 0x484b) {
10114 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
10115 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
10117 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
10118 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
10119 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
10120 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
10121 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
10123 /* Next, try NVRAM. */
10124 else if (!(tp
->tg3_flags
& TG3_FLG2_SUN_570X
) &&
10125 !tg3_nvram_read(tp
, mac_offset
+ 0, &hi
) &&
10126 !tg3_nvram_read(tp
, mac_offset
+ 4, &lo
)) {
10127 dev
->dev_addr
[0] = ((hi
>> 16) & 0xff);
10128 dev
->dev_addr
[1] = ((hi
>> 24) & 0xff);
10129 dev
->dev_addr
[2] = ((lo
>> 0) & 0xff);
10130 dev
->dev_addr
[3] = ((lo
>> 8) & 0xff);
10131 dev
->dev_addr
[4] = ((lo
>> 16) & 0xff);
10132 dev
->dev_addr
[5] = ((lo
>> 24) & 0xff);
10134 /* Finally just fetch it out of the MAC control regs. */
10136 hi
= tr32(MAC_ADDR_0_HIGH
);
10137 lo
= tr32(MAC_ADDR_0_LOW
);
10139 dev
->dev_addr
[5] = lo
& 0xff;
10140 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
10141 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
10142 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
10143 dev
->dev_addr
[1] = hi
& 0xff;
10144 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
10147 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
10148 #ifdef CONFIG_SPARC64
10149 if (!tg3_get_default_macaddr_sparc(tp
))
10154 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
10158 #define BOUNDARY_SINGLE_CACHELINE 1
10159 #define BOUNDARY_MULTI_CACHELINE 2
10161 static u32 __devinit
tg3_calc_dma_bndry(struct tg3
*tp
, u32 val
)
10163 int cacheline_size
;
10167 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
, &byte
);
10169 cacheline_size
= 1024;
10171 cacheline_size
= (int) byte
* 4;
10173 /* On 5703 and later chips, the boundary bits have no
10176 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
10177 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
10178 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
10181 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10182 goal
= BOUNDARY_MULTI_CACHELINE
;
10184 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10185 goal
= BOUNDARY_SINGLE_CACHELINE
;
10194 /* PCI controllers on most RISC systems tend to disconnect
10195 * when a device tries to burst across a cache-line boundary.
10196 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10198 * Unfortunately, for PCI-E there are only limited
10199 * write-side controls for this, and thus for reads
10200 * we will still get the disconnects. We'll also waste
10201 * these PCI cycles for both read and write for chips
10202 * other than 5700 and 5701 which do not implement the
10205 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) &&
10206 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
10207 switch (cacheline_size
) {
10212 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10213 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
10214 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
10216 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
10217 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
10222 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
10223 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
10227 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
10228 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
10231 } else if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
10232 switch (cacheline_size
) {
10236 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10237 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
10238 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
10244 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
10245 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
10249 switch (cacheline_size
) {
10251 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10252 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
10253 DMA_RWCTRL_WRITE_BNDRY_16
);
10258 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10259 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
10260 DMA_RWCTRL_WRITE_BNDRY_32
);
10265 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10266 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
10267 DMA_RWCTRL_WRITE_BNDRY_64
);
10272 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10273 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
10274 DMA_RWCTRL_WRITE_BNDRY_128
);
10279 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
10280 DMA_RWCTRL_WRITE_BNDRY_256
);
10283 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
10284 DMA_RWCTRL_WRITE_BNDRY_512
);
10288 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
10289 DMA_RWCTRL_WRITE_BNDRY_1024
);
10298 static int __devinit
tg3_do_test_dma(struct tg3
*tp
, u32
*buf
, dma_addr_t buf_dma
, int size
, int to_device
)
10300 struct tg3_internal_buffer_desc test_desc
;
10301 u32 sram_dma_descs
;
10304 sram_dma_descs
= NIC_SRAM_DMA_DESC_POOL_BASE
;
10306 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
, 0);
10307 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
, 0);
10308 tw32(RDMAC_STATUS
, 0);
10309 tw32(WDMAC_STATUS
, 0);
10311 tw32(BUFMGR_MODE
, 0);
10312 tw32(FTQ_RESET
, 0);
10314 test_desc
.addr_hi
= ((u64
) buf_dma
) >> 32;
10315 test_desc
.addr_lo
= buf_dma
& 0xffffffff;
10316 test_desc
.nic_mbuf
= 0x00002100;
10317 test_desc
.len
= size
;
10320 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10321 * the *second* time the tg3 driver was getting loaded after an
10324 * Broadcom tells me:
10325 * ...the DMA engine is connected to the GRC block and a DMA
10326 * reset may affect the GRC block in some unpredictable way...
10327 * The behavior of resets to individual blocks has not been tested.
10329 * Broadcom noted the GRC reset will also reset all sub-components.
10332 test_desc
.cqid_sqid
= (13 << 8) | 2;
10334 tw32_f(RDMAC_MODE
, RDMAC_MODE_ENABLE
);
10337 test_desc
.cqid_sqid
= (16 << 8) | 7;
10339 tw32_f(WDMAC_MODE
, WDMAC_MODE_ENABLE
);
10342 test_desc
.flags
= 0x00000005;
10344 for (i
= 0; i
< (sizeof(test_desc
) / sizeof(u32
)); i
++) {
10347 val
= *(((u32
*)&test_desc
) + i
);
10348 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
,
10349 sram_dma_descs
+ (i
* sizeof(u32
)));
10350 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
10352 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
10355 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ
, sram_dma_descs
);
10357 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ
, sram_dma_descs
);
10361 for (i
= 0; i
< 40; i
++) {
10365 val
= tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
);
10367 val
= tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
);
10368 if ((val
& 0xffff) == sram_dma_descs
) {
10379 #define TEST_BUFFER_SIZE 0x2000
10381 static int __devinit
tg3_test_dma(struct tg3
*tp
)
10383 dma_addr_t buf_dma
;
10384 u32
*buf
, saved_dma_rwctrl
;
10387 buf
= pci_alloc_consistent(tp
->pdev
, TEST_BUFFER_SIZE
, &buf_dma
);
10393 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
10394 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
10396 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
10398 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
10399 /* DMA read watermark not used on PCIE */
10400 tp
->dma_rwctrl
|= 0x00180000;
10401 } else if (!(tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
10402 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
10403 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)
10404 tp
->dma_rwctrl
|= 0x003f0000;
10406 tp
->dma_rwctrl
|= 0x003f000f;
10408 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
10409 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
10410 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
10412 /* If the 5704 is behind the EPB bridge, we can
10413 * do the less restrictive ONE_DMA workaround for
10414 * better performance.
10416 if ((tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
) &&
10417 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
10418 tp
->dma_rwctrl
|= 0x8000;
10419 else if (ccval
== 0x6 || ccval
== 0x7)
10420 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
10422 /* Set bit 23 to enable PCIX hw bug fix */
10423 tp
->dma_rwctrl
|= 0x009f0000;
10424 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
) {
10425 /* 5780 always in PCIX mode */
10426 tp
->dma_rwctrl
|= 0x00144000;
10427 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
10428 /* 5714 always in PCIX mode */
10429 tp
->dma_rwctrl
|= 0x00148000;
10431 tp
->dma_rwctrl
|= 0x001b000f;
10435 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
10436 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
10437 tp
->dma_rwctrl
&= 0xfffffff0;
10439 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10440 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
10441 /* Remove this if it causes problems for some boards. */
10442 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
10444 /* On 5700/5701 chips, we need to set this bit.
10445 * Otherwise the chip will issue cacheline transactions
10446 * to streamable DMA memory with not all the byte
10447 * enables turned on. This is an error on several
10448 * RISC PCI controllers, in particular sparc64.
10450 * On 5703/5704 chips, this bit has been reassigned
10451 * a different meaning. In particular, it is used
10452 * on those chips to enable a PCI-X workaround.
10454 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
10457 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
10460 /* Unneeded, already done by tg3_get_invariants. */
10461 tg3_switch_clocks(tp
);
10465 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
10466 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
10469 /* It is best to perform DMA test with maximum write burst size
10470 * to expose the 5700/5701 write DMA bug.
10472 saved_dma_rwctrl
= tp
->dma_rwctrl
;
10473 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
10474 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
10479 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
10482 /* Send the buffer to the chip. */
10483 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 1);
10485 printk(KERN_ERR
"tg3_test_dma() Write the buffer failed %d\n", ret
);
10490 /* validate data reached card RAM correctly. */
10491 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
10493 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
10494 if (le32_to_cpu(val
) != p
[i
]) {
10495 printk(KERN_ERR
" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val
, i
);
10496 /* ret = -ENODEV here? */
10501 /* Now read it back. */
10502 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 0);
10504 printk(KERN_ERR
"tg3_test_dma() Read the buffer failed %d\n", ret
);
10510 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
10514 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
10515 DMA_RWCTRL_WRITE_BNDRY_16
) {
10516 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
10517 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
10518 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
10521 printk(KERN_ERR
"tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p
[i
], i
);
10527 if (i
== (TEST_BUFFER_SIZE
/ sizeof(u32
))) {
10533 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
10534 DMA_RWCTRL_WRITE_BNDRY_16
) {
10535 static struct pci_device_id dma_wait_state_chipsets
[] = {
10536 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
,
10537 PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
10541 /* DMA test passed without adjusting DMA boundary,
10542 * now look for chipsets that are known to expose the
10543 * DMA bug without failing the test.
10545 if (pci_dev_present(dma_wait_state_chipsets
)) {
10546 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
10547 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
10550 /* Safe to use the calculated DMA boundary. */
10551 tp
->dma_rwctrl
= saved_dma_rwctrl
;
10553 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
10557 pci_free_consistent(tp
->pdev
, TEST_BUFFER_SIZE
, buf
, buf_dma
);
10562 static void __devinit
tg3_init_link_config(struct tg3
*tp
)
10564 tp
->link_config
.advertising
=
10565 (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
10566 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
10567 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
|
10568 ADVERTISED_Autoneg
| ADVERTISED_MII
);
10569 tp
->link_config
.speed
= SPEED_INVALID
;
10570 tp
->link_config
.duplex
= DUPLEX_INVALID
;
10571 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
10572 netif_carrier_off(tp
->dev
);
10573 tp
->link_config
.active_speed
= SPEED_INVALID
;
10574 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
10575 tp
->link_config
.phy_is_low_power
= 0;
10576 tp
->link_config
.orig_speed
= SPEED_INVALID
;
10577 tp
->link_config
.orig_duplex
= DUPLEX_INVALID
;
10578 tp
->link_config
.orig_autoneg
= AUTONEG_INVALID
;
10581 static void __devinit
tg3_init_bufmgr_config(struct tg3
*tp
)
10583 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
10584 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
10585 DEFAULT_MB_RDMA_LOW_WATER_5705
;
10586 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
10587 DEFAULT_MB_MACRX_LOW_WATER_5705
;
10588 tp
->bufmgr_config
.mbuf_high_water
=
10589 DEFAULT_MB_HIGH_WATER_5705
;
10591 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
10592 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780
;
10593 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
10594 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780
;
10595 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
10596 DEFAULT_MB_HIGH_WATER_JUMBO_5780
;
10598 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
10599 DEFAULT_MB_RDMA_LOW_WATER
;
10600 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
10601 DEFAULT_MB_MACRX_LOW_WATER
;
10602 tp
->bufmgr_config
.mbuf_high_water
=
10603 DEFAULT_MB_HIGH_WATER
;
10605 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
10606 DEFAULT_MB_RDMA_LOW_WATER_JUMBO
;
10607 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
10608 DEFAULT_MB_MACRX_LOW_WATER_JUMBO
;
10609 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
10610 DEFAULT_MB_HIGH_WATER_JUMBO
;
10613 tp
->bufmgr_config
.dma_low_water
= DEFAULT_DMA_LOW_WATER
;
10614 tp
->bufmgr_config
.dma_high_water
= DEFAULT_DMA_HIGH_WATER
;
10617 static char * __devinit
tg3_phy_string(struct tg3
*tp
)
10619 switch (tp
->phy_id
& PHY_ID_MASK
) {
10620 case PHY_ID_BCM5400
: return "5400";
10621 case PHY_ID_BCM5401
: return "5401";
10622 case PHY_ID_BCM5411
: return "5411";
10623 case PHY_ID_BCM5701
: return "5701";
10624 case PHY_ID_BCM5703
: return "5703";
10625 case PHY_ID_BCM5704
: return "5704";
10626 case PHY_ID_BCM5705
: return "5705";
10627 case PHY_ID_BCM5750
: return "5750";
10628 case PHY_ID_BCM5752
: return "5752";
10629 case PHY_ID_BCM5714
: return "5714";
10630 case PHY_ID_BCM5780
: return "5780";
10631 case PHY_ID_BCM8002
: return "8002/serdes";
10632 case 0: return "serdes";
10633 default: return "unknown";
10637 static char * __devinit
tg3_bus_string(struct tg3
*tp
, char *str
)
10639 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
10640 strcpy(str
, "PCI Express");
10642 } else if (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) {
10643 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
) & 0x1f;
10645 strcpy(str
, "PCIX:");
10647 if ((clock_ctrl
== 7) ||
10648 ((tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
) ==
10649 GRC_MISC_CFG_BOARD_ID_5704CIOBE
))
10650 strcat(str
, "133MHz");
10651 else if (clock_ctrl
== 0)
10652 strcat(str
, "33MHz");
10653 else if (clock_ctrl
== 2)
10654 strcat(str
, "50MHz");
10655 else if (clock_ctrl
== 4)
10656 strcat(str
, "66MHz");
10657 else if (clock_ctrl
== 6)
10658 strcat(str
, "100MHz");
10660 strcpy(str
, "PCI:");
10661 if (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
)
10662 strcat(str
, "66MHz");
10664 strcat(str
, "33MHz");
10666 if (tp
->tg3_flags
& TG3_FLAG_PCI_32BIT
)
10667 strcat(str
, ":32-bit");
10669 strcat(str
, ":64-bit");
10673 static struct pci_dev
* __devinit
tg3_find_peer(struct tg3
*tp
)
10675 struct pci_dev
*peer
;
10676 unsigned int func
, devnr
= tp
->pdev
->devfn
& ~7;
10678 for (func
= 0; func
< 8; func
++) {
10679 peer
= pci_get_slot(tp
->pdev
->bus
, devnr
| func
);
10680 if (peer
&& peer
!= tp
->pdev
)
10684 /* 5704 can be configured in single-port mode, set peer to
10685 * tp->pdev in that case.
10693 * We don't need to keep the refcount elevated; there's no way
10694 * to remove one half of this device without removing the other
10701 static void __devinit
tg3_init_coal(struct tg3
*tp
)
10703 struct ethtool_coalesce
*ec
= &tp
->coal
;
10705 memset(ec
, 0, sizeof(*ec
));
10706 ec
->cmd
= ETHTOOL_GCOALESCE
;
10707 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS
;
10708 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS
;
10709 ec
->rx_max_coalesced_frames
= LOW_RXMAX_FRAMES
;
10710 ec
->tx_max_coalesced_frames
= LOW_TXMAX_FRAMES
;
10711 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT
;
10712 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT
;
10713 ec
->rx_max_coalesced_frames_irq
= DEFAULT_RXCOAL_MAXF_INT
;
10714 ec
->tx_max_coalesced_frames_irq
= DEFAULT_TXCOAL_MAXF_INT
;
10715 ec
->stats_block_coalesce_usecs
= DEFAULT_STAT_COAL_TICKS
;
10717 if (tp
->coalesce_mode
& (HOSTCC_MODE_CLRTICK_RXBD
|
10718 HOSTCC_MODE_CLRTICK_TXBD
)) {
10719 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS_CLRTCKS
;
10720 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT_CLRTCKS
;
10721 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS_CLRTCKS
;
10722 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT_CLRTCKS
;
10725 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
10726 ec
->rx_coalesce_usecs_irq
= 0;
10727 ec
->tx_coalesce_usecs_irq
= 0;
10728 ec
->stats_block_coalesce_usecs
= 0;
10732 static int __devinit
tg3_init_one(struct pci_dev
*pdev
,
10733 const struct pci_device_id
*ent
)
10735 static int tg3_version_printed
= 0;
10736 unsigned long tg3reg_base
, tg3reg_len
;
10737 struct net_device
*dev
;
10739 int i
, err
, pm_cap
;
10741 u64 dma_mask
, persist_dma_mask
;
10743 if (tg3_version_printed
++ == 0)
10744 printk(KERN_INFO
"%s", version
);
10746 err
= pci_enable_device(pdev
);
10748 printk(KERN_ERR PFX
"Cannot enable PCI device, "
10753 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
10754 printk(KERN_ERR PFX
"Cannot find proper PCI device "
10755 "base address, aborting.\n");
10757 goto err_out_disable_pdev
;
10760 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
10762 printk(KERN_ERR PFX
"Cannot obtain PCI resources, "
10764 goto err_out_disable_pdev
;
10767 pci_set_master(pdev
);
10769 /* Find power-management capability. */
10770 pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
10772 printk(KERN_ERR PFX
"Cannot find PowerManagement capability, "
10775 goto err_out_free_res
;
10778 tg3reg_base
= pci_resource_start(pdev
, 0);
10779 tg3reg_len
= pci_resource_len(pdev
, 0);
10781 dev
= alloc_etherdev(sizeof(*tp
));
10783 printk(KERN_ERR PFX
"Etherdev alloc failed, aborting.\n");
10785 goto err_out_free_res
;
10788 SET_MODULE_OWNER(dev
);
10789 SET_NETDEV_DEV(dev
, &pdev
->dev
);
10791 dev
->features
|= NETIF_F_LLTX
;
10792 #if TG3_VLAN_TAG_USED
10793 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
10794 dev
->vlan_rx_register
= tg3_vlan_rx_register
;
10795 dev
->vlan_rx_kill_vid
= tg3_vlan_rx_kill_vid
;
10798 tp
= netdev_priv(dev
);
10801 tp
->pm_cap
= pm_cap
;
10802 tp
->mac_mode
= TG3_DEF_MAC_MODE
;
10803 tp
->rx_mode
= TG3_DEF_RX_MODE
;
10804 tp
->tx_mode
= TG3_DEF_TX_MODE
;
10805 tp
->mi_mode
= MAC_MI_MODE_BASE
;
10807 tp
->msg_enable
= tg3_debug
;
10809 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
10811 /* The word/byte swap controls here control register access byte
10812 * swapping. DMA data byte swapping is controlled in the GRC_MODE
10815 tp
->misc_host_ctrl
=
10816 MISC_HOST_CTRL_MASK_PCI_INT
|
10817 MISC_HOST_CTRL_WORD_SWAP
|
10818 MISC_HOST_CTRL_INDIR_ACCESS
|
10819 MISC_HOST_CTRL_PCISTATE_RW
;
10821 /* The NONFRM (non-frame) byte/word swap controls take effect
10822 * on descriptor entries, anything which isn't packet data.
10824 * The StrongARM chips on the board (one for tx, one for rx)
10825 * are running in big-endian mode.
10827 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
10828 GRC_MODE_WSWAP_NONFRM_DATA
);
10829 #ifdef __BIG_ENDIAN
10830 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
10832 spin_lock_init(&tp
->lock
);
10833 spin_lock_init(&tp
->tx_lock
);
10834 spin_lock_init(&tp
->indirect_lock
);
10835 INIT_WORK(&tp
->reset_task
, tg3_reset_task
, tp
);
10837 tp
->regs
= ioremap_nocache(tg3reg_base
, tg3reg_len
);
10838 if (tp
->regs
== 0UL) {
10839 printk(KERN_ERR PFX
"Cannot map device registers, "
10842 goto err_out_free_dev
;
10845 tg3_init_link_config(tp
);
10847 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
10848 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
10849 tp
->tx_pending
= TG3_DEF_TX_RING_PENDING
;
10851 dev
->open
= tg3_open
;
10852 dev
->stop
= tg3_close
;
10853 dev
->get_stats
= tg3_get_stats
;
10854 dev
->set_multicast_list
= tg3_set_rx_mode
;
10855 dev
->set_mac_address
= tg3_set_mac_addr
;
10856 dev
->do_ioctl
= tg3_ioctl
;
10857 dev
->tx_timeout
= tg3_tx_timeout
;
10858 dev
->poll
= tg3_poll
;
10859 dev
->ethtool_ops
= &tg3_ethtool_ops
;
10861 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
10862 dev
->change_mtu
= tg3_change_mtu
;
10863 dev
->irq
= pdev
->irq
;
10864 #ifdef CONFIG_NET_POLL_CONTROLLER
10865 dev
->poll_controller
= tg3_poll_controller
;
10868 err
= tg3_get_invariants(tp
);
10870 printk(KERN_ERR PFX
"Problem fetching invariants of chip, "
10872 goto err_out_iounmap
;
10875 /* The EPB bridge inside 5714, 5715, and 5780 and any
10876 * device behind the EPB cannot support DMA addresses > 40-bit.
10877 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
10878 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
10879 * do DMA address check in tg3_start_xmit().
10881 if (tp
->tg3_flags2
& TG3_FLG2_IS_5788
)
10882 persist_dma_mask
= dma_mask
= DMA_32BIT_MASK
;
10883 else if (tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
) {
10884 persist_dma_mask
= dma_mask
= DMA_40BIT_MASK
;
10885 #ifdef CONFIG_HIGHMEM
10886 dma_mask
= DMA_64BIT_MASK
;
10889 persist_dma_mask
= dma_mask
= DMA_64BIT_MASK
;
10891 /* Configure DMA attributes. */
10892 if (dma_mask
> DMA_32BIT_MASK
) {
10893 err
= pci_set_dma_mask(pdev
, dma_mask
);
10895 dev
->features
|= NETIF_F_HIGHDMA
;
10896 err
= pci_set_consistent_dma_mask(pdev
,
10899 printk(KERN_ERR PFX
"Unable to obtain 64 bit "
10900 "DMA for consistent allocations\n");
10901 goto err_out_iounmap
;
10905 if (err
|| dma_mask
== DMA_32BIT_MASK
) {
10906 err
= pci_set_dma_mask(pdev
, DMA_32BIT_MASK
);
10908 printk(KERN_ERR PFX
"No usable DMA configuration, "
10910 goto err_out_iounmap
;
10914 tg3_init_bufmgr_config(tp
);
10916 #if TG3_TSO_SUPPORT != 0
10917 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
10918 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
10920 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10921 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
10922 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
||
10923 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0) {
10924 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_CAPABLE
;
10926 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
10929 /* TSO is on by default on chips that support hardware TSO.
10930 * Firmware TSO on older chips gives lower performance, so it
10931 * is off by default, but can be enabled using ethtool.
10933 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
10934 dev
->features
|= NETIF_F_TSO
;
10938 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
&&
10939 !(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) &&
10940 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
)) {
10941 tp
->tg3_flags2
|= TG3_FLG2_MAX_RXPEND_64
;
10942 tp
->rx_pending
= 63;
10945 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
10946 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
))
10947 tp
->pdev_peer
= tg3_find_peer(tp
);
10949 err
= tg3_get_device_address(tp
);
10951 printk(KERN_ERR PFX
"Could not obtain valid ethernet address, "
10953 goto err_out_iounmap
;
10957 * Reset chip in case UNDI or EFI driver did not shutdown
10958 * DMA self test will enable WDMAC and we'll see (spurious)
10959 * pending DMA on the PCI bus at that point.
10961 if ((tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
) ||
10962 (tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
10963 pci_save_state(tp
->pdev
);
10964 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
10965 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10968 err
= tg3_test_dma(tp
);
10970 printk(KERN_ERR PFX
"DMA engine test failed, aborting.\n");
10971 goto err_out_iounmap
;
10974 /* Tigon3 can do ipv4 only... and some chips have buggy
10977 if ((tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) == 0) {
10978 dev
->features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
;
10979 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
10981 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
10983 /* flow control autonegotiation is default behavior */
10984 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
10988 /* Now that we have fully setup the chip, save away a snapshot
10989 * of the PCI config space. We need to restore this after
10990 * GRC_MISC_CFG core clock resets and some resume events.
10992 pci_save_state(tp
->pdev
);
10994 err
= register_netdev(dev
);
10996 printk(KERN_ERR PFX
"Cannot register net device, "
10998 goto err_out_iounmap
;
11001 pci_set_drvdata(pdev
, dev
);
11003 printk(KERN_INFO
"%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11005 tp
->board_part_number
,
11006 tp
->pci_chip_rev_id
,
11007 tg3_phy_string(tp
),
11008 tg3_bus_string(tp
, str
),
11009 (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
) ? "10/100" : "10/100/1000");
11011 for (i
= 0; i
< 6; i
++)
11012 printk("%2.2x%c", dev
->dev_addr
[i
],
11013 i
== 5 ? '\n' : ':');
11015 printk(KERN_INFO
"%s: RXcsums[%d] LinkChgREG[%d] "
11016 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11019 (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0,
11020 (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) != 0,
11021 (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
) != 0,
11022 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0,
11023 (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
) != 0,
11024 (tp
->tg3_flags2
& TG3_FLG2_NO_ETH_WIRE_SPEED
) == 0,
11025 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) != 0);
11026 printk(KERN_INFO
"%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11027 dev
->name
, tp
->dma_rwctrl
,
11028 (pdev
->dma_mask
== DMA_32BIT_MASK
) ? 32 :
11029 (((u64
) pdev
->dma_mask
== DMA_40BIT_MASK
) ? 40 : 64));
11043 pci_release_regions(pdev
);
11045 err_out_disable_pdev
:
11046 pci_disable_device(pdev
);
11047 pci_set_drvdata(pdev
, NULL
);
11051 static void __devexit
tg3_remove_one(struct pci_dev
*pdev
)
11053 struct net_device
*dev
= pci_get_drvdata(pdev
);
11056 struct tg3
*tp
= netdev_priv(dev
);
11058 flush_scheduled_work();
11059 unregister_netdev(dev
);
11065 pci_release_regions(pdev
);
11066 pci_disable_device(pdev
);
11067 pci_set_drvdata(pdev
, NULL
);
11071 static int tg3_suspend(struct pci_dev
*pdev
, pm_message_t state
)
11073 struct net_device
*dev
= pci_get_drvdata(pdev
);
11074 struct tg3
*tp
= netdev_priv(dev
);
11077 if (!netif_running(dev
))
11080 flush_scheduled_work();
11081 tg3_netif_stop(tp
);
11083 del_timer_sync(&tp
->timer
);
11085 tg3_full_lock(tp
, 1);
11086 tg3_disable_ints(tp
);
11087 tg3_full_unlock(tp
);
11089 netif_device_detach(dev
);
11091 tg3_full_lock(tp
, 0);
11092 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11093 tp
->tg3_flags
&= ~TG3_FLAG_INIT_COMPLETE
;
11094 tg3_full_unlock(tp
);
11096 err
= tg3_set_power_state(tp
, pci_choose_state(pdev
, state
));
11098 tg3_full_lock(tp
, 0);
11100 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
11103 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
11104 add_timer(&tp
->timer
);
11106 netif_device_attach(dev
);
11107 tg3_netif_start(tp
);
11109 tg3_full_unlock(tp
);
11115 static int tg3_resume(struct pci_dev
*pdev
)
11117 struct net_device
*dev
= pci_get_drvdata(pdev
);
11118 struct tg3
*tp
= netdev_priv(dev
);
11121 if (!netif_running(dev
))
11124 pci_restore_state(tp
->pdev
);
11126 err
= tg3_set_power_state(tp
, PCI_D0
);
11130 netif_device_attach(dev
);
11132 tg3_full_lock(tp
, 0);
11134 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
11137 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
11138 add_timer(&tp
->timer
);
11140 tg3_netif_start(tp
);
11142 tg3_full_unlock(tp
);
11147 static struct pci_driver tg3_driver
= {
11148 .name
= DRV_MODULE_NAME
,
11149 .id_table
= tg3_pci_tbl
,
11150 .probe
= tg3_init_one
,
11151 .remove
= __devexit_p(tg3_remove_one
),
11152 .suspend
= tg3_suspend
,
11153 .resume
= tg3_resume
11156 static int __init
tg3_init(void)
11158 return pci_module_init(&tg3_driver
);
11161 static void __exit
tg3_cleanup(void)
11163 pci_unregister_driver(&tg3_driver
);
11166 module_init(tg3_init
);
11167 module_exit(tg3_cleanup
);